2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
51 #include <asm/system.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
57 #include <asm/idprom.h>
66 /* Functions & macros to verify TG3_FLAGS types */
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70 return test_bit(flag, bits);
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80 clear_bit(flag, bits);
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define DRV_MODULE_NAME "tg3"
92 #define TG3_MIN_NUM 120
93 #define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE "August 18, 2011"
97 #define RESET_KIND_SHUTDOWN 0
98 #define RESET_KIND_INIT 1
99 #define RESET_KIND_SUSPEND 2
101 #define TG3_DEF_RX_MODE 0
102 #define TG3_DEF_TX_MODE 0
103 #define TG3_DEF_MSG_ENABLE \
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
115 /* length of time before we decide the hardware is borked,
116 * and dev->tx_timeout() should be called to fix the problem
119 #define TG3_TX_TIMEOUT (5 * HZ)
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU 60
123 #define TG3_MAX_MTU(tp) \
124 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127 * You can't change the ring sizes, but you can change where you place
128 * them in the NIC onboard memory.
130 #define TG3_RX_STD_RING_SIZE(tp) \
131 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING 200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
138 #define TG3_RSS_INDIR_TBL_SIZE 128
140 /* Do not place this n-ring entries value into the tp struct itself,
141 * we really want to expose these constants to GCC so that modulo et
142 * al. operations are done with shifts and masks instead of with
143 * hw multiply/modulo instructions. Another solution would be to
144 * replace things like '% foo' with '& (foo - 1)'.
147 #define TG3_TX_RING_SIZE 512
148 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
150 #define TG3_RX_STD_RING_BYTES(tp) \
151 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152 #define TG3_RX_JMB_RING_BYTES(tp) \
153 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154 #define TG3_RX_RCB_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
156 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
158 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
160 #define TG3_DMA_BYTE_ENAB 64
162 #define TG3_RX_STD_DMA_SZ 1536
163 #define TG3_RX_JMB_DMA_SZ 9046
165 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
167 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
170 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
173 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
176 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
177 * that are at least dword aligned when used in PCIX mode. The driver
178 * works around this bug by double copying the packet. This workaround
179 * is built into the normal double copy length check for efficiency.
181 * However, the double copy is only necessary on those architectures
182 * where unaligned memory accesses are inefficient. For those architectures
183 * where unaligned memory accesses incur little penalty, we can reintegrate
184 * the 5701 in the normal rx path. Doing so saves a device structure
185 * dereference by hardcoding the double copy threshold in place.
187 #define TG3_RX_COPY_THRESHOLD 256
188 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
191 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
194 #if (NET_IP_ALIGN != 0)
195 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
197 #define TG3_RX_OFFSET(tp) 0
200 /* minimum number of free TX descriptors required to wake up TX process */
201 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
202 #define TG3_TX_BD_DMA_MAX 4096
204 #define TG3_RAW_IP_ALIGN 2
206 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
208 #define FIRMWARE_TG3 "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
212 static char version[] __devinitdata =
213 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
223 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
314 static const struct {
315 const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
319 { "rx_ucast_packets" },
320 { "rx_mcast_packets" },
321 { "rx_bcast_packets" },
323 { "rx_align_errors" },
324 { "rx_xon_pause_rcvd" },
325 { "rx_xoff_pause_rcvd" },
326 { "rx_mac_ctrl_rcvd" },
327 { "rx_xoff_entered" },
328 { "rx_frame_too_long_errors" },
330 { "rx_undersize_packets" },
331 { "rx_in_length_errors" },
332 { "rx_out_length_errors" },
333 { "rx_64_or_less_octet_packets" },
334 { "rx_65_to_127_octet_packets" },
335 { "rx_128_to_255_octet_packets" },
336 { "rx_256_to_511_octet_packets" },
337 { "rx_512_to_1023_octet_packets" },
338 { "rx_1024_to_1522_octet_packets" },
339 { "rx_1523_to_2047_octet_packets" },
340 { "rx_2048_to_4095_octet_packets" },
341 { "rx_4096_to_8191_octet_packets" },
342 { "rx_8192_to_9022_octet_packets" },
349 { "tx_flow_control" },
351 { "tx_single_collisions" },
352 { "tx_mult_collisions" },
354 { "tx_excessive_collisions" },
355 { "tx_late_collisions" },
356 { "tx_collide_2times" },
357 { "tx_collide_3times" },
358 { "tx_collide_4times" },
359 { "tx_collide_5times" },
360 { "tx_collide_6times" },
361 { "tx_collide_7times" },
362 { "tx_collide_8times" },
363 { "tx_collide_9times" },
364 { "tx_collide_10times" },
365 { "tx_collide_11times" },
366 { "tx_collide_12times" },
367 { "tx_collide_13times" },
368 { "tx_collide_14times" },
369 { "tx_collide_15times" },
370 { "tx_ucast_packets" },
371 { "tx_mcast_packets" },
372 { "tx_bcast_packets" },
373 { "tx_carrier_sense_errors" },
377 { "dma_writeq_full" },
378 { "dma_write_prioq_full" },
382 { "rx_threshold_hit" },
384 { "dma_readq_full" },
385 { "dma_read_prioq_full" },
386 { "tx_comp_queue_full" },
388 { "ring_set_send_prod_index" },
389 { "ring_status_update" },
391 { "nic_avoided_irqs" },
392 { "nic_tx_threshold_hit" },
394 { "mbuf_lwm_thresh_hit" },
397 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
400 static const struct {
401 const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403 { "nvram test (online) " },
404 { "link test (online) " },
405 { "register test (offline)" },
406 { "memory test (offline)" },
407 { "mac loopback test (offline)" },
408 { "phy loopback test (offline)" },
409 { "ext loopback test (offline)" },
410 { "interrupt test (offline)" },
413 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
418 writel(val, tp->regs + off);
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
423 return readl(tp->regs + off);
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
428 writel(val, tp->aperegs + off);
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
433 return readl(tp->aperegs + off);
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
440 spin_lock_irqsave(&tp->indirect_lock, flags);
441 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443 spin_unlock_irqrestore(&tp->indirect_lock, flags);
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
448 writel(val, tp->regs + off);
449 readl(tp->regs + off);
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
457 spin_lock_irqsave(&tp->indirect_lock, flags);
458 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460 spin_unlock_irqrestore(&tp->indirect_lock, flags);
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
468 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470 TG3_64BIT_REG_LOW, val);
473 if (off == TG3_RX_STD_PROD_IDX_REG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475 TG3_64BIT_REG_LOW, val);
479 spin_lock_irqsave(&tp->indirect_lock, flags);
480 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482 spin_unlock_irqrestore(&tp->indirect_lock, flags);
484 /* In indirect mode when disabling interrupts, we also need
485 * to clear the interrupt bit in the GRC local ctrl register.
487 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
489 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502 spin_unlock_irqrestore(&tp->indirect_lock, flags);
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507 * where it is unsafe to read back the register without some delay.
508 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
513 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514 /* Non-posted methods */
515 tp->write32(tp, off, val);
518 tg3_write32(tp, off, val);
523 /* Wait again after the read for the posted method to guarantee that
524 * the wait time is met.
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
532 tp->write32_mbox(tp, off, val);
533 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534 tp->read32_mbox(tp, off);
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
539 void __iomem *mbox = tp->regs + off;
541 if (tg3_flag(tp, TXD_MBOX_HWBUG))
543 if (tg3_flag(tp, MBOX_WRITE_REORDER))
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
549 return readl(tp->regs + off + GRCMBOX_BASE);
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
554 writel(val, tp->regs + off + GRCMBOX_BASE);
557 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
563 #define tw32(reg, val) tp->write32(tp, reg, val)
564 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg) tp->read32(tp, reg)
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
576 spin_lock_irqsave(&tp->indirect_lock, flags);
577 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
581 /* Always leave this as zero. */
582 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
584 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585 tw32_f(TG3PCI_MEM_WIN_DATA, val);
587 /* Always leave this as zero. */
588 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
590 spin_unlock_irqrestore(&tp->indirect_lock, flags);
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
603 spin_lock_irqsave(&tp->indirect_lock, flags);
604 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
608 /* Always leave this as zero. */
609 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
611 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612 *val = tr32(TG3PCI_MEM_WIN_DATA);
614 /* Always leave this as zero. */
615 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
617 spin_unlock_irqrestore(&tp->indirect_lock, flags);
620 static void tg3_ape_lock_init(struct tg3 *tp)
625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626 regbase = TG3_APE_LOCK_GRANT;
628 regbase = TG3_APE_PER_LOCK_GRANT;
630 /* Make sure the driver hasn't any stale locks. */
631 for (i = 0; i < 8; i++) {
632 if (i == TG3_APE_LOCK_GPIO)
634 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
637 /* Clear the correct bit of the GPIO lock too. */
639 bit = APE_LOCK_GRANT_DRIVER;
641 bit = 1 << tp->pci_fn;
643 tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
646 static int tg3_ape_lock(struct tg3 *tp, int locknum)
650 u32 status, req, gnt, bit;
652 if (!tg3_flag(tp, ENABLE_APE))
656 case TG3_APE_LOCK_GPIO:
657 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
659 case TG3_APE_LOCK_GRC:
660 case TG3_APE_LOCK_MEM:
666 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
667 req = TG3_APE_LOCK_REQ;
668 gnt = TG3_APE_LOCK_GRANT;
670 req = TG3_APE_PER_LOCK_REQ;
671 gnt = TG3_APE_PER_LOCK_GRANT;
676 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
677 bit = APE_LOCK_REQ_DRIVER;
679 bit = 1 << tp->pci_fn;
681 tg3_ape_write32(tp, req + off, bit);
683 /* Wait for up to 1 millisecond to acquire lock. */
684 for (i = 0; i < 100; i++) {
685 status = tg3_ape_read32(tp, gnt + off);
692 /* Revoke the lock request. */
693 tg3_ape_write32(tp, gnt + off, bit);
700 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
704 if (!tg3_flag(tp, ENABLE_APE))
708 case TG3_APE_LOCK_GPIO:
709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
711 case TG3_APE_LOCK_GRC:
712 case TG3_APE_LOCK_MEM:
718 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
719 gnt = TG3_APE_LOCK_GRANT;
721 gnt = TG3_APE_PER_LOCK_GRANT;
723 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
724 bit = APE_LOCK_GRANT_DRIVER;
726 bit = 1 << tp->pci_fn;
728 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
736 /* NCSI does not support APE events */
737 if (tg3_flag(tp, APE_HAS_NCSI))
740 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
741 if (apedata != APE_SEG_SIG_MAGIC)
744 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
745 if (!(apedata & APE_FW_STATUS_READY))
748 /* Wait for up to 1 millisecond for APE to service previous event. */
749 for (i = 0; i < 10; i++) {
750 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
753 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
755 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
756 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
757 event | APE_EVENT_STATUS_EVENT_PENDING);
759 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
761 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
767 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
768 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
771 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
776 if (!tg3_flag(tp, ENABLE_APE))
780 case RESET_KIND_INIT:
781 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
782 APE_HOST_SEG_SIG_MAGIC);
783 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
784 APE_HOST_SEG_LEN_MAGIC);
785 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
786 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
787 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
788 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
789 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
790 APE_HOST_BEHAV_NO_PHYLOCK);
791 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
792 TG3_APE_HOST_DRVR_STATE_START);
794 event = APE_EVENT_STATUS_STATE_START;
796 case RESET_KIND_SHUTDOWN:
797 /* With the interface we are currently using,
798 * APE does not track driver state. Wiping
799 * out the HOST SEGMENT SIGNATURE forces
800 * the APE to assume OS absent status.
802 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
804 if (device_may_wakeup(&tp->pdev->dev) &&
805 tg3_flag(tp, WOL_ENABLE)) {
806 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
807 TG3_APE_HOST_WOL_SPEED_AUTO);
808 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
810 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
812 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
814 event = APE_EVENT_STATUS_STATE_UNLOAD;
816 case RESET_KIND_SUSPEND:
817 event = APE_EVENT_STATUS_STATE_SUSPEND;
823 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
825 tg3_ape_send_event(tp, event);
828 static void tg3_disable_ints(struct tg3 *tp)
832 tw32(TG3PCI_MISC_HOST_CTRL,
833 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
834 for (i = 0; i < tp->irq_max; i++)
835 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
838 static void tg3_enable_ints(struct tg3 *tp)
845 tw32(TG3PCI_MISC_HOST_CTRL,
846 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
848 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
849 for (i = 0; i < tp->irq_cnt; i++) {
850 struct tg3_napi *tnapi = &tp->napi[i];
852 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
853 if (tg3_flag(tp, 1SHOT_MSI))
854 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
856 tp->coal_now |= tnapi->coal_now;
859 /* Force an initial interrupt */
860 if (!tg3_flag(tp, TAGGED_STATUS) &&
861 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
862 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
864 tw32(HOSTCC_MODE, tp->coal_now);
866 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
869 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
871 struct tg3 *tp = tnapi->tp;
872 struct tg3_hw_status *sblk = tnapi->hw_status;
873 unsigned int work_exists = 0;
875 /* check for phy events */
876 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
877 if (sblk->status & SD_STATUS_LINK_CHG)
880 /* check for RX/TX work to do */
881 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
882 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
889 * similar to tg3_enable_ints, but it accurately determines whether there
890 * is new work pending and can return without flushing the PIO write
891 * which reenables interrupts
893 static void tg3_int_reenable(struct tg3_napi *tnapi)
895 struct tg3 *tp = tnapi->tp;
897 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
900 /* When doing tagged status, this work check is unnecessary.
901 * The last_tag we write above tells the chip which piece of
902 * work we've completed.
904 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
905 tw32(HOSTCC_MODE, tp->coalesce_mode |
906 HOSTCC_MODE_ENABLE | tnapi->coal_now);
909 static void tg3_switch_clocks(struct tg3 *tp)
914 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
917 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
919 orig_clock_ctrl = clock_ctrl;
920 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
921 CLOCK_CTRL_CLKRUN_OENABLE |
923 tp->pci_clock_ctrl = clock_ctrl;
925 if (tg3_flag(tp, 5705_PLUS)) {
926 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
927 tw32_wait_f(TG3PCI_CLOCK_CTRL,
928 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
930 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
931 tw32_wait_f(TG3PCI_CLOCK_CTRL,
933 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
935 tw32_wait_f(TG3PCI_CLOCK_CTRL,
936 clock_ctrl | (CLOCK_CTRL_ALTCLK),
939 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
942 #define PHY_BUSY_LOOPS 5000
944 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
950 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
952 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
958 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
959 MI_COM_PHY_ADDR_MASK);
960 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
961 MI_COM_REG_ADDR_MASK);
962 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
964 tw32_f(MAC_MI_COM, frame_val);
966 loops = PHY_BUSY_LOOPS;
969 frame_val = tr32(MAC_MI_COM);
971 if ((frame_val & MI_COM_BUSY) == 0) {
973 frame_val = tr32(MAC_MI_COM);
981 *val = frame_val & MI_COM_DATA_MASK;
985 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
986 tw32_f(MAC_MI_MODE, tp->mi_mode);
993 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
999 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1000 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1003 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1005 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1009 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1010 MI_COM_PHY_ADDR_MASK);
1011 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1012 MI_COM_REG_ADDR_MASK);
1013 frame_val |= (val & MI_COM_DATA_MASK);
1014 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1016 tw32_f(MAC_MI_COM, frame_val);
1018 loops = PHY_BUSY_LOOPS;
1019 while (loops != 0) {
1021 frame_val = tr32(MAC_MI_COM);
1022 if ((frame_val & MI_COM_BUSY) == 0) {
1024 frame_val = tr32(MAC_MI_COM);
1034 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1035 tw32_f(MAC_MI_MODE, tp->mi_mode);
1042 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1046 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1050 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1054 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1055 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1059 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1065 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1069 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1073 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1077 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1078 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1082 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1088 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1092 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1094 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1099 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1103 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1105 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1110 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1114 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1115 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1116 MII_TG3_AUXCTL_SHDWSEL_MISC);
1118 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1123 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1125 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1126 set |= MII_TG3_AUXCTL_MISC_WREN;
1128 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1132 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1133 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1134 MII_TG3_AUXCTL_ACTL_TX_6DB)
1136 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1137 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1138 MII_TG3_AUXCTL_ACTL_TX_6DB);
1140 static int tg3_bmcr_reset(struct tg3 *tp)
1145 /* OK, reset it, and poll the BMCR_RESET bit until it
1146 * clears or we time out.
1148 phy_control = BMCR_RESET;
1149 err = tg3_writephy(tp, MII_BMCR, phy_control);
1155 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1159 if ((phy_control & BMCR_RESET) == 0) {
1171 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1173 struct tg3 *tp = bp->priv;
1176 spin_lock_bh(&tp->lock);
1178 if (tg3_readphy(tp, reg, &val))
1181 spin_unlock_bh(&tp->lock);
1186 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1188 struct tg3 *tp = bp->priv;
1191 spin_lock_bh(&tp->lock);
1193 if (tg3_writephy(tp, reg, val))
1196 spin_unlock_bh(&tp->lock);
1201 static int tg3_mdio_reset(struct mii_bus *bp)
1206 static void tg3_mdio_config_5785(struct tg3 *tp)
1209 struct phy_device *phydev;
1211 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1212 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1213 case PHY_ID_BCM50610:
1214 case PHY_ID_BCM50610M:
1215 val = MAC_PHYCFG2_50610_LED_MODES;
1217 case PHY_ID_BCMAC131:
1218 val = MAC_PHYCFG2_AC131_LED_MODES;
1220 case PHY_ID_RTL8211C:
1221 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1223 case PHY_ID_RTL8201E:
1224 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1230 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1231 tw32(MAC_PHYCFG2, val);
1233 val = tr32(MAC_PHYCFG1);
1234 val &= ~(MAC_PHYCFG1_RGMII_INT |
1235 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1236 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1237 tw32(MAC_PHYCFG1, val);
1242 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1243 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1244 MAC_PHYCFG2_FMODE_MASK_MASK |
1245 MAC_PHYCFG2_GMODE_MASK_MASK |
1246 MAC_PHYCFG2_ACT_MASK_MASK |
1247 MAC_PHYCFG2_QUAL_MASK_MASK |
1248 MAC_PHYCFG2_INBAND_ENABLE;
1250 tw32(MAC_PHYCFG2, val);
1252 val = tr32(MAC_PHYCFG1);
1253 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1254 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1255 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1256 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1257 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1258 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1259 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1261 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1262 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1263 tw32(MAC_PHYCFG1, val);
1265 val = tr32(MAC_EXT_RGMII_MODE);
1266 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1267 MAC_RGMII_MODE_RX_QUALITY |
1268 MAC_RGMII_MODE_RX_ACTIVITY |
1269 MAC_RGMII_MODE_RX_ENG_DET |
1270 MAC_RGMII_MODE_TX_ENABLE |
1271 MAC_RGMII_MODE_TX_LOWPWR |
1272 MAC_RGMII_MODE_TX_RESET);
1273 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1274 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1275 val |= MAC_RGMII_MODE_RX_INT_B |
1276 MAC_RGMII_MODE_RX_QUALITY |
1277 MAC_RGMII_MODE_RX_ACTIVITY |
1278 MAC_RGMII_MODE_RX_ENG_DET;
1279 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1280 val |= MAC_RGMII_MODE_TX_ENABLE |
1281 MAC_RGMII_MODE_TX_LOWPWR |
1282 MAC_RGMII_MODE_TX_RESET;
1284 tw32(MAC_EXT_RGMII_MODE, val);
1287 static void tg3_mdio_start(struct tg3 *tp)
1289 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1290 tw32_f(MAC_MI_MODE, tp->mi_mode);
1293 if (tg3_flag(tp, MDIOBUS_INITED) &&
1294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1295 tg3_mdio_config_5785(tp);
1298 static int tg3_mdio_init(struct tg3 *tp)
1302 struct phy_device *phydev;
1304 if (tg3_flag(tp, 5717_PLUS)) {
1307 tp->phy_addr = tp->pci_fn + 1;
1309 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1310 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1312 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1313 TG3_CPMU_PHY_STRAP_IS_SERDES;
1317 tp->phy_addr = TG3_PHY_MII_ADDR;
1321 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1324 tp->mdio_bus = mdiobus_alloc();
1325 if (tp->mdio_bus == NULL)
1328 tp->mdio_bus->name = "tg3 mdio bus";
1329 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1330 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1331 tp->mdio_bus->priv = tp;
1332 tp->mdio_bus->parent = &tp->pdev->dev;
1333 tp->mdio_bus->read = &tg3_mdio_read;
1334 tp->mdio_bus->write = &tg3_mdio_write;
1335 tp->mdio_bus->reset = &tg3_mdio_reset;
1336 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1337 tp->mdio_bus->irq = &tp->mdio_irq[0];
1339 for (i = 0; i < PHY_MAX_ADDR; i++)
1340 tp->mdio_bus->irq[i] = PHY_POLL;
1342 /* The bus registration will look for all the PHYs on the mdio bus.
1343 * Unfortunately, it does not ensure the PHY is powered up before
1344 * accessing the PHY ID registers. A chip reset is the
1345 * quickest way to bring the device back to an operational state..
1347 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1350 i = mdiobus_register(tp->mdio_bus);
1352 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1353 mdiobus_free(tp->mdio_bus);
1357 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1359 if (!phydev || !phydev->drv) {
1360 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1361 mdiobus_unregister(tp->mdio_bus);
1362 mdiobus_free(tp->mdio_bus);
1366 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1367 case PHY_ID_BCM57780:
1368 phydev->interface = PHY_INTERFACE_MODE_GMII;
1369 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1371 case PHY_ID_BCM50610:
1372 case PHY_ID_BCM50610M:
1373 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1374 PHY_BRCM_RX_REFCLK_UNUSED |
1375 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1376 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1377 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1378 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1379 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1380 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1381 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1382 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1384 case PHY_ID_RTL8211C:
1385 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1387 case PHY_ID_RTL8201E:
1388 case PHY_ID_BCMAC131:
1389 phydev->interface = PHY_INTERFACE_MODE_MII;
1390 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1391 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1395 tg3_flag_set(tp, MDIOBUS_INITED);
1397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1398 tg3_mdio_config_5785(tp);
1403 static void tg3_mdio_fini(struct tg3 *tp)
1405 if (tg3_flag(tp, MDIOBUS_INITED)) {
1406 tg3_flag_clear(tp, MDIOBUS_INITED);
1407 mdiobus_unregister(tp->mdio_bus);
1408 mdiobus_free(tp->mdio_bus);
1412 /* tp->lock is held. */
1413 static inline void tg3_generate_fw_event(struct tg3 *tp)
1417 val = tr32(GRC_RX_CPU_EVENT);
1418 val |= GRC_RX_CPU_DRIVER_EVENT;
1419 tw32_f(GRC_RX_CPU_EVENT, val);
1421 tp->last_event_jiffies = jiffies;
1424 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1426 /* tp->lock is held. */
1427 static void tg3_wait_for_event_ack(struct tg3 *tp)
1430 unsigned int delay_cnt;
1433 /* If enough time has passed, no wait is necessary. */
1434 time_remain = (long)(tp->last_event_jiffies + 1 +
1435 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1437 if (time_remain < 0)
1440 /* Check if we can shorten the wait time. */
1441 delay_cnt = jiffies_to_usecs(time_remain);
1442 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1443 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1444 delay_cnt = (delay_cnt >> 3) + 1;
1446 for (i = 0; i < delay_cnt; i++) {
1447 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1453 /* tp->lock is held. */
1454 static void tg3_ump_link_report(struct tg3 *tp)
1459 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1462 tg3_wait_for_event_ack(tp);
1464 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1466 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1469 if (!tg3_readphy(tp, MII_BMCR, ®))
1471 if (!tg3_readphy(tp, MII_BMSR, ®))
1472 val |= (reg & 0xffff);
1473 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1476 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1478 if (!tg3_readphy(tp, MII_LPA, ®))
1479 val |= (reg & 0xffff);
1480 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1483 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1484 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1486 if (!tg3_readphy(tp, MII_STAT1000, ®))
1487 val |= (reg & 0xffff);
1489 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1491 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1495 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1497 tg3_generate_fw_event(tp);
1500 /* tp->lock is held. */
1501 static void tg3_stop_fw(struct tg3 *tp)
1503 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1504 /* Wait for RX cpu to ACK the previous event. */
1505 tg3_wait_for_event_ack(tp);
1507 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1509 tg3_generate_fw_event(tp);
1511 /* Wait for RX cpu to ACK this event. */
1512 tg3_wait_for_event_ack(tp);
1516 /* tp->lock is held. */
1517 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1519 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1520 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1522 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1524 case RESET_KIND_INIT:
1525 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1529 case RESET_KIND_SHUTDOWN:
1530 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1534 case RESET_KIND_SUSPEND:
1535 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1544 if (kind == RESET_KIND_INIT ||
1545 kind == RESET_KIND_SUSPEND)
1546 tg3_ape_driver_state_change(tp, kind);
1549 /* tp->lock is held. */
1550 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1552 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1554 case RESET_KIND_INIT:
1555 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1556 DRV_STATE_START_DONE);
1559 case RESET_KIND_SHUTDOWN:
1560 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1561 DRV_STATE_UNLOAD_DONE);
1569 if (kind == RESET_KIND_SHUTDOWN)
1570 tg3_ape_driver_state_change(tp, kind);
1573 /* tp->lock is held. */
1574 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1576 if (tg3_flag(tp, ENABLE_ASF)) {
1578 case RESET_KIND_INIT:
1579 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1583 case RESET_KIND_SHUTDOWN:
1584 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1588 case RESET_KIND_SUSPEND:
1589 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1599 static int tg3_poll_fw(struct tg3 *tp)
1604 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1605 /* Wait up to 20ms for init done. */
1606 for (i = 0; i < 200; i++) {
1607 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1614 /* Wait for firmware initialization to complete. */
1615 for (i = 0; i < 100000; i++) {
1616 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1617 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1622 /* Chip might not be fitted with firmware. Some Sun onboard
1623 * parts are configured like that. So don't signal the timeout
1624 * of the above loop as an error, but do report the lack of
1625 * running firmware once.
1627 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1628 tg3_flag_set(tp, NO_FWARE_REPORTED);
1630 netdev_info(tp->dev, "No firmware running\n");
1633 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1634 /* The 57765 A0 needs a little more
1635 * time to do some important work.
1643 static void tg3_link_report(struct tg3 *tp)
1645 if (!netif_carrier_ok(tp->dev)) {
1646 netif_info(tp, link, tp->dev, "Link is down\n");
1647 tg3_ump_link_report(tp);
1648 } else if (netif_msg_link(tp)) {
1649 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1650 (tp->link_config.active_speed == SPEED_1000 ?
1652 (tp->link_config.active_speed == SPEED_100 ?
1654 (tp->link_config.active_duplex == DUPLEX_FULL ?
1657 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1658 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1660 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1664 netdev_info(tp->dev, "EEE is %s\n",
1665 tp->setlpicnt ? "enabled" : "disabled");
1667 tg3_ump_link_report(tp);
1671 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1675 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1676 miireg = ADVERTISE_PAUSE_CAP;
1677 else if (flow_ctrl & FLOW_CTRL_TX)
1678 miireg = ADVERTISE_PAUSE_ASYM;
1679 else if (flow_ctrl & FLOW_CTRL_RX)
1680 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1687 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1691 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1692 miireg = ADVERTISE_1000XPAUSE;
1693 else if (flow_ctrl & FLOW_CTRL_TX)
1694 miireg = ADVERTISE_1000XPSE_ASYM;
1695 else if (flow_ctrl & FLOW_CTRL_RX)
1696 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1703 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1707 if (lcladv & ADVERTISE_1000XPAUSE) {
1708 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1709 if (rmtadv & LPA_1000XPAUSE)
1710 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1714 if (rmtadv & LPA_1000XPAUSE)
1715 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1717 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1718 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1725 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1729 u32 old_rx_mode = tp->rx_mode;
1730 u32 old_tx_mode = tp->tx_mode;
1732 if (tg3_flag(tp, USE_PHYLIB))
1733 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1735 autoneg = tp->link_config.autoneg;
1737 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1738 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1739 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1741 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1743 flowctrl = tp->link_config.flowctrl;
1745 tp->link_config.active_flowctrl = flowctrl;
1747 if (flowctrl & FLOW_CTRL_RX)
1748 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1750 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1752 if (old_rx_mode != tp->rx_mode)
1753 tw32_f(MAC_RX_MODE, tp->rx_mode);
1755 if (flowctrl & FLOW_CTRL_TX)
1756 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1758 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1760 if (old_tx_mode != tp->tx_mode)
1761 tw32_f(MAC_TX_MODE, tp->tx_mode);
1764 static void tg3_adjust_link(struct net_device *dev)
1766 u8 oldflowctrl, linkmesg = 0;
1767 u32 mac_mode, lcl_adv, rmt_adv;
1768 struct tg3 *tp = netdev_priv(dev);
1769 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1771 spin_lock_bh(&tp->lock);
1773 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1774 MAC_MODE_HALF_DUPLEX);
1776 oldflowctrl = tp->link_config.active_flowctrl;
1782 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1783 mac_mode |= MAC_MODE_PORT_MODE_MII;
1784 else if (phydev->speed == SPEED_1000 ||
1785 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1786 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1788 mac_mode |= MAC_MODE_PORT_MODE_MII;
1790 if (phydev->duplex == DUPLEX_HALF)
1791 mac_mode |= MAC_MODE_HALF_DUPLEX;
1793 lcl_adv = tg3_advert_flowctrl_1000T(
1794 tp->link_config.flowctrl);
1797 rmt_adv = LPA_PAUSE_CAP;
1798 if (phydev->asym_pause)
1799 rmt_adv |= LPA_PAUSE_ASYM;
1802 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1804 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1806 if (mac_mode != tp->mac_mode) {
1807 tp->mac_mode = mac_mode;
1808 tw32_f(MAC_MODE, tp->mac_mode);
1812 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1813 if (phydev->speed == SPEED_10)
1815 MAC_MI_STAT_10MBPS_MODE |
1816 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1818 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1821 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1822 tw32(MAC_TX_LENGTHS,
1823 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1824 (6 << TX_LENGTHS_IPG_SHIFT) |
1825 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1827 tw32(MAC_TX_LENGTHS,
1828 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1829 (6 << TX_LENGTHS_IPG_SHIFT) |
1830 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1832 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1833 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1834 phydev->speed != tp->link_config.active_speed ||
1835 phydev->duplex != tp->link_config.active_duplex ||
1836 oldflowctrl != tp->link_config.active_flowctrl)
1839 tp->link_config.active_speed = phydev->speed;
1840 tp->link_config.active_duplex = phydev->duplex;
1842 spin_unlock_bh(&tp->lock);
1845 tg3_link_report(tp);
1848 static int tg3_phy_init(struct tg3 *tp)
1850 struct phy_device *phydev;
1852 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1855 /* Bring the PHY back to a known state. */
1858 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1860 /* Attach the MAC to the PHY. */
1861 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1862 phydev->dev_flags, phydev->interface);
1863 if (IS_ERR(phydev)) {
1864 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1865 return PTR_ERR(phydev);
1868 /* Mask with MAC supported features. */
1869 switch (phydev->interface) {
1870 case PHY_INTERFACE_MODE_GMII:
1871 case PHY_INTERFACE_MODE_RGMII:
1872 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1873 phydev->supported &= (PHY_GBIT_FEATURES |
1875 SUPPORTED_Asym_Pause);
1879 case PHY_INTERFACE_MODE_MII:
1880 phydev->supported &= (PHY_BASIC_FEATURES |
1882 SUPPORTED_Asym_Pause);
1885 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1889 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1891 phydev->advertising = phydev->supported;
1896 static void tg3_phy_start(struct tg3 *tp)
1898 struct phy_device *phydev;
1900 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1903 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1905 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1906 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1907 phydev->speed = tp->link_config.orig_speed;
1908 phydev->duplex = tp->link_config.orig_duplex;
1909 phydev->autoneg = tp->link_config.orig_autoneg;
1910 phydev->advertising = tp->link_config.orig_advertising;
1915 phy_start_aneg(phydev);
1918 static void tg3_phy_stop(struct tg3 *tp)
1920 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1923 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926 static void tg3_phy_fini(struct tg3 *tp)
1928 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1929 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1930 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1934 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1939 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1942 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1943 /* Cannot do read-modify-write on 5401 */
1944 err = tg3_phy_auxctl_write(tp,
1945 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1946 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1951 err = tg3_phy_auxctl_read(tp,
1952 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1956 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1957 err = tg3_phy_auxctl_write(tp,
1958 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1964 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1968 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1971 tg3_writephy(tp, MII_TG3_FET_TEST,
1972 phytest | MII_TG3_FET_SHADOW_EN);
1973 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1975 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1977 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1978 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1980 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1984 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1988 if (!tg3_flag(tp, 5705_PLUS) ||
1989 (tg3_flag(tp, 5717_PLUS) &&
1990 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1993 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1994 tg3_phy_fet_toggle_apd(tp, enable);
1998 reg = MII_TG3_MISC_SHDW_WREN |
1999 MII_TG3_MISC_SHDW_SCR5_SEL |
2000 MII_TG3_MISC_SHDW_SCR5_LPED |
2001 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2002 MII_TG3_MISC_SHDW_SCR5_SDTL |
2003 MII_TG3_MISC_SHDW_SCR5_C125OE;
2004 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2005 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2007 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2010 reg = MII_TG3_MISC_SHDW_WREN |
2011 MII_TG3_MISC_SHDW_APD_SEL |
2012 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2014 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2016 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2019 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2023 if (!tg3_flag(tp, 5705_PLUS) ||
2024 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2027 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2030 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2031 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2033 tg3_writephy(tp, MII_TG3_FET_TEST,
2034 ephy | MII_TG3_FET_SHADOW_EN);
2035 if (!tg3_readphy(tp, reg, &phy)) {
2037 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2039 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2040 tg3_writephy(tp, reg, phy);
2042 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2047 ret = tg3_phy_auxctl_read(tp,
2048 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2051 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2053 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2054 tg3_phy_auxctl_write(tp,
2055 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2060 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2065 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2068 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2070 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2071 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2074 static void tg3_phy_apply_otp(struct tg3 *tp)
2083 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2086 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2087 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2088 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2090 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2091 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2092 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2094 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2095 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2096 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2098 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2099 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2101 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2102 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2104 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2105 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2106 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2108 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2111 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2115 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2120 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2121 current_link_up == 1 &&
2122 tp->link_config.active_duplex == DUPLEX_FULL &&
2123 (tp->link_config.active_speed == SPEED_100 ||
2124 tp->link_config.active_speed == SPEED_1000)) {
2127 if (tp->link_config.active_speed == SPEED_1000)
2128 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2130 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2132 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2134 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2135 TG3_CL45_D7_EEERES_STAT, &val);
2137 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2138 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2142 if (!tp->setlpicnt) {
2143 if (current_link_up == 1 &&
2144 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2145 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2146 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2149 val = tr32(TG3_CPMU_EEE_MODE);
2150 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2154 static void tg3_phy_eee_enable(struct tg3 *tp)
2158 if (tp->link_config.active_speed == SPEED_1000 &&
2159 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2162 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2163 val = MII_TG3_DSP_TAP26_ALNOKO |
2164 MII_TG3_DSP_TAP26_RMRXSTO;
2165 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2166 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2169 val = tr32(TG3_CPMU_EEE_MODE);
2170 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2173 static int tg3_wait_macro_done(struct tg3 *tp)
2180 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2181 if ((tmp32 & 0x1000) == 0)
2191 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2193 static const u32 test_pat[4][6] = {
2194 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2195 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2196 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2197 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2201 for (chan = 0; chan < 4; chan++) {
2204 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2205 (chan * 0x2000) | 0x0200);
2206 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2208 for (i = 0; i < 6; i++)
2209 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2212 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2213 if (tg3_wait_macro_done(tp)) {
2218 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2219 (chan * 0x2000) | 0x0200);
2220 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2221 if (tg3_wait_macro_done(tp)) {
2226 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2227 if (tg3_wait_macro_done(tp)) {
2232 for (i = 0; i < 6; i += 2) {
2235 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2236 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2237 tg3_wait_macro_done(tp)) {
2243 if (low != test_pat[chan][i] ||
2244 high != test_pat[chan][i+1]) {
2245 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2246 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2247 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2257 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2261 for (chan = 0; chan < 4; chan++) {
2264 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2265 (chan * 0x2000) | 0x0200);
2266 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2267 for (i = 0; i < 6; i++)
2268 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2269 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2270 if (tg3_wait_macro_done(tp))
2277 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2279 u32 reg32, phy9_orig;
2280 int retries, do_phy_reset, err;
2286 err = tg3_bmcr_reset(tp);
2292 /* Disable transmitter and interrupt. */
2293 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2297 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2299 /* Set full-duplex, 1000 mbps. */
2300 tg3_writephy(tp, MII_BMCR,
2301 BMCR_FULLDPLX | BMCR_SPEED1000);
2303 /* Set to master mode. */
2304 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2307 tg3_writephy(tp, MII_CTRL1000,
2308 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2310 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2314 /* Block the PHY control access. */
2315 tg3_phydsp_write(tp, 0x8005, 0x0800);
2317 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2320 } while (--retries);
2322 err = tg3_phy_reset_chanpat(tp);
2326 tg3_phydsp_write(tp, 0x8005, 0x0000);
2328 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2329 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2331 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2333 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2335 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2337 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2344 /* This will reset the tigon3 PHY if there is no valid
2345 * link unless the FORCE argument is non-zero.
2347 static int tg3_phy_reset(struct tg3 *tp)
2352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2353 val = tr32(GRC_MISC_CFG);
2354 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2357 err = tg3_readphy(tp, MII_BMSR, &val);
2358 err |= tg3_readphy(tp, MII_BMSR, &val);
2362 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2363 netif_carrier_off(tp->dev);
2364 tg3_link_report(tp);
2367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2370 err = tg3_phy_reset_5703_4_5(tp);
2377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2378 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2379 cpmuctrl = tr32(TG3_CPMU_CTRL);
2380 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2382 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2385 err = tg3_bmcr_reset(tp);
2389 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2390 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2391 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2393 tw32(TG3_CPMU_CTRL, cpmuctrl);
2396 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2397 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2398 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2399 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2400 CPMU_LSPD_1000MB_MACCLK_12_5) {
2401 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2403 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2407 if (tg3_flag(tp, 5717_PLUS) &&
2408 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2411 tg3_phy_apply_otp(tp);
2413 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2414 tg3_phy_toggle_apd(tp, true);
2416 tg3_phy_toggle_apd(tp, false);
2419 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2420 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2421 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2422 tg3_phydsp_write(tp, 0x000a, 0x0323);
2423 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2426 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2427 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2428 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2431 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2432 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2433 tg3_phydsp_write(tp, 0x000a, 0x310b);
2434 tg3_phydsp_write(tp, 0x201f, 0x9506);
2435 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2436 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2438 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2439 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2440 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2441 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2443 tg3_writephy(tp, MII_TG3_TEST1,
2444 MII_TG3_TEST1_TRIM_EN | 0x4);
2446 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2448 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2452 /* Set Extended packet length bit (bit 14) on all chips that */
2453 /* support jumbo frames */
2454 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2455 /* Cannot do read-modify-write on 5401 */
2456 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2457 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2458 /* Set bit 14 with read-modify-write to preserve other bits */
2459 err = tg3_phy_auxctl_read(tp,
2460 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2462 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2463 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2466 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2467 * jumbo frames transmission.
2469 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2470 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2471 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2472 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2476 /* adjust output voltage */
2477 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2480 tg3_phy_toggle_automdix(tp, 1);
2481 tg3_phy_set_wirespeed(tp);
2485 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2486 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2487 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2488 TG3_GPIO_MSG_NEED_VAUX)
2489 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2490 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2491 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2492 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2493 (TG3_GPIO_MSG_DRVR_PRES << 12))
2495 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2496 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2497 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2498 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2499 (TG3_GPIO_MSG_NEED_VAUX << 12))
2501 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2509 status = tr32(TG3_CPMU_DRV_STATUS);
2511 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2512 status &= ~(TG3_GPIO_MSG_MASK << shift);
2513 status |= (newstat << shift);
2515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2517 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2519 tw32(TG3_CPMU_DRV_STATUS, status);
2521 return status >> TG3_APE_GPIO_MSG_SHIFT;
2524 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2526 if (!tg3_flag(tp, IS_NIC))
2529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2532 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2535 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2537 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2538 TG3_GRC_LCLCTL_PWRSW_DELAY);
2540 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2542 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2543 TG3_GRC_LCLCTL_PWRSW_DELAY);
2549 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2553 if (!tg3_flag(tp, IS_NIC) ||
2554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2558 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2560 tw32_wait_f(GRC_LOCAL_CTRL,
2561 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2562 TG3_GRC_LCLCTL_PWRSW_DELAY);
2564 tw32_wait_f(GRC_LOCAL_CTRL,
2566 TG3_GRC_LCLCTL_PWRSW_DELAY);
2568 tw32_wait_f(GRC_LOCAL_CTRL,
2569 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2570 TG3_GRC_LCLCTL_PWRSW_DELAY);
2573 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2575 if (!tg3_flag(tp, IS_NIC))
2578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2580 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2581 (GRC_LCLCTRL_GPIO_OE0 |
2582 GRC_LCLCTRL_GPIO_OE1 |
2583 GRC_LCLCTRL_GPIO_OE2 |
2584 GRC_LCLCTRL_GPIO_OUTPUT0 |
2585 GRC_LCLCTRL_GPIO_OUTPUT1),
2586 TG3_GRC_LCLCTL_PWRSW_DELAY);
2587 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2588 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2589 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2590 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2591 GRC_LCLCTRL_GPIO_OE1 |
2592 GRC_LCLCTRL_GPIO_OE2 |
2593 GRC_LCLCTRL_GPIO_OUTPUT0 |
2594 GRC_LCLCTRL_GPIO_OUTPUT1 |
2596 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2597 TG3_GRC_LCLCTL_PWRSW_DELAY);
2599 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2600 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601 TG3_GRC_LCLCTL_PWRSW_DELAY);
2603 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2604 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2605 TG3_GRC_LCLCTL_PWRSW_DELAY);
2608 u32 grc_local_ctrl = 0;
2610 /* Workaround to prevent overdrawing Amps. */
2611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2612 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2613 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2615 TG3_GRC_LCLCTL_PWRSW_DELAY);
2618 /* On 5753 and variants, GPIO2 cannot be used. */
2619 no_gpio2 = tp->nic_sram_data_cfg &
2620 NIC_SRAM_DATA_CFG_NO_GPIO2;
2622 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2623 GRC_LCLCTRL_GPIO_OE1 |
2624 GRC_LCLCTRL_GPIO_OE2 |
2625 GRC_LCLCTRL_GPIO_OUTPUT1 |
2626 GRC_LCLCTRL_GPIO_OUTPUT2;
2628 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2629 GRC_LCLCTRL_GPIO_OUTPUT2);
2631 tw32_wait_f(GRC_LOCAL_CTRL,
2632 tp->grc_local_ctrl | grc_local_ctrl,
2633 TG3_GRC_LCLCTL_PWRSW_DELAY);
2635 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2637 tw32_wait_f(GRC_LOCAL_CTRL,
2638 tp->grc_local_ctrl | grc_local_ctrl,
2639 TG3_GRC_LCLCTL_PWRSW_DELAY);
2642 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2643 tw32_wait_f(GRC_LOCAL_CTRL,
2644 tp->grc_local_ctrl | grc_local_ctrl,
2645 TG3_GRC_LCLCTL_PWRSW_DELAY);
2650 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2654 /* Serialize power state transitions */
2655 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2658 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2659 msg = TG3_GPIO_MSG_NEED_VAUX;
2661 msg = tg3_set_function_status(tp, msg);
2663 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2666 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2667 tg3_pwrsrc_switch_to_vaux(tp);
2669 tg3_pwrsrc_die_with_vmain(tp);
2672 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2675 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2677 bool need_vaux = false;
2679 /* The GPIOs do something completely different on 57765. */
2680 if (!tg3_flag(tp, IS_NIC) ||
2681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2687 tg3_frob_aux_power_5717(tp, include_wol ?
2688 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2692 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2693 struct net_device *dev_peer;
2695 dev_peer = pci_get_drvdata(tp->pdev_peer);
2697 /* remove_one() may have been run on the peer. */
2699 struct tg3 *tp_peer = netdev_priv(dev_peer);
2701 if (tg3_flag(tp_peer, INIT_COMPLETE))
2704 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2705 tg3_flag(tp_peer, ENABLE_ASF))
2710 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2711 tg3_flag(tp, ENABLE_ASF))
2715 tg3_pwrsrc_switch_to_vaux(tp);
2717 tg3_pwrsrc_die_with_vmain(tp);
2720 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2722 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2724 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2725 if (speed != SPEED_10)
2727 } else if (speed == SPEED_10)
2733 static int tg3_setup_phy(struct tg3 *, int);
2734 static int tg3_halt_cpu(struct tg3 *, u32);
2736 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2740 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2742 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2743 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2746 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2747 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2748 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2755 val = tr32(GRC_MISC_CFG);
2756 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2759 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2761 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2764 tg3_writephy(tp, MII_ADVERTISE, 0);
2765 tg3_writephy(tp, MII_BMCR,
2766 BMCR_ANENABLE | BMCR_ANRESTART);
2768 tg3_writephy(tp, MII_TG3_FET_TEST,
2769 phytest | MII_TG3_FET_SHADOW_EN);
2770 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2771 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2773 MII_TG3_FET_SHDW_AUXMODE4,
2776 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2779 } else if (do_low_power) {
2780 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2781 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2783 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2784 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2785 MII_TG3_AUXCTL_PCTL_VREG_11V;
2786 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2789 /* The PHY should not be powered down on some chips because
2792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2794 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2795 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2798 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2799 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2800 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2801 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2802 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2803 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2806 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2809 /* tp->lock is held. */
2810 static int tg3_nvram_lock(struct tg3 *tp)
2812 if (tg3_flag(tp, NVRAM)) {
2815 if (tp->nvram_lock_cnt == 0) {
2816 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2817 for (i = 0; i < 8000; i++) {
2818 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2823 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2827 tp->nvram_lock_cnt++;
2832 /* tp->lock is held. */
2833 static void tg3_nvram_unlock(struct tg3 *tp)
2835 if (tg3_flag(tp, NVRAM)) {
2836 if (tp->nvram_lock_cnt > 0)
2837 tp->nvram_lock_cnt--;
2838 if (tp->nvram_lock_cnt == 0)
2839 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2843 /* tp->lock is held. */
2844 static void tg3_enable_nvram_access(struct tg3 *tp)
2846 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2847 u32 nvaccess = tr32(NVRAM_ACCESS);
2849 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2853 /* tp->lock is held. */
2854 static void tg3_disable_nvram_access(struct tg3 *tp)
2856 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2857 u32 nvaccess = tr32(NVRAM_ACCESS);
2859 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2863 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2864 u32 offset, u32 *val)
2869 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2872 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2873 EEPROM_ADDR_DEVID_MASK |
2875 tw32(GRC_EEPROM_ADDR,
2877 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2878 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2879 EEPROM_ADDR_ADDR_MASK) |
2880 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2882 for (i = 0; i < 1000; i++) {
2883 tmp = tr32(GRC_EEPROM_ADDR);
2885 if (tmp & EEPROM_ADDR_COMPLETE)
2889 if (!(tmp & EEPROM_ADDR_COMPLETE))
2892 tmp = tr32(GRC_EEPROM_DATA);
2895 * The data will always be opposite the native endian
2896 * format. Perform a blind byteswap to compensate.
2903 #define NVRAM_CMD_TIMEOUT 10000
2905 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2909 tw32(NVRAM_CMD, nvram_cmd);
2910 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2912 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2918 if (i == NVRAM_CMD_TIMEOUT)
2924 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2926 if (tg3_flag(tp, NVRAM) &&
2927 tg3_flag(tp, NVRAM_BUFFERED) &&
2928 tg3_flag(tp, FLASH) &&
2929 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2930 (tp->nvram_jedecnum == JEDEC_ATMEL))
2932 addr = ((addr / tp->nvram_pagesize) <<
2933 ATMEL_AT45DB0X1B_PAGE_POS) +
2934 (addr % tp->nvram_pagesize);
2939 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2941 if (tg3_flag(tp, NVRAM) &&
2942 tg3_flag(tp, NVRAM_BUFFERED) &&
2943 tg3_flag(tp, FLASH) &&
2944 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2945 (tp->nvram_jedecnum == JEDEC_ATMEL))
2947 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2948 tp->nvram_pagesize) +
2949 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2954 /* NOTE: Data read in from NVRAM is byteswapped according to
2955 * the byteswapping settings for all other register accesses.
2956 * tg3 devices are BE devices, so on a BE machine, the data
2957 * returned will be exactly as it is seen in NVRAM. On a LE
2958 * machine, the 32-bit value will be byteswapped.
2960 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2964 if (!tg3_flag(tp, NVRAM))
2965 return tg3_nvram_read_using_eeprom(tp, offset, val);
2967 offset = tg3_nvram_phys_addr(tp, offset);
2969 if (offset > NVRAM_ADDR_MSK)
2972 ret = tg3_nvram_lock(tp);
2976 tg3_enable_nvram_access(tp);
2978 tw32(NVRAM_ADDR, offset);
2979 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2980 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2983 *val = tr32(NVRAM_RDDATA);
2985 tg3_disable_nvram_access(tp);
2987 tg3_nvram_unlock(tp);
2992 /* Ensures NVRAM data is in bytestream format. */
2993 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2996 int res = tg3_nvram_read(tp, offset, &v);
2998 *val = cpu_to_be32(v);
3002 #define RX_CPU_SCRATCH_BASE 0x30000
3003 #define RX_CPU_SCRATCH_SIZE 0x04000
3004 #define TX_CPU_SCRATCH_BASE 0x34000
3005 #define TX_CPU_SCRATCH_SIZE 0x04000
3007 /* tp->lock is held. */
3008 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3012 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3015 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3017 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3020 if (offset == RX_CPU_BASE) {
3021 for (i = 0; i < 10000; i++) {
3022 tw32(offset + CPU_STATE, 0xffffffff);
3023 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3024 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3028 tw32(offset + CPU_STATE, 0xffffffff);
3029 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3032 for (i = 0; i < 10000; i++) {
3033 tw32(offset + CPU_STATE, 0xffffffff);
3034 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3035 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3041 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3042 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3046 /* Clear firmware's nvram arbitration. */
3047 if (tg3_flag(tp, NVRAM))
3048 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3053 unsigned int fw_base;
3054 unsigned int fw_len;
3055 const __be32 *fw_data;
3058 /* tp->lock is held. */
3059 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3060 u32 cpu_scratch_base, int cpu_scratch_size,
3061 struct fw_info *info)
3063 int err, lock_err, i;
3064 void (*write_op)(struct tg3 *, u32, u32);
3066 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3068 "%s: Trying to load TX cpu firmware which is 5705\n",
3073 if (tg3_flag(tp, 5705_PLUS))
3074 write_op = tg3_write_mem;
3076 write_op = tg3_write_indirect_reg32;
3078 /* It is possible that bootcode is still loading at this point.
3079 * Get the nvram lock first before halting the cpu.
3081 lock_err = tg3_nvram_lock(tp);
3082 err = tg3_halt_cpu(tp, cpu_base);
3084 tg3_nvram_unlock(tp);
3088 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3089 write_op(tp, cpu_scratch_base + i, 0);
3090 tw32(cpu_base + CPU_STATE, 0xffffffff);
3091 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3092 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3093 write_op(tp, (cpu_scratch_base +
3094 (info->fw_base & 0xffff) +
3096 be32_to_cpu(info->fw_data[i]));
3104 /* tp->lock is held. */
3105 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3107 struct fw_info info;
3108 const __be32 *fw_data;
3111 fw_data = (void *)tp->fw->data;
3113 /* Firmware blob starts with version numbers, followed by
3114 start address and length. We are setting complete length.
3115 length = end_address_of_bss - start_address_of_text.
3116 Remainder is the blob to be loaded contiguously
3117 from start address. */
3119 info.fw_base = be32_to_cpu(fw_data[1]);
3120 info.fw_len = tp->fw->size - 12;
3121 info.fw_data = &fw_data[3];
3123 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3124 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3129 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3130 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3135 /* Now startup only the RX cpu. */
3136 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3137 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3139 for (i = 0; i < 5; i++) {
3140 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3142 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3143 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3144 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3148 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3149 "should be %08x\n", __func__,
3150 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3153 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3154 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3159 /* tp->lock is held. */
3160 static int tg3_load_tso_firmware(struct tg3 *tp)
3162 struct fw_info info;
3163 const __be32 *fw_data;
3164 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3167 if (tg3_flag(tp, HW_TSO_1) ||
3168 tg3_flag(tp, HW_TSO_2) ||
3169 tg3_flag(tp, HW_TSO_3))
3172 fw_data = (void *)tp->fw->data;
3174 /* Firmware blob starts with version numbers, followed by
3175 start address and length. We are setting complete length.
3176 length = end_address_of_bss - start_address_of_text.
3177 Remainder is the blob to be loaded contiguously
3178 from start address. */
3180 info.fw_base = be32_to_cpu(fw_data[1]);
3181 cpu_scratch_size = tp->fw_len;
3182 info.fw_len = tp->fw->size - 12;
3183 info.fw_data = &fw_data[3];
3185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3186 cpu_base = RX_CPU_BASE;
3187 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3189 cpu_base = TX_CPU_BASE;
3190 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3191 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3194 err = tg3_load_firmware_cpu(tp, cpu_base,
3195 cpu_scratch_base, cpu_scratch_size,
3200 /* Now startup the cpu. */
3201 tw32(cpu_base + CPU_STATE, 0xffffffff);
3202 tw32_f(cpu_base + CPU_PC, info.fw_base);
3204 for (i = 0; i < 5; i++) {
3205 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3207 tw32(cpu_base + CPU_STATE, 0xffffffff);
3208 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3209 tw32_f(cpu_base + CPU_PC, info.fw_base);
3214 "%s fails to set CPU PC, is %08x should be %08x\n",
3215 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3218 tw32(cpu_base + CPU_STATE, 0xffffffff);
3219 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3224 /* tp->lock is held. */
3225 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3227 u32 addr_high, addr_low;
3230 addr_high = ((tp->dev->dev_addr[0] << 8) |
3231 tp->dev->dev_addr[1]);
3232 addr_low = ((tp->dev->dev_addr[2] << 24) |
3233 (tp->dev->dev_addr[3] << 16) |
3234 (tp->dev->dev_addr[4] << 8) |
3235 (tp->dev->dev_addr[5] << 0));
3236 for (i = 0; i < 4; i++) {
3237 if (i == 1 && skip_mac_1)
3239 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3240 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3244 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3245 for (i = 0; i < 12; i++) {
3246 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3247 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3251 addr_high = (tp->dev->dev_addr[0] +
3252 tp->dev->dev_addr[1] +
3253 tp->dev->dev_addr[2] +
3254 tp->dev->dev_addr[3] +
3255 tp->dev->dev_addr[4] +
3256 tp->dev->dev_addr[5]) &
3257 TX_BACKOFF_SEED_MASK;
3258 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3261 static void tg3_enable_register_access(struct tg3 *tp)
3264 * Make sure register accesses (indirect or otherwise) will function
3267 pci_write_config_dword(tp->pdev,
3268 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3271 static int tg3_power_up(struct tg3 *tp)
3275 tg3_enable_register_access(tp);
3277 err = pci_set_power_state(tp->pdev, PCI_D0);
3279 /* Switch out of Vaux if it is a NIC */
3280 tg3_pwrsrc_switch_to_vmain(tp);
3282 netdev_err(tp->dev, "Transition to D0 failed\n");
3288 static int tg3_power_down_prepare(struct tg3 *tp)
3291 bool device_should_wake, do_low_power;
3293 tg3_enable_register_access(tp);
3295 /* Restore the CLKREQ setting. */
3296 if (tg3_flag(tp, CLKREQ_BUG)) {
3299 pci_read_config_word(tp->pdev,
3300 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3302 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3303 pci_write_config_word(tp->pdev,
3304 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3308 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3309 tw32(TG3PCI_MISC_HOST_CTRL,
3310 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3312 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3313 tg3_flag(tp, WOL_ENABLE);
3315 if (tg3_flag(tp, USE_PHYLIB)) {
3316 do_low_power = false;
3317 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3318 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3319 struct phy_device *phydev;
3320 u32 phyid, advertising;
3322 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3324 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3326 tp->link_config.orig_speed = phydev->speed;
3327 tp->link_config.orig_duplex = phydev->duplex;
3328 tp->link_config.orig_autoneg = phydev->autoneg;
3329 tp->link_config.orig_advertising = phydev->advertising;
3331 advertising = ADVERTISED_TP |
3333 ADVERTISED_Autoneg |
3334 ADVERTISED_10baseT_Half;
3336 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3337 if (tg3_flag(tp, WOL_SPEED_100MB))
3339 ADVERTISED_100baseT_Half |
3340 ADVERTISED_100baseT_Full |
3341 ADVERTISED_10baseT_Full;
3343 advertising |= ADVERTISED_10baseT_Full;
3346 phydev->advertising = advertising;
3348 phy_start_aneg(phydev);
3350 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3351 if (phyid != PHY_ID_BCMAC131) {
3352 phyid &= PHY_BCM_OUI_MASK;
3353 if (phyid == PHY_BCM_OUI_1 ||
3354 phyid == PHY_BCM_OUI_2 ||
3355 phyid == PHY_BCM_OUI_3)
3356 do_low_power = true;
3360 do_low_power = true;
3362 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3363 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3364 tp->link_config.orig_speed = tp->link_config.speed;
3365 tp->link_config.orig_duplex = tp->link_config.duplex;
3366 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3369 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3370 tp->link_config.speed = SPEED_10;
3371 tp->link_config.duplex = DUPLEX_HALF;
3372 tp->link_config.autoneg = AUTONEG_ENABLE;
3373 tg3_setup_phy(tp, 0);
3377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3380 val = tr32(GRC_VCPU_EXT_CTRL);
3381 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3382 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3386 for (i = 0; i < 200; i++) {
3387 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3388 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3393 if (tg3_flag(tp, WOL_CAP))
3394 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3395 WOL_DRV_STATE_SHUTDOWN |
3399 if (device_should_wake) {
3402 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3404 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3405 tg3_phy_auxctl_write(tp,
3406 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3407 MII_TG3_AUXCTL_PCTL_WOL_EN |
3408 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3409 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3413 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3414 mac_mode = MAC_MODE_PORT_MODE_GMII;
3416 mac_mode = MAC_MODE_PORT_MODE_MII;
3418 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3419 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3421 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3422 SPEED_100 : SPEED_10;
3423 if (tg3_5700_link_polarity(tp, speed))
3424 mac_mode |= MAC_MODE_LINK_POLARITY;
3426 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3429 mac_mode = MAC_MODE_PORT_MODE_TBI;
3432 if (!tg3_flag(tp, 5750_PLUS))
3433 tw32(MAC_LED_CTRL, tp->led_ctrl);
3435 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3436 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3437 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3438 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3440 if (tg3_flag(tp, ENABLE_APE))
3441 mac_mode |= MAC_MODE_APE_TX_EN |
3442 MAC_MODE_APE_RX_EN |
3443 MAC_MODE_TDE_ENABLE;
3445 tw32_f(MAC_MODE, mac_mode);
3448 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3452 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3453 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3457 base_val = tp->pci_clock_ctrl;
3458 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3459 CLOCK_CTRL_TXCLK_DISABLE);
3461 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3462 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3463 } else if (tg3_flag(tp, 5780_CLASS) ||
3464 tg3_flag(tp, CPMU_PRESENT) ||
3465 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3467 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3468 u32 newbits1, newbits2;
3470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3472 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3473 CLOCK_CTRL_TXCLK_DISABLE |
3475 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3476 } else if (tg3_flag(tp, 5705_PLUS)) {
3477 newbits1 = CLOCK_CTRL_625_CORE;
3478 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3480 newbits1 = CLOCK_CTRL_ALTCLK;
3481 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3484 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3487 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3490 if (!tg3_flag(tp, 5705_PLUS)) {
3493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3495 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3496 CLOCK_CTRL_TXCLK_DISABLE |
3497 CLOCK_CTRL_44MHZ_CORE);
3499 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3502 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3503 tp->pci_clock_ctrl | newbits3, 40);
3507 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3508 tg3_power_down_phy(tp, do_low_power);
3510 tg3_frob_aux_power(tp, true);
3512 /* Workaround for unstable PLL clock */
3513 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3514 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3515 u32 val = tr32(0x7d00);
3517 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3519 if (!tg3_flag(tp, ENABLE_ASF)) {
3522 err = tg3_nvram_lock(tp);
3523 tg3_halt_cpu(tp, RX_CPU_BASE);
3525 tg3_nvram_unlock(tp);
3529 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3534 static void tg3_power_down(struct tg3 *tp)
3536 tg3_power_down_prepare(tp);
3538 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3539 pci_set_power_state(tp->pdev, PCI_D3hot);
3542 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3544 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3545 case MII_TG3_AUX_STAT_10HALF:
3547 *duplex = DUPLEX_HALF;
3550 case MII_TG3_AUX_STAT_10FULL:
3552 *duplex = DUPLEX_FULL;
3555 case MII_TG3_AUX_STAT_100HALF:
3557 *duplex = DUPLEX_HALF;
3560 case MII_TG3_AUX_STAT_100FULL:
3562 *duplex = DUPLEX_FULL;
3565 case MII_TG3_AUX_STAT_1000HALF:
3566 *speed = SPEED_1000;
3567 *duplex = DUPLEX_HALF;
3570 case MII_TG3_AUX_STAT_1000FULL:
3571 *speed = SPEED_1000;
3572 *duplex = DUPLEX_FULL;
3576 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3577 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3579 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3583 *speed = SPEED_INVALID;
3584 *duplex = DUPLEX_INVALID;
3589 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3594 new_adv = ADVERTISE_CSMA;
3595 if (advertise & ADVERTISED_10baseT_Half)
3596 new_adv |= ADVERTISE_10HALF;
3597 if (advertise & ADVERTISED_10baseT_Full)
3598 new_adv |= ADVERTISE_10FULL;
3599 if (advertise & ADVERTISED_100baseT_Half)
3600 new_adv |= ADVERTISE_100HALF;
3601 if (advertise & ADVERTISED_100baseT_Full)
3602 new_adv |= ADVERTISE_100FULL;
3604 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3606 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3610 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3614 if (advertise & ADVERTISED_1000baseT_Half)
3615 new_adv |= ADVERTISE_1000HALF;
3616 if (advertise & ADVERTISED_1000baseT_Full)
3617 new_adv |= ADVERTISE_1000FULL;
3619 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3620 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3621 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3623 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3627 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3630 tw32(TG3_CPMU_EEE_MODE,
3631 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3633 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3638 /* Advertise 100-BaseTX EEE ability */
3639 if (advertise & ADVERTISED_100baseT_Full)
3640 val |= MDIO_AN_EEE_ADV_100TX;
3641 /* Advertise 1000-BaseT EEE ability */
3642 if (advertise & ADVERTISED_1000baseT_Full)
3643 val |= MDIO_AN_EEE_ADV_1000T;
3644 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3648 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3650 case ASIC_REV_57765:
3652 /* If we advertised any eee advertisements above... */
3654 val = MII_TG3_DSP_TAP26_ALNOKO |
3655 MII_TG3_DSP_TAP26_RMRXSTO |
3656 MII_TG3_DSP_TAP26_OPCSINPT;
3657 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3660 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3661 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3662 MII_TG3_DSP_CH34TP2_HIBW01);
3665 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3674 static void tg3_phy_copper_begin(struct tg3 *tp)
3679 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3680 new_adv = ADVERTISED_10baseT_Half |
3681 ADVERTISED_10baseT_Full;
3682 if (tg3_flag(tp, WOL_SPEED_100MB))
3683 new_adv |= ADVERTISED_100baseT_Half |
3684 ADVERTISED_100baseT_Full;
3686 tg3_phy_autoneg_cfg(tp, new_adv,
3687 FLOW_CTRL_TX | FLOW_CTRL_RX);
3688 } else if (tp->link_config.speed == SPEED_INVALID) {
3689 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3690 tp->link_config.advertising &=
3691 ~(ADVERTISED_1000baseT_Half |
3692 ADVERTISED_1000baseT_Full);
3694 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3695 tp->link_config.flowctrl);
3697 /* Asking for a specific link mode. */
3698 if (tp->link_config.speed == SPEED_1000) {
3699 if (tp->link_config.duplex == DUPLEX_FULL)
3700 new_adv = ADVERTISED_1000baseT_Full;
3702 new_adv = ADVERTISED_1000baseT_Half;
3703 } else if (tp->link_config.speed == SPEED_100) {
3704 if (tp->link_config.duplex == DUPLEX_FULL)
3705 new_adv = ADVERTISED_100baseT_Full;
3707 new_adv = ADVERTISED_100baseT_Half;
3709 if (tp->link_config.duplex == DUPLEX_FULL)
3710 new_adv = ADVERTISED_10baseT_Full;
3712 new_adv = ADVERTISED_10baseT_Half;
3715 tg3_phy_autoneg_cfg(tp, new_adv,
3716 tp->link_config.flowctrl);
3719 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3720 tp->link_config.speed != SPEED_INVALID) {
3721 u32 bmcr, orig_bmcr;
3723 tp->link_config.active_speed = tp->link_config.speed;
3724 tp->link_config.active_duplex = tp->link_config.duplex;
3727 switch (tp->link_config.speed) {
3733 bmcr |= BMCR_SPEED100;
3737 bmcr |= BMCR_SPEED1000;
3741 if (tp->link_config.duplex == DUPLEX_FULL)
3742 bmcr |= BMCR_FULLDPLX;
3744 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3745 (bmcr != orig_bmcr)) {
3746 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3747 for (i = 0; i < 1500; i++) {
3751 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3752 tg3_readphy(tp, MII_BMSR, &tmp))
3754 if (!(tmp & BMSR_LSTATUS)) {
3759 tg3_writephy(tp, MII_BMCR, bmcr);
3763 tg3_writephy(tp, MII_BMCR,
3764 BMCR_ANENABLE | BMCR_ANRESTART);
3768 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3772 /* Turn off tap power management. */
3773 /* Set Extended packet length bit */
3774 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3776 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3777 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3778 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3779 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3780 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3787 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3789 u32 adv_reg, all_mask = 0;
3791 if (mask & ADVERTISED_10baseT_Half)
3792 all_mask |= ADVERTISE_10HALF;
3793 if (mask & ADVERTISED_10baseT_Full)
3794 all_mask |= ADVERTISE_10FULL;
3795 if (mask & ADVERTISED_100baseT_Half)
3796 all_mask |= ADVERTISE_100HALF;
3797 if (mask & ADVERTISED_100baseT_Full)
3798 all_mask |= ADVERTISE_100FULL;
3800 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3803 if ((adv_reg & ADVERTISE_ALL) != all_mask)
3806 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3810 if (mask & ADVERTISED_1000baseT_Half)
3811 all_mask |= ADVERTISE_1000HALF;
3812 if (mask & ADVERTISED_1000baseT_Full)
3813 all_mask |= ADVERTISE_1000FULL;
3815 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3818 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3819 if (tg3_ctrl != all_mask)
3826 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3830 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3833 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3834 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3836 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3837 if (curadv != reqadv)
3840 if (tg3_flag(tp, PAUSE_AUTONEG))
3841 tg3_readphy(tp, MII_LPA, rmtadv);
3843 /* Reprogram the advertisement register, even if it
3844 * does not affect the current link. If the link
3845 * gets renegotiated in the future, we can save an
3846 * additional renegotiation cycle by advertising
3847 * it correctly in the first place.
3849 if (curadv != reqadv) {
3850 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3851 ADVERTISE_PAUSE_ASYM);
3852 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3859 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3861 int current_link_up;
3863 u32 lcl_adv, rmt_adv;
3871 (MAC_STATUS_SYNC_CHANGED |
3872 MAC_STATUS_CFG_CHANGED |
3873 MAC_STATUS_MI_COMPLETION |
3874 MAC_STATUS_LNKSTATE_CHANGED));
3877 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3879 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3883 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3885 /* Some third-party PHYs need to be reset on link going
3888 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3891 netif_carrier_ok(tp->dev)) {
3892 tg3_readphy(tp, MII_BMSR, &bmsr);
3893 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3894 !(bmsr & BMSR_LSTATUS))
3900 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3901 tg3_readphy(tp, MII_BMSR, &bmsr);
3902 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3903 !tg3_flag(tp, INIT_COMPLETE))
3906 if (!(bmsr & BMSR_LSTATUS)) {
3907 err = tg3_init_5401phy_dsp(tp);
3911 tg3_readphy(tp, MII_BMSR, &bmsr);
3912 for (i = 0; i < 1000; i++) {
3914 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3915 (bmsr & BMSR_LSTATUS)) {
3921 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3922 TG3_PHY_REV_BCM5401_B0 &&
3923 !(bmsr & BMSR_LSTATUS) &&
3924 tp->link_config.active_speed == SPEED_1000) {
3925 err = tg3_phy_reset(tp);
3927 err = tg3_init_5401phy_dsp(tp);
3932 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3933 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3934 /* 5701 {A0,B0} CRC bug workaround */
3935 tg3_writephy(tp, 0x15, 0x0a75);
3936 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3937 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3938 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3941 /* Clear pending interrupts... */
3942 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3943 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3945 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3946 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3947 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3948 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3952 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3953 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3954 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3956 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3959 current_link_up = 0;
3960 current_speed = SPEED_INVALID;
3961 current_duplex = DUPLEX_INVALID;
3963 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3964 err = tg3_phy_auxctl_read(tp,
3965 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3967 if (!err && !(val & (1 << 10))) {
3968 tg3_phy_auxctl_write(tp,
3969 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3976 for (i = 0; i < 100; i++) {
3977 tg3_readphy(tp, MII_BMSR, &bmsr);
3978 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3979 (bmsr & BMSR_LSTATUS))
3984 if (bmsr & BMSR_LSTATUS) {
3987 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3988 for (i = 0; i < 2000; i++) {
3990 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3995 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4000 for (i = 0; i < 200; i++) {
4001 tg3_readphy(tp, MII_BMCR, &bmcr);
4002 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4004 if (bmcr && bmcr != 0x7fff)
4012 tp->link_config.active_speed = current_speed;
4013 tp->link_config.active_duplex = current_duplex;
4015 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4016 if ((bmcr & BMCR_ANENABLE) &&
4017 tg3_copper_is_advertising_all(tp,
4018 tp->link_config.advertising)) {
4019 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4021 current_link_up = 1;
4024 if (!(bmcr & BMCR_ANENABLE) &&
4025 tp->link_config.speed == current_speed &&
4026 tp->link_config.duplex == current_duplex &&
4027 tp->link_config.flowctrl ==
4028 tp->link_config.active_flowctrl) {
4029 current_link_up = 1;
4033 if (current_link_up == 1 &&
4034 tp->link_config.active_duplex == DUPLEX_FULL)
4035 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4039 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4040 tg3_phy_copper_begin(tp);
4042 tg3_readphy(tp, MII_BMSR, &bmsr);
4043 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4044 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4045 current_link_up = 1;
4048 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4049 if (current_link_up == 1) {
4050 if (tp->link_config.active_speed == SPEED_100 ||
4051 tp->link_config.active_speed == SPEED_10)
4052 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4054 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4055 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4056 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4058 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4060 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4061 if (tp->link_config.active_duplex == DUPLEX_HALF)
4062 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4064 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4065 if (current_link_up == 1 &&
4066 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4067 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4069 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4072 /* ??? Without this setting Netgear GA302T PHY does not
4073 * ??? send/receive packets...
4075 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4076 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4077 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4078 tw32_f(MAC_MI_MODE, tp->mi_mode);
4082 tw32_f(MAC_MODE, tp->mac_mode);
4085 tg3_phy_eee_adjust(tp, current_link_up);
4087 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4088 /* Polled via timer. */
4089 tw32_f(MAC_EVENT, 0);
4091 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4096 current_link_up == 1 &&
4097 tp->link_config.active_speed == SPEED_1000 &&
4098 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4101 (MAC_STATUS_SYNC_CHANGED |
4102 MAC_STATUS_CFG_CHANGED));
4105 NIC_SRAM_FIRMWARE_MBOX,
4106 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4109 /* Prevent send BD corruption. */
4110 if (tg3_flag(tp, CLKREQ_BUG)) {
4111 u16 oldlnkctl, newlnkctl;
4113 pci_read_config_word(tp->pdev,
4114 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4116 if (tp->link_config.active_speed == SPEED_100 ||
4117 tp->link_config.active_speed == SPEED_10)
4118 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4120 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4121 if (newlnkctl != oldlnkctl)
4122 pci_write_config_word(tp->pdev,
4123 pci_pcie_cap(tp->pdev) +
4124 PCI_EXP_LNKCTL, newlnkctl);
4127 if (current_link_up != netif_carrier_ok(tp->dev)) {
4128 if (current_link_up)
4129 netif_carrier_on(tp->dev);
4131 netif_carrier_off(tp->dev);
4132 tg3_link_report(tp);
4138 struct tg3_fiber_aneginfo {
4140 #define ANEG_STATE_UNKNOWN 0
4141 #define ANEG_STATE_AN_ENABLE 1
4142 #define ANEG_STATE_RESTART_INIT 2
4143 #define ANEG_STATE_RESTART 3
4144 #define ANEG_STATE_DISABLE_LINK_OK 4
4145 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4146 #define ANEG_STATE_ABILITY_DETECT 6
4147 #define ANEG_STATE_ACK_DETECT_INIT 7
4148 #define ANEG_STATE_ACK_DETECT 8
4149 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4150 #define ANEG_STATE_COMPLETE_ACK 10
4151 #define ANEG_STATE_IDLE_DETECT_INIT 11
4152 #define ANEG_STATE_IDLE_DETECT 12
4153 #define ANEG_STATE_LINK_OK 13
4154 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4155 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4158 #define MR_AN_ENABLE 0x00000001
4159 #define MR_RESTART_AN 0x00000002
4160 #define MR_AN_COMPLETE 0x00000004
4161 #define MR_PAGE_RX 0x00000008
4162 #define MR_NP_LOADED 0x00000010
4163 #define MR_TOGGLE_TX 0x00000020
4164 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4165 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4166 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4167 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4168 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4169 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4170 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4171 #define MR_TOGGLE_RX 0x00002000
4172 #define MR_NP_RX 0x00004000
4174 #define MR_LINK_OK 0x80000000
4176 unsigned long link_time, cur_time;
4178 u32 ability_match_cfg;
4179 int ability_match_count;
4181 char ability_match, idle_match, ack_match;
4183 u32 txconfig, rxconfig;
4184 #define ANEG_CFG_NP 0x00000080
4185 #define ANEG_CFG_ACK 0x00000040
4186 #define ANEG_CFG_RF2 0x00000020
4187 #define ANEG_CFG_RF1 0x00000010
4188 #define ANEG_CFG_PS2 0x00000001
4189 #define ANEG_CFG_PS1 0x00008000
4190 #define ANEG_CFG_HD 0x00004000
4191 #define ANEG_CFG_FD 0x00002000
4192 #define ANEG_CFG_INVAL 0x00001f06
4197 #define ANEG_TIMER_ENAB 2
4198 #define ANEG_FAILED -1
4200 #define ANEG_STATE_SETTLE_TIME 10000
4202 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4203 struct tg3_fiber_aneginfo *ap)
4206 unsigned long delta;
4210 if (ap->state == ANEG_STATE_UNKNOWN) {
4214 ap->ability_match_cfg = 0;
4215 ap->ability_match_count = 0;
4216 ap->ability_match = 0;
4222 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4223 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4225 if (rx_cfg_reg != ap->ability_match_cfg) {
4226 ap->ability_match_cfg = rx_cfg_reg;
4227 ap->ability_match = 0;
4228 ap->ability_match_count = 0;
4230 if (++ap->ability_match_count > 1) {
4231 ap->ability_match = 1;
4232 ap->ability_match_cfg = rx_cfg_reg;
4235 if (rx_cfg_reg & ANEG_CFG_ACK)
4243 ap->ability_match_cfg = 0;
4244 ap->ability_match_count = 0;
4245 ap->ability_match = 0;
4251 ap->rxconfig = rx_cfg_reg;
4254 switch (ap->state) {
4255 case ANEG_STATE_UNKNOWN:
4256 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4257 ap->state = ANEG_STATE_AN_ENABLE;
4260 case ANEG_STATE_AN_ENABLE:
4261 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4262 if (ap->flags & MR_AN_ENABLE) {
4265 ap->ability_match_cfg = 0;
4266 ap->ability_match_count = 0;
4267 ap->ability_match = 0;
4271 ap->state = ANEG_STATE_RESTART_INIT;
4273 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4277 case ANEG_STATE_RESTART_INIT:
4278 ap->link_time = ap->cur_time;
4279 ap->flags &= ~(MR_NP_LOADED);
4281 tw32(MAC_TX_AUTO_NEG, 0);
4282 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4283 tw32_f(MAC_MODE, tp->mac_mode);
4286 ret = ANEG_TIMER_ENAB;
4287 ap->state = ANEG_STATE_RESTART;
4290 case ANEG_STATE_RESTART:
4291 delta = ap->cur_time - ap->link_time;
4292 if (delta > ANEG_STATE_SETTLE_TIME)
4293 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4295 ret = ANEG_TIMER_ENAB;
4298 case ANEG_STATE_DISABLE_LINK_OK:
4302 case ANEG_STATE_ABILITY_DETECT_INIT:
4303 ap->flags &= ~(MR_TOGGLE_TX);
4304 ap->txconfig = ANEG_CFG_FD;
4305 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4306 if (flowctrl & ADVERTISE_1000XPAUSE)
4307 ap->txconfig |= ANEG_CFG_PS1;
4308 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4309 ap->txconfig |= ANEG_CFG_PS2;
4310 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4311 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4312 tw32_f(MAC_MODE, tp->mac_mode);
4315 ap->state = ANEG_STATE_ABILITY_DETECT;
4318 case ANEG_STATE_ABILITY_DETECT:
4319 if (ap->ability_match != 0 && ap->rxconfig != 0)
4320 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4323 case ANEG_STATE_ACK_DETECT_INIT:
4324 ap->txconfig |= ANEG_CFG_ACK;
4325 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4326 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4327 tw32_f(MAC_MODE, tp->mac_mode);
4330 ap->state = ANEG_STATE_ACK_DETECT;
4333 case ANEG_STATE_ACK_DETECT:
4334 if (ap->ack_match != 0) {
4335 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4336 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4337 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4339 ap->state = ANEG_STATE_AN_ENABLE;
4341 } else if (ap->ability_match != 0 &&
4342 ap->rxconfig == 0) {
4343 ap->state = ANEG_STATE_AN_ENABLE;
4347 case ANEG_STATE_COMPLETE_ACK_INIT:
4348 if (ap->rxconfig & ANEG_CFG_INVAL) {
4352 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4353 MR_LP_ADV_HALF_DUPLEX |
4354 MR_LP_ADV_SYM_PAUSE |
4355 MR_LP_ADV_ASYM_PAUSE |
4356 MR_LP_ADV_REMOTE_FAULT1 |
4357 MR_LP_ADV_REMOTE_FAULT2 |
4358 MR_LP_ADV_NEXT_PAGE |
4361 if (ap->rxconfig & ANEG_CFG_FD)
4362 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4363 if (ap->rxconfig & ANEG_CFG_HD)
4364 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4365 if (ap->rxconfig & ANEG_CFG_PS1)
4366 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4367 if (ap->rxconfig & ANEG_CFG_PS2)
4368 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4369 if (ap->rxconfig & ANEG_CFG_RF1)
4370 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4371 if (ap->rxconfig & ANEG_CFG_RF2)
4372 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4373 if (ap->rxconfig & ANEG_CFG_NP)
4374 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4376 ap->link_time = ap->cur_time;
4378 ap->flags ^= (MR_TOGGLE_TX);
4379 if (ap->rxconfig & 0x0008)
4380 ap->flags |= MR_TOGGLE_RX;
4381 if (ap->rxconfig & ANEG_CFG_NP)
4382 ap->flags |= MR_NP_RX;
4383 ap->flags |= MR_PAGE_RX;
4385 ap->state = ANEG_STATE_COMPLETE_ACK;
4386 ret = ANEG_TIMER_ENAB;
4389 case ANEG_STATE_COMPLETE_ACK:
4390 if (ap->ability_match != 0 &&
4391 ap->rxconfig == 0) {
4392 ap->state = ANEG_STATE_AN_ENABLE;
4395 delta = ap->cur_time - ap->link_time;
4396 if (delta > ANEG_STATE_SETTLE_TIME) {
4397 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4398 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4400 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4401 !(ap->flags & MR_NP_RX)) {
4402 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4410 case ANEG_STATE_IDLE_DETECT_INIT:
4411 ap->link_time = ap->cur_time;
4412 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4413 tw32_f(MAC_MODE, tp->mac_mode);
4416 ap->state = ANEG_STATE_IDLE_DETECT;
4417 ret = ANEG_TIMER_ENAB;
4420 case ANEG_STATE_IDLE_DETECT:
4421 if (ap->ability_match != 0 &&
4422 ap->rxconfig == 0) {
4423 ap->state = ANEG_STATE_AN_ENABLE;
4426 delta = ap->cur_time - ap->link_time;
4427 if (delta > ANEG_STATE_SETTLE_TIME) {
4428 /* XXX another gem from the Broadcom driver :( */
4429 ap->state = ANEG_STATE_LINK_OK;
4433 case ANEG_STATE_LINK_OK:
4434 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4438 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4439 /* ??? unimplemented */
4442 case ANEG_STATE_NEXT_PAGE_WAIT:
4443 /* ??? unimplemented */
4454 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4457 struct tg3_fiber_aneginfo aninfo;
4458 int status = ANEG_FAILED;
4462 tw32_f(MAC_TX_AUTO_NEG, 0);
4464 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4465 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4468 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4471 memset(&aninfo, 0, sizeof(aninfo));
4472 aninfo.flags |= MR_AN_ENABLE;
4473 aninfo.state = ANEG_STATE_UNKNOWN;
4474 aninfo.cur_time = 0;
4476 while (++tick < 195000) {
4477 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4478 if (status == ANEG_DONE || status == ANEG_FAILED)
4484 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4485 tw32_f(MAC_MODE, tp->mac_mode);
4488 *txflags = aninfo.txconfig;
4489 *rxflags = aninfo.flags;
4491 if (status == ANEG_DONE &&
4492 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4493 MR_LP_ADV_FULL_DUPLEX)))
4499 static void tg3_init_bcm8002(struct tg3 *tp)
4501 u32 mac_status = tr32(MAC_STATUS);
4504 /* Reset when initting first time or we have a link. */
4505 if (tg3_flag(tp, INIT_COMPLETE) &&
4506 !(mac_status & MAC_STATUS_PCS_SYNCED))
4509 /* Set PLL lock range. */
4510 tg3_writephy(tp, 0x16, 0x8007);
4513 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4515 /* Wait for reset to complete. */
4516 /* XXX schedule_timeout() ... */
4517 for (i = 0; i < 500; i++)
4520 /* Config mode; select PMA/Ch 1 regs. */
4521 tg3_writephy(tp, 0x10, 0x8411);
4523 /* Enable auto-lock and comdet, select txclk for tx. */
4524 tg3_writephy(tp, 0x11, 0x0a10);
4526 tg3_writephy(tp, 0x18, 0x00a0);
4527 tg3_writephy(tp, 0x16, 0x41ff);
4529 /* Assert and deassert POR. */
4530 tg3_writephy(tp, 0x13, 0x0400);
4532 tg3_writephy(tp, 0x13, 0x0000);
4534 tg3_writephy(tp, 0x11, 0x0a50);
4536 tg3_writephy(tp, 0x11, 0x0a10);
4538 /* Wait for signal to stabilize */
4539 /* XXX schedule_timeout() ... */
4540 for (i = 0; i < 15000; i++)
4543 /* Deselect the channel register so we can read the PHYID
4546 tg3_writephy(tp, 0x10, 0x8011);
4549 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4552 u32 sg_dig_ctrl, sg_dig_status;
4553 u32 serdes_cfg, expected_sg_dig_ctrl;
4554 int workaround, port_a;
4555 int current_link_up;
4558 expected_sg_dig_ctrl = 0;
4561 current_link_up = 0;
4563 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4564 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4566 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4569 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4570 /* preserve bits 20-23 for voltage regulator */
4571 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4574 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4576 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4577 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4579 u32 val = serdes_cfg;
4585 tw32_f(MAC_SERDES_CFG, val);
4588 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4590 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4591 tg3_setup_flow_control(tp, 0, 0);
4592 current_link_up = 1;
4597 /* Want auto-negotiation. */
4598 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4600 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4601 if (flowctrl & ADVERTISE_1000XPAUSE)
4602 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4603 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4604 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4606 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4607 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4608 tp->serdes_counter &&
4609 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4610 MAC_STATUS_RCVD_CFG)) ==
4611 MAC_STATUS_PCS_SYNCED)) {
4612 tp->serdes_counter--;
4613 current_link_up = 1;
4618 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4619 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4621 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4623 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4624 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4625 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4626 MAC_STATUS_SIGNAL_DET)) {
4627 sg_dig_status = tr32(SG_DIG_STATUS);
4628 mac_status = tr32(MAC_STATUS);
4630 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4631 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4632 u32 local_adv = 0, remote_adv = 0;
4634 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4635 local_adv |= ADVERTISE_1000XPAUSE;
4636 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4637 local_adv |= ADVERTISE_1000XPSE_ASYM;
4639 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4640 remote_adv |= LPA_1000XPAUSE;
4641 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4642 remote_adv |= LPA_1000XPAUSE_ASYM;
4644 tg3_setup_flow_control(tp, local_adv, remote_adv);
4645 current_link_up = 1;
4646 tp->serdes_counter = 0;
4647 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4648 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4649 if (tp->serdes_counter)
4650 tp->serdes_counter--;
4653 u32 val = serdes_cfg;
4660 tw32_f(MAC_SERDES_CFG, val);
4663 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4666 /* Link parallel detection - link is up */
4667 /* only if we have PCS_SYNC and not */
4668 /* receiving config code words */
4669 mac_status = tr32(MAC_STATUS);
4670 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4671 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4672 tg3_setup_flow_control(tp, 0, 0);
4673 current_link_up = 1;
4675 TG3_PHYFLG_PARALLEL_DETECT;
4676 tp->serdes_counter =
4677 SERDES_PARALLEL_DET_TIMEOUT;
4679 goto restart_autoneg;
4683 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4684 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4688 return current_link_up;
4691 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4693 int current_link_up = 0;
4695 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4698 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4699 u32 txflags, rxflags;
4702 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4703 u32 local_adv = 0, remote_adv = 0;
4705 if (txflags & ANEG_CFG_PS1)
4706 local_adv |= ADVERTISE_1000XPAUSE;
4707 if (txflags & ANEG_CFG_PS2)
4708 local_adv |= ADVERTISE_1000XPSE_ASYM;
4710 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4711 remote_adv |= LPA_1000XPAUSE;
4712 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4713 remote_adv |= LPA_1000XPAUSE_ASYM;
4715 tg3_setup_flow_control(tp, local_adv, remote_adv);
4717 current_link_up = 1;
4719 for (i = 0; i < 30; i++) {
4722 (MAC_STATUS_SYNC_CHANGED |
4723 MAC_STATUS_CFG_CHANGED));
4725 if ((tr32(MAC_STATUS) &
4726 (MAC_STATUS_SYNC_CHANGED |
4727 MAC_STATUS_CFG_CHANGED)) == 0)
4731 mac_status = tr32(MAC_STATUS);
4732 if (current_link_up == 0 &&
4733 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4734 !(mac_status & MAC_STATUS_RCVD_CFG))
4735 current_link_up = 1;
4737 tg3_setup_flow_control(tp, 0, 0);
4739 /* Forcing 1000FD link up. */
4740 current_link_up = 1;
4742 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4745 tw32_f(MAC_MODE, tp->mac_mode);
4750 return current_link_up;
4753 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4756 u16 orig_active_speed;
4757 u8 orig_active_duplex;
4759 int current_link_up;
4762 orig_pause_cfg = tp->link_config.active_flowctrl;
4763 orig_active_speed = tp->link_config.active_speed;
4764 orig_active_duplex = tp->link_config.active_duplex;
4766 if (!tg3_flag(tp, HW_AUTONEG) &&
4767 netif_carrier_ok(tp->dev) &&
4768 tg3_flag(tp, INIT_COMPLETE)) {
4769 mac_status = tr32(MAC_STATUS);
4770 mac_status &= (MAC_STATUS_PCS_SYNCED |
4771 MAC_STATUS_SIGNAL_DET |
4772 MAC_STATUS_CFG_CHANGED |
4773 MAC_STATUS_RCVD_CFG);
4774 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4775 MAC_STATUS_SIGNAL_DET)) {
4776 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4777 MAC_STATUS_CFG_CHANGED));
4782 tw32_f(MAC_TX_AUTO_NEG, 0);
4784 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4785 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4786 tw32_f(MAC_MODE, tp->mac_mode);
4789 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4790 tg3_init_bcm8002(tp);
4792 /* Enable link change event even when serdes polling. */
4793 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4796 current_link_up = 0;
4797 mac_status = tr32(MAC_STATUS);
4799 if (tg3_flag(tp, HW_AUTONEG))
4800 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4802 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4804 tp->napi[0].hw_status->status =
4805 (SD_STATUS_UPDATED |
4806 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4808 for (i = 0; i < 100; i++) {
4809 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4810 MAC_STATUS_CFG_CHANGED));
4812 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4813 MAC_STATUS_CFG_CHANGED |
4814 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4818 mac_status = tr32(MAC_STATUS);
4819 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4820 current_link_up = 0;
4821 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4822 tp->serdes_counter == 0) {
4823 tw32_f(MAC_MODE, (tp->mac_mode |
4824 MAC_MODE_SEND_CONFIGS));
4826 tw32_f(MAC_MODE, tp->mac_mode);
4830 if (current_link_up == 1) {
4831 tp->link_config.active_speed = SPEED_1000;
4832 tp->link_config.active_duplex = DUPLEX_FULL;
4833 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4834 LED_CTRL_LNKLED_OVERRIDE |
4835 LED_CTRL_1000MBPS_ON));
4837 tp->link_config.active_speed = SPEED_INVALID;
4838 tp->link_config.active_duplex = DUPLEX_INVALID;
4839 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4840 LED_CTRL_LNKLED_OVERRIDE |
4841 LED_CTRL_TRAFFIC_OVERRIDE));
4844 if (current_link_up != netif_carrier_ok(tp->dev)) {
4845 if (current_link_up)
4846 netif_carrier_on(tp->dev);
4848 netif_carrier_off(tp->dev);
4849 tg3_link_report(tp);
4851 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4852 if (orig_pause_cfg != now_pause_cfg ||
4853 orig_active_speed != tp->link_config.active_speed ||
4854 orig_active_duplex != tp->link_config.active_duplex)
4855 tg3_link_report(tp);
4861 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4863 int current_link_up, err = 0;
4867 u32 local_adv, remote_adv;
4869 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4870 tw32_f(MAC_MODE, tp->mac_mode);
4876 (MAC_STATUS_SYNC_CHANGED |
4877 MAC_STATUS_CFG_CHANGED |
4878 MAC_STATUS_MI_COMPLETION |
4879 MAC_STATUS_LNKSTATE_CHANGED));
4885 current_link_up = 0;
4886 current_speed = SPEED_INVALID;
4887 current_duplex = DUPLEX_INVALID;
4889 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4890 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4892 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4893 bmsr |= BMSR_LSTATUS;
4895 bmsr &= ~BMSR_LSTATUS;
4898 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4900 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4901 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4902 /* do nothing, just check for link up at the end */
4903 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4906 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4907 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4908 ADVERTISE_1000XPAUSE |
4909 ADVERTISE_1000XPSE_ASYM |
4912 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4914 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4915 new_adv |= ADVERTISE_1000XHALF;
4916 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4917 new_adv |= ADVERTISE_1000XFULL;
4919 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4920 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4921 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4922 tg3_writephy(tp, MII_BMCR, bmcr);
4924 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4925 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4926 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4933 bmcr &= ~BMCR_SPEED1000;
4934 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4936 if (tp->link_config.duplex == DUPLEX_FULL)
4937 new_bmcr |= BMCR_FULLDPLX;
4939 if (new_bmcr != bmcr) {
4940 /* BMCR_SPEED1000 is a reserved bit that needs
4941 * to be set on write.
4943 new_bmcr |= BMCR_SPEED1000;
4945 /* Force a linkdown */
4946 if (netif_carrier_ok(tp->dev)) {
4949 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4950 adv &= ~(ADVERTISE_1000XFULL |
4951 ADVERTISE_1000XHALF |
4953 tg3_writephy(tp, MII_ADVERTISE, adv);
4954 tg3_writephy(tp, MII_BMCR, bmcr |
4958 netif_carrier_off(tp->dev);
4960 tg3_writephy(tp, MII_BMCR, new_bmcr);
4962 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4963 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4964 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4966 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4967 bmsr |= BMSR_LSTATUS;
4969 bmsr &= ~BMSR_LSTATUS;
4971 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4975 if (bmsr & BMSR_LSTATUS) {
4976 current_speed = SPEED_1000;
4977 current_link_up = 1;
4978 if (bmcr & BMCR_FULLDPLX)
4979 current_duplex = DUPLEX_FULL;
4981 current_duplex = DUPLEX_HALF;
4986 if (bmcr & BMCR_ANENABLE) {
4989 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4990 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4991 common = local_adv & remote_adv;
4992 if (common & (ADVERTISE_1000XHALF |
4993 ADVERTISE_1000XFULL)) {
4994 if (common & ADVERTISE_1000XFULL)
4995 current_duplex = DUPLEX_FULL;
4997 current_duplex = DUPLEX_HALF;
4998 } else if (!tg3_flag(tp, 5780_CLASS)) {
4999 /* Link is up via parallel detect */
5001 current_link_up = 0;
5006 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5007 tg3_setup_flow_control(tp, local_adv, remote_adv);
5009 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010 if (tp->link_config.active_duplex == DUPLEX_HALF)
5011 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5013 tw32_f(MAC_MODE, tp->mac_mode);
5016 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5018 tp->link_config.active_speed = current_speed;
5019 tp->link_config.active_duplex = current_duplex;
5021 if (current_link_up != netif_carrier_ok(tp->dev)) {
5022 if (current_link_up)
5023 netif_carrier_on(tp->dev);
5025 netif_carrier_off(tp->dev);
5026 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5028 tg3_link_report(tp);
5033 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5035 if (tp->serdes_counter) {
5036 /* Give autoneg time to complete. */
5037 tp->serdes_counter--;
5041 if (!netif_carrier_ok(tp->dev) &&
5042 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5045 tg3_readphy(tp, MII_BMCR, &bmcr);
5046 if (bmcr & BMCR_ANENABLE) {
5049 /* Select shadow register 0x1f */
5050 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5051 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5053 /* Select expansion interrupt status register */
5054 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5055 MII_TG3_DSP_EXP1_INT_STAT);
5056 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5057 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5059 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5060 /* We have signal detect and not receiving
5061 * config code words, link is up by parallel
5065 bmcr &= ~BMCR_ANENABLE;
5066 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5067 tg3_writephy(tp, MII_BMCR, bmcr);
5068 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5071 } else if (netif_carrier_ok(tp->dev) &&
5072 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5073 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5076 /* Select expansion interrupt status register */
5077 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5078 MII_TG3_DSP_EXP1_INT_STAT);
5079 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5083 /* Config code words received, turn on autoneg. */
5084 tg3_readphy(tp, MII_BMCR, &bmcr);
5085 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5087 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5093 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5098 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5099 err = tg3_setup_fiber_phy(tp, force_reset);
5100 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5101 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5103 err = tg3_setup_copper_phy(tp, force_reset);
5105 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5108 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5109 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5111 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5116 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5117 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5118 tw32(GRC_MISC_CFG, val);
5121 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5122 (6 << TX_LENGTHS_IPG_SHIFT);
5123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5124 val |= tr32(MAC_TX_LENGTHS) &
5125 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5126 TX_LENGTHS_CNT_DWN_VAL_MSK);
5128 if (tp->link_config.active_speed == SPEED_1000 &&
5129 tp->link_config.active_duplex == DUPLEX_HALF)
5130 tw32(MAC_TX_LENGTHS, val |
5131 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5133 tw32(MAC_TX_LENGTHS, val |
5134 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5136 if (!tg3_flag(tp, 5705_PLUS)) {
5137 if (netif_carrier_ok(tp->dev)) {
5138 tw32(HOSTCC_STAT_COAL_TICKS,
5139 tp->coal.stats_block_coalesce_usecs);
5141 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5145 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5146 val = tr32(PCIE_PWR_MGMT_THRESH);
5147 if (!netif_carrier_ok(tp->dev))
5148 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5151 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5152 tw32(PCIE_PWR_MGMT_THRESH, val);
5158 static inline int tg3_irq_sync(struct tg3 *tp)
5160 return tp->irq_sync;
5163 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5167 dst = (u32 *)((u8 *)dst + off);
5168 for (i = 0; i < len; i += sizeof(u32))
5169 *dst++ = tr32(off + i);
5172 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5174 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5175 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5176 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5177 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5178 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5179 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5180 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5181 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5182 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5183 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5184 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5185 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5186 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5187 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5188 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5189 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5190 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5191 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5192 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5194 if (tg3_flag(tp, SUPPORT_MSIX))
5195 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5197 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5198 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5199 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5200 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5201 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5202 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5203 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5204 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5206 if (!tg3_flag(tp, 5705_PLUS)) {
5207 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5208 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5209 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5212 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5213 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5214 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5215 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5216 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5218 if (tg3_flag(tp, NVRAM))
5219 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5222 static void tg3_dump_state(struct tg3 *tp)
5227 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5229 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5233 if (tg3_flag(tp, PCI_EXPRESS)) {
5234 /* Read up to but not including private PCI registers */
5235 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5236 regs[i / sizeof(u32)] = tr32(i);
5238 tg3_dump_legacy_regs(tp, regs);
5240 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5241 if (!regs[i + 0] && !regs[i + 1] &&
5242 !regs[i + 2] && !regs[i + 3])
5245 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5247 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5252 for (i = 0; i < tp->irq_cnt; i++) {
5253 struct tg3_napi *tnapi = &tp->napi[i];
5255 /* SW status block */
5257 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5259 tnapi->hw_status->status,
5260 tnapi->hw_status->status_tag,
5261 tnapi->hw_status->rx_jumbo_consumer,
5262 tnapi->hw_status->rx_consumer,
5263 tnapi->hw_status->rx_mini_consumer,
5264 tnapi->hw_status->idx[0].rx_producer,
5265 tnapi->hw_status->idx[0].tx_consumer);
5268 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5270 tnapi->last_tag, tnapi->last_irq_tag,
5271 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5273 tnapi->prodring.rx_std_prod_idx,
5274 tnapi->prodring.rx_std_cons_idx,
5275 tnapi->prodring.rx_jmb_prod_idx,
5276 tnapi->prodring.rx_jmb_cons_idx);
5280 /* This is called whenever we suspect that the system chipset is re-
5281 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5282 * is bogus tx completions. We try to recover by setting the
5283 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5286 static void tg3_tx_recover(struct tg3 *tp)
5288 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5289 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5291 netdev_warn(tp->dev,
5292 "The system may be re-ordering memory-mapped I/O "
5293 "cycles to the network device, attempting to recover. "
5294 "Please report the problem to the driver maintainer "
5295 "and include system chipset information.\n");
5297 spin_lock(&tp->lock);
5298 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5299 spin_unlock(&tp->lock);
5302 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5304 /* Tell compiler to fetch tx indices from memory. */
5306 return tnapi->tx_pending -
5307 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5310 /* Tigon3 never reports partial packet sends. So we do not
5311 * need special logic to handle SKBs that have not had all
5312 * of their frags sent yet, like SunGEM does.
5314 static void tg3_tx(struct tg3_napi *tnapi)
5316 struct tg3 *tp = tnapi->tp;
5317 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5318 u32 sw_idx = tnapi->tx_cons;
5319 struct netdev_queue *txq;
5320 int index = tnapi - tp->napi;
5322 if (tg3_flag(tp, ENABLE_TSS))
5325 txq = netdev_get_tx_queue(tp->dev, index);
5327 while (sw_idx != hw_idx) {
5328 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5329 struct sk_buff *skb = ri->skb;
5332 if (unlikely(skb == NULL)) {
5337 pci_unmap_single(tp->pdev,
5338 dma_unmap_addr(ri, mapping),
5344 while (ri->fragmented) {
5345 ri->fragmented = false;
5346 sw_idx = NEXT_TX(sw_idx);
5347 ri = &tnapi->tx_buffers[sw_idx];
5350 sw_idx = NEXT_TX(sw_idx);
5352 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5353 ri = &tnapi->tx_buffers[sw_idx];
5354 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5357 pci_unmap_page(tp->pdev,
5358 dma_unmap_addr(ri, mapping),
5359 skb_shinfo(skb)->frags[i].size,
5362 while (ri->fragmented) {
5363 ri->fragmented = false;
5364 sw_idx = NEXT_TX(sw_idx);
5365 ri = &tnapi->tx_buffers[sw_idx];
5368 sw_idx = NEXT_TX(sw_idx);
5373 if (unlikely(tx_bug)) {
5379 tnapi->tx_cons = sw_idx;
5381 /* Need to make the tx_cons update visible to tg3_start_xmit()
5382 * before checking for netif_queue_stopped(). Without the
5383 * memory barrier, there is a small possibility that tg3_start_xmit()
5384 * will miss it and cause the queue to be stopped forever.
5388 if (unlikely(netif_tx_queue_stopped(txq) &&
5389 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5390 __netif_tx_lock(txq, smp_processor_id());
5391 if (netif_tx_queue_stopped(txq) &&
5392 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5393 netif_tx_wake_queue(txq);
5394 __netif_tx_unlock(txq);
5398 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5403 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5404 map_sz, PCI_DMA_FROMDEVICE);
5405 dev_kfree_skb_any(ri->skb);
5409 /* Returns size of skb allocated or < 0 on error.
5411 * We only need to fill in the address because the other members
5412 * of the RX descriptor are invariant, see tg3_init_rings.
5414 * Note the purposeful assymetry of cpu vs. chip accesses. For
5415 * posting buffers we only dirty the first cache line of the RX
5416 * descriptor (containing the address). Whereas for the RX status
5417 * buffers the cpu only reads the last cacheline of the RX descriptor
5418 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5420 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5421 u32 opaque_key, u32 dest_idx_unmasked)
5423 struct tg3_rx_buffer_desc *desc;
5424 struct ring_info *map;
5425 struct sk_buff *skb;
5427 int skb_size, dest_idx;
5429 switch (opaque_key) {
5430 case RXD_OPAQUE_RING_STD:
5431 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5432 desc = &tpr->rx_std[dest_idx];
5433 map = &tpr->rx_std_buffers[dest_idx];
5434 skb_size = tp->rx_pkt_map_sz;
5437 case RXD_OPAQUE_RING_JUMBO:
5438 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5439 desc = &tpr->rx_jmb[dest_idx].std;
5440 map = &tpr->rx_jmb_buffers[dest_idx];
5441 skb_size = TG3_RX_JMB_MAP_SZ;
5448 /* Do not overwrite any of the map or rp information
5449 * until we are sure we can commit to a new buffer.
5451 * Callers depend upon this behavior and assume that
5452 * we leave everything unchanged if we fail.
5454 skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5458 skb_reserve(skb, TG3_RX_OFFSET(tp));
5460 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5461 PCI_DMA_FROMDEVICE);
5462 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5468 dma_unmap_addr_set(map, mapping, mapping);
5470 desc->addr_hi = ((u64)mapping >> 32);
5471 desc->addr_lo = ((u64)mapping & 0xffffffff);
5476 /* We only need to move over in the address because the other
5477 * members of the RX descriptor are invariant. See notes above
5478 * tg3_alloc_rx_skb for full details.
5480 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5481 struct tg3_rx_prodring_set *dpr,
5482 u32 opaque_key, int src_idx,
5483 u32 dest_idx_unmasked)
5485 struct tg3 *tp = tnapi->tp;
5486 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5487 struct ring_info *src_map, *dest_map;
5488 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5491 switch (opaque_key) {
5492 case RXD_OPAQUE_RING_STD:
5493 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5494 dest_desc = &dpr->rx_std[dest_idx];
5495 dest_map = &dpr->rx_std_buffers[dest_idx];
5496 src_desc = &spr->rx_std[src_idx];
5497 src_map = &spr->rx_std_buffers[src_idx];
5500 case RXD_OPAQUE_RING_JUMBO:
5501 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5502 dest_desc = &dpr->rx_jmb[dest_idx].std;
5503 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5504 src_desc = &spr->rx_jmb[src_idx].std;
5505 src_map = &spr->rx_jmb_buffers[src_idx];
5512 dest_map->skb = src_map->skb;
5513 dma_unmap_addr_set(dest_map, mapping,
5514 dma_unmap_addr(src_map, mapping));
5515 dest_desc->addr_hi = src_desc->addr_hi;
5516 dest_desc->addr_lo = src_desc->addr_lo;
5518 /* Ensure that the update to the skb happens after the physical
5519 * addresses have been transferred to the new BD location.
5523 src_map->skb = NULL;
5526 /* The RX ring scheme is composed of multiple rings which post fresh
5527 * buffers to the chip, and one special ring the chip uses to report
5528 * status back to the host.
5530 * The special ring reports the status of received packets to the
5531 * host. The chip does not write into the original descriptor the
5532 * RX buffer was obtained from. The chip simply takes the original
5533 * descriptor as provided by the host, updates the status and length
5534 * field, then writes this into the next status ring entry.
5536 * Each ring the host uses to post buffers to the chip is described
5537 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5538 * it is first placed into the on-chip ram. When the packet's length
5539 * is known, it walks down the TG3_BDINFO entries to select the ring.
5540 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5541 * which is within the range of the new packet's length is chosen.
5543 * The "separate ring for rx status" scheme may sound queer, but it makes
5544 * sense from a cache coherency perspective. If only the host writes
5545 * to the buffer post rings, and only the chip writes to the rx status
5546 * rings, then cache lines never move beyond shared-modified state.
5547 * If both the host and chip were to write into the same ring, cache line
5548 * eviction could occur since both entities want it in an exclusive state.
5550 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5552 struct tg3 *tp = tnapi->tp;
5553 u32 work_mask, rx_std_posted = 0;
5554 u32 std_prod_idx, jmb_prod_idx;
5555 u32 sw_idx = tnapi->rx_rcb_ptr;
5558 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5560 hw_idx = *(tnapi->rx_rcb_prod_idx);
5562 * We need to order the read of hw_idx and the read of
5563 * the opaque cookie.
5568 std_prod_idx = tpr->rx_std_prod_idx;
5569 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5570 while (sw_idx != hw_idx && budget > 0) {
5571 struct ring_info *ri;
5572 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5574 struct sk_buff *skb;
5575 dma_addr_t dma_addr;
5576 u32 opaque_key, desc_idx, *post_ptr;
5578 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5579 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5580 if (opaque_key == RXD_OPAQUE_RING_STD) {
5581 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5582 dma_addr = dma_unmap_addr(ri, mapping);
5584 post_ptr = &std_prod_idx;
5586 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5587 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5588 dma_addr = dma_unmap_addr(ri, mapping);
5590 post_ptr = &jmb_prod_idx;
5592 goto next_pkt_nopost;
5594 work_mask |= opaque_key;
5596 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5597 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5599 tg3_recycle_rx(tnapi, tpr, opaque_key,
5600 desc_idx, *post_ptr);
5602 /* Other statistics kept track of by card. */
5607 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5610 if (len > TG3_RX_COPY_THRESH(tp)) {
5613 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5618 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5619 PCI_DMA_FROMDEVICE);
5621 /* Ensure that the update to the skb happens
5622 * after the usage of the old DMA mapping.
5630 struct sk_buff *copy_skb;
5632 tg3_recycle_rx(tnapi, tpr, opaque_key,
5633 desc_idx, *post_ptr);
5635 copy_skb = netdev_alloc_skb(tp->dev, len +
5637 if (copy_skb == NULL)
5638 goto drop_it_no_recycle;
5640 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5641 skb_put(copy_skb, len);
5642 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5643 skb_copy_from_linear_data(skb, copy_skb->data, len);
5644 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5646 /* We'll reuse the original ring buffer. */
5650 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5651 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5652 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5653 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5654 skb->ip_summed = CHECKSUM_UNNECESSARY;
5656 skb_checksum_none_assert(skb);
5658 skb->protocol = eth_type_trans(skb, tp->dev);
5660 if (len > (tp->dev->mtu + ETH_HLEN) &&
5661 skb->protocol != htons(ETH_P_8021Q)) {
5663 goto drop_it_no_recycle;
5666 if (desc->type_flags & RXD_FLAG_VLAN &&
5667 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5668 __vlan_hwaccel_put_tag(skb,
5669 desc->err_vlan & RXD_VLAN_MASK);
5671 napi_gro_receive(&tnapi->napi, skb);
5679 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5680 tpr->rx_std_prod_idx = std_prod_idx &
5681 tp->rx_std_ring_mask;
5682 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5683 tpr->rx_std_prod_idx);
5684 work_mask &= ~RXD_OPAQUE_RING_STD;
5689 sw_idx &= tp->rx_ret_ring_mask;
5691 /* Refresh hw_idx to see if there is new work */
5692 if (sw_idx == hw_idx) {
5693 hw_idx = *(tnapi->rx_rcb_prod_idx);
5698 /* ACK the status ring. */
5699 tnapi->rx_rcb_ptr = sw_idx;
5700 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5702 /* Refill RX ring(s). */
5703 if (!tg3_flag(tp, ENABLE_RSS)) {
5704 if (work_mask & RXD_OPAQUE_RING_STD) {
5705 tpr->rx_std_prod_idx = std_prod_idx &
5706 tp->rx_std_ring_mask;
5707 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5708 tpr->rx_std_prod_idx);
5710 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5711 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5712 tp->rx_jmb_ring_mask;
5713 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5714 tpr->rx_jmb_prod_idx);
5717 } else if (work_mask) {
5718 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5719 * updated before the producer indices can be updated.
5723 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5724 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5726 if (tnapi != &tp->napi[1])
5727 napi_schedule(&tp->napi[1].napi);
5733 static void tg3_poll_link(struct tg3 *tp)
5735 /* handle link change and other phy events */
5736 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5737 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5739 if (sblk->status & SD_STATUS_LINK_CHG) {
5740 sblk->status = SD_STATUS_UPDATED |
5741 (sblk->status & ~SD_STATUS_LINK_CHG);
5742 spin_lock(&tp->lock);
5743 if (tg3_flag(tp, USE_PHYLIB)) {
5745 (MAC_STATUS_SYNC_CHANGED |
5746 MAC_STATUS_CFG_CHANGED |
5747 MAC_STATUS_MI_COMPLETION |
5748 MAC_STATUS_LNKSTATE_CHANGED));
5751 tg3_setup_phy(tp, 0);
5752 spin_unlock(&tp->lock);
5757 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5758 struct tg3_rx_prodring_set *dpr,
5759 struct tg3_rx_prodring_set *spr)
5761 u32 si, di, cpycnt, src_prod_idx;
5765 src_prod_idx = spr->rx_std_prod_idx;
5767 /* Make sure updates to the rx_std_buffers[] entries and the
5768 * standard producer index are seen in the correct order.
5772 if (spr->rx_std_cons_idx == src_prod_idx)
5775 if (spr->rx_std_cons_idx < src_prod_idx)
5776 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5778 cpycnt = tp->rx_std_ring_mask + 1 -
5779 spr->rx_std_cons_idx;
5781 cpycnt = min(cpycnt,
5782 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5784 si = spr->rx_std_cons_idx;
5785 di = dpr->rx_std_prod_idx;
5787 for (i = di; i < di + cpycnt; i++) {
5788 if (dpr->rx_std_buffers[i].skb) {
5798 /* Ensure that updates to the rx_std_buffers ring and the
5799 * shadowed hardware producer ring from tg3_recycle_skb() are
5800 * ordered correctly WRT the skb check above.
5804 memcpy(&dpr->rx_std_buffers[di],
5805 &spr->rx_std_buffers[si],
5806 cpycnt * sizeof(struct ring_info));
5808 for (i = 0; i < cpycnt; i++, di++, si++) {
5809 struct tg3_rx_buffer_desc *sbd, *dbd;
5810 sbd = &spr->rx_std[si];
5811 dbd = &dpr->rx_std[di];
5812 dbd->addr_hi = sbd->addr_hi;
5813 dbd->addr_lo = sbd->addr_lo;
5816 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5817 tp->rx_std_ring_mask;
5818 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5819 tp->rx_std_ring_mask;
5823 src_prod_idx = spr->rx_jmb_prod_idx;
5825 /* Make sure updates to the rx_jmb_buffers[] entries and
5826 * the jumbo producer index are seen in the correct order.
5830 if (spr->rx_jmb_cons_idx == src_prod_idx)
5833 if (spr->rx_jmb_cons_idx < src_prod_idx)
5834 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5836 cpycnt = tp->rx_jmb_ring_mask + 1 -
5837 spr->rx_jmb_cons_idx;
5839 cpycnt = min(cpycnt,
5840 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5842 si = spr->rx_jmb_cons_idx;
5843 di = dpr->rx_jmb_prod_idx;
5845 for (i = di; i < di + cpycnt; i++) {
5846 if (dpr->rx_jmb_buffers[i].skb) {
5856 /* Ensure that updates to the rx_jmb_buffers ring and the
5857 * shadowed hardware producer ring from tg3_recycle_skb() are
5858 * ordered correctly WRT the skb check above.
5862 memcpy(&dpr->rx_jmb_buffers[di],
5863 &spr->rx_jmb_buffers[si],
5864 cpycnt * sizeof(struct ring_info));
5866 for (i = 0; i < cpycnt; i++, di++, si++) {
5867 struct tg3_rx_buffer_desc *sbd, *dbd;
5868 sbd = &spr->rx_jmb[si].std;
5869 dbd = &dpr->rx_jmb[di].std;
5870 dbd->addr_hi = sbd->addr_hi;
5871 dbd->addr_lo = sbd->addr_lo;
5874 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5875 tp->rx_jmb_ring_mask;
5876 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5877 tp->rx_jmb_ring_mask;
5883 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5885 struct tg3 *tp = tnapi->tp;
5887 /* run TX completion thread */
5888 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5890 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5894 /* run RX thread, within the bounds set by NAPI.
5895 * All RX "locking" is done by ensuring outside
5896 * code synchronizes with tg3->napi.poll()
5898 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5899 work_done += tg3_rx(tnapi, budget - work_done);
5901 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5902 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5904 u32 std_prod_idx = dpr->rx_std_prod_idx;
5905 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5907 for (i = 1; i < tp->irq_cnt; i++)
5908 err |= tg3_rx_prodring_xfer(tp, dpr,
5909 &tp->napi[i].prodring);
5913 if (std_prod_idx != dpr->rx_std_prod_idx)
5914 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5915 dpr->rx_std_prod_idx);
5917 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5918 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5919 dpr->rx_jmb_prod_idx);
5924 tw32_f(HOSTCC_MODE, tp->coal_now);
5930 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5932 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5933 struct tg3 *tp = tnapi->tp;
5935 struct tg3_hw_status *sblk = tnapi->hw_status;
5938 work_done = tg3_poll_work(tnapi, work_done, budget);
5940 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5943 if (unlikely(work_done >= budget))
5946 /* tp->last_tag is used in tg3_int_reenable() below
5947 * to tell the hw how much work has been processed,
5948 * so we must read it before checking for more work.
5950 tnapi->last_tag = sblk->status_tag;
5951 tnapi->last_irq_tag = tnapi->last_tag;
5954 /* check for RX/TX work to do */
5955 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5956 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5957 napi_complete(napi);
5958 /* Reenable interrupts. */
5959 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5968 /* work_done is guaranteed to be less than budget. */
5969 napi_complete(napi);
5970 schedule_work(&tp->reset_task);
5974 static void tg3_process_error(struct tg3 *tp)
5977 bool real_error = false;
5979 if (tg3_flag(tp, ERROR_PROCESSED))
5982 /* Check Flow Attention register */
5983 val = tr32(HOSTCC_FLOW_ATTN);
5984 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5985 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5989 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5990 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5994 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5995 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6004 tg3_flag_set(tp, ERROR_PROCESSED);
6005 schedule_work(&tp->reset_task);
6008 static int tg3_poll(struct napi_struct *napi, int budget)
6010 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6011 struct tg3 *tp = tnapi->tp;
6013 struct tg3_hw_status *sblk = tnapi->hw_status;
6016 if (sblk->status & SD_STATUS_ERROR)
6017 tg3_process_error(tp);
6021 work_done = tg3_poll_work(tnapi, work_done, budget);
6023 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6026 if (unlikely(work_done >= budget))
6029 if (tg3_flag(tp, TAGGED_STATUS)) {
6030 /* tp->last_tag is used in tg3_int_reenable() below
6031 * to tell the hw how much work has been processed,
6032 * so we must read it before checking for more work.
6034 tnapi->last_tag = sblk->status_tag;
6035 tnapi->last_irq_tag = tnapi->last_tag;
6038 sblk->status &= ~SD_STATUS_UPDATED;
6040 if (likely(!tg3_has_work(tnapi))) {
6041 napi_complete(napi);
6042 tg3_int_reenable(tnapi);
6050 /* work_done is guaranteed to be less than budget. */
6051 napi_complete(napi);
6052 schedule_work(&tp->reset_task);
6056 static void tg3_napi_disable(struct tg3 *tp)
6060 for (i = tp->irq_cnt - 1; i >= 0; i--)
6061 napi_disable(&tp->napi[i].napi);
6064 static void tg3_napi_enable(struct tg3 *tp)
6068 for (i = 0; i < tp->irq_cnt; i++)
6069 napi_enable(&tp->napi[i].napi);
6072 static void tg3_napi_init(struct tg3 *tp)
6076 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6077 for (i = 1; i < tp->irq_cnt; i++)
6078 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6081 static void tg3_napi_fini(struct tg3 *tp)
6085 for (i = 0; i < tp->irq_cnt; i++)
6086 netif_napi_del(&tp->napi[i].napi);
6089 static inline void tg3_netif_stop(struct tg3 *tp)
6091 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6092 tg3_napi_disable(tp);
6093 netif_tx_disable(tp->dev);
6096 static inline void tg3_netif_start(struct tg3 *tp)
6098 /* NOTE: unconditional netif_tx_wake_all_queues is only
6099 * appropriate so long as all callers are assured to
6100 * have free tx slots (such as after tg3_init_hw)
6102 netif_tx_wake_all_queues(tp->dev);
6104 tg3_napi_enable(tp);
6105 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6106 tg3_enable_ints(tp);
6109 static void tg3_irq_quiesce(struct tg3 *tp)
6113 BUG_ON(tp->irq_sync);
6118 for (i = 0; i < tp->irq_cnt; i++)
6119 synchronize_irq(tp->napi[i].irq_vec);
6122 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6123 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6124 * with as well. Most of the time, this is not necessary except when
6125 * shutting down the device.
6127 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6129 spin_lock_bh(&tp->lock);
6131 tg3_irq_quiesce(tp);
6134 static inline void tg3_full_unlock(struct tg3 *tp)
6136 spin_unlock_bh(&tp->lock);
6139 /* One-shot MSI handler - Chip automatically disables interrupt
6140 * after sending MSI so driver doesn't have to do it.
6142 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6144 struct tg3_napi *tnapi = dev_id;
6145 struct tg3 *tp = tnapi->tp;
6147 prefetch(tnapi->hw_status);
6149 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6151 if (likely(!tg3_irq_sync(tp)))
6152 napi_schedule(&tnapi->napi);
6157 /* MSI ISR - No need to check for interrupt sharing and no need to
6158 * flush status block and interrupt mailbox. PCI ordering rules
6159 * guarantee that MSI will arrive after the status block.
6161 static irqreturn_t tg3_msi(int irq, void *dev_id)
6163 struct tg3_napi *tnapi = dev_id;
6164 struct tg3 *tp = tnapi->tp;
6166 prefetch(tnapi->hw_status);
6168 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6170 * Writing any value to intr-mbox-0 clears PCI INTA# and
6171 * chip-internal interrupt pending events.
6172 * Writing non-zero to intr-mbox-0 additional tells the
6173 * NIC to stop sending us irqs, engaging "in-intr-handler"
6176 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6177 if (likely(!tg3_irq_sync(tp)))
6178 napi_schedule(&tnapi->napi);
6180 return IRQ_RETVAL(1);
6183 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6185 struct tg3_napi *tnapi = dev_id;
6186 struct tg3 *tp = tnapi->tp;
6187 struct tg3_hw_status *sblk = tnapi->hw_status;
6188 unsigned int handled = 1;
6190 /* In INTx mode, it is possible for the interrupt to arrive at
6191 * the CPU before the status block posted prior to the interrupt.
6192 * Reading the PCI State register will confirm whether the
6193 * interrupt is ours and will flush the status block.
6195 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6196 if (tg3_flag(tp, CHIP_RESETTING) ||
6197 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6204 * Writing any value to intr-mbox-0 clears PCI INTA# and
6205 * chip-internal interrupt pending events.
6206 * Writing non-zero to intr-mbox-0 additional tells the
6207 * NIC to stop sending us irqs, engaging "in-intr-handler"
6210 * Flush the mailbox to de-assert the IRQ immediately to prevent
6211 * spurious interrupts. The flush impacts performance but
6212 * excessive spurious interrupts can be worse in some cases.
6214 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6215 if (tg3_irq_sync(tp))
6217 sblk->status &= ~SD_STATUS_UPDATED;
6218 if (likely(tg3_has_work(tnapi))) {
6219 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6220 napi_schedule(&tnapi->napi);
6222 /* No work, shared interrupt perhaps? re-enable
6223 * interrupts, and flush that PCI write
6225 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6229 return IRQ_RETVAL(handled);
6232 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6234 struct tg3_napi *tnapi = dev_id;
6235 struct tg3 *tp = tnapi->tp;
6236 struct tg3_hw_status *sblk = tnapi->hw_status;
6237 unsigned int handled = 1;
6239 /* In INTx mode, it is possible for the interrupt to arrive at
6240 * the CPU before the status block posted prior to the interrupt.
6241 * Reading the PCI State register will confirm whether the
6242 * interrupt is ours and will flush the status block.
6244 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6245 if (tg3_flag(tp, CHIP_RESETTING) ||
6246 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6253 * writing any value to intr-mbox-0 clears PCI INTA# and
6254 * chip-internal interrupt pending events.
6255 * writing non-zero to intr-mbox-0 additional tells the
6256 * NIC to stop sending us irqs, engaging "in-intr-handler"
6259 * Flush the mailbox to de-assert the IRQ immediately to prevent
6260 * spurious interrupts. The flush impacts performance but
6261 * excessive spurious interrupts can be worse in some cases.
6263 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6266 * In a shared interrupt configuration, sometimes other devices'
6267 * interrupts will scream. We record the current status tag here
6268 * so that the above check can report that the screaming interrupts
6269 * are unhandled. Eventually they will be silenced.
6271 tnapi->last_irq_tag = sblk->status_tag;
6273 if (tg3_irq_sync(tp))
6276 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6278 napi_schedule(&tnapi->napi);
6281 return IRQ_RETVAL(handled);
6284 /* ISR for interrupt test */
6285 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6287 struct tg3_napi *tnapi = dev_id;
6288 struct tg3 *tp = tnapi->tp;
6289 struct tg3_hw_status *sblk = tnapi->hw_status;
6291 if ((sblk->status & SD_STATUS_UPDATED) ||
6292 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6293 tg3_disable_ints(tp);
6294 return IRQ_RETVAL(1);
6296 return IRQ_RETVAL(0);
6299 static int tg3_init_hw(struct tg3 *, int);
6300 static int tg3_halt(struct tg3 *, int, int);
6302 /* Restart hardware after configuration changes, self-test, etc.
6303 * Invoked with tp->lock held.
6305 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6306 __releases(tp->lock)
6307 __acquires(tp->lock)
6311 err = tg3_init_hw(tp, reset_phy);
6314 "Failed to re-initialize device, aborting\n");
6315 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6316 tg3_full_unlock(tp);
6317 del_timer_sync(&tp->timer);
6319 tg3_napi_enable(tp);
6321 tg3_full_lock(tp, 0);
6326 #ifdef CONFIG_NET_POLL_CONTROLLER
6327 static void tg3_poll_controller(struct net_device *dev)
6330 struct tg3 *tp = netdev_priv(dev);
6332 for (i = 0; i < tp->irq_cnt; i++)
6333 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6337 static void tg3_reset_task(struct work_struct *work)
6339 struct tg3 *tp = container_of(work, struct tg3, reset_task);
6341 unsigned int restart_timer;
6343 tg3_full_lock(tp, 0);
6345 if (!netif_running(tp->dev)) {
6346 tg3_full_unlock(tp);
6350 tg3_full_unlock(tp);
6356 tg3_full_lock(tp, 1);
6358 restart_timer = tg3_flag(tp, RESTART_TIMER);
6359 tg3_flag_clear(tp, RESTART_TIMER);
6361 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6362 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6363 tp->write32_rx_mbox = tg3_write_flush_reg32;
6364 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6365 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6368 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6369 err = tg3_init_hw(tp, 1);
6373 tg3_netif_start(tp);
6376 mod_timer(&tp->timer, jiffies + 1);
6379 tg3_full_unlock(tp);
6385 static void tg3_tx_timeout(struct net_device *dev)
6387 struct tg3 *tp = netdev_priv(dev);
6389 if (netif_msg_tx_err(tp)) {
6390 netdev_err(dev, "transmit timed out, resetting\n");
6394 schedule_work(&tp->reset_task);
6397 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6398 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6400 u32 base = (u32) mapping & 0xffffffff;
6402 return (base > 0xffffdcc0) && (base + len + 8 < base);
6405 /* Test for DMA addresses > 40-bit */
6406 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6409 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6410 if (tg3_flag(tp, 40BIT_DMA_BUG))
6411 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6418 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6419 dma_addr_t mapping, u32 len, u32 flags,
6422 txbd->addr_hi = ((u64) mapping >> 32);
6423 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6424 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6425 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6428 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6429 dma_addr_t map, u32 len, u32 flags,
6432 struct tg3 *tp = tnapi->tp;
6435 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6438 if (tg3_4g_overflow_test(map, len))
6441 if (tg3_40bit_overflow_test(tp, map, len))
6444 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6445 u32 tmp_flag = flags & ~TXD_FLAG_END;
6446 while (len > TG3_TX_BD_DMA_MAX) {
6447 u32 frag_len = TG3_TX_BD_DMA_MAX;
6448 len -= TG3_TX_BD_DMA_MAX;
6451 tnapi->tx_buffers[*entry].fragmented = true;
6452 /* Avoid the 8byte DMA problem */
6454 len += TG3_TX_BD_DMA_MAX / 2;
6455 frag_len = TG3_TX_BD_DMA_MAX / 2;
6461 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6462 frag_len, tmp_flag, mss, vlan);
6464 *entry = NEXT_TX(*entry);
6475 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6476 len, flags, mss, vlan);
6478 *entry = NEXT_TX(*entry);
6484 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6485 len, flags, mss, vlan);
6486 *entry = NEXT_TX(*entry);
6492 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6495 struct sk_buff *skb;
6496 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6501 pci_unmap_single(tnapi->tp->pdev,
6502 dma_unmap_addr(txb, mapping),
6506 while (txb->fragmented) {
6507 txb->fragmented = false;
6508 entry = NEXT_TX(entry);
6509 txb = &tnapi->tx_buffers[entry];
6512 for (i = 0; i < last; i++) {
6513 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6515 entry = NEXT_TX(entry);
6516 txb = &tnapi->tx_buffers[entry];
6518 pci_unmap_page(tnapi->tp->pdev,
6519 dma_unmap_addr(txb, mapping),
6520 frag->size, PCI_DMA_TODEVICE);
6522 while (txb->fragmented) {
6523 txb->fragmented = false;
6524 entry = NEXT_TX(entry);
6525 txb = &tnapi->tx_buffers[entry];
6530 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6531 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6532 struct sk_buff *skb,
6533 u32 *entry, u32 *budget,
6534 u32 base_flags, u32 mss, u32 vlan)
6536 struct tg3 *tp = tnapi->tp;
6537 struct sk_buff *new_skb;
6538 dma_addr_t new_addr = 0;
6541 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6542 new_skb = skb_copy(skb, GFP_ATOMIC);
6544 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6546 new_skb = skb_copy_expand(skb,
6547 skb_headroom(skb) + more_headroom,
6548 skb_tailroom(skb), GFP_ATOMIC);
6554 /* New SKB is guaranteed to be linear. */
6555 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6557 /* Make sure the mapping succeeded */
6558 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6559 dev_kfree_skb(new_skb);
6562 base_flags |= TXD_FLAG_END;
6564 tnapi->tx_buffers[*entry].skb = new_skb;
6565 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6568 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6569 new_skb->len, base_flags,
6571 tg3_tx_skb_unmap(tnapi, *entry, 0);
6572 dev_kfree_skb(new_skb);
6583 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6585 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6586 * TSO header is greater than 80 bytes.
6588 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6590 struct sk_buff *segs, *nskb;
6591 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6593 /* Estimate the number of fragments in the worst case */
6594 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6595 netif_stop_queue(tp->dev);
6597 /* netif_tx_stop_queue() must be done before checking
6598 * checking tx index in tg3_tx_avail() below, because in
6599 * tg3_tx(), we update tx index before checking for
6600 * netif_tx_queue_stopped().
6603 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6604 return NETDEV_TX_BUSY;
6606 netif_wake_queue(tp->dev);
6609 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6611 goto tg3_tso_bug_end;
6617 tg3_start_xmit(nskb, tp->dev);
6623 return NETDEV_TX_OK;
6626 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6627 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6629 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6631 struct tg3 *tp = netdev_priv(dev);
6632 u32 len, entry, base_flags, mss, vlan = 0;
6634 int i = -1, would_hit_hwbug;
6636 struct tg3_napi *tnapi;
6637 struct netdev_queue *txq;
6640 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6641 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6642 if (tg3_flag(tp, ENABLE_TSS))
6645 budget = tg3_tx_avail(tnapi);
6647 /* We are running in BH disabled context with netif_tx_lock
6648 * and TX reclaim runs via tp->napi.poll inside of a software
6649 * interrupt. Furthermore, IRQ processing runs lockless so we have
6650 * no IRQ context deadlocks to worry about either. Rejoice!
6652 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6653 if (!netif_tx_queue_stopped(txq)) {
6654 netif_tx_stop_queue(txq);
6656 /* This is a hard error, log it. */
6658 "BUG! Tx Ring full when queue awake!\n");
6660 return NETDEV_TX_BUSY;
6663 entry = tnapi->tx_prod;
6665 if (skb->ip_summed == CHECKSUM_PARTIAL)
6666 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6668 mss = skb_shinfo(skb)->gso_size;
6671 u32 tcp_opt_len, hdr_len;
6673 if (skb_header_cloned(skb) &&
6674 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6680 tcp_opt_len = tcp_optlen(skb);
6682 if (skb_is_gso_v6(skb)) {
6683 hdr_len = skb_headlen(skb) - ETH_HLEN;
6687 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6688 hdr_len = ip_tcp_len + tcp_opt_len;
6691 iph->tot_len = htons(mss + hdr_len);
6694 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6695 tg3_flag(tp, TSO_BUG))
6696 return tg3_tso_bug(tp, skb);
6698 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6699 TXD_FLAG_CPU_POST_DMA);
6701 if (tg3_flag(tp, HW_TSO_1) ||
6702 tg3_flag(tp, HW_TSO_2) ||
6703 tg3_flag(tp, HW_TSO_3)) {
6704 tcp_hdr(skb)->check = 0;
6705 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6707 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6712 if (tg3_flag(tp, HW_TSO_3)) {
6713 mss |= (hdr_len & 0xc) << 12;
6715 base_flags |= 0x00000010;
6716 base_flags |= (hdr_len & 0x3e0) << 5;
6717 } else if (tg3_flag(tp, HW_TSO_2))
6718 mss |= hdr_len << 9;
6719 else if (tg3_flag(tp, HW_TSO_1) ||
6720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6721 if (tcp_opt_len || iph->ihl > 5) {
6724 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6725 mss |= (tsflags << 11);
6728 if (tcp_opt_len || iph->ihl > 5) {
6731 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6732 base_flags |= tsflags << 12;
6737 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6738 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6739 base_flags |= TXD_FLAG_JMB_PKT;
6741 if (vlan_tx_tag_present(skb)) {
6742 base_flags |= TXD_FLAG_VLAN;
6743 vlan = vlan_tx_tag_get(skb);
6746 len = skb_headlen(skb);
6748 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6749 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6754 tnapi->tx_buffers[entry].skb = skb;
6755 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6757 would_hit_hwbug = 0;
6759 if (tg3_flag(tp, 5701_DMA_BUG))
6760 would_hit_hwbug = 1;
6762 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6763 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6765 would_hit_hwbug = 1;
6767 /* Now loop through additional data fragments, and queue them. */
6768 if (skb_shinfo(skb)->nr_frags > 0) {
6771 if (!tg3_flag(tp, HW_TSO_1) &&
6772 !tg3_flag(tp, HW_TSO_2) &&
6773 !tg3_flag(tp, HW_TSO_3))
6776 last = skb_shinfo(skb)->nr_frags - 1;
6777 for (i = 0; i <= last; i++) {
6778 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6781 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6782 len, DMA_TO_DEVICE);
6784 tnapi->tx_buffers[entry].skb = NULL;
6785 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6787 if (dma_mapping_error(&tp->pdev->dev, mapping))
6790 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6792 ((i == last) ? TXD_FLAG_END : 0),
6794 would_hit_hwbug = 1;
6798 if (would_hit_hwbug) {
6799 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6801 /* If the workaround fails due to memory/mapping
6802 * failure, silently drop this packet.
6804 entry = tnapi->tx_prod;
6805 budget = tg3_tx_avail(tnapi);
6806 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6807 base_flags, mss, vlan))
6811 skb_tx_timestamp(skb);
6813 /* Packets are ready, update Tx producer idx local and on card. */
6814 tw32_tx_mbox(tnapi->prodmbox, entry);
6816 tnapi->tx_prod = entry;
6817 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6818 netif_tx_stop_queue(txq);
6820 /* netif_tx_stop_queue() must be done before checking
6821 * checking tx index in tg3_tx_avail() below, because in
6822 * tg3_tx(), we update tx index before checking for
6823 * netif_tx_queue_stopped().
6826 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6827 netif_tx_wake_queue(txq);
6833 return NETDEV_TX_OK;
6836 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6838 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6839 return NETDEV_TX_OK;
6842 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6845 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6846 MAC_MODE_PORT_MODE_MASK);
6848 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6850 if (!tg3_flag(tp, 5705_PLUS))
6851 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6853 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6854 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6856 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6858 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6860 if (tg3_flag(tp, 5705_PLUS) ||
6861 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6863 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6866 tw32(MAC_MODE, tp->mac_mode);
6870 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6872 u32 val, bmcr, mac_mode, ptest = 0;
6874 tg3_phy_toggle_apd(tp, false);
6875 tg3_phy_toggle_automdix(tp, 0);
6877 if (extlpbk && tg3_phy_set_extloopbk(tp))
6880 bmcr = BMCR_FULLDPLX;
6885 bmcr |= BMCR_SPEED100;
6889 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6891 bmcr |= BMCR_SPEED100;
6894 bmcr |= BMCR_SPEED1000;
6899 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6900 tg3_readphy(tp, MII_CTRL1000, &val);
6901 val |= CTL1000_AS_MASTER |
6902 CTL1000_ENABLE_MASTER;
6903 tg3_writephy(tp, MII_CTRL1000, val);
6905 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6906 MII_TG3_FET_PTEST_TRIM_2;
6907 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6910 bmcr |= BMCR_LOOPBACK;
6912 tg3_writephy(tp, MII_BMCR, bmcr);
6914 /* The write needs to be flushed for the FETs */
6915 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6916 tg3_readphy(tp, MII_BMCR, &bmcr);
6920 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6921 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6922 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6923 MII_TG3_FET_PTEST_FRC_TX_LINK |
6924 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6926 /* The write needs to be flushed for the AC131 */
6927 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6930 /* Reset to prevent losing 1st rx packet intermittently */
6931 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6932 tg3_flag(tp, 5780_CLASS)) {
6933 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6935 tw32_f(MAC_RX_MODE, tp->rx_mode);
6938 mac_mode = tp->mac_mode &
6939 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6940 if (speed == SPEED_1000)
6941 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6943 mac_mode |= MAC_MODE_PORT_MODE_MII;
6945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6946 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6948 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6949 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6950 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6951 mac_mode |= MAC_MODE_LINK_POLARITY;
6953 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6954 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6957 tw32(MAC_MODE, mac_mode);
6963 static void tg3_set_loopback(struct net_device *dev, u32 features)
6965 struct tg3 *tp = netdev_priv(dev);
6967 if (features & NETIF_F_LOOPBACK) {
6968 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6971 spin_lock_bh(&tp->lock);
6972 tg3_mac_loopback(tp, true);
6973 netif_carrier_on(tp->dev);
6974 spin_unlock_bh(&tp->lock);
6975 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6977 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6980 spin_lock_bh(&tp->lock);
6981 tg3_mac_loopback(tp, false);
6982 /* Force link status check */
6983 tg3_setup_phy(tp, 1);
6984 spin_unlock_bh(&tp->lock);
6985 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6989 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6991 struct tg3 *tp = netdev_priv(dev);
6993 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6994 features &= ~NETIF_F_ALL_TSO;
6999 static int tg3_set_features(struct net_device *dev, u32 features)
7001 u32 changed = dev->features ^ features;
7003 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7004 tg3_set_loopback(dev, features);
7009 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7014 if (new_mtu > ETH_DATA_LEN) {
7015 if (tg3_flag(tp, 5780_CLASS)) {
7016 netdev_update_features(dev);
7017 tg3_flag_clear(tp, TSO_CAPABLE);
7019 tg3_flag_set(tp, JUMBO_RING_ENABLE);
7022 if (tg3_flag(tp, 5780_CLASS)) {
7023 tg3_flag_set(tp, TSO_CAPABLE);
7024 netdev_update_features(dev);
7026 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7030 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7032 struct tg3 *tp = netdev_priv(dev);
7035 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7038 if (!netif_running(dev)) {
7039 /* We'll just catch it later when the
7042 tg3_set_mtu(dev, tp, new_mtu);
7050 tg3_full_lock(tp, 1);
7052 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7054 tg3_set_mtu(dev, tp, new_mtu);
7056 err = tg3_restart_hw(tp, 0);
7059 tg3_netif_start(tp);
7061 tg3_full_unlock(tp);
7069 static void tg3_rx_prodring_free(struct tg3 *tp,
7070 struct tg3_rx_prodring_set *tpr)
7074 if (tpr != &tp->napi[0].prodring) {
7075 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7076 i = (i + 1) & tp->rx_std_ring_mask)
7077 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7080 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7081 for (i = tpr->rx_jmb_cons_idx;
7082 i != tpr->rx_jmb_prod_idx;
7083 i = (i + 1) & tp->rx_jmb_ring_mask) {
7084 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7092 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7093 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7096 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7097 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7098 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7103 /* Initialize rx rings for packet processing.
7105 * The chip has been shut down and the driver detached from
7106 * the networking, so no interrupts or new tx packets will
7107 * end up in the driver. tp->{tx,}lock are held and thus
7110 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7111 struct tg3_rx_prodring_set *tpr)
7113 u32 i, rx_pkt_dma_sz;
7115 tpr->rx_std_cons_idx = 0;
7116 tpr->rx_std_prod_idx = 0;
7117 tpr->rx_jmb_cons_idx = 0;
7118 tpr->rx_jmb_prod_idx = 0;
7120 if (tpr != &tp->napi[0].prodring) {
7121 memset(&tpr->rx_std_buffers[0], 0,
7122 TG3_RX_STD_BUFF_RING_SIZE(tp));
7123 if (tpr->rx_jmb_buffers)
7124 memset(&tpr->rx_jmb_buffers[0], 0,
7125 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7129 /* Zero out all descriptors. */
7130 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7132 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7133 if (tg3_flag(tp, 5780_CLASS) &&
7134 tp->dev->mtu > ETH_DATA_LEN)
7135 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7136 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7138 /* Initialize invariants of the rings, we only set this
7139 * stuff once. This works because the card does not
7140 * write into the rx buffer posting rings.
7142 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7143 struct tg3_rx_buffer_desc *rxd;
7145 rxd = &tpr->rx_std[i];
7146 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7147 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7148 rxd->opaque = (RXD_OPAQUE_RING_STD |
7149 (i << RXD_OPAQUE_INDEX_SHIFT));
7152 /* Now allocate fresh SKBs for each rx ring. */
7153 for (i = 0; i < tp->rx_pending; i++) {
7154 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7155 netdev_warn(tp->dev,
7156 "Using a smaller RX standard ring. Only "
7157 "%d out of %d buffers were allocated "
7158 "successfully\n", i, tp->rx_pending);
7166 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7169 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7171 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7174 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7175 struct tg3_rx_buffer_desc *rxd;
7177 rxd = &tpr->rx_jmb[i].std;
7178 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7179 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7181 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7182 (i << RXD_OPAQUE_INDEX_SHIFT));
7185 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7186 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7187 netdev_warn(tp->dev,
7188 "Using a smaller RX jumbo ring. Only %d "
7189 "out of %d buffers were allocated "
7190 "successfully\n", i, tp->rx_jumbo_pending);
7193 tp->rx_jumbo_pending = i;
7202 tg3_rx_prodring_free(tp, tpr);
7206 static void tg3_rx_prodring_fini(struct tg3 *tp,
7207 struct tg3_rx_prodring_set *tpr)
7209 kfree(tpr->rx_std_buffers);
7210 tpr->rx_std_buffers = NULL;
7211 kfree(tpr->rx_jmb_buffers);
7212 tpr->rx_jmb_buffers = NULL;
7214 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7215 tpr->rx_std, tpr->rx_std_mapping);
7219 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7220 tpr->rx_jmb, tpr->rx_jmb_mapping);
7225 static int tg3_rx_prodring_init(struct tg3 *tp,
7226 struct tg3_rx_prodring_set *tpr)
7228 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7230 if (!tpr->rx_std_buffers)
7233 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7234 TG3_RX_STD_RING_BYTES(tp),
7235 &tpr->rx_std_mapping,
7240 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7241 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7243 if (!tpr->rx_jmb_buffers)
7246 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7247 TG3_RX_JMB_RING_BYTES(tp),
7248 &tpr->rx_jmb_mapping,
7257 tg3_rx_prodring_fini(tp, tpr);
7261 /* Free up pending packets in all rx/tx rings.
7263 * The chip has been shut down and the driver detached from
7264 * the networking, so no interrupts or new tx packets will
7265 * end up in the driver. tp->{tx,}lock is not held and we are not
7266 * in an interrupt context and thus may sleep.
7268 static void tg3_free_rings(struct tg3 *tp)
7272 for (j = 0; j < tp->irq_cnt; j++) {
7273 struct tg3_napi *tnapi = &tp->napi[j];
7275 tg3_rx_prodring_free(tp, &tnapi->prodring);
7277 if (!tnapi->tx_buffers)
7280 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7281 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7286 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
7288 dev_kfree_skb_any(skb);
7293 /* Initialize tx/rx rings for packet processing.
7295 * The chip has been shut down and the driver detached from
7296 * the networking, so no interrupts or new tx packets will
7297 * end up in the driver. tp->{tx,}lock are held and thus
7300 static int tg3_init_rings(struct tg3 *tp)
7304 /* Free up all the SKBs. */
7307 for (i = 0; i < tp->irq_cnt; i++) {
7308 struct tg3_napi *tnapi = &tp->napi[i];
7310 tnapi->last_tag = 0;
7311 tnapi->last_irq_tag = 0;
7312 tnapi->hw_status->status = 0;
7313 tnapi->hw_status->status_tag = 0;
7314 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7319 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7321 tnapi->rx_rcb_ptr = 0;
7323 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7325 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7335 * Must not be invoked with interrupt sources disabled and
7336 * the hardware shutdown down.
7338 static void tg3_free_consistent(struct tg3 *tp)
7342 for (i = 0; i < tp->irq_cnt; i++) {
7343 struct tg3_napi *tnapi = &tp->napi[i];
7345 if (tnapi->tx_ring) {
7346 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7347 tnapi->tx_ring, tnapi->tx_desc_mapping);
7348 tnapi->tx_ring = NULL;
7351 kfree(tnapi->tx_buffers);
7352 tnapi->tx_buffers = NULL;
7354 if (tnapi->rx_rcb) {
7355 dma_free_coherent(&tp->pdev->dev,
7356 TG3_RX_RCB_RING_BYTES(tp),
7358 tnapi->rx_rcb_mapping);
7359 tnapi->rx_rcb = NULL;
7362 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7364 if (tnapi->hw_status) {
7365 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7367 tnapi->status_mapping);
7368 tnapi->hw_status = NULL;
7373 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7374 tp->hw_stats, tp->stats_mapping);
7375 tp->hw_stats = NULL;
7380 * Must not be invoked with interrupt sources disabled and
7381 * the hardware shutdown down. Can sleep.
7383 static int tg3_alloc_consistent(struct tg3 *tp)
7387 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7388 sizeof(struct tg3_hw_stats),
7394 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7396 for (i = 0; i < tp->irq_cnt; i++) {
7397 struct tg3_napi *tnapi = &tp->napi[i];
7398 struct tg3_hw_status *sblk;
7400 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7402 &tnapi->status_mapping,
7404 if (!tnapi->hw_status)
7407 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7408 sblk = tnapi->hw_status;
7410 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7413 /* If multivector TSS is enabled, vector 0 does not handle
7414 * tx interrupts. Don't allocate any resources for it.
7416 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7417 (i && tg3_flag(tp, ENABLE_TSS))) {
7418 tnapi->tx_buffers = kzalloc(
7419 sizeof(struct tg3_tx_ring_info) *
7420 TG3_TX_RING_SIZE, GFP_KERNEL);
7421 if (!tnapi->tx_buffers)
7424 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7426 &tnapi->tx_desc_mapping,
7428 if (!tnapi->tx_ring)
7433 * When RSS is enabled, the status block format changes
7434 * slightly. The "rx_jumbo_consumer", "reserved",
7435 * and "rx_mini_consumer" members get mapped to the
7436 * other three rx return ring producer indexes.
7440 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7443 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7446 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7449 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7454 * If multivector RSS is enabled, vector 0 does not handle
7455 * rx or tx interrupts. Don't allocate any resources for it.
7457 if (!i && tg3_flag(tp, ENABLE_RSS))
7460 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7461 TG3_RX_RCB_RING_BYTES(tp),
7462 &tnapi->rx_rcb_mapping,
7467 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7473 tg3_free_consistent(tp);
7477 #define MAX_WAIT_CNT 1000
7479 /* To stop a block, clear the enable bit and poll till it
7480 * clears. tp->lock is held.
7482 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7487 if (tg3_flag(tp, 5705_PLUS)) {
7494 /* We can't enable/disable these bits of the
7495 * 5705/5750, just say success.
7508 for (i = 0; i < MAX_WAIT_CNT; i++) {
7511 if ((val & enable_bit) == 0)
7515 if (i == MAX_WAIT_CNT && !silent) {
7516 dev_err(&tp->pdev->dev,
7517 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7525 /* tp->lock is held. */
7526 static int tg3_abort_hw(struct tg3 *tp, int silent)
7530 tg3_disable_ints(tp);
7532 tp->rx_mode &= ~RX_MODE_ENABLE;
7533 tw32_f(MAC_RX_MODE, tp->rx_mode);
7536 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7537 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7538 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7539 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7540 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7541 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7543 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7544 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7545 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7546 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7547 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7548 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7549 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7551 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7552 tw32_f(MAC_MODE, tp->mac_mode);
7555 tp->tx_mode &= ~TX_MODE_ENABLE;
7556 tw32_f(MAC_TX_MODE, tp->tx_mode);
7558 for (i = 0; i < MAX_WAIT_CNT; i++) {
7560 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7563 if (i >= MAX_WAIT_CNT) {
7564 dev_err(&tp->pdev->dev,
7565 "%s timed out, TX_MODE_ENABLE will not clear "
7566 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7570 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7571 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7572 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7574 tw32(FTQ_RESET, 0xffffffff);
7575 tw32(FTQ_RESET, 0x00000000);
7577 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7578 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7580 for (i = 0; i < tp->irq_cnt; i++) {
7581 struct tg3_napi *tnapi = &tp->napi[i];
7582 if (tnapi->hw_status)
7583 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7586 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7591 /* Save PCI command register before chip reset */
7592 static void tg3_save_pci_state(struct tg3 *tp)
7594 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7597 /* Restore PCI state after chip reset */
7598 static void tg3_restore_pci_state(struct tg3 *tp)
7602 /* Re-enable indirect register accesses. */
7603 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7604 tp->misc_host_ctrl);
7606 /* Set MAX PCI retry to zero. */
7607 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7608 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7609 tg3_flag(tp, PCIX_MODE))
7610 val |= PCISTATE_RETRY_SAME_DMA;
7611 /* Allow reads and writes to the APE register and memory space. */
7612 if (tg3_flag(tp, ENABLE_APE))
7613 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7614 PCISTATE_ALLOW_APE_SHMEM_WR |
7615 PCISTATE_ALLOW_APE_PSPACE_WR;
7616 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7618 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7620 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7621 if (tg3_flag(tp, PCI_EXPRESS))
7622 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7624 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7625 tp->pci_cacheline_sz);
7626 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7631 /* Make sure PCI-X relaxed ordering bit is clear. */
7632 if (tg3_flag(tp, PCIX_MODE)) {
7635 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7637 pcix_cmd &= ~PCI_X_CMD_ERO;
7638 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7642 if (tg3_flag(tp, 5780_CLASS)) {
7644 /* Chip reset on 5780 will reset MSI enable bit,
7645 * so need to restore it.
7647 if (tg3_flag(tp, USING_MSI)) {
7650 pci_read_config_word(tp->pdev,
7651 tp->msi_cap + PCI_MSI_FLAGS,
7653 pci_write_config_word(tp->pdev,
7654 tp->msi_cap + PCI_MSI_FLAGS,
7655 ctrl | PCI_MSI_FLAGS_ENABLE);
7656 val = tr32(MSGINT_MODE);
7657 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7662 /* tp->lock is held. */
7663 static int tg3_chip_reset(struct tg3 *tp)
7666 void (*write_op)(struct tg3 *, u32, u32);
7671 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7673 /* No matching tg3_nvram_unlock() after this because
7674 * chip reset below will undo the nvram lock.
7676 tp->nvram_lock_cnt = 0;
7678 /* GRC_MISC_CFG core clock reset will clear the memory
7679 * enable bit in PCI register 4 and the MSI enable bit
7680 * on some chips, so we save relevant registers here.
7682 tg3_save_pci_state(tp);
7684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7685 tg3_flag(tp, 5755_PLUS))
7686 tw32(GRC_FASTBOOT_PC, 0);
7689 * We must avoid the readl() that normally takes place.
7690 * It locks machines, causes machine checks, and other
7691 * fun things. So, temporarily disable the 5701
7692 * hardware workaround, while we do the reset.
7694 write_op = tp->write32;
7695 if (write_op == tg3_write_flush_reg32)
7696 tp->write32 = tg3_write32;
7698 /* Prevent the irq handler from reading or writing PCI registers
7699 * during chip reset when the memory enable bit in the PCI command
7700 * register may be cleared. The chip does not generate interrupt
7701 * at this time, but the irq handler may still be called due to irq
7702 * sharing or irqpoll.
7704 tg3_flag_set(tp, CHIP_RESETTING);
7705 for (i = 0; i < tp->irq_cnt; i++) {
7706 struct tg3_napi *tnapi = &tp->napi[i];
7707 if (tnapi->hw_status) {
7708 tnapi->hw_status->status = 0;
7709 tnapi->hw_status->status_tag = 0;
7711 tnapi->last_tag = 0;
7712 tnapi->last_irq_tag = 0;
7716 for (i = 0; i < tp->irq_cnt; i++)
7717 synchronize_irq(tp->napi[i].irq_vec);
7719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7720 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7721 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7725 val = GRC_MISC_CFG_CORECLK_RESET;
7727 if (tg3_flag(tp, PCI_EXPRESS)) {
7728 /* Force PCIe 1.0a mode */
7729 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7730 !tg3_flag(tp, 57765_PLUS) &&
7731 tr32(TG3_PCIE_PHY_TSTCTL) ==
7732 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7733 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7735 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7736 tw32(GRC_MISC_CFG, (1 << 29));
7741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7742 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7743 tw32(GRC_VCPU_EXT_CTRL,
7744 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7747 /* Manage gphy power for all CPMU absent PCIe devices. */
7748 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7749 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7751 tw32(GRC_MISC_CFG, val);
7753 /* restore 5701 hardware bug workaround write method */
7754 tp->write32 = write_op;
7756 /* Unfortunately, we have to delay before the PCI read back.
7757 * Some 575X chips even will not respond to a PCI cfg access
7758 * when the reset command is given to the chip.
7760 * How do these hardware designers expect things to work
7761 * properly if the PCI write is posted for a long period
7762 * of time? It is always necessary to have some method by
7763 * which a register read back can occur to push the write
7764 * out which does the reset.
7766 * For most tg3 variants the trick below was working.
7771 /* Flush PCI posted writes. The normal MMIO registers
7772 * are inaccessible at this time so this is the only
7773 * way to make this reliably (actually, this is no longer
7774 * the case, see above). I tried to use indirect
7775 * register read/write but this upset some 5701 variants.
7777 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7781 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7784 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7788 /* Wait for link training to complete. */
7789 for (i = 0; i < 5000; i++)
7792 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7793 pci_write_config_dword(tp->pdev, 0xc4,
7794 cfg_val | (1 << 15));
7797 /* Clear the "no snoop" and "relaxed ordering" bits. */
7798 pci_read_config_word(tp->pdev,
7799 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7801 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7802 PCI_EXP_DEVCTL_NOSNOOP_EN);
7804 * Older PCIe devices only support the 128 byte
7805 * MPS setting. Enforce the restriction.
7807 if (!tg3_flag(tp, CPMU_PRESENT))
7808 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7809 pci_write_config_word(tp->pdev,
7810 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7813 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7815 /* Clear error status */
7816 pci_write_config_word(tp->pdev,
7817 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7818 PCI_EXP_DEVSTA_CED |
7819 PCI_EXP_DEVSTA_NFED |
7820 PCI_EXP_DEVSTA_FED |
7821 PCI_EXP_DEVSTA_URD);
7824 tg3_restore_pci_state(tp);
7826 tg3_flag_clear(tp, CHIP_RESETTING);
7827 tg3_flag_clear(tp, ERROR_PROCESSED);
7830 if (tg3_flag(tp, 5780_CLASS))
7831 val = tr32(MEMARB_MODE);
7832 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7834 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7836 tw32(0x5000, 0x400);
7839 tw32(GRC_MODE, tp->grc_mode);
7841 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7844 tw32(0xc4, val | (1 << 15));
7847 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7849 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7850 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7851 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7852 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7855 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7856 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7858 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7859 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7864 tw32_f(MAC_MODE, val);
7867 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7869 err = tg3_poll_fw(tp);
7875 if (tg3_flag(tp, PCI_EXPRESS) &&
7876 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7877 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7878 !tg3_flag(tp, 57765_PLUS)) {
7881 tw32(0x7c00, val | (1 << 25));
7884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7885 val = tr32(TG3_CPMU_CLCK_ORIDE);
7886 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7889 /* Reprobe ASF enable state. */
7890 tg3_flag_clear(tp, ENABLE_ASF);
7891 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7892 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7893 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7896 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7897 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7898 tg3_flag_set(tp, ENABLE_ASF);
7899 tp->last_event_jiffies = jiffies;
7900 if (tg3_flag(tp, 5750_PLUS))
7901 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7908 /* tp->lock is held. */
7909 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7915 tg3_write_sig_pre_reset(tp, kind);
7917 tg3_abort_hw(tp, silent);
7918 err = tg3_chip_reset(tp);
7920 __tg3_set_mac_addr(tp, 0);
7922 tg3_write_sig_legacy(tp, kind);
7923 tg3_write_sig_post_reset(tp, kind);
7931 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7933 struct tg3 *tp = netdev_priv(dev);
7934 struct sockaddr *addr = p;
7935 int err = 0, skip_mac_1 = 0;
7937 if (!is_valid_ether_addr(addr->sa_data))
7940 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7942 if (!netif_running(dev))
7945 if (tg3_flag(tp, ENABLE_ASF)) {
7946 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7948 addr0_high = tr32(MAC_ADDR_0_HIGH);
7949 addr0_low = tr32(MAC_ADDR_0_LOW);
7950 addr1_high = tr32(MAC_ADDR_1_HIGH);
7951 addr1_low = tr32(MAC_ADDR_1_LOW);
7953 /* Skip MAC addr 1 if ASF is using it. */
7954 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7955 !(addr1_high == 0 && addr1_low == 0))
7958 spin_lock_bh(&tp->lock);
7959 __tg3_set_mac_addr(tp, skip_mac_1);
7960 spin_unlock_bh(&tp->lock);
7965 /* tp->lock is held. */
7966 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7967 dma_addr_t mapping, u32 maxlen_flags,
7971 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7972 ((u64) mapping >> 32));
7974 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7975 ((u64) mapping & 0xffffffff));
7977 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7980 if (!tg3_flag(tp, 5705_PLUS))
7982 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7986 static void __tg3_set_rx_mode(struct net_device *);
7987 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7991 if (!tg3_flag(tp, ENABLE_TSS)) {
7992 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7993 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7994 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7996 tw32(HOSTCC_TXCOL_TICKS, 0);
7997 tw32(HOSTCC_TXMAX_FRAMES, 0);
7998 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8001 if (!tg3_flag(tp, ENABLE_RSS)) {
8002 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8003 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8004 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8006 tw32(HOSTCC_RXCOL_TICKS, 0);
8007 tw32(HOSTCC_RXMAX_FRAMES, 0);
8008 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8011 if (!tg3_flag(tp, 5705_PLUS)) {
8012 u32 val = ec->stats_block_coalesce_usecs;
8014 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8015 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8017 if (!netif_carrier_ok(tp->dev))
8020 tw32(HOSTCC_STAT_COAL_TICKS, val);
8023 for (i = 0; i < tp->irq_cnt - 1; i++) {
8026 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8027 tw32(reg, ec->rx_coalesce_usecs);
8028 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8029 tw32(reg, ec->rx_max_coalesced_frames);
8030 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8031 tw32(reg, ec->rx_max_coalesced_frames_irq);
8033 if (tg3_flag(tp, ENABLE_TSS)) {
8034 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8035 tw32(reg, ec->tx_coalesce_usecs);
8036 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8037 tw32(reg, ec->tx_max_coalesced_frames);
8038 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8039 tw32(reg, ec->tx_max_coalesced_frames_irq);
8043 for (; i < tp->irq_max - 1; i++) {
8044 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8045 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8046 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8048 if (tg3_flag(tp, ENABLE_TSS)) {
8049 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8050 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8051 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8056 /* tp->lock is held. */
8057 static void tg3_rings_reset(struct tg3 *tp)
8060 u32 stblk, txrcb, rxrcb, limit;
8061 struct tg3_napi *tnapi = &tp->napi[0];
8063 /* Disable all transmit rings but the first. */
8064 if (!tg3_flag(tp, 5705_PLUS))
8065 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8066 else if (tg3_flag(tp, 5717_PLUS))
8067 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8068 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8069 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8071 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8073 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8074 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8075 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8076 BDINFO_FLAGS_DISABLED);
8079 /* Disable all receive return rings but the first. */
8080 if (tg3_flag(tp, 5717_PLUS))
8081 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8082 else if (!tg3_flag(tp, 5705_PLUS))
8083 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8084 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8086 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8088 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8090 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8091 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8092 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8093 BDINFO_FLAGS_DISABLED);
8095 /* Disable interrupts */
8096 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8097 tp->napi[0].chk_msi_cnt = 0;
8098 tp->napi[0].last_rx_cons = 0;
8099 tp->napi[0].last_tx_cons = 0;
8101 /* Zero mailbox registers. */
8102 if (tg3_flag(tp, SUPPORT_MSIX)) {
8103 for (i = 1; i < tp->irq_max; i++) {
8104 tp->napi[i].tx_prod = 0;
8105 tp->napi[i].tx_cons = 0;
8106 if (tg3_flag(tp, ENABLE_TSS))
8107 tw32_mailbox(tp->napi[i].prodmbox, 0);
8108 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8109 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8110 tp->napi[i].chk_msi_cnt = 0;
8111 tp->napi[i].last_rx_cons = 0;
8112 tp->napi[i].last_tx_cons = 0;
8114 if (!tg3_flag(tp, ENABLE_TSS))
8115 tw32_mailbox(tp->napi[0].prodmbox, 0);
8117 tp->napi[0].tx_prod = 0;
8118 tp->napi[0].tx_cons = 0;
8119 tw32_mailbox(tp->napi[0].prodmbox, 0);
8120 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8123 /* Make sure the NIC-based send BD rings are disabled. */
8124 if (!tg3_flag(tp, 5705_PLUS)) {
8125 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8126 for (i = 0; i < 16; i++)
8127 tw32_tx_mbox(mbox + i * 8, 0);
8130 txrcb = NIC_SRAM_SEND_RCB;
8131 rxrcb = NIC_SRAM_RCV_RET_RCB;
8133 /* Clear status block in ram. */
8134 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8136 /* Set status block DMA address */
8137 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8138 ((u64) tnapi->status_mapping >> 32));
8139 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8140 ((u64) tnapi->status_mapping & 0xffffffff));
8142 if (tnapi->tx_ring) {
8143 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8144 (TG3_TX_RING_SIZE <<
8145 BDINFO_FLAGS_MAXLEN_SHIFT),
8146 NIC_SRAM_TX_BUFFER_DESC);
8147 txrcb += TG3_BDINFO_SIZE;
8150 if (tnapi->rx_rcb) {
8151 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8152 (tp->rx_ret_ring_mask + 1) <<
8153 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8154 rxrcb += TG3_BDINFO_SIZE;
8157 stblk = HOSTCC_STATBLCK_RING1;
8159 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8160 u64 mapping = (u64)tnapi->status_mapping;
8161 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8162 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8164 /* Clear status block in ram. */
8165 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8167 if (tnapi->tx_ring) {
8168 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8169 (TG3_TX_RING_SIZE <<
8170 BDINFO_FLAGS_MAXLEN_SHIFT),
8171 NIC_SRAM_TX_BUFFER_DESC);
8172 txrcb += TG3_BDINFO_SIZE;
8175 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8176 ((tp->rx_ret_ring_mask + 1) <<
8177 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8180 rxrcb += TG3_BDINFO_SIZE;
8184 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8186 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8188 if (!tg3_flag(tp, 5750_PLUS) ||
8189 tg3_flag(tp, 5780_CLASS) ||
8190 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8192 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8193 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8195 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8197 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8199 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8200 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8202 val = min(nic_rep_thresh, host_rep_thresh);
8203 tw32(RCVBDI_STD_THRESH, val);
8205 if (tg3_flag(tp, 57765_PLUS))
8206 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8208 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8211 if (!tg3_flag(tp, 5705_PLUS))
8212 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8214 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8216 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8218 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8219 tw32(RCVBDI_JUMBO_THRESH, val);
8221 if (tg3_flag(tp, 57765_PLUS))
8222 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8225 /* tp->lock is held. */
8226 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8228 u32 val, rdmac_mode;
8230 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8232 tg3_disable_ints(tp);
8236 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8238 if (tg3_flag(tp, INIT_COMPLETE))
8239 tg3_abort_hw(tp, 1);
8241 /* Enable MAC control of LPI */
8242 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8243 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8244 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8245 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8247 tw32_f(TG3_CPMU_EEE_CTRL,
8248 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8250 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8251 TG3_CPMU_EEEMD_LPI_IN_TX |
8252 TG3_CPMU_EEEMD_LPI_IN_RX |
8253 TG3_CPMU_EEEMD_EEE_ENABLE;
8255 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8256 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8258 if (tg3_flag(tp, ENABLE_APE))
8259 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8261 tw32_f(TG3_CPMU_EEE_MODE, val);
8263 tw32_f(TG3_CPMU_EEE_DBTMR1,
8264 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8265 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8267 tw32_f(TG3_CPMU_EEE_DBTMR2,
8268 TG3_CPMU_DBTMR2_APE_TX_2047US |
8269 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8275 err = tg3_chip_reset(tp);
8279 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8281 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8282 val = tr32(TG3_CPMU_CTRL);
8283 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8284 tw32(TG3_CPMU_CTRL, val);
8286 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8287 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8288 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8289 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8291 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8292 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8293 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8294 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8296 val = tr32(TG3_CPMU_HST_ACC);
8297 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8298 val |= CPMU_HST_ACC_MACCLK_6_25;
8299 tw32(TG3_CPMU_HST_ACC, val);
8302 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8303 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8304 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8305 PCIE_PWR_MGMT_L1_THRESH_4MS;
8306 tw32(PCIE_PWR_MGMT_THRESH, val);
8308 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8309 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8311 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8313 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8314 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8317 if (tg3_flag(tp, L1PLLPD_EN)) {
8318 u32 grc_mode = tr32(GRC_MODE);
8320 /* Access the lower 1K of PL PCIE block registers. */
8321 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8322 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8324 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8325 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8326 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8328 tw32(GRC_MODE, grc_mode);
8331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8332 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8333 u32 grc_mode = tr32(GRC_MODE);
8335 /* Access the lower 1K of PL PCIE block registers. */
8336 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8337 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8339 val = tr32(TG3_PCIE_TLDLPL_PORT +
8340 TG3_PCIE_PL_LO_PHYCTL5);
8341 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8342 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8344 tw32(GRC_MODE, grc_mode);
8347 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8348 u32 grc_mode = tr32(GRC_MODE);
8350 /* Access the lower 1K of DL PCIE block registers. */
8351 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8352 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8354 val = tr32(TG3_PCIE_TLDLPL_PORT +
8355 TG3_PCIE_DL_LO_FTSMAX);
8356 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8357 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8358 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8360 tw32(GRC_MODE, grc_mode);
8363 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8364 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8365 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8366 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8369 /* This works around an issue with Athlon chipsets on
8370 * B3 tigon3 silicon. This bit has no effect on any
8371 * other revision. But do not set this on PCI Express
8372 * chips and don't even touch the clocks if the CPMU is present.
8374 if (!tg3_flag(tp, CPMU_PRESENT)) {
8375 if (!tg3_flag(tp, PCI_EXPRESS))
8376 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8377 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8380 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8381 tg3_flag(tp, PCIX_MODE)) {
8382 val = tr32(TG3PCI_PCISTATE);
8383 val |= PCISTATE_RETRY_SAME_DMA;
8384 tw32(TG3PCI_PCISTATE, val);
8387 if (tg3_flag(tp, ENABLE_APE)) {
8388 /* Allow reads and writes to the
8389 * APE register and memory space.
8391 val = tr32(TG3PCI_PCISTATE);
8392 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8393 PCISTATE_ALLOW_APE_SHMEM_WR |
8394 PCISTATE_ALLOW_APE_PSPACE_WR;
8395 tw32(TG3PCI_PCISTATE, val);
8398 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8399 /* Enable some hw fixes. */
8400 val = tr32(TG3PCI_MSI_DATA);
8401 val |= (1 << 26) | (1 << 28) | (1 << 29);
8402 tw32(TG3PCI_MSI_DATA, val);
8405 /* Descriptor ring init may make accesses to the
8406 * NIC SRAM area to setup the TX descriptors, so we
8407 * can only do this after the hardware has been
8408 * successfully reset.
8410 err = tg3_init_rings(tp);
8414 if (tg3_flag(tp, 57765_PLUS)) {
8415 val = tr32(TG3PCI_DMA_RW_CTRL) &
8416 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8417 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8418 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8419 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8420 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8421 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8422 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8423 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8424 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8425 /* This value is determined during the probe time DMA
8426 * engine test, tg3_test_dma.
8428 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8431 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8432 GRC_MODE_4X_NIC_SEND_RINGS |
8433 GRC_MODE_NO_TX_PHDR_CSUM |
8434 GRC_MODE_NO_RX_PHDR_CSUM);
8435 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8437 /* Pseudo-header checksum is done by hardware logic and not
8438 * the offload processers, so make the chip do the pseudo-
8439 * header checksums on receive. For transmit it is more
8440 * convenient to do the pseudo-header checksum in software
8441 * as Linux does that on transmit for us in all cases.
8443 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8447 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8449 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8450 val = tr32(GRC_MISC_CFG);
8452 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8453 tw32(GRC_MISC_CFG, val);
8455 /* Initialize MBUF/DESC pool. */
8456 if (tg3_flag(tp, 5750_PLUS)) {
8458 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8459 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8461 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8463 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8464 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8465 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8466 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8469 fw_len = tp->fw_len;
8470 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8471 tw32(BUFMGR_MB_POOL_ADDR,
8472 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8473 tw32(BUFMGR_MB_POOL_SIZE,
8474 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8477 if (tp->dev->mtu <= ETH_DATA_LEN) {
8478 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8479 tp->bufmgr_config.mbuf_read_dma_low_water);
8480 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8481 tp->bufmgr_config.mbuf_mac_rx_low_water);
8482 tw32(BUFMGR_MB_HIGH_WATER,
8483 tp->bufmgr_config.mbuf_high_water);
8485 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8486 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8487 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8488 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8489 tw32(BUFMGR_MB_HIGH_WATER,
8490 tp->bufmgr_config.mbuf_high_water_jumbo);
8492 tw32(BUFMGR_DMA_LOW_WATER,
8493 tp->bufmgr_config.dma_low_water);
8494 tw32(BUFMGR_DMA_HIGH_WATER,
8495 tp->bufmgr_config.dma_high_water);
8497 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8499 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8500 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8501 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8502 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8503 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8504 tw32(BUFMGR_MODE, val);
8505 for (i = 0; i < 2000; i++) {
8506 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8511 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8515 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8516 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8518 tg3_setup_rxbd_thresholds(tp);
8520 /* Initialize TG3_BDINFO's at:
8521 * RCVDBDI_STD_BD: standard eth size rx ring
8522 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8523 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8526 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8527 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8528 * ring attribute flags
8529 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8531 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8532 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8534 * The size of each ring is fixed in the firmware, but the location is
8537 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8538 ((u64) tpr->rx_std_mapping >> 32));
8539 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8540 ((u64) tpr->rx_std_mapping & 0xffffffff));
8541 if (!tg3_flag(tp, 5717_PLUS))
8542 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8543 NIC_SRAM_RX_BUFFER_DESC);
8545 /* Disable the mini ring */
8546 if (!tg3_flag(tp, 5705_PLUS))
8547 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8548 BDINFO_FLAGS_DISABLED);
8550 /* Program the jumbo buffer descriptor ring control
8551 * blocks on those devices that have them.
8553 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8554 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8556 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8557 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8558 ((u64) tpr->rx_jmb_mapping >> 32));
8559 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8560 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8561 val = TG3_RX_JMB_RING_SIZE(tp) <<
8562 BDINFO_FLAGS_MAXLEN_SHIFT;
8563 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8564 val | BDINFO_FLAGS_USE_EXT_RECV);
8565 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8566 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8567 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8568 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8570 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8571 BDINFO_FLAGS_DISABLED);
8574 if (tg3_flag(tp, 57765_PLUS)) {
8575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8576 val = TG3_RX_STD_MAX_SIZE_5700;
8578 val = TG3_RX_STD_MAX_SIZE_5717;
8579 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8580 val |= (TG3_RX_STD_DMA_SZ << 2);
8582 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8584 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8586 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8588 tpr->rx_std_prod_idx = tp->rx_pending;
8589 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8591 tpr->rx_jmb_prod_idx =
8592 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8593 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8595 tg3_rings_reset(tp);
8597 /* Initialize MAC address and backoff seed. */
8598 __tg3_set_mac_addr(tp, 0);
8600 /* MTU + ethernet header + FCS + optional VLAN tag */
8601 tw32(MAC_RX_MTU_SIZE,
8602 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8604 /* The slot time is changed by tg3_setup_phy if we
8605 * run at gigabit with half duplex.
8607 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8608 (6 << TX_LENGTHS_IPG_SHIFT) |
8609 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8612 val |= tr32(MAC_TX_LENGTHS) &
8613 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8614 TX_LENGTHS_CNT_DWN_VAL_MSK);
8616 tw32(MAC_TX_LENGTHS, val);
8618 /* Receive rules. */
8619 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8620 tw32(RCVLPC_CONFIG, 0x0181);
8622 /* Calculate RDMAC_MODE setting early, we need it to determine
8623 * the RCVLPC_STATE_ENABLE mask.
8625 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8626 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8627 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8628 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8629 RDMAC_MODE_LNGREAD_ENAB);
8631 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8632 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8637 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8638 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8639 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8642 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8643 if (tg3_flag(tp, TSO_CAPABLE) &&
8644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8645 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8646 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8647 !tg3_flag(tp, IS_5788)) {
8648 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8652 if (tg3_flag(tp, PCI_EXPRESS))
8653 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8655 if (tg3_flag(tp, HW_TSO_1) ||
8656 tg3_flag(tp, HW_TSO_2) ||
8657 tg3_flag(tp, HW_TSO_3))
8658 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8660 if (tg3_flag(tp, 57765_PLUS) ||
8661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8663 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8666 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8668 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8672 tg3_flag(tp, 57765_PLUS)) {
8673 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8676 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8677 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8678 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8679 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8680 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8681 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8683 tw32(TG3_RDMA_RSRVCTRL_REG,
8684 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8689 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8690 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8691 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8692 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8695 /* Receive/send statistics. */
8696 if (tg3_flag(tp, 5750_PLUS)) {
8697 val = tr32(RCVLPC_STATS_ENABLE);
8698 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8699 tw32(RCVLPC_STATS_ENABLE, val);
8700 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8701 tg3_flag(tp, TSO_CAPABLE)) {
8702 val = tr32(RCVLPC_STATS_ENABLE);
8703 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8704 tw32(RCVLPC_STATS_ENABLE, val);
8706 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8708 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8709 tw32(SNDDATAI_STATSENAB, 0xffffff);
8710 tw32(SNDDATAI_STATSCTRL,
8711 (SNDDATAI_SCTRL_ENABLE |
8712 SNDDATAI_SCTRL_FASTUPD));
8714 /* Setup host coalescing engine. */
8715 tw32(HOSTCC_MODE, 0);
8716 for (i = 0; i < 2000; i++) {
8717 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8722 __tg3_set_coalesce(tp, &tp->coal);
8724 if (!tg3_flag(tp, 5705_PLUS)) {
8725 /* Status/statistics block address. See tg3_timer,
8726 * the tg3_periodic_fetch_stats call there, and
8727 * tg3_get_stats to see how this works for 5705/5750 chips.
8729 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8730 ((u64) tp->stats_mapping >> 32));
8731 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8732 ((u64) tp->stats_mapping & 0xffffffff));
8733 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8735 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8737 /* Clear statistics and status block memory areas */
8738 for (i = NIC_SRAM_STATS_BLK;
8739 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8741 tg3_write_mem(tp, i, 0);
8746 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8748 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8749 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8750 if (!tg3_flag(tp, 5705_PLUS))
8751 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8753 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8754 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8755 /* reset to prevent losing 1st rx packet intermittently */
8756 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8760 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8761 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8762 MAC_MODE_FHDE_ENABLE;
8763 if (tg3_flag(tp, ENABLE_APE))
8764 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8765 if (!tg3_flag(tp, 5705_PLUS) &&
8766 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8767 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8768 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8769 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8772 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8773 * If TG3_FLAG_IS_NIC is zero, we should read the
8774 * register to preserve the GPIO settings for LOMs. The GPIOs,
8775 * whether used as inputs or outputs, are set by boot code after
8778 if (!tg3_flag(tp, IS_NIC)) {
8781 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8782 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8783 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8786 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8787 GRC_LCLCTRL_GPIO_OUTPUT3;
8789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8790 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8792 tp->grc_local_ctrl &= ~gpio_mask;
8793 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8795 /* GPIO1 must be driven high for eeprom write protect */
8796 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8797 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8798 GRC_LCLCTRL_GPIO_OUTPUT1);
8800 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8803 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8804 val = tr32(MSGINT_MODE);
8805 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8806 if (!tg3_flag(tp, 1SHOT_MSI))
8807 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8808 tw32(MSGINT_MODE, val);
8811 if (!tg3_flag(tp, 5705_PLUS)) {
8812 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8816 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8817 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8818 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8819 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8820 WDMAC_MODE_LNGREAD_ENAB);
8822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8823 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8824 if (tg3_flag(tp, TSO_CAPABLE) &&
8825 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8826 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8828 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8829 !tg3_flag(tp, IS_5788)) {
8830 val |= WDMAC_MODE_RX_ACCEL;
8834 /* Enable host coalescing bug fix */
8835 if (tg3_flag(tp, 5755_PLUS))
8836 val |= WDMAC_MODE_STATUS_TAG_FIX;
8838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8839 val |= WDMAC_MODE_BURST_ALL_DATA;
8841 tw32_f(WDMAC_MODE, val);
8844 if (tg3_flag(tp, PCIX_MODE)) {
8847 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8850 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8851 pcix_cmd |= PCI_X_CMD_READ_2K;
8852 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8853 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8854 pcix_cmd |= PCI_X_CMD_READ_2K;
8856 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8860 tw32_f(RDMAC_MODE, rdmac_mode);
8863 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8864 if (!tg3_flag(tp, 5705_PLUS))
8865 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8867 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8869 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8871 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8873 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8874 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8875 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8876 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8877 val |= RCVDBDI_MODE_LRG_RING_SZ;
8878 tw32(RCVDBDI_MODE, val);
8879 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8880 if (tg3_flag(tp, HW_TSO_1) ||
8881 tg3_flag(tp, HW_TSO_2) ||
8882 tg3_flag(tp, HW_TSO_3))
8883 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8884 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8885 if (tg3_flag(tp, ENABLE_TSS))
8886 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8887 tw32(SNDBDI_MODE, val);
8888 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8890 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8891 err = tg3_load_5701_a0_firmware_fix(tp);
8896 if (tg3_flag(tp, TSO_CAPABLE)) {
8897 err = tg3_load_tso_firmware(tp);
8902 tp->tx_mode = TX_MODE_ENABLE;
8904 if (tg3_flag(tp, 5755_PLUS) ||
8905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8906 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8909 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8910 tp->tx_mode &= ~val;
8911 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8914 tw32_f(MAC_TX_MODE, tp->tx_mode);
8917 if (tg3_flag(tp, ENABLE_RSS)) {
8919 u32 reg = MAC_RSS_INDIR_TBL_0;
8921 if (tp->irq_cnt == 2) {
8922 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8929 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8930 val = i % (tp->irq_cnt - 1);
8932 for (; i % 8; i++) {
8934 val |= (i % (tp->irq_cnt - 1));
8941 /* Setup the "secret" hash key. */
8942 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8943 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8944 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8945 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8946 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8947 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8948 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8949 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8950 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8951 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8954 tp->rx_mode = RX_MODE_ENABLE;
8955 if (tg3_flag(tp, 5755_PLUS))
8956 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8958 if (tg3_flag(tp, ENABLE_RSS))
8959 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8960 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8961 RX_MODE_RSS_IPV6_HASH_EN |
8962 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8963 RX_MODE_RSS_IPV4_HASH_EN |
8964 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8966 tw32_f(MAC_RX_MODE, tp->rx_mode);
8969 tw32(MAC_LED_CTRL, tp->led_ctrl);
8971 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8972 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8973 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8976 tw32_f(MAC_RX_MODE, tp->rx_mode);
8979 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8980 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8981 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8982 /* Set drive transmission level to 1.2V */
8983 /* only if the signal pre-emphasis bit is not set */
8984 val = tr32(MAC_SERDES_CFG);
8987 tw32(MAC_SERDES_CFG, val);
8989 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8990 tw32(MAC_SERDES_CFG, 0x616000);
8993 /* Prevent chip from dropping frames when flow control
8996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9000 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9003 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9004 /* Use hardware link auto-negotiation */
9005 tg3_flag_set(tp, HW_AUTONEG);
9008 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9012 tmp = tr32(SERDES_RX_CTRL);
9013 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9014 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9015 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9016 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9019 if (!tg3_flag(tp, USE_PHYLIB)) {
9020 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9021 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9022 tp->link_config.speed = tp->link_config.orig_speed;
9023 tp->link_config.duplex = tp->link_config.orig_duplex;
9024 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9027 err = tg3_setup_phy(tp, 0);
9031 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9032 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9035 /* Clear CRC stats. */
9036 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9037 tg3_writephy(tp, MII_TG3_TEST1,
9038 tmp | MII_TG3_TEST1_CRC_EN);
9039 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9044 __tg3_set_rx_mode(tp->dev);
9046 /* Initialize receive rules. */
9047 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9048 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9049 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9050 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9052 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9056 if (tg3_flag(tp, ENABLE_ASF))
9060 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9062 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9064 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9066 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9068 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9070 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9072 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9074 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9076 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9078 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9080 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9082 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9084 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9086 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9094 if (tg3_flag(tp, ENABLE_APE))
9095 /* Write our heartbeat update interval to APE. */
9096 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9097 APE_HOST_HEARTBEAT_INT_DISABLE);
9099 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9104 /* Called at device open time to get the chip ready for
9105 * packet processing. Invoked with tp->lock held.
9107 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9109 tg3_switch_clocks(tp);
9111 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9113 return tg3_reset_hw(tp, reset_phy);
9116 #define TG3_STAT_ADD32(PSTAT, REG) \
9117 do { u32 __val = tr32(REG); \
9118 (PSTAT)->low += __val; \
9119 if ((PSTAT)->low < __val) \
9120 (PSTAT)->high += 1; \
9123 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9125 struct tg3_hw_stats *sp = tp->hw_stats;
9127 if (!netif_carrier_ok(tp->dev))
9130 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9131 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9132 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9133 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9134 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9135 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9136 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9137 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9138 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9139 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9140 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9141 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9142 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9144 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9145 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9146 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9147 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9148 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9149 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9150 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9151 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9152 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9153 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9154 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9155 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9156 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9157 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9159 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9160 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9161 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9162 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9163 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9165 u32 val = tr32(HOSTCC_FLOW_ATTN);
9166 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9168 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9169 sp->rx_discards.low += val;
9170 if (sp->rx_discards.low < val)
9171 sp->rx_discards.high += 1;
9173 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9175 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9178 static void tg3_chk_missed_msi(struct tg3 *tp)
9182 for (i = 0; i < tp->irq_cnt; i++) {
9183 struct tg3_napi *tnapi = &tp->napi[i];
9185 if (tg3_has_work(tnapi)) {
9186 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9187 tnapi->last_tx_cons == tnapi->tx_cons) {
9188 if (tnapi->chk_msi_cnt < 1) {
9189 tnapi->chk_msi_cnt++;
9195 tnapi->chk_msi_cnt = 0;
9196 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9197 tnapi->last_tx_cons = tnapi->tx_cons;
9201 static void tg3_timer(unsigned long __opaque)
9203 struct tg3 *tp = (struct tg3 *) __opaque;
9208 spin_lock(&tp->lock);
9210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9212 tg3_chk_missed_msi(tp);
9214 if (!tg3_flag(tp, TAGGED_STATUS)) {
9215 /* All of this garbage is because when using non-tagged
9216 * IRQ status the mailbox/status_block protocol the chip
9217 * uses with the cpu is race prone.
9219 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9220 tw32(GRC_LOCAL_CTRL,
9221 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9223 tw32(HOSTCC_MODE, tp->coalesce_mode |
9224 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9227 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9228 tg3_flag_set(tp, RESTART_TIMER);
9229 spin_unlock(&tp->lock);
9230 schedule_work(&tp->reset_task);
9235 /* This part only runs once per second. */
9236 if (!--tp->timer_counter) {
9237 if (tg3_flag(tp, 5705_PLUS))
9238 tg3_periodic_fetch_stats(tp);
9240 if (tp->setlpicnt && !--tp->setlpicnt)
9241 tg3_phy_eee_enable(tp);
9243 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9247 mac_stat = tr32(MAC_STATUS);
9250 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9251 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9253 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9257 tg3_setup_phy(tp, 0);
9258 } else if (tg3_flag(tp, POLL_SERDES)) {
9259 u32 mac_stat = tr32(MAC_STATUS);
9262 if (netif_carrier_ok(tp->dev) &&
9263 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9266 if (!netif_carrier_ok(tp->dev) &&
9267 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9268 MAC_STATUS_SIGNAL_DET))) {
9272 if (!tp->serdes_counter) {
9275 ~MAC_MODE_PORT_MODE_MASK));
9277 tw32_f(MAC_MODE, tp->mac_mode);
9280 tg3_setup_phy(tp, 0);
9282 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9283 tg3_flag(tp, 5780_CLASS)) {
9284 tg3_serdes_parallel_detect(tp);
9287 tp->timer_counter = tp->timer_multiplier;
9290 /* Heartbeat is only sent once every 2 seconds.
9292 * The heartbeat is to tell the ASF firmware that the host
9293 * driver is still alive. In the event that the OS crashes,
9294 * ASF needs to reset the hardware to free up the FIFO space
9295 * that may be filled with rx packets destined for the host.
9296 * If the FIFO is full, ASF will no longer function properly.
9298 * Unintended resets have been reported on real time kernels
9299 * where the timer doesn't run on time. Netpoll will also have
9302 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9303 * to check the ring condition when the heartbeat is expiring
9304 * before doing the reset. This will prevent most unintended
9307 if (!--tp->asf_counter) {
9308 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9309 tg3_wait_for_event_ack(tp);
9311 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9312 FWCMD_NICDRV_ALIVE3);
9313 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9314 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9315 TG3_FW_UPDATE_TIMEOUT_SEC);
9317 tg3_generate_fw_event(tp);
9319 tp->asf_counter = tp->asf_multiplier;
9322 spin_unlock(&tp->lock);
9325 tp->timer.expires = jiffies + tp->timer_offset;
9326 add_timer(&tp->timer);
9329 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9332 unsigned long flags;
9334 struct tg3_napi *tnapi = &tp->napi[irq_num];
9336 if (tp->irq_cnt == 1)
9337 name = tp->dev->name;
9339 name = &tnapi->irq_lbl[0];
9340 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9341 name[IFNAMSIZ-1] = 0;
9344 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9346 if (tg3_flag(tp, 1SHOT_MSI))
9351 if (tg3_flag(tp, TAGGED_STATUS))
9352 fn = tg3_interrupt_tagged;
9353 flags = IRQF_SHARED;
9356 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9359 static int tg3_test_interrupt(struct tg3 *tp)
9361 struct tg3_napi *tnapi = &tp->napi[0];
9362 struct net_device *dev = tp->dev;
9363 int err, i, intr_ok = 0;
9366 if (!netif_running(dev))
9369 tg3_disable_ints(tp);
9371 free_irq(tnapi->irq_vec, tnapi);
9374 * Turn off MSI one shot mode. Otherwise this test has no
9375 * observable way to know whether the interrupt was delivered.
9377 if (tg3_flag(tp, 57765_PLUS)) {
9378 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9379 tw32(MSGINT_MODE, val);
9382 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9383 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9387 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9388 tg3_enable_ints(tp);
9390 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9393 for (i = 0; i < 5; i++) {
9394 u32 int_mbox, misc_host_ctrl;
9396 int_mbox = tr32_mailbox(tnapi->int_mbox);
9397 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9399 if ((int_mbox != 0) ||
9400 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9405 if (tg3_flag(tp, 57765_PLUS) &&
9406 tnapi->hw_status->status_tag != tnapi->last_tag)
9407 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9412 tg3_disable_ints(tp);
9414 free_irq(tnapi->irq_vec, tnapi);
9416 err = tg3_request_irq(tp, 0);
9422 /* Reenable MSI one shot mode. */
9423 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9424 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9425 tw32(MSGINT_MODE, val);
9433 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9434 * successfully restored
9436 static int tg3_test_msi(struct tg3 *tp)
9441 if (!tg3_flag(tp, USING_MSI))
9444 /* Turn off SERR reporting in case MSI terminates with Master
9447 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9448 pci_write_config_word(tp->pdev, PCI_COMMAND,
9449 pci_cmd & ~PCI_COMMAND_SERR);
9451 err = tg3_test_interrupt(tp);
9453 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9458 /* other failures */
9462 /* MSI test failed, go back to INTx mode */
9463 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9464 "to INTx mode. Please report this failure to the PCI "
9465 "maintainer and include system chipset information\n");
9467 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9469 pci_disable_msi(tp->pdev);
9471 tg3_flag_clear(tp, USING_MSI);
9472 tp->napi[0].irq_vec = tp->pdev->irq;
9474 err = tg3_request_irq(tp, 0);
9478 /* Need to reset the chip because the MSI cycle may have terminated
9479 * with Master Abort.
9481 tg3_full_lock(tp, 1);
9483 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9484 err = tg3_init_hw(tp, 1);
9486 tg3_full_unlock(tp);
9489 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9494 static int tg3_request_firmware(struct tg3 *tp)
9496 const __be32 *fw_data;
9498 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9499 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9504 fw_data = (void *)tp->fw->data;
9506 /* Firmware blob starts with version numbers, followed by
9507 * start address and _full_ length including BSS sections
9508 * (which must be longer than the actual data, of course
9511 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9512 if (tp->fw_len < (tp->fw->size - 12)) {
9513 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9514 tp->fw_len, tp->fw_needed);
9515 release_firmware(tp->fw);
9520 /* We no longer need firmware; we have it. */
9521 tp->fw_needed = NULL;
9525 static bool tg3_enable_msix(struct tg3 *tp)
9527 int i, rc, cpus = num_online_cpus();
9528 struct msix_entry msix_ent[tp->irq_max];
9531 /* Just fallback to the simpler MSI mode. */
9535 * We want as many rx rings enabled as there are cpus.
9536 * The first MSIX vector only deals with link interrupts, etc,
9537 * so we add one to the number of vectors we are requesting.
9539 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9541 for (i = 0; i < tp->irq_max; i++) {
9542 msix_ent[i].entry = i;
9543 msix_ent[i].vector = 0;
9546 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9549 } else if (rc != 0) {
9550 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9552 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9557 for (i = 0; i < tp->irq_max; i++)
9558 tp->napi[i].irq_vec = msix_ent[i].vector;
9560 netif_set_real_num_tx_queues(tp->dev, 1);
9561 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9562 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9563 pci_disable_msix(tp->pdev);
9567 if (tp->irq_cnt > 1) {
9568 tg3_flag_set(tp, ENABLE_RSS);
9570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9572 tg3_flag_set(tp, ENABLE_TSS);
9573 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9580 static void tg3_ints_init(struct tg3 *tp)
9582 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9583 !tg3_flag(tp, TAGGED_STATUS)) {
9584 /* All MSI supporting chips should support tagged
9585 * status. Assert that this is the case.
9587 netdev_warn(tp->dev,
9588 "MSI without TAGGED_STATUS? Not using MSI\n");
9592 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9593 tg3_flag_set(tp, USING_MSIX);
9594 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9595 tg3_flag_set(tp, USING_MSI);
9597 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9598 u32 msi_mode = tr32(MSGINT_MODE);
9599 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9600 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9601 if (!tg3_flag(tp, 1SHOT_MSI))
9602 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9603 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9606 if (!tg3_flag(tp, USING_MSIX)) {
9608 tp->napi[0].irq_vec = tp->pdev->irq;
9609 netif_set_real_num_tx_queues(tp->dev, 1);
9610 netif_set_real_num_rx_queues(tp->dev, 1);
9614 static void tg3_ints_fini(struct tg3 *tp)
9616 if (tg3_flag(tp, USING_MSIX))
9617 pci_disable_msix(tp->pdev);
9618 else if (tg3_flag(tp, USING_MSI))
9619 pci_disable_msi(tp->pdev);
9620 tg3_flag_clear(tp, USING_MSI);
9621 tg3_flag_clear(tp, USING_MSIX);
9622 tg3_flag_clear(tp, ENABLE_RSS);
9623 tg3_flag_clear(tp, ENABLE_TSS);
9626 static int tg3_open(struct net_device *dev)
9628 struct tg3 *tp = netdev_priv(dev);
9631 if (tp->fw_needed) {
9632 err = tg3_request_firmware(tp);
9633 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9637 netdev_warn(tp->dev, "TSO capability disabled\n");
9638 tg3_flag_clear(tp, TSO_CAPABLE);
9639 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9640 netdev_notice(tp->dev, "TSO capability restored\n");
9641 tg3_flag_set(tp, TSO_CAPABLE);
9645 netif_carrier_off(tp->dev);
9647 err = tg3_power_up(tp);
9651 tg3_full_lock(tp, 0);
9653 tg3_disable_ints(tp);
9654 tg3_flag_clear(tp, INIT_COMPLETE);
9656 tg3_full_unlock(tp);
9659 * Setup interrupts first so we know how
9660 * many NAPI resources to allocate
9664 /* The placement of this call is tied
9665 * to the setup and use of Host TX descriptors.
9667 err = tg3_alloc_consistent(tp);
9673 tg3_napi_enable(tp);
9675 for (i = 0; i < tp->irq_cnt; i++) {
9676 struct tg3_napi *tnapi = &tp->napi[i];
9677 err = tg3_request_irq(tp, i);
9679 for (i--; i >= 0; i--)
9680 free_irq(tnapi->irq_vec, tnapi);
9688 tg3_full_lock(tp, 0);
9690 err = tg3_init_hw(tp, 1);
9692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9695 if (tg3_flag(tp, TAGGED_STATUS) &&
9696 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9697 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9698 tp->timer_offset = HZ;
9700 tp->timer_offset = HZ / 10;
9702 BUG_ON(tp->timer_offset > HZ);
9703 tp->timer_counter = tp->timer_multiplier =
9704 (HZ / tp->timer_offset);
9705 tp->asf_counter = tp->asf_multiplier =
9706 ((HZ / tp->timer_offset) * 2);
9708 init_timer(&tp->timer);
9709 tp->timer.expires = jiffies + tp->timer_offset;
9710 tp->timer.data = (unsigned long) tp;
9711 tp->timer.function = tg3_timer;
9714 tg3_full_unlock(tp);
9719 if (tg3_flag(tp, USING_MSI)) {
9720 err = tg3_test_msi(tp);
9723 tg3_full_lock(tp, 0);
9724 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9726 tg3_full_unlock(tp);
9731 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9732 u32 val = tr32(PCIE_TRANSACTION_CFG);
9734 tw32(PCIE_TRANSACTION_CFG,
9735 val | PCIE_TRANS_CFG_1SHOT_MSI);
9741 tg3_full_lock(tp, 0);
9743 add_timer(&tp->timer);
9744 tg3_flag_set(tp, INIT_COMPLETE);
9745 tg3_enable_ints(tp);
9747 tg3_full_unlock(tp);
9749 netif_tx_start_all_queues(dev);
9752 * Reset loopback feature if it was turned on while the device was down
9753 * make sure that it's installed properly now.
9755 if (dev->features & NETIF_F_LOOPBACK)
9756 tg3_set_loopback(dev, dev->features);
9761 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9762 struct tg3_napi *tnapi = &tp->napi[i];
9763 free_irq(tnapi->irq_vec, tnapi);
9767 tg3_napi_disable(tp);
9769 tg3_free_consistent(tp);
9773 tg3_frob_aux_power(tp, false);
9774 pci_set_power_state(tp->pdev, PCI_D3hot);
9778 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9779 struct rtnl_link_stats64 *);
9780 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9782 static int tg3_close(struct net_device *dev)
9785 struct tg3 *tp = netdev_priv(dev);
9787 tg3_napi_disable(tp);
9788 cancel_work_sync(&tp->reset_task);
9790 netif_tx_stop_all_queues(dev);
9792 del_timer_sync(&tp->timer);
9796 tg3_full_lock(tp, 1);
9798 tg3_disable_ints(tp);
9800 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9802 tg3_flag_clear(tp, INIT_COMPLETE);
9804 tg3_full_unlock(tp);
9806 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9807 struct tg3_napi *tnapi = &tp->napi[i];
9808 free_irq(tnapi->irq_vec, tnapi);
9813 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9815 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9816 sizeof(tp->estats_prev));
9820 tg3_free_consistent(tp);
9824 netif_carrier_off(tp->dev);
9829 static inline u64 get_stat64(tg3_stat64_t *val)
9831 return ((u64)val->high << 32) | ((u64)val->low);
9834 static u64 calc_crc_errors(struct tg3 *tp)
9836 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9838 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9839 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9843 spin_lock_bh(&tp->lock);
9844 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9845 tg3_writephy(tp, MII_TG3_TEST1,
9846 val | MII_TG3_TEST1_CRC_EN);
9847 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9850 spin_unlock_bh(&tp->lock);
9852 tp->phy_crc_errors += val;
9854 return tp->phy_crc_errors;
9857 return get_stat64(&hw_stats->rx_fcs_errors);
9860 #define ESTAT_ADD(member) \
9861 estats->member = old_estats->member + \
9862 get_stat64(&hw_stats->member)
9864 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9866 struct tg3_ethtool_stats *estats = &tp->estats;
9867 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9868 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9873 ESTAT_ADD(rx_octets);
9874 ESTAT_ADD(rx_fragments);
9875 ESTAT_ADD(rx_ucast_packets);
9876 ESTAT_ADD(rx_mcast_packets);
9877 ESTAT_ADD(rx_bcast_packets);
9878 ESTAT_ADD(rx_fcs_errors);
9879 ESTAT_ADD(rx_align_errors);
9880 ESTAT_ADD(rx_xon_pause_rcvd);
9881 ESTAT_ADD(rx_xoff_pause_rcvd);
9882 ESTAT_ADD(rx_mac_ctrl_rcvd);
9883 ESTAT_ADD(rx_xoff_entered);
9884 ESTAT_ADD(rx_frame_too_long_errors);
9885 ESTAT_ADD(rx_jabbers);
9886 ESTAT_ADD(rx_undersize_packets);
9887 ESTAT_ADD(rx_in_length_errors);
9888 ESTAT_ADD(rx_out_length_errors);
9889 ESTAT_ADD(rx_64_or_less_octet_packets);
9890 ESTAT_ADD(rx_65_to_127_octet_packets);
9891 ESTAT_ADD(rx_128_to_255_octet_packets);
9892 ESTAT_ADD(rx_256_to_511_octet_packets);
9893 ESTAT_ADD(rx_512_to_1023_octet_packets);
9894 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9895 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9896 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9897 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9898 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9900 ESTAT_ADD(tx_octets);
9901 ESTAT_ADD(tx_collisions);
9902 ESTAT_ADD(tx_xon_sent);
9903 ESTAT_ADD(tx_xoff_sent);
9904 ESTAT_ADD(tx_flow_control);
9905 ESTAT_ADD(tx_mac_errors);
9906 ESTAT_ADD(tx_single_collisions);
9907 ESTAT_ADD(tx_mult_collisions);
9908 ESTAT_ADD(tx_deferred);
9909 ESTAT_ADD(tx_excessive_collisions);
9910 ESTAT_ADD(tx_late_collisions);
9911 ESTAT_ADD(tx_collide_2times);
9912 ESTAT_ADD(tx_collide_3times);
9913 ESTAT_ADD(tx_collide_4times);
9914 ESTAT_ADD(tx_collide_5times);
9915 ESTAT_ADD(tx_collide_6times);
9916 ESTAT_ADD(tx_collide_7times);
9917 ESTAT_ADD(tx_collide_8times);
9918 ESTAT_ADD(tx_collide_9times);
9919 ESTAT_ADD(tx_collide_10times);
9920 ESTAT_ADD(tx_collide_11times);
9921 ESTAT_ADD(tx_collide_12times);
9922 ESTAT_ADD(tx_collide_13times);
9923 ESTAT_ADD(tx_collide_14times);
9924 ESTAT_ADD(tx_collide_15times);
9925 ESTAT_ADD(tx_ucast_packets);
9926 ESTAT_ADD(tx_mcast_packets);
9927 ESTAT_ADD(tx_bcast_packets);
9928 ESTAT_ADD(tx_carrier_sense_errors);
9929 ESTAT_ADD(tx_discards);
9930 ESTAT_ADD(tx_errors);
9932 ESTAT_ADD(dma_writeq_full);
9933 ESTAT_ADD(dma_write_prioq_full);
9934 ESTAT_ADD(rxbds_empty);
9935 ESTAT_ADD(rx_discards);
9936 ESTAT_ADD(rx_errors);
9937 ESTAT_ADD(rx_threshold_hit);
9939 ESTAT_ADD(dma_readq_full);
9940 ESTAT_ADD(dma_read_prioq_full);
9941 ESTAT_ADD(tx_comp_queue_full);
9943 ESTAT_ADD(ring_set_send_prod_index);
9944 ESTAT_ADD(ring_status_update);
9945 ESTAT_ADD(nic_irqs);
9946 ESTAT_ADD(nic_avoided_irqs);
9947 ESTAT_ADD(nic_tx_threshold_hit);
9949 ESTAT_ADD(mbuf_lwm_thresh_hit);
9954 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9955 struct rtnl_link_stats64 *stats)
9957 struct tg3 *tp = netdev_priv(dev);
9958 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9959 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9964 stats->rx_packets = old_stats->rx_packets +
9965 get_stat64(&hw_stats->rx_ucast_packets) +
9966 get_stat64(&hw_stats->rx_mcast_packets) +
9967 get_stat64(&hw_stats->rx_bcast_packets);
9969 stats->tx_packets = old_stats->tx_packets +
9970 get_stat64(&hw_stats->tx_ucast_packets) +
9971 get_stat64(&hw_stats->tx_mcast_packets) +
9972 get_stat64(&hw_stats->tx_bcast_packets);
9974 stats->rx_bytes = old_stats->rx_bytes +
9975 get_stat64(&hw_stats->rx_octets);
9976 stats->tx_bytes = old_stats->tx_bytes +
9977 get_stat64(&hw_stats->tx_octets);
9979 stats->rx_errors = old_stats->rx_errors +
9980 get_stat64(&hw_stats->rx_errors);
9981 stats->tx_errors = old_stats->tx_errors +
9982 get_stat64(&hw_stats->tx_errors) +
9983 get_stat64(&hw_stats->tx_mac_errors) +
9984 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9985 get_stat64(&hw_stats->tx_discards);
9987 stats->multicast = old_stats->multicast +
9988 get_stat64(&hw_stats->rx_mcast_packets);
9989 stats->collisions = old_stats->collisions +
9990 get_stat64(&hw_stats->tx_collisions);
9992 stats->rx_length_errors = old_stats->rx_length_errors +
9993 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9994 get_stat64(&hw_stats->rx_undersize_packets);
9996 stats->rx_over_errors = old_stats->rx_over_errors +
9997 get_stat64(&hw_stats->rxbds_empty);
9998 stats->rx_frame_errors = old_stats->rx_frame_errors +
9999 get_stat64(&hw_stats->rx_align_errors);
10000 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10001 get_stat64(&hw_stats->tx_discards);
10002 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10003 get_stat64(&hw_stats->tx_carrier_sense_errors);
10005 stats->rx_crc_errors = old_stats->rx_crc_errors +
10006 calc_crc_errors(tp);
10008 stats->rx_missed_errors = old_stats->rx_missed_errors +
10009 get_stat64(&hw_stats->rx_discards);
10011 stats->rx_dropped = tp->rx_dropped;
10016 static inline u32 calc_crc(unsigned char *buf, int len)
10024 for (j = 0; j < len; j++) {
10027 for (k = 0; k < 8; k++) {
10040 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10042 /* accept or reject all multicast frames */
10043 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10044 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10045 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10046 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10049 static void __tg3_set_rx_mode(struct net_device *dev)
10051 struct tg3 *tp = netdev_priv(dev);
10054 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10055 RX_MODE_KEEP_VLAN_TAG);
10057 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10058 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10061 if (!tg3_flag(tp, ENABLE_ASF))
10062 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10065 if (dev->flags & IFF_PROMISC) {
10066 /* Promiscuous mode. */
10067 rx_mode |= RX_MODE_PROMISC;
10068 } else if (dev->flags & IFF_ALLMULTI) {
10069 /* Accept all multicast. */
10070 tg3_set_multi(tp, 1);
10071 } else if (netdev_mc_empty(dev)) {
10072 /* Reject all multicast. */
10073 tg3_set_multi(tp, 0);
10075 /* Accept one or more multicast(s). */
10076 struct netdev_hw_addr *ha;
10077 u32 mc_filter[4] = { 0, };
10082 netdev_for_each_mc_addr(ha, dev) {
10083 crc = calc_crc(ha->addr, ETH_ALEN);
10085 regidx = (bit & 0x60) >> 5;
10087 mc_filter[regidx] |= (1 << bit);
10090 tw32(MAC_HASH_REG_0, mc_filter[0]);
10091 tw32(MAC_HASH_REG_1, mc_filter[1]);
10092 tw32(MAC_HASH_REG_2, mc_filter[2]);
10093 tw32(MAC_HASH_REG_3, mc_filter[3]);
10096 if (rx_mode != tp->rx_mode) {
10097 tp->rx_mode = rx_mode;
10098 tw32_f(MAC_RX_MODE, rx_mode);
10103 static void tg3_set_rx_mode(struct net_device *dev)
10105 struct tg3 *tp = netdev_priv(dev);
10107 if (!netif_running(dev))
10110 tg3_full_lock(tp, 0);
10111 __tg3_set_rx_mode(dev);
10112 tg3_full_unlock(tp);
10115 static int tg3_get_regs_len(struct net_device *dev)
10117 return TG3_REG_BLK_SIZE;
10120 static void tg3_get_regs(struct net_device *dev,
10121 struct ethtool_regs *regs, void *_p)
10123 struct tg3 *tp = netdev_priv(dev);
10127 memset(_p, 0, TG3_REG_BLK_SIZE);
10129 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10132 tg3_full_lock(tp, 0);
10134 tg3_dump_legacy_regs(tp, (u32 *)_p);
10136 tg3_full_unlock(tp);
10139 static int tg3_get_eeprom_len(struct net_device *dev)
10141 struct tg3 *tp = netdev_priv(dev);
10143 return tp->nvram_size;
10146 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10148 struct tg3 *tp = netdev_priv(dev);
10151 u32 i, offset, len, b_offset, b_count;
10154 if (tg3_flag(tp, NO_NVRAM))
10157 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10160 offset = eeprom->offset;
10164 eeprom->magic = TG3_EEPROM_MAGIC;
10167 /* adjustments to start on required 4 byte boundary */
10168 b_offset = offset & 3;
10169 b_count = 4 - b_offset;
10170 if (b_count > len) {
10171 /* i.e. offset=1 len=2 */
10174 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10177 memcpy(data, ((char *)&val) + b_offset, b_count);
10180 eeprom->len += b_count;
10183 /* read bytes up to the last 4 byte boundary */
10184 pd = &data[eeprom->len];
10185 for (i = 0; i < (len - (len & 3)); i += 4) {
10186 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10191 memcpy(pd + i, &val, 4);
10196 /* read last bytes not ending on 4 byte boundary */
10197 pd = &data[eeprom->len];
10199 b_offset = offset + len - b_count;
10200 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10203 memcpy(pd, &val, b_count);
10204 eeprom->len += b_count;
10209 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10211 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10213 struct tg3 *tp = netdev_priv(dev);
10215 u32 offset, len, b_offset, odd_len;
10219 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10222 if (tg3_flag(tp, NO_NVRAM) ||
10223 eeprom->magic != TG3_EEPROM_MAGIC)
10226 offset = eeprom->offset;
10229 if ((b_offset = (offset & 3))) {
10230 /* adjustments to start on required 4 byte boundary */
10231 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10242 /* adjustments to end on required 4 byte boundary */
10244 len = (len + 3) & ~3;
10245 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10251 if (b_offset || odd_len) {
10252 buf = kmalloc(len, GFP_KERNEL);
10256 memcpy(buf, &start, 4);
10258 memcpy(buf+len-4, &end, 4);
10259 memcpy(buf + b_offset, data, eeprom->len);
10262 ret = tg3_nvram_write_block(tp, offset, len, buf);
10270 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10272 struct tg3 *tp = netdev_priv(dev);
10274 if (tg3_flag(tp, USE_PHYLIB)) {
10275 struct phy_device *phydev;
10276 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10278 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10279 return phy_ethtool_gset(phydev, cmd);
10282 cmd->supported = (SUPPORTED_Autoneg);
10284 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10285 cmd->supported |= (SUPPORTED_1000baseT_Half |
10286 SUPPORTED_1000baseT_Full);
10288 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10289 cmd->supported |= (SUPPORTED_100baseT_Half |
10290 SUPPORTED_100baseT_Full |
10291 SUPPORTED_10baseT_Half |
10292 SUPPORTED_10baseT_Full |
10294 cmd->port = PORT_TP;
10296 cmd->supported |= SUPPORTED_FIBRE;
10297 cmd->port = PORT_FIBRE;
10300 cmd->advertising = tp->link_config.advertising;
10301 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10302 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10303 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10304 cmd->advertising |= ADVERTISED_Pause;
10306 cmd->advertising |= ADVERTISED_Pause |
10307 ADVERTISED_Asym_Pause;
10309 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10310 cmd->advertising |= ADVERTISED_Asym_Pause;
10313 if (netif_running(dev)) {
10314 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10315 cmd->duplex = tp->link_config.active_duplex;
10317 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10318 cmd->duplex = DUPLEX_INVALID;
10320 cmd->phy_address = tp->phy_addr;
10321 cmd->transceiver = XCVR_INTERNAL;
10322 cmd->autoneg = tp->link_config.autoneg;
10328 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10330 struct tg3 *tp = netdev_priv(dev);
10331 u32 speed = ethtool_cmd_speed(cmd);
10333 if (tg3_flag(tp, USE_PHYLIB)) {
10334 struct phy_device *phydev;
10335 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10337 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10338 return phy_ethtool_sset(phydev, cmd);
10341 if (cmd->autoneg != AUTONEG_ENABLE &&
10342 cmd->autoneg != AUTONEG_DISABLE)
10345 if (cmd->autoneg == AUTONEG_DISABLE &&
10346 cmd->duplex != DUPLEX_FULL &&
10347 cmd->duplex != DUPLEX_HALF)
10350 if (cmd->autoneg == AUTONEG_ENABLE) {
10351 u32 mask = ADVERTISED_Autoneg |
10353 ADVERTISED_Asym_Pause;
10355 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10356 mask |= ADVERTISED_1000baseT_Half |
10357 ADVERTISED_1000baseT_Full;
10359 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10360 mask |= ADVERTISED_100baseT_Half |
10361 ADVERTISED_100baseT_Full |
10362 ADVERTISED_10baseT_Half |
10363 ADVERTISED_10baseT_Full |
10366 mask |= ADVERTISED_FIBRE;
10368 if (cmd->advertising & ~mask)
10371 mask &= (ADVERTISED_1000baseT_Half |
10372 ADVERTISED_1000baseT_Full |
10373 ADVERTISED_100baseT_Half |
10374 ADVERTISED_100baseT_Full |
10375 ADVERTISED_10baseT_Half |
10376 ADVERTISED_10baseT_Full);
10378 cmd->advertising &= mask;
10380 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10381 if (speed != SPEED_1000)
10384 if (cmd->duplex != DUPLEX_FULL)
10387 if (speed != SPEED_100 &&
10393 tg3_full_lock(tp, 0);
10395 tp->link_config.autoneg = cmd->autoneg;
10396 if (cmd->autoneg == AUTONEG_ENABLE) {
10397 tp->link_config.advertising = (cmd->advertising |
10398 ADVERTISED_Autoneg);
10399 tp->link_config.speed = SPEED_INVALID;
10400 tp->link_config.duplex = DUPLEX_INVALID;
10402 tp->link_config.advertising = 0;
10403 tp->link_config.speed = speed;
10404 tp->link_config.duplex = cmd->duplex;
10407 tp->link_config.orig_speed = tp->link_config.speed;
10408 tp->link_config.orig_duplex = tp->link_config.duplex;
10409 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10411 if (netif_running(dev))
10412 tg3_setup_phy(tp, 1);
10414 tg3_full_unlock(tp);
10419 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10421 struct tg3 *tp = netdev_priv(dev);
10423 strcpy(info->driver, DRV_MODULE_NAME);
10424 strcpy(info->version, DRV_MODULE_VERSION);
10425 strcpy(info->fw_version, tp->fw_ver);
10426 strcpy(info->bus_info, pci_name(tp->pdev));
10429 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10431 struct tg3 *tp = netdev_priv(dev);
10433 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10434 wol->supported = WAKE_MAGIC;
10436 wol->supported = 0;
10438 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10439 wol->wolopts = WAKE_MAGIC;
10440 memset(&wol->sopass, 0, sizeof(wol->sopass));
10443 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10445 struct tg3 *tp = netdev_priv(dev);
10446 struct device *dp = &tp->pdev->dev;
10448 if (wol->wolopts & ~WAKE_MAGIC)
10450 if ((wol->wolopts & WAKE_MAGIC) &&
10451 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10454 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10456 spin_lock_bh(&tp->lock);
10457 if (device_may_wakeup(dp))
10458 tg3_flag_set(tp, WOL_ENABLE);
10460 tg3_flag_clear(tp, WOL_ENABLE);
10461 spin_unlock_bh(&tp->lock);
10466 static u32 tg3_get_msglevel(struct net_device *dev)
10468 struct tg3 *tp = netdev_priv(dev);
10469 return tp->msg_enable;
10472 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10474 struct tg3 *tp = netdev_priv(dev);
10475 tp->msg_enable = value;
10478 static int tg3_nway_reset(struct net_device *dev)
10480 struct tg3 *tp = netdev_priv(dev);
10483 if (!netif_running(dev))
10486 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10489 if (tg3_flag(tp, USE_PHYLIB)) {
10490 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10492 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10496 spin_lock_bh(&tp->lock);
10498 tg3_readphy(tp, MII_BMCR, &bmcr);
10499 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10500 ((bmcr & BMCR_ANENABLE) ||
10501 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10502 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10506 spin_unlock_bh(&tp->lock);
10512 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10514 struct tg3 *tp = netdev_priv(dev);
10516 ering->rx_max_pending = tp->rx_std_ring_mask;
10517 ering->rx_mini_max_pending = 0;
10518 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10519 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10521 ering->rx_jumbo_max_pending = 0;
10523 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10525 ering->rx_pending = tp->rx_pending;
10526 ering->rx_mini_pending = 0;
10527 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10528 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10530 ering->rx_jumbo_pending = 0;
10532 ering->tx_pending = tp->napi[0].tx_pending;
10535 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10537 struct tg3 *tp = netdev_priv(dev);
10538 int i, irq_sync = 0, err = 0;
10540 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10541 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10542 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10543 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10544 (tg3_flag(tp, TSO_BUG) &&
10545 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10548 if (netif_running(dev)) {
10550 tg3_netif_stop(tp);
10554 tg3_full_lock(tp, irq_sync);
10556 tp->rx_pending = ering->rx_pending;
10558 if (tg3_flag(tp, MAX_RXPEND_64) &&
10559 tp->rx_pending > 63)
10560 tp->rx_pending = 63;
10561 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10563 for (i = 0; i < tp->irq_max; i++)
10564 tp->napi[i].tx_pending = ering->tx_pending;
10566 if (netif_running(dev)) {
10567 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10568 err = tg3_restart_hw(tp, 1);
10570 tg3_netif_start(tp);
10573 tg3_full_unlock(tp);
10575 if (irq_sync && !err)
10581 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10583 struct tg3 *tp = netdev_priv(dev);
10585 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10587 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10588 epause->rx_pause = 1;
10590 epause->rx_pause = 0;
10592 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10593 epause->tx_pause = 1;
10595 epause->tx_pause = 0;
10598 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10600 struct tg3 *tp = netdev_priv(dev);
10603 if (tg3_flag(tp, USE_PHYLIB)) {
10605 struct phy_device *phydev;
10607 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10609 if (!(phydev->supported & SUPPORTED_Pause) ||
10610 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10611 (epause->rx_pause != epause->tx_pause)))
10614 tp->link_config.flowctrl = 0;
10615 if (epause->rx_pause) {
10616 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10618 if (epause->tx_pause) {
10619 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10620 newadv = ADVERTISED_Pause;
10622 newadv = ADVERTISED_Pause |
10623 ADVERTISED_Asym_Pause;
10624 } else if (epause->tx_pause) {
10625 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10626 newadv = ADVERTISED_Asym_Pause;
10630 if (epause->autoneg)
10631 tg3_flag_set(tp, PAUSE_AUTONEG);
10633 tg3_flag_clear(tp, PAUSE_AUTONEG);
10635 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10636 u32 oldadv = phydev->advertising &
10637 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10638 if (oldadv != newadv) {
10639 phydev->advertising &=
10640 ~(ADVERTISED_Pause |
10641 ADVERTISED_Asym_Pause);
10642 phydev->advertising |= newadv;
10643 if (phydev->autoneg) {
10645 * Always renegotiate the link to
10646 * inform our link partner of our
10647 * flow control settings, even if the
10648 * flow control is forced. Let
10649 * tg3_adjust_link() do the final
10650 * flow control setup.
10652 return phy_start_aneg(phydev);
10656 if (!epause->autoneg)
10657 tg3_setup_flow_control(tp, 0, 0);
10659 tp->link_config.orig_advertising &=
10660 ~(ADVERTISED_Pause |
10661 ADVERTISED_Asym_Pause);
10662 tp->link_config.orig_advertising |= newadv;
10667 if (netif_running(dev)) {
10668 tg3_netif_stop(tp);
10672 tg3_full_lock(tp, irq_sync);
10674 if (epause->autoneg)
10675 tg3_flag_set(tp, PAUSE_AUTONEG);
10677 tg3_flag_clear(tp, PAUSE_AUTONEG);
10678 if (epause->rx_pause)
10679 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10681 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10682 if (epause->tx_pause)
10683 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10685 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10687 if (netif_running(dev)) {
10688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10689 err = tg3_restart_hw(tp, 1);
10691 tg3_netif_start(tp);
10694 tg3_full_unlock(tp);
10700 static int tg3_get_sset_count(struct net_device *dev, int sset)
10704 return TG3_NUM_TEST;
10706 return TG3_NUM_STATS;
10708 return -EOPNOTSUPP;
10712 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10714 switch (stringset) {
10716 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
10719 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
10722 WARN_ON(1); /* we need a WARN() */
10727 static int tg3_set_phys_id(struct net_device *dev,
10728 enum ethtool_phys_id_state state)
10730 struct tg3 *tp = netdev_priv(dev);
10732 if (!netif_running(tp->dev))
10736 case ETHTOOL_ID_ACTIVE:
10737 return 1; /* cycle on/off once per second */
10739 case ETHTOOL_ID_ON:
10740 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10741 LED_CTRL_1000MBPS_ON |
10742 LED_CTRL_100MBPS_ON |
10743 LED_CTRL_10MBPS_ON |
10744 LED_CTRL_TRAFFIC_OVERRIDE |
10745 LED_CTRL_TRAFFIC_BLINK |
10746 LED_CTRL_TRAFFIC_LED);
10749 case ETHTOOL_ID_OFF:
10750 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10751 LED_CTRL_TRAFFIC_OVERRIDE);
10754 case ETHTOOL_ID_INACTIVE:
10755 tw32(MAC_LED_CTRL, tp->led_ctrl);
10762 static void tg3_get_ethtool_stats(struct net_device *dev,
10763 struct ethtool_stats *estats, u64 *tmp_stats)
10765 struct tg3 *tp = netdev_priv(dev);
10766 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10769 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10773 u32 offset = 0, len = 0;
10776 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10779 if (magic == TG3_EEPROM_MAGIC) {
10780 for (offset = TG3_NVM_DIR_START;
10781 offset < TG3_NVM_DIR_END;
10782 offset += TG3_NVM_DIRENT_SIZE) {
10783 if (tg3_nvram_read(tp, offset, &val))
10786 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10787 TG3_NVM_DIRTYPE_EXTVPD)
10791 if (offset != TG3_NVM_DIR_END) {
10792 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10793 if (tg3_nvram_read(tp, offset + 4, &offset))
10796 offset = tg3_nvram_logical_addr(tp, offset);
10800 if (!offset || !len) {
10801 offset = TG3_NVM_VPD_OFF;
10802 len = TG3_NVM_VPD_LEN;
10805 buf = kmalloc(len, GFP_KERNEL);
10809 if (magic == TG3_EEPROM_MAGIC) {
10810 for (i = 0; i < len; i += 4) {
10811 /* The data is in little-endian format in NVRAM.
10812 * Use the big-endian read routines to preserve
10813 * the byte order as it exists in NVRAM.
10815 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10821 unsigned int pos = 0;
10823 ptr = (u8 *)&buf[0];
10824 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10825 cnt = pci_read_vpd(tp->pdev, pos,
10827 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10845 #define NVRAM_TEST_SIZE 0x100
10846 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10847 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10848 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10849 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10850 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10851 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10852 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10853 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10855 static int tg3_test_nvram(struct tg3 *tp)
10857 u32 csum, magic, len;
10859 int i, j, k, err = 0, size;
10861 if (tg3_flag(tp, NO_NVRAM))
10864 if (tg3_nvram_read(tp, 0, &magic) != 0)
10867 if (magic == TG3_EEPROM_MAGIC)
10868 size = NVRAM_TEST_SIZE;
10869 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10870 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10871 TG3_EEPROM_SB_FORMAT_1) {
10872 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10873 case TG3_EEPROM_SB_REVISION_0:
10874 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10876 case TG3_EEPROM_SB_REVISION_2:
10877 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10879 case TG3_EEPROM_SB_REVISION_3:
10880 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10882 case TG3_EEPROM_SB_REVISION_4:
10883 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10885 case TG3_EEPROM_SB_REVISION_5:
10886 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10888 case TG3_EEPROM_SB_REVISION_6:
10889 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10896 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10897 size = NVRAM_SELFBOOT_HW_SIZE;
10901 buf = kmalloc(size, GFP_KERNEL);
10906 for (i = 0, j = 0; i < size; i += 4, j++) {
10907 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10914 /* Selfboot format */
10915 magic = be32_to_cpu(buf[0]);
10916 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10917 TG3_EEPROM_MAGIC_FW) {
10918 u8 *buf8 = (u8 *) buf, csum8 = 0;
10920 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10921 TG3_EEPROM_SB_REVISION_2) {
10922 /* For rev 2, the csum doesn't include the MBA. */
10923 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10925 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10928 for (i = 0; i < size; i++)
10941 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10942 TG3_EEPROM_MAGIC_HW) {
10943 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10944 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10945 u8 *buf8 = (u8 *) buf;
10947 /* Separate the parity bits and the data bytes. */
10948 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10949 if ((i == 0) || (i == 8)) {
10953 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10954 parity[k++] = buf8[i] & msk;
10956 } else if (i == 16) {
10960 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10961 parity[k++] = buf8[i] & msk;
10964 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10965 parity[k++] = buf8[i] & msk;
10968 data[j++] = buf8[i];
10972 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10973 u8 hw8 = hweight8(data[i]);
10975 if ((hw8 & 0x1) && parity[i])
10977 else if (!(hw8 & 0x1) && !parity[i])
10986 /* Bootstrap checksum at offset 0x10 */
10987 csum = calc_crc((unsigned char *) buf, 0x10);
10988 if (csum != le32_to_cpu(buf[0x10/4]))
10991 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10992 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10993 if (csum != le32_to_cpu(buf[0xfc/4]))
10998 buf = tg3_vpd_readblock(tp, &len);
11002 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11004 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11008 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11011 i += PCI_VPD_LRDT_TAG_SIZE;
11012 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11013 PCI_VPD_RO_KEYWORD_CHKSUM);
11017 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11019 for (i = 0; i <= j; i++)
11020 csum8 += ((u8 *)buf)[i];
11034 #define TG3_SERDES_TIMEOUT_SEC 2
11035 #define TG3_COPPER_TIMEOUT_SEC 6
11037 static int tg3_test_link(struct tg3 *tp)
11041 if (!netif_running(tp->dev))
11044 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11045 max = TG3_SERDES_TIMEOUT_SEC;
11047 max = TG3_COPPER_TIMEOUT_SEC;
11049 for (i = 0; i < max; i++) {
11050 if (netif_carrier_ok(tp->dev))
11053 if (msleep_interruptible(1000))
11060 /* Only test the commonly used registers */
11061 static int tg3_test_registers(struct tg3 *tp)
11063 int i, is_5705, is_5750;
11064 u32 offset, read_mask, write_mask, val, save_val, read_val;
11068 #define TG3_FL_5705 0x1
11069 #define TG3_FL_NOT_5705 0x2
11070 #define TG3_FL_NOT_5788 0x4
11071 #define TG3_FL_NOT_5750 0x8
11075 /* MAC Control Registers */
11076 { MAC_MODE, TG3_FL_NOT_5705,
11077 0x00000000, 0x00ef6f8c },
11078 { MAC_MODE, TG3_FL_5705,
11079 0x00000000, 0x01ef6b8c },
11080 { MAC_STATUS, TG3_FL_NOT_5705,
11081 0x03800107, 0x00000000 },
11082 { MAC_STATUS, TG3_FL_5705,
11083 0x03800100, 0x00000000 },
11084 { MAC_ADDR_0_HIGH, 0x0000,
11085 0x00000000, 0x0000ffff },
11086 { MAC_ADDR_0_LOW, 0x0000,
11087 0x00000000, 0xffffffff },
11088 { MAC_RX_MTU_SIZE, 0x0000,
11089 0x00000000, 0x0000ffff },
11090 { MAC_TX_MODE, 0x0000,
11091 0x00000000, 0x00000070 },
11092 { MAC_TX_LENGTHS, 0x0000,
11093 0x00000000, 0x00003fff },
11094 { MAC_RX_MODE, TG3_FL_NOT_5705,
11095 0x00000000, 0x000007fc },
11096 { MAC_RX_MODE, TG3_FL_5705,
11097 0x00000000, 0x000007dc },
11098 { MAC_HASH_REG_0, 0x0000,
11099 0x00000000, 0xffffffff },
11100 { MAC_HASH_REG_1, 0x0000,
11101 0x00000000, 0xffffffff },
11102 { MAC_HASH_REG_2, 0x0000,
11103 0x00000000, 0xffffffff },
11104 { MAC_HASH_REG_3, 0x0000,
11105 0x00000000, 0xffffffff },
11107 /* Receive Data and Receive BD Initiator Control Registers. */
11108 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11109 0x00000000, 0xffffffff },
11110 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11111 0x00000000, 0xffffffff },
11112 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11113 0x00000000, 0x00000003 },
11114 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11115 0x00000000, 0xffffffff },
11116 { RCVDBDI_STD_BD+0, 0x0000,
11117 0x00000000, 0xffffffff },
11118 { RCVDBDI_STD_BD+4, 0x0000,
11119 0x00000000, 0xffffffff },
11120 { RCVDBDI_STD_BD+8, 0x0000,
11121 0x00000000, 0xffff0002 },
11122 { RCVDBDI_STD_BD+0xc, 0x0000,
11123 0x00000000, 0xffffffff },
11125 /* Receive BD Initiator Control Registers. */
11126 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11127 0x00000000, 0xffffffff },
11128 { RCVBDI_STD_THRESH, TG3_FL_5705,
11129 0x00000000, 0x000003ff },
11130 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11131 0x00000000, 0xffffffff },
11133 /* Host Coalescing Control Registers. */
11134 { HOSTCC_MODE, TG3_FL_NOT_5705,
11135 0x00000000, 0x00000004 },
11136 { HOSTCC_MODE, TG3_FL_5705,
11137 0x00000000, 0x000000f6 },
11138 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11139 0x00000000, 0xffffffff },
11140 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11141 0x00000000, 0x000003ff },
11142 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11143 0x00000000, 0xffffffff },
11144 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11145 0x00000000, 0x000003ff },
11146 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11147 0x00000000, 0xffffffff },
11148 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11149 0x00000000, 0x000000ff },
11150 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11151 0x00000000, 0xffffffff },
11152 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11153 0x00000000, 0x000000ff },
11154 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11155 0x00000000, 0xffffffff },
11156 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11157 0x00000000, 0xffffffff },
11158 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11159 0x00000000, 0xffffffff },
11160 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11161 0x00000000, 0x000000ff },
11162 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11163 0x00000000, 0xffffffff },
11164 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11165 0x00000000, 0x000000ff },
11166 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11167 0x00000000, 0xffffffff },
11168 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11169 0x00000000, 0xffffffff },
11170 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11171 0x00000000, 0xffffffff },
11172 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11173 0x00000000, 0xffffffff },
11174 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11175 0x00000000, 0xffffffff },
11176 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11177 0xffffffff, 0x00000000 },
11178 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11179 0xffffffff, 0x00000000 },
11181 /* Buffer Manager Control Registers. */
11182 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11183 0x00000000, 0x007fff80 },
11184 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11185 0x00000000, 0x007fffff },
11186 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11187 0x00000000, 0x0000003f },
11188 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11189 0x00000000, 0x000001ff },
11190 { BUFMGR_MB_HIGH_WATER, 0x0000,
11191 0x00000000, 0x000001ff },
11192 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11193 0xffffffff, 0x00000000 },
11194 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11195 0xffffffff, 0x00000000 },
11197 /* Mailbox Registers */
11198 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11199 0x00000000, 0x000001ff },
11200 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11201 0x00000000, 0x000001ff },
11202 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11203 0x00000000, 0x000007ff },
11204 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11205 0x00000000, 0x000001ff },
11207 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11210 is_5705 = is_5750 = 0;
11211 if (tg3_flag(tp, 5705_PLUS)) {
11213 if (tg3_flag(tp, 5750_PLUS))
11217 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11218 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11221 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11224 if (tg3_flag(tp, IS_5788) &&
11225 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11228 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11231 offset = (u32) reg_tbl[i].offset;
11232 read_mask = reg_tbl[i].read_mask;
11233 write_mask = reg_tbl[i].write_mask;
11235 /* Save the original register content */
11236 save_val = tr32(offset);
11238 /* Determine the read-only value. */
11239 read_val = save_val & read_mask;
11241 /* Write zero to the register, then make sure the read-only bits
11242 * are not changed and the read/write bits are all zeros.
11246 val = tr32(offset);
11248 /* Test the read-only and read/write bits. */
11249 if (((val & read_mask) != read_val) || (val & write_mask))
11252 /* Write ones to all the bits defined by RdMask and WrMask, then
11253 * make sure the read-only bits are not changed and the
11254 * read/write bits are all ones.
11256 tw32(offset, read_mask | write_mask);
11258 val = tr32(offset);
11260 /* Test the read-only bits. */
11261 if ((val & read_mask) != read_val)
11264 /* Test the read/write bits. */
11265 if ((val & write_mask) != write_mask)
11268 tw32(offset, save_val);
11274 if (netif_msg_hw(tp))
11275 netdev_err(tp->dev,
11276 "Register test failed at offset %x\n", offset);
11277 tw32(offset, save_val);
11281 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11283 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11287 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11288 for (j = 0; j < len; j += 4) {
11291 tg3_write_mem(tp, offset + j, test_pattern[i]);
11292 tg3_read_mem(tp, offset + j, &val);
11293 if (val != test_pattern[i])
11300 static int tg3_test_memory(struct tg3 *tp)
11302 static struct mem_entry {
11305 } mem_tbl_570x[] = {
11306 { 0x00000000, 0x00b50},
11307 { 0x00002000, 0x1c000},
11308 { 0xffffffff, 0x00000}
11309 }, mem_tbl_5705[] = {
11310 { 0x00000100, 0x0000c},
11311 { 0x00000200, 0x00008},
11312 { 0x00004000, 0x00800},
11313 { 0x00006000, 0x01000},
11314 { 0x00008000, 0x02000},
11315 { 0x00010000, 0x0e000},
11316 { 0xffffffff, 0x00000}
11317 }, mem_tbl_5755[] = {
11318 { 0x00000200, 0x00008},
11319 { 0x00004000, 0x00800},
11320 { 0x00006000, 0x00800},
11321 { 0x00008000, 0x02000},
11322 { 0x00010000, 0x0c000},
11323 { 0xffffffff, 0x00000}
11324 }, mem_tbl_5906[] = {
11325 { 0x00000200, 0x00008},
11326 { 0x00004000, 0x00400},
11327 { 0x00006000, 0x00400},
11328 { 0x00008000, 0x01000},
11329 { 0x00010000, 0x01000},
11330 { 0xffffffff, 0x00000}
11331 }, mem_tbl_5717[] = {
11332 { 0x00000200, 0x00008},
11333 { 0x00010000, 0x0a000},
11334 { 0x00020000, 0x13c00},
11335 { 0xffffffff, 0x00000}
11336 }, mem_tbl_57765[] = {
11337 { 0x00000200, 0x00008},
11338 { 0x00004000, 0x00800},
11339 { 0x00006000, 0x09800},
11340 { 0x00010000, 0x0a000},
11341 { 0xffffffff, 0x00000}
11343 struct mem_entry *mem_tbl;
11347 if (tg3_flag(tp, 5717_PLUS))
11348 mem_tbl = mem_tbl_5717;
11349 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11350 mem_tbl = mem_tbl_57765;
11351 else if (tg3_flag(tp, 5755_PLUS))
11352 mem_tbl = mem_tbl_5755;
11353 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11354 mem_tbl = mem_tbl_5906;
11355 else if (tg3_flag(tp, 5705_PLUS))
11356 mem_tbl = mem_tbl_5705;
11358 mem_tbl = mem_tbl_570x;
11360 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11361 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11369 #define TG3_TSO_MSS 500
11371 #define TG3_TSO_IP_HDR_LEN 20
11372 #define TG3_TSO_TCP_HDR_LEN 20
11373 #define TG3_TSO_TCP_OPT_LEN 12
11375 static const u8 tg3_tso_header[] = {
11377 0x45, 0x00, 0x00, 0x00,
11378 0x00, 0x00, 0x40, 0x00,
11379 0x40, 0x06, 0x00, 0x00,
11380 0x0a, 0x00, 0x00, 0x01,
11381 0x0a, 0x00, 0x00, 0x02,
11382 0x0d, 0x00, 0xe0, 0x00,
11383 0x00, 0x00, 0x01, 0x00,
11384 0x00, 0x00, 0x02, 0x00,
11385 0x80, 0x10, 0x10, 0x00,
11386 0x14, 0x09, 0x00, 0x00,
11387 0x01, 0x01, 0x08, 0x0a,
11388 0x11, 0x11, 0x11, 0x11,
11389 0x11, 0x11, 0x11, 0x11,
11392 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11394 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11395 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11397 struct sk_buff *skb, *rx_skb;
11400 int num_pkts, tx_len, rx_len, i, err;
11401 struct tg3_rx_buffer_desc *desc;
11402 struct tg3_napi *tnapi, *rnapi;
11403 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11405 tnapi = &tp->napi[0];
11406 rnapi = &tp->napi[0];
11407 if (tp->irq_cnt > 1) {
11408 if (tg3_flag(tp, ENABLE_RSS))
11409 rnapi = &tp->napi[1];
11410 if (tg3_flag(tp, ENABLE_TSS))
11411 tnapi = &tp->napi[1];
11413 coal_now = tnapi->coal_now | rnapi->coal_now;
11418 skb = netdev_alloc_skb(tp->dev, tx_len);
11422 tx_data = skb_put(skb, tx_len);
11423 memcpy(tx_data, tp->dev->dev_addr, 6);
11424 memset(tx_data + 6, 0x0, 8);
11426 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11428 if (tso_loopback) {
11429 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11431 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11432 TG3_TSO_TCP_OPT_LEN;
11434 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11435 sizeof(tg3_tso_header));
11438 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11439 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11441 /* Set the total length field in the IP header */
11442 iph->tot_len = htons((u16)(mss + hdr_len));
11444 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11445 TXD_FLAG_CPU_POST_DMA);
11447 if (tg3_flag(tp, HW_TSO_1) ||
11448 tg3_flag(tp, HW_TSO_2) ||
11449 tg3_flag(tp, HW_TSO_3)) {
11451 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11452 th = (struct tcphdr *)&tx_data[val];
11455 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11457 if (tg3_flag(tp, HW_TSO_3)) {
11458 mss |= (hdr_len & 0xc) << 12;
11459 if (hdr_len & 0x10)
11460 base_flags |= 0x00000010;
11461 base_flags |= (hdr_len & 0x3e0) << 5;
11462 } else if (tg3_flag(tp, HW_TSO_2))
11463 mss |= hdr_len << 9;
11464 else if (tg3_flag(tp, HW_TSO_1) ||
11465 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11466 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11468 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11471 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11474 data_off = ETH_HLEN;
11477 for (i = data_off; i < tx_len; i++)
11478 tx_data[i] = (u8) (i & 0xff);
11480 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11481 if (pci_dma_mapping_error(tp->pdev, map)) {
11482 dev_kfree_skb(skb);
11486 val = tnapi->tx_prod;
11487 tnapi->tx_buffers[val].skb = skb;
11488 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11490 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11495 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11497 budget = tg3_tx_avail(tnapi);
11498 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11499 base_flags | TXD_FLAG_END, mss, 0)) {
11500 tnapi->tx_buffers[val].skb = NULL;
11501 dev_kfree_skb(skb);
11507 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11508 tr32_mailbox(tnapi->prodmbox);
11512 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11513 for (i = 0; i < 35; i++) {
11514 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11519 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11520 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11521 if ((tx_idx == tnapi->tx_prod) &&
11522 (rx_idx == (rx_start_idx + num_pkts)))
11526 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11527 dev_kfree_skb(skb);
11529 if (tx_idx != tnapi->tx_prod)
11532 if (rx_idx != rx_start_idx + num_pkts)
11536 while (rx_idx != rx_start_idx) {
11537 desc = &rnapi->rx_rcb[rx_start_idx++];
11538 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11539 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11541 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11542 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11545 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11548 if (!tso_loopback) {
11549 if (rx_len != tx_len)
11552 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11553 if (opaque_key != RXD_OPAQUE_RING_STD)
11556 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11559 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11560 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11561 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11565 if (opaque_key == RXD_OPAQUE_RING_STD) {
11566 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11567 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11569 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11570 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11571 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11576 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11577 PCI_DMA_FROMDEVICE);
11579 for (i = data_off; i < rx_len; i++, val++) {
11580 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11587 /* tg3_free_rings will unmap and free the rx_skb */
11592 #define TG3_STD_LOOPBACK_FAILED 1
11593 #define TG3_JMB_LOOPBACK_FAILED 2
11594 #define TG3_TSO_LOOPBACK_FAILED 4
11595 #define TG3_LOOPBACK_FAILED \
11596 (TG3_STD_LOOPBACK_FAILED | \
11597 TG3_JMB_LOOPBACK_FAILED | \
11598 TG3_TSO_LOOPBACK_FAILED)
11600 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11605 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11606 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11608 if (!netif_running(tp->dev)) {
11609 data[0] = TG3_LOOPBACK_FAILED;
11610 data[1] = TG3_LOOPBACK_FAILED;
11612 data[2] = TG3_LOOPBACK_FAILED;
11616 err = tg3_reset_hw(tp, 1);
11618 data[0] = TG3_LOOPBACK_FAILED;
11619 data[1] = TG3_LOOPBACK_FAILED;
11621 data[2] = TG3_LOOPBACK_FAILED;
11625 if (tg3_flag(tp, ENABLE_RSS)) {
11628 /* Reroute all rx packets to the 1st queue */
11629 for (i = MAC_RSS_INDIR_TBL_0;
11630 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11634 /* HW errata - mac loopback fails in some cases on 5780.
11635 * Normal traffic and PHY loopback are not affected by
11636 * errata. Also, the MAC loopback test is deprecated for
11637 * all newer ASIC revisions.
11639 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11640 !tg3_flag(tp, CPMU_PRESENT)) {
11641 tg3_mac_loopback(tp, true);
11643 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11644 data[0] |= TG3_STD_LOOPBACK_FAILED;
11646 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11647 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11648 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11650 tg3_mac_loopback(tp, false);
11653 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11654 !tg3_flag(tp, USE_PHYLIB)) {
11657 tg3_phy_lpbk_set(tp, 0, false);
11659 /* Wait for link */
11660 for (i = 0; i < 100; i++) {
11661 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11666 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11667 data[1] |= TG3_STD_LOOPBACK_FAILED;
11668 if (tg3_flag(tp, TSO_CAPABLE) &&
11669 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11670 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11671 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11672 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11673 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11676 tg3_phy_lpbk_set(tp, 0, true);
11678 /* All link indications report up, but the hardware
11679 * isn't really ready for about 20 msec. Double it
11684 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11685 data[2] |= TG3_STD_LOOPBACK_FAILED;
11686 if (tg3_flag(tp, TSO_CAPABLE) &&
11687 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11688 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11689 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11690 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11691 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11694 /* Re-enable gphy autopowerdown. */
11695 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11696 tg3_phy_toggle_apd(tp, true);
11699 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11702 tp->phy_flags |= eee_cap;
11707 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11710 struct tg3 *tp = netdev_priv(dev);
11711 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11713 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11714 tg3_power_up(tp)) {
11715 etest->flags |= ETH_TEST_FL_FAILED;
11716 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11720 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11722 if (tg3_test_nvram(tp) != 0) {
11723 etest->flags |= ETH_TEST_FL_FAILED;
11726 if (!doextlpbk && tg3_test_link(tp)) {
11727 etest->flags |= ETH_TEST_FL_FAILED;
11730 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11731 int err, err2 = 0, irq_sync = 0;
11733 if (netif_running(dev)) {
11735 tg3_netif_stop(tp);
11739 tg3_full_lock(tp, irq_sync);
11741 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11742 err = tg3_nvram_lock(tp);
11743 tg3_halt_cpu(tp, RX_CPU_BASE);
11744 if (!tg3_flag(tp, 5705_PLUS))
11745 tg3_halt_cpu(tp, TX_CPU_BASE);
11747 tg3_nvram_unlock(tp);
11749 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11752 if (tg3_test_registers(tp) != 0) {
11753 etest->flags |= ETH_TEST_FL_FAILED;
11757 if (tg3_test_memory(tp) != 0) {
11758 etest->flags |= ETH_TEST_FL_FAILED;
11763 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11765 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11766 etest->flags |= ETH_TEST_FL_FAILED;
11768 tg3_full_unlock(tp);
11770 if (tg3_test_interrupt(tp) != 0) {
11771 etest->flags |= ETH_TEST_FL_FAILED;
11775 tg3_full_lock(tp, 0);
11777 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11778 if (netif_running(dev)) {
11779 tg3_flag_set(tp, INIT_COMPLETE);
11780 err2 = tg3_restart_hw(tp, 1);
11782 tg3_netif_start(tp);
11785 tg3_full_unlock(tp);
11787 if (irq_sync && !err2)
11790 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11791 tg3_power_down(tp);
11795 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11797 struct mii_ioctl_data *data = if_mii(ifr);
11798 struct tg3 *tp = netdev_priv(dev);
11801 if (tg3_flag(tp, USE_PHYLIB)) {
11802 struct phy_device *phydev;
11803 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11805 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11806 return phy_mii_ioctl(phydev, ifr, cmd);
11811 data->phy_id = tp->phy_addr;
11814 case SIOCGMIIREG: {
11817 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11818 break; /* We have no PHY */
11820 if (!netif_running(dev))
11823 spin_lock_bh(&tp->lock);
11824 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11825 spin_unlock_bh(&tp->lock);
11827 data->val_out = mii_regval;
11833 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11834 break; /* We have no PHY */
11836 if (!netif_running(dev))
11839 spin_lock_bh(&tp->lock);
11840 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11841 spin_unlock_bh(&tp->lock);
11849 return -EOPNOTSUPP;
11852 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11854 struct tg3 *tp = netdev_priv(dev);
11856 memcpy(ec, &tp->coal, sizeof(*ec));
11860 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11862 struct tg3 *tp = netdev_priv(dev);
11863 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11864 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11866 if (!tg3_flag(tp, 5705_PLUS)) {
11867 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11868 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11869 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11870 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11873 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11874 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11875 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11876 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11877 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11878 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11879 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11880 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11881 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11882 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11885 /* No rx interrupts will be generated if both are zero */
11886 if ((ec->rx_coalesce_usecs == 0) &&
11887 (ec->rx_max_coalesced_frames == 0))
11890 /* No tx interrupts will be generated if both are zero */
11891 if ((ec->tx_coalesce_usecs == 0) &&
11892 (ec->tx_max_coalesced_frames == 0))
11895 /* Only copy relevant parameters, ignore all others. */
11896 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11897 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11898 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11899 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11900 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11901 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11902 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11903 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11904 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11906 if (netif_running(dev)) {
11907 tg3_full_lock(tp, 0);
11908 __tg3_set_coalesce(tp, &tp->coal);
11909 tg3_full_unlock(tp);
11914 static const struct ethtool_ops tg3_ethtool_ops = {
11915 .get_settings = tg3_get_settings,
11916 .set_settings = tg3_set_settings,
11917 .get_drvinfo = tg3_get_drvinfo,
11918 .get_regs_len = tg3_get_regs_len,
11919 .get_regs = tg3_get_regs,
11920 .get_wol = tg3_get_wol,
11921 .set_wol = tg3_set_wol,
11922 .get_msglevel = tg3_get_msglevel,
11923 .set_msglevel = tg3_set_msglevel,
11924 .nway_reset = tg3_nway_reset,
11925 .get_link = ethtool_op_get_link,
11926 .get_eeprom_len = tg3_get_eeprom_len,
11927 .get_eeprom = tg3_get_eeprom,
11928 .set_eeprom = tg3_set_eeprom,
11929 .get_ringparam = tg3_get_ringparam,
11930 .set_ringparam = tg3_set_ringparam,
11931 .get_pauseparam = tg3_get_pauseparam,
11932 .set_pauseparam = tg3_set_pauseparam,
11933 .self_test = tg3_self_test,
11934 .get_strings = tg3_get_strings,
11935 .set_phys_id = tg3_set_phys_id,
11936 .get_ethtool_stats = tg3_get_ethtool_stats,
11937 .get_coalesce = tg3_get_coalesce,
11938 .set_coalesce = tg3_set_coalesce,
11939 .get_sset_count = tg3_get_sset_count,
11942 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11944 u32 cursize, val, magic;
11946 tp->nvram_size = EEPROM_CHIP_SIZE;
11948 if (tg3_nvram_read(tp, 0, &magic) != 0)
11951 if ((magic != TG3_EEPROM_MAGIC) &&
11952 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11953 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11957 * Size the chip by reading offsets at increasing powers of two.
11958 * When we encounter our validation signature, we know the addressing
11959 * has wrapped around, and thus have our chip size.
11963 while (cursize < tp->nvram_size) {
11964 if (tg3_nvram_read(tp, cursize, &val) != 0)
11973 tp->nvram_size = cursize;
11976 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11980 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11983 /* Selfboot format */
11984 if (val != TG3_EEPROM_MAGIC) {
11985 tg3_get_eeprom_size(tp);
11989 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11991 /* This is confusing. We want to operate on the
11992 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11993 * call will read from NVRAM and byteswap the data
11994 * according to the byteswapping settings for all
11995 * other register accesses. This ensures the data we
11996 * want will always reside in the lower 16-bits.
11997 * However, the data in NVRAM is in LE format, which
11998 * means the data from the NVRAM read will always be
11999 * opposite the endianness of the CPU. The 16-bit
12000 * byteswap then brings the data to CPU endianness.
12002 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12006 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12009 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12013 nvcfg1 = tr32(NVRAM_CFG1);
12014 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12015 tg3_flag_set(tp, FLASH);
12017 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12018 tw32(NVRAM_CFG1, nvcfg1);
12021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12022 tg3_flag(tp, 5780_CLASS)) {
12023 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12024 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12025 tp->nvram_jedecnum = JEDEC_ATMEL;
12026 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12027 tg3_flag_set(tp, NVRAM_BUFFERED);
12029 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12030 tp->nvram_jedecnum = JEDEC_ATMEL;
12031 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12033 case FLASH_VENDOR_ATMEL_EEPROM:
12034 tp->nvram_jedecnum = JEDEC_ATMEL;
12035 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12036 tg3_flag_set(tp, NVRAM_BUFFERED);
12038 case FLASH_VENDOR_ST:
12039 tp->nvram_jedecnum = JEDEC_ST;
12040 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12041 tg3_flag_set(tp, NVRAM_BUFFERED);
12043 case FLASH_VENDOR_SAIFUN:
12044 tp->nvram_jedecnum = JEDEC_SAIFUN;
12045 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12047 case FLASH_VENDOR_SST_SMALL:
12048 case FLASH_VENDOR_SST_LARGE:
12049 tp->nvram_jedecnum = JEDEC_SST;
12050 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12054 tp->nvram_jedecnum = JEDEC_ATMEL;
12055 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12056 tg3_flag_set(tp, NVRAM_BUFFERED);
12060 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12062 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12063 case FLASH_5752PAGE_SIZE_256:
12064 tp->nvram_pagesize = 256;
12066 case FLASH_5752PAGE_SIZE_512:
12067 tp->nvram_pagesize = 512;
12069 case FLASH_5752PAGE_SIZE_1K:
12070 tp->nvram_pagesize = 1024;
12072 case FLASH_5752PAGE_SIZE_2K:
12073 tp->nvram_pagesize = 2048;
12075 case FLASH_5752PAGE_SIZE_4K:
12076 tp->nvram_pagesize = 4096;
12078 case FLASH_5752PAGE_SIZE_264:
12079 tp->nvram_pagesize = 264;
12081 case FLASH_5752PAGE_SIZE_528:
12082 tp->nvram_pagesize = 528;
12087 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12091 nvcfg1 = tr32(NVRAM_CFG1);
12093 /* NVRAM protection for TPM */
12094 if (nvcfg1 & (1 << 27))
12095 tg3_flag_set(tp, PROTECTED_NVRAM);
12097 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12098 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12099 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12100 tp->nvram_jedecnum = JEDEC_ATMEL;
12101 tg3_flag_set(tp, NVRAM_BUFFERED);
12103 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12104 tp->nvram_jedecnum = JEDEC_ATMEL;
12105 tg3_flag_set(tp, NVRAM_BUFFERED);
12106 tg3_flag_set(tp, FLASH);
12108 case FLASH_5752VENDOR_ST_M45PE10:
12109 case FLASH_5752VENDOR_ST_M45PE20:
12110 case FLASH_5752VENDOR_ST_M45PE40:
12111 tp->nvram_jedecnum = JEDEC_ST;
12112 tg3_flag_set(tp, NVRAM_BUFFERED);
12113 tg3_flag_set(tp, FLASH);
12117 if (tg3_flag(tp, FLASH)) {
12118 tg3_nvram_get_pagesize(tp, nvcfg1);
12120 /* For eeprom, set pagesize to maximum eeprom size */
12121 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12123 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12124 tw32(NVRAM_CFG1, nvcfg1);
12128 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12130 u32 nvcfg1, protect = 0;
12132 nvcfg1 = tr32(NVRAM_CFG1);
12134 /* NVRAM protection for TPM */
12135 if (nvcfg1 & (1 << 27)) {
12136 tg3_flag_set(tp, PROTECTED_NVRAM);
12140 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12142 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12143 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12144 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12145 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12146 tp->nvram_jedecnum = JEDEC_ATMEL;
12147 tg3_flag_set(tp, NVRAM_BUFFERED);
12148 tg3_flag_set(tp, FLASH);
12149 tp->nvram_pagesize = 264;
12150 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12151 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12152 tp->nvram_size = (protect ? 0x3e200 :
12153 TG3_NVRAM_SIZE_512KB);
12154 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12155 tp->nvram_size = (protect ? 0x1f200 :
12156 TG3_NVRAM_SIZE_256KB);
12158 tp->nvram_size = (protect ? 0x1f200 :
12159 TG3_NVRAM_SIZE_128KB);
12161 case FLASH_5752VENDOR_ST_M45PE10:
12162 case FLASH_5752VENDOR_ST_M45PE20:
12163 case FLASH_5752VENDOR_ST_M45PE40:
12164 tp->nvram_jedecnum = JEDEC_ST;
12165 tg3_flag_set(tp, NVRAM_BUFFERED);
12166 tg3_flag_set(tp, FLASH);
12167 tp->nvram_pagesize = 256;
12168 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12169 tp->nvram_size = (protect ?
12170 TG3_NVRAM_SIZE_64KB :
12171 TG3_NVRAM_SIZE_128KB);
12172 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12173 tp->nvram_size = (protect ?
12174 TG3_NVRAM_SIZE_64KB :
12175 TG3_NVRAM_SIZE_256KB);
12177 tp->nvram_size = (protect ?
12178 TG3_NVRAM_SIZE_128KB :
12179 TG3_NVRAM_SIZE_512KB);
12184 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12188 nvcfg1 = tr32(NVRAM_CFG1);
12190 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12191 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12192 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12193 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12194 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12195 tp->nvram_jedecnum = JEDEC_ATMEL;
12196 tg3_flag_set(tp, NVRAM_BUFFERED);
12197 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12199 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12200 tw32(NVRAM_CFG1, nvcfg1);
12202 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12203 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12204 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12205 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12206 tp->nvram_jedecnum = JEDEC_ATMEL;
12207 tg3_flag_set(tp, NVRAM_BUFFERED);
12208 tg3_flag_set(tp, FLASH);
12209 tp->nvram_pagesize = 264;
12211 case FLASH_5752VENDOR_ST_M45PE10:
12212 case FLASH_5752VENDOR_ST_M45PE20:
12213 case FLASH_5752VENDOR_ST_M45PE40:
12214 tp->nvram_jedecnum = JEDEC_ST;
12215 tg3_flag_set(tp, NVRAM_BUFFERED);
12216 tg3_flag_set(tp, FLASH);
12217 tp->nvram_pagesize = 256;
12222 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12224 u32 nvcfg1, protect = 0;
12226 nvcfg1 = tr32(NVRAM_CFG1);
12228 /* NVRAM protection for TPM */
12229 if (nvcfg1 & (1 << 27)) {
12230 tg3_flag_set(tp, PROTECTED_NVRAM);
12234 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12236 case FLASH_5761VENDOR_ATMEL_ADB021D:
12237 case FLASH_5761VENDOR_ATMEL_ADB041D:
12238 case FLASH_5761VENDOR_ATMEL_ADB081D:
12239 case FLASH_5761VENDOR_ATMEL_ADB161D:
12240 case FLASH_5761VENDOR_ATMEL_MDB021D:
12241 case FLASH_5761VENDOR_ATMEL_MDB041D:
12242 case FLASH_5761VENDOR_ATMEL_MDB081D:
12243 case FLASH_5761VENDOR_ATMEL_MDB161D:
12244 tp->nvram_jedecnum = JEDEC_ATMEL;
12245 tg3_flag_set(tp, NVRAM_BUFFERED);
12246 tg3_flag_set(tp, FLASH);
12247 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12248 tp->nvram_pagesize = 256;
12250 case FLASH_5761VENDOR_ST_A_M45PE20:
12251 case FLASH_5761VENDOR_ST_A_M45PE40:
12252 case FLASH_5761VENDOR_ST_A_M45PE80:
12253 case FLASH_5761VENDOR_ST_A_M45PE16:
12254 case FLASH_5761VENDOR_ST_M_M45PE20:
12255 case FLASH_5761VENDOR_ST_M_M45PE40:
12256 case FLASH_5761VENDOR_ST_M_M45PE80:
12257 case FLASH_5761VENDOR_ST_M_M45PE16:
12258 tp->nvram_jedecnum = JEDEC_ST;
12259 tg3_flag_set(tp, NVRAM_BUFFERED);
12260 tg3_flag_set(tp, FLASH);
12261 tp->nvram_pagesize = 256;
12266 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12269 case FLASH_5761VENDOR_ATMEL_ADB161D:
12270 case FLASH_5761VENDOR_ATMEL_MDB161D:
12271 case FLASH_5761VENDOR_ST_A_M45PE16:
12272 case FLASH_5761VENDOR_ST_M_M45PE16:
12273 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12275 case FLASH_5761VENDOR_ATMEL_ADB081D:
12276 case FLASH_5761VENDOR_ATMEL_MDB081D:
12277 case FLASH_5761VENDOR_ST_A_M45PE80:
12278 case FLASH_5761VENDOR_ST_M_M45PE80:
12279 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12281 case FLASH_5761VENDOR_ATMEL_ADB041D:
12282 case FLASH_5761VENDOR_ATMEL_MDB041D:
12283 case FLASH_5761VENDOR_ST_A_M45PE40:
12284 case FLASH_5761VENDOR_ST_M_M45PE40:
12285 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12287 case FLASH_5761VENDOR_ATMEL_ADB021D:
12288 case FLASH_5761VENDOR_ATMEL_MDB021D:
12289 case FLASH_5761VENDOR_ST_A_M45PE20:
12290 case FLASH_5761VENDOR_ST_M_M45PE20:
12291 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12297 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12299 tp->nvram_jedecnum = JEDEC_ATMEL;
12300 tg3_flag_set(tp, NVRAM_BUFFERED);
12301 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12304 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12308 nvcfg1 = tr32(NVRAM_CFG1);
12310 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12311 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12312 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12313 tp->nvram_jedecnum = JEDEC_ATMEL;
12314 tg3_flag_set(tp, NVRAM_BUFFERED);
12315 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12317 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12318 tw32(NVRAM_CFG1, nvcfg1);
12320 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12321 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12322 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12323 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12324 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12325 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12326 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12327 tp->nvram_jedecnum = JEDEC_ATMEL;
12328 tg3_flag_set(tp, NVRAM_BUFFERED);
12329 tg3_flag_set(tp, FLASH);
12331 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12332 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12333 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12334 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12335 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12337 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12338 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12339 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12341 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12342 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12343 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12347 case FLASH_5752VENDOR_ST_M45PE10:
12348 case FLASH_5752VENDOR_ST_M45PE20:
12349 case FLASH_5752VENDOR_ST_M45PE40:
12350 tp->nvram_jedecnum = JEDEC_ST;
12351 tg3_flag_set(tp, NVRAM_BUFFERED);
12352 tg3_flag_set(tp, FLASH);
12354 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12355 case FLASH_5752VENDOR_ST_M45PE10:
12356 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12358 case FLASH_5752VENDOR_ST_M45PE20:
12359 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12361 case FLASH_5752VENDOR_ST_M45PE40:
12362 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12367 tg3_flag_set(tp, NO_NVRAM);
12371 tg3_nvram_get_pagesize(tp, nvcfg1);
12372 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12373 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12377 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12381 nvcfg1 = tr32(NVRAM_CFG1);
12383 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12384 case FLASH_5717VENDOR_ATMEL_EEPROM:
12385 case FLASH_5717VENDOR_MICRO_EEPROM:
12386 tp->nvram_jedecnum = JEDEC_ATMEL;
12387 tg3_flag_set(tp, NVRAM_BUFFERED);
12388 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12390 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12391 tw32(NVRAM_CFG1, nvcfg1);
12393 case FLASH_5717VENDOR_ATMEL_MDB011D:
12394 case FLASH_5717VENDOR_ATMEL_ADB011B:
12395 case FLASH_5717VENDOR_ATMEL_ADB011D:
12396 case FLASH_5717VENDOR_ATMEL_MDB021D:
12397 case FLASH_5717VENDOR_ATMEL_ADB021B:
12398 case FLASH_5717VENDOR_ATMEL_ADB021D:
12399 case FLASH_5717VENDOR_ATMEL_45USPT:
12400 tp->nvram_jedecnum = JEDEC_ATMEL;
12401 tg3_flag_set(tp, NVRAM_BUFFERED);
12402 tg3_flag_set(tp, FLASH);
12404 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12405 case FLASH_5717VENDOR_ATMEL_MDB021D:
12406 /* Detect size with tg3_nvram_get_size() */
12408 case FLASH_5717VENDOR_ATMEL_ADB021B:
12409 case FLASH_5717VENDOR_ATMEL_ADB021D:
12410 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12413 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12417 case FLASH_5717VENDOR_ST_M_M25PE10:
12418 case FLASH_5717VENDOR_ST_A_M25PE10:
12419 case FLASH_5717VENDOR_ST_M_M45PE10:
12420 case FLASH_5717VENDOR_ST_A_M45PE10:
12421 case FLASH_5717VENDOR_ST_M_M25PE20:
12422 case FLASH_5717VENDOR_ST_A_M25PE20:
12423 case FLASH_5717VENDOR_ST_M_M45PE20:
12424 case FLASH_5717VENDOR_ST_A_M45PE20:
12425 case FLASH_5717VENDOR_ST_25USPT:
12426 case FLASH_5717VENDOR_ST_45USPT:
12427 tp->nvram_jedecnum = JEDEC_ST;
12428 tg3_flag_set(tp, NVRAM_BUFFERED);
12429 tg3_flag_set(tp, FLASH);
12431 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12432 case FLASH_5717VENDOR_ST_M_M25PE20:
12433 case FLASH_5717VENDOR_ST_M_M45PE20:
12434 /* Detect size with tg3_nvram_get_size() */
12436 case FLASH_5717VENDOR_ST_A_M25PE20:
12437 case FLASH_5717VENDOR_ST_A_M45PE20:
12438 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12441 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12446 tg3_flag_set(tp, NO_NVRAM);
12450 tg3_nvram_get_pagesize(tp, nvcfg1);
12451 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12452 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12455 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12457 u32 nvcfg1, nvmpinstrp;
12459 nvcfg1 = tr32(NVRAM_CFG1);
12460 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12462 switch (nvmpinstrp) {
12463 case FLASH_5720_EEPROM_HD:
12464 case FLASH_5720_EEPROM_LD:
12465 tp->nvram_jedecnum = JEDEC_ATMEL;
12466 tg3_flag_set(tp, NVRAM_BUFFERED);
12468 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12469 tw32(NVRAM_CFG1, nvcfg1);
12470 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12471 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12473 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12475 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12476 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12477 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12478 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12479 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12480 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12481 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12482 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12483 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12484 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12485 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12486 case FLASH_5720VENDOR_ATMEL_45USPT:
12487 tp->nvram_jedecnum = JEDEC_ATMEL;
12488 tg3_flag_set(tp, NVRAM_BUFFERED);
12489 tg3_flag_set(tp, FLASH);
12491 switch (nvmpinstrp) {
12492 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12493 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12494 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12495 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12497 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12498 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12499 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12500 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12502 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12503 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12504 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12507 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12511 case FLASH_5720VENDOR_M_ST_M25PE10:
12512 case FLASH_5720VENDOR_M_ST_M45PE10:
12513 case FLASH_5720VENDOR_A_ST_M25PE10:
12514 case FLASH_5720VENDOR_A_ST_M45PE10:
12515 case FLASH_5720VENDOR_M_ST_M25PE20:
12516 case FLASH_5720VENDOR_M_ST_M45PE20:
12517 case FLASH_5720VENDOR_A_ST_M25PE20:
12518 case FLASH_5720VENDOR_A_ST_M45PE20:
12519 case FLASH_5720VENDOR_M_ST_M25PE40:
12520 case FLASH_5720VENDOR_M_ST_M45PE40:
12521 case FLASH_5720VENDOR_A_ST_M25PE40:
12522 case FLASH_5720VENDOR_A_ST_M45PE40:
12523 case FLASH_5720VENDOR_M_ST_M25PE80:
12524 case FLASH_5720VENDOR_M_ST_M45PE80:
12525 case FLASH_5720VENDOR_A_ST_M25PE80:
12526 case FLASH_5720VENDOR_A_ST_M45PE80:
12527 case FLASH_5720VENDOR_ST_25USPT:
12528 case FLASH_5720VENDOR_ST_45USPT:
12529 tp->nvram_jedecnum = JEDEC_ST;
12530 tg3_flag_set(tp, NVRAM_BUFFERED);
12531 tg3_flag_set(tp, FLASH);
12533 switch (nvmpinstrp) {
12534 case FLASH_5720VENDOR_M_ST_M25PE20:
12535 case FLASH_5720VENDOR_M_ST_M45PE20:
12536 case FLASH_5720VENDOR_A_ST_M25PE20:
12537 case FLASH_5720VENDOR_A_ST_M45PE20:
12538 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12540 case FLASH_5720VENDOR_M_ST_M25PE40:
12541 case FLASH_5720VENDOR_M_ST_M45PE40:
12542 case FLASH_5720VENDOR_A_ST_M25PE40:
12543 case FLASH_5720VENDOR_A_ST_M45PE40:
12544 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12546 case FLASH_5720VENDOR_M_ST_M25PE80:
12547 case FLASH_5720VENDOR_M_ST_M45PE80:
12548 case FLASH_5720VENDOR_A_ST_M25PE80:
12549 case FLASH_5720VENDOR_A_ST_M45PE80:
12550 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12553 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12558 tg3_flag_set(tp, NO_NVRAM);
12562 tg3_nvram_get_pagesize(tp, nvcfg1);
12563 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12564 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12567 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12568 static void __devinit tg3_nvram_init(struct tg3 *tp)
12570 tw32_f(GRC_EEPROM_ADDR,
12571 (EEPROM_ADDR_FSM_RESET |
12572 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12573 EEPROM_ADDR_CLKPERD_SHIFT)));
12577 /* Enable seeprom accesses. */
12578 tw32_f(GRC_LOCAL_CTRL,
12579 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12582 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12583 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12584 tg3_flag_set(tp, NVRAM);
12586 if (tg3_nvram_lock(tp)) {
12587 netdev_warn(tp->dev,
12588 "Cannot get nvram lock, %s failed\n",
12592 tg3_enable_nvram_access(tp);
12594 tp->nvram_size = 0;
12596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12597 tg3_get_5752_nvram_info(tp);
12598 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12599 tg3_get_5755_nvram_info(tp);
12600 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12601 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12603 tg3_get_5787_nvram_info(tp);
12604 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12605 tg3_get_5761_nvram_info(tp);
12606 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12607 tg3_get_5906_nvram_info(tp);
12608 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12609 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12610 tg3_get_57780_nvram_info(tp);
12611 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12613 tg3_get_5717_nvram_info(tp);
12614 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12615 tg3_get_5720_nvram_info(tp);
12617 tg3_get_nvram_info(tp);
12619 if (tp->nvram_size == 0)
12620 tg3_get_nvram_size(tp);
12622 tg3_disable_nvram_access(tp);
12623 tg3_nvram_unlock(tp);
12626 tg3_flag_clear(tp, NVRAM);
12627 tg3_flag_clear(tp, NVRAM_BUFFERED);
12629 tg3_get_eeprom_size(tp);
12633 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12634 u32 offset, u32 len, u8 *buf)
12639 for (i = 0; i < len; i += 4) {
12645 memcpy(&data, buf + i, 4);
12648 * The SEEPROM interface expects the data to always be opposite
12649 * the native endian format. We accomplish this by reversing
12650 * all the operations that would have been performed on the
12651 * data from a call to tg3_nvram_read_be32().
12653 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12655 val = tr32(GRC_EEPROM_ADDR);
12656 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12658 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12660 tw32(GRC_EEPROM_ADDR, val |
12661 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12662 (addr & EEPROM_ADDR_ADDR_MASK) |
12663 EEPROM_ADDR_START |
12664 EEPROM_ADDR_WRITE);
12666 for (j = 0; j < 1000; j++) {
12667 val = tr32(GRC_EEPROM_ADDR);
12669 if (val & EEPROM_ADDR_COMPLETE)
12673 if (!(val & EEPROM_ADDR_COMPLETE)) {
12682 /* offset and length are dword aligned */
12683 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12687 u32 pagesize = tp->nvram_pagesize;
12688 u32 pagemask = pagesize - 1;
12692 tmp = kmalloc(pagesize, GFP_KERNEL);
12698 u32 phy_addr, page_off, size;
12700 phy_addr = offset & ~pagemask;
12702 for (j = 0; j < pagesize; j += 4) {
12703 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12704 (__be32 *) (tmp + j));
12711 page_off = offset & pagemask;
12718 memcpy(tmp + page_off, buf, size);
12720 offset = offset + (pagesize - page_off);
12722 tg3_enable_nvram_access(tp);
12725 * Before we can erase the flash page, we need
12726 * to issue a special "write enable" command.
12728 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12730 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12733 /* Erase the target page */
12734 tw32(NVRAM_ADDR, phy_addr);
12736 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12737 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12739 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12742 /* Issue another write enable to start the write. */
12743 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12745 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12748 for (j = 0; j < pagesize; j += 4) {
12751 data = *((__be32 *) (tmp + j));
12753 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12755 tw32(NVRAM_ADDR, phy_addr + j);
12757 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12761 nvram_cmd |= NVRAM_CMD_FIRST;
12762 else if (j == (pagesize - 4))
12763 nvram_cmd |= NVRAM_CMD_LAST;
12765 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12772 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12773 tg3_nvram_exec_cmd(tp, nvram_cmd);
12780 /* offset and length are dword aligned */
12781 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12786 for (i = 0; i < len; i += 4, offset += 4) {
12787 u32 page_off, phy_addr, nvram_cmd;
12790 memcpy(&data, buf + i, 4);
12791 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12793 page_off = offset % tp->nvram_pagesize;
12795 phy_addr = tg3_nvram_phys_addr(tp, offset);
12797 tw32(NVRAM_ADDR, phy_addr);
12799 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12801 if (page_off == 0 || i == 0)
12802 nvram_cmd |= NVRAM_CMD_FIRST;
12803 if (page_off == (tp->nvram_pagesize - 4))
12804 nvram_cmd |= NVRAM_CMD_LAST;
12806 if (i == (len - 4))
12807 nvram_cmd |= NVRAM_CMD_LAST;
12809 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12810 !tg3_flag(tp, 5755_PLUS) &&
12811 (tp->nvram_jedecnum == JEDEC_ST) &&
12812 (nvram_cmd & NVRAM_CMD_FIRST)) {
12814 if ((ret = tg3_nvram_exec_cmd(tp,
12815 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12820 if (!tg3_flag(tp, FLASH)) {
12821 /* We always do complete word writes to eeprom. */
12822 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12825 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12831 /* offset and length are dword aligned */
12832 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12836 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12837 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12838 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12842 if (!tg3_flag(tp, NVRAM)) {
12843 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12847 ret = tg3_nvram_lock(tp);
12851 tg3_enable_nvram_access(tp);
12852 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12853 tw32(NVRAM_WRITE1, 0x406);
12855 grc_mode = tr32(GRC_MODE);
12856 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12858 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12859 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12862 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12866 grc_mode = tr32(GRC_MODE);
12867 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12869 tg3_disable_nvram_access(tp);
12870 tg3_nvram_unlock(tp);
12873 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12874 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12881 struct subsys_tbl_ent {
12882 u16 subsys_vendor, subsys_devid;
12886 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12887 /* Broadcom boards. */
12888 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12889 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12890 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12891 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12892 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12893 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12894 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12895 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12896 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12897 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12898 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12899 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12900 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12901 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12902 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12903 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12904 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12905 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12906 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12907 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12908 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12909 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12912 { TG3PCI_SUBVENDOR_ID_3COM,
12913 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12914 { TG3PCI_SUBVENDOR_ID_3COM,
12915 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12916 { TG3PCI_SUBVENDOR_ID_3COM,
12917 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12918 { TG3PCI_SUBVENDOR_ID_3COM,
12919 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12920 { TG3PCI_SUBVENDOR_ID_3COM,
12921 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12924 { TG3PCI_SUBVENDOR_ID_DELL,
12925 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12926 { TG3PCI_SUBVENDOR_ID_DELL,
12927 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12928 { TG3PCI_SUBVENDOR_ID_DELL,
12929 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12930 { TG3PCI_SUBVENDOR_ID_DELL,
12931 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12933 /* Compaq boards. */
12934 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12935 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12936 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12937 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12938 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12939 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12940 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12941 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12942 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12943 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12946 { TG3PCI_SUBVENDOR_ID_IBM,
12947 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12950 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12954 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12955 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12956 tp->pdev->subsystem_vendor) &&
12957 (subsys_id_to_phy_id[i].subsys_devid ==
12958 tp->pdev->subsystem_device))
12959 return &subsys_id_to_phy_id[i];
12964 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12968 tp->phy_id = TG3_PHY_ID_INVALID;
12969 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12971 /* Assume an onboard device and WOL capable by default. */
12972 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12973 tg3_flag_set(tp, WOL_CAP);
12975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12976 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12977 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12978 tg3_flag_set(tp, IS_NIC);
12980 val = tr32(VCPU_CFGSHDW);
12981 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12982 tg3_flag_set(tp, ASPM_WORKAROUND);
12983 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12984 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12985 tg3_flag_set(tp, WOL_ENABLE);
12986 device_set_wakeup_enable(&tp->pdev->dev, true);
12991 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12992 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12993 u32 nic_cfg, led_cfg;
12994 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12995 int eeprom_phy_serdes = 0;
12997 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12998 tp->nic_sram_data_cfg = nic_cfg;
13000 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13001 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13002 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13003 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13004 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13005 (ver > 0) && (ver < 0x100))
13006 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13009 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13011 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13012 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13013 eeprom_phy_serdes = 1;
13015 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13016 if (nic_phy_id != 0) {
13017 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13018 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13020 eeprom_phy_id = (id1 >> 16) << 10;
13021 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13022 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13026 tp->phy_id = eeprom_phy_id;
13027 if (eeprom_phy_serdes) {
13028 if (!tg3_flag(tp, 5705_PLUS))
13029 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13031 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13034 if (tg3_flag(tp, 5750_PLUS))
13035 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13036 SHASTA_EXT_LED_MODE_MASK);
13038 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13042 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13043 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13046 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13047 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13050 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13051 tp->led_ctrl = LED_CTRL_MODE_MAC;
13053 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13054 * read on some older 5700/5701 bootcode.
13056 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13058 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13060 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13064 case SHASTA_EXT_LED_SHARED:
13065 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13066 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13067 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13068 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13069 LED_CTRL_MODE_PHY_2);
13072 case SHASTA_EXT_LED_MAC:
13073 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13076 case SHASTA_EXT_LED_COMBO:
13077 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13078 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13079 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13080 LED_CTRL_MODE_PHY_2);
13085 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13087 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13088 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13090 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13091 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13093 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13094 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13095 if ((tp->pdev->subsystem_vendor ==
13096 PCI_VENDOR_ID_ARIMA) &&
13097 (tp->pdev->subsystem_device == 0x205a ||
13098 tp->pdev->subsystem_device == 0x2063))
13099 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13101 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13102 tg3_flag_set(tp, IS_NIC);
13105 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13106 tg3_flag_set(tp, ENABLE_ASF);
13107 if (tg3_flag(tp, 5750_PLUS))
13108 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13111 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13112 tg3_flag(tp, 5750_PLUS))
13113 tg3_flag_set(tp, ENABLE_APE);
13115 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13116 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13117 tg3_flag_clear(tp, WOL_CAP);
13119 if (tg3_flag(tp, WOL_CAP) &&
13120 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13121 tg3_flag_set(tp, WOL_ENABLE);
13122 device_set_wakeup_enable(&tp->pdev->dev, true);
13125 if (cfg2 & (1 << 17))
13126 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13128 /* serdes signal pre-emphasis in register 0x590 set by */
13129 /* bootcode if bit 18 is set */
13130 if (cfg2 & (1 << 18))
13131 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13133 if ((tg3_flag(tp, 57765_PLUS) ||
13134 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13135 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13136 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13137 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13139 if (tg3_flag(tp, PCI_EXPRESS) &&
13140 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13141 !tg3_flag(tp, 57765_PLUS)) {
13144 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13145 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13146 tg3_flag_set(tp, ASPM_WORKAROUND);
13149 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13150 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13151 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13152 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13153 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13154 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13157 if (tg3_flag(tp, WOL_CAP))
13158 device_set_wakeup_enable(&tp->pdev->dev,
13159 tg3_flag(tp, WOL_ENABLE));
13161 device_set_wakeup_capable(&tp->pdev->dev, false);
13164 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13169 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13170 tw32(OTP_CTRL, cmd);
13172 /* Wait for up to 1 ms for command to execute. */
13173 for (i = 0; i < 100; i++) {
13174 val = tr32(OTP_STATUS);
13175 if (val & OTP_STATUS_CMD_DONE)
13180 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13183 /* Read the gphy configuration from the OTP region of the chip. The gphy
13184 * configuration is a 32-bit value that straddles the alignment boundary.
13185 * We do two 32-bit reads and then shift and merge the results.
13187 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13189 u32 bhalf_otp, thalf_otp;
13191 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13193 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13196 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13198 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13201 thalf_otp = tr32(OTP_READ_DATA);
13203 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13205 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13208 bhalf_otp = tr32(OTP_READ_DATA);
13210 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13213 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13215 u32 adv = ADVERTISED_Autoneg |
13218 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13219 adv |= ADVERTISED_1000baseT_Half |
13220 ADVERTISED_1000baseT_Full;
13222 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13223 adv |= ADVERTISED_100baseT_Half |
13224 ADVERTISED_100baseT_Full |
13225 ADVERTISED_10baseT_Half |
13226 ADVERTISED_10baseT_Full |
13229 adv |= ADVERTISED_FIBRE;
13231 tp->link_config.advertising = adv;
13232 tp->link_config.speed = SPEED_INVALID;
13233 tp->link_config.duplex = DUPLEX_INVALID;
13234 tp->link_config.autoneg = AUTONEG_ENABLE;
13235 tp->link_config.active_speed = SPEED_INVALID;
13236 tp->link_config.active_duplex = DUPLEX_INVALID;
13237 tp->link_config.orig_speed = SPEED_INVALID;
13238 tp->link_config.orig_duplex = DUPLEX_INVALID;
13239 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13242 static int __devinit tg3_phy_probe(struct tg3 *tp)
13244 u32 hw_phy_id_1, hw_phy_id_2;
13245 u32 hw_phy_id, hw_phy_id_masked;
13248 /* flow control autonegotiation is default behavior */
13249 tg3_flag_set(tp, PAUSE_AUTONEG);
13250 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13252 if (tg3_flag(tp, USE_PHYLIB))
13253 return tg3_phy_init(tp);
13255 /* Reading the PHY ID register can conflict with ASF
13256 * firmware access to the PHY hardware.
13259 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13260 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13262 /* Now read the physical PHY_ID from the chip and verify
13263 * that it is sane. If it doesn't look good, we fall back
13264 * to either the hard-coded table based PHY_ID and failing
13265 * that the value found in the eeprom area.
13267 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13268 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13270 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13271 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13272 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13274 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13277 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13278 tp->phy_id = hw_phy_id;
13279 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13280 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13282 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13284 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13285 /* Do nothing, phy ID already set up in
13286 * tg3_get_eeprom_hw_cfg().
13289 struct subsys_tbl_ent *p;
13291 /* No eeprom signature? Try the hardcoded
13292 * subsys device table.
13294 p = tg3_lookup_by_subsys(tp);
13298 tp->phy_id = p->phy_id;
13300 tp->phy_id == TG3_PHY_ID_BCM8002)
13301 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13305 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13306 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13308 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13309 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13310 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13311 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13312 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13314 tg3_phy_init_link_config(tp);
13316 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13317 !tg3_flag(tp, ENABLE_APE) &&
13318 !tg3_flag(tp, ENABLE_ASF)) {
13321 tg3_readphy(tp, MII_BMSR, &bmsr);
13322 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13323 (bmsr & BMSR_LSTATUS))
13324 goto skip_phy_reset;
13326 err = tg3_phy_reset(tp);
13330 tg3_phy_set_wirespeed(tp);
13332 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13333 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13334 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13335 if (!tg3_copper_is_advertising_all(tp, mask)) {
13336 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13337 tp->link_config.flowctrl);
13339 tg3_writephy(tp, MII_BMCR,
13340 BMCR_ANENABLE | BMCR_ANRESTART);
13345 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13346 err = tg3_init_5401phy_dsp(tp);
13350 err = tg3_init_5401phy_dsp(tp);
13356 static void __devinit tg3_read_vpd(struct tg3 *tp)
13359 unsigned int block_end, rosize, len;
13363 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13367 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13369 goto out_not_found;
13371 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13372 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13373 i += PCI_VPD_LRDT_TAG_SIZE;
13375 if (block_end > vpdlen)
13376 goto out_not_found;
13378 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13379 PCI_VPD_RO_KEYWORD_MFR_ID);
13381 len = pci_vpd_info_field_size(&vpd_data[j]);
13383 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13384 if (j + len > block_end || len != 4 ||
13385 memcmp(&vpd_data[j], "1028", 4))
13388 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13389 PCI_VPD_RO_KEYWORD_VENDOR0);
13393 len = pci_vpd_info_field_size(&vpd_data[j]);
13395 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13396 if (j + len > block_end)
13399 memcpy(tp->fw_ver, &vpd_data[j], len);
13400 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13404 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13405 PCI_VPD_RO_KEYWORD_PARTNO);
13407 goto out_not_found;
13409 len = pci_vpd_info_field_size(&vpd_data[i]);
13411 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13412 if (len > TG3_BPN_SIZE ||
13413 (len + i) > vpdlen)
13414 goto out_not_found;
13416 memcpy(tp->board_part_number, &vpd_data[i], len);
13420 if (tp->board_part_number[0])
13424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13425 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13426 strcpy(tp->board_part_number, "BCM5717");
13427 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13428 strcpy(tp->board_part_number, "BCM5718");
13431 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13432 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13433 strcpy(tp->board_part_number, "BCM57780");
13434 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13435 strcpy(tp->board_part_number, "BCM57760");
13436 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13437 strcpy(tp->board_part_number, "BCM57790");
13438 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13439 strcpy(tp->board_part_number, "BCM57788");
13442 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13443 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13444 strcpy(tp->board_part_number, "BCM57761");
13445 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13446 strcpy(tp->board_part_number, "BCM57765");
13447 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13448 strcpy(tp->board_part_number, "BCM57781");
13449 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13450 strcpy(tp->board_part_number, "BCM57785");
13451 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13452 strcpy(tp->board_part_number, "BCM57791");
13453 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13454 strcpy(tp->board_part_number, "BCM57795");
13457 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13458 strcpy(tp->board_part_number, "BCM95906");
13461 strcpy(tp->board_part_number, "none");
13465 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13469 if (tg3_nvram_read(tp, offset, &val) ||
13470 (val & 0xfc000000) != 0x0c000000 ||
13471 tg3_nvram_read(tp, offset + 4, &val) ||
13478 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13480 u32 val, offset, start, ver_offset;
13482 bool newver = false;
13484 if (tg3_nvram_read(tp, 0xc, &offset) ||
13485 tg3_nvram_read(tp, 0x4, &start))
13488 offset = tg3_nvram_logical_addr(tp, offset);
13490 if (tg3_nvram_read(tp, offset, &val))
13493 if ((val & 0xfc000000) == 0x0c000000) {
13494 if (tg3_nvram_read(tp, offset + 4, &val))
13501 dst_off = strlen(tp->fw_ver);
13504 if (TG3_VER_SIZE - dst_off < 16 ||
13505 tg3_nvram_read(tp, offset + 8, &ver_offset))
13508 offset = offset + ver_offset - start;
13509 for (i = 0; i < 16; i += 4) {
13511 if (tg3_nvram_read_be32(tp, offset + i, &v))
13514 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13519 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13522 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13523 TG3_NVM_BCVER_MAJSFT;
13524 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13525 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13526 "v%d.%02d", major, minor);
13530 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13532 u32 val, major, minor;
13534 /* Use native endian representation */
13535 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13538 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13539 TG3_NVM_HWSB_CFG1_MAJSFT;
13540 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13541 TG3_NVM_HWSB_CFG1_MINSFT;
13543 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13546 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13548 u32 offset, major, minor, build;
13550 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13552 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13555 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13556 case TG3_EEPROM_SB_REVISION_0:
13557 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13559 case TG3_EEPROM_SB_REVISION_2:
13560 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13562 case TG3_EEPROM_SB_REVISION_3:
13563 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13565 case TG3_EEPROM_SB_REVISION_4:
13566 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13568 case TG3_EEPROM_SB_REVISION_5:
13569 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13571 case TG3_EEPROM_SB_REVISION_6:
13572 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13578 if (tg3_nvram_read(tp, offset, &val))
13581 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13582 TG3_EEPROM_SB_EDH_BLD_SHFT;
13583 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13584 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13585 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13587 if (minor > 99 || build > 26)
13590 offset = strlen(tp->fw_ver);
13591 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13592 " v%d.%02d", major, minor);
13595 offset = strlen(tp->fw_ver);
13596 if (offset < TG3_VER_SIZE - 1)
13597 tp->fw_ver[offset] = 'a' + build - 1;
13601 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13603 u32 val, offset, start;
13606 for (offset = TG3_NVM_DIR_START;
13607 offset < TG3_NVM_DIR_END;
13608 offset += TG3_NVM_DIRENT_SIZE) {
13609 if (tg3_nvram_read(tp, offset, &val))
13612 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13616 if (offset == TG3_NVM_DIR_END)
13619 if (!tg3_flag(tp, 5705_PLUS))
13620 start = 0x08000000;
13621 else if (tg3_nvram_read(tp, offset - 4, &start))
13624 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13625 !tg3_fw_img_is_valid(tp, offset) ||
13626 tg3_nvram_read(tp, offset + 8, &val))
13629 offset += val - start;
13631 vlen = strlen(tp->fw_ver);
13633 tp->fw_ver[vlen++] = ',';
13634 tp->fw_ver[vlen++] = ' ';
13636 for (i = 0; i < 4; i++) {
13638 if (tg3_nvram_read_be32(tp, offset, &v))
13641 offset += sizeof(v);
13643 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13644 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13648 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13653 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13659 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13662 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13663 if (apedata != APE_SEG_SIG_MAGIC)
13666 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13667 if (!(apedata & APE_FW_STATUS_READY))
13670 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13672 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13673 tg3_flag_set(tp, APE_HAS_NCSI);
13679 vlen = strlen(tp->fw_ver);
13681 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13683 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13684 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13685 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13686 (apedata & APE_FW_VERSION_BLDMSK));
13689 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13692 bool vpd_vers = false;
13694 if (tp->fw_ver[0] != 0)
13697 if (tg3_flag(tp, NO_NVRAM)) {
13698 strcat(tp->fw_ver, "sb");
13702 if (tg3_nvram_read(tp, 0, &val))
13705 if (val == TG3_EEPROM_MAGIC)
13706 tg3_read_bc_ver(tp);
13707 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13708 tg3_read_sb_ver(tp, val);
13709 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13710 tg3_read_hwsb_ver(tp);
13717 if (tg3_flag(tp, ENABLE_APE)) {
13718 if (tg3_flag(tp, ENABLE_ASF))
13719 tg3_read_dash_ver(tp);
13720 } else if (tg3_flag(tp, ENABLE_ASF)) {
13721 tg3_read_mgmtfw_ver(tp);
13725 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13728 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13730 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13732 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13733 return TG3_RX_RET_MAX_SIZE_5717;
13734 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13735 return TG3_RX_RET_MAX_SIZE_5700;
13737 return TG3_RX_RET_MAX_SIZE_5705;
13740 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13741 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13742 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13743 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13747 static int __devinit tg3_get_invariants(struct tg3 *tp)
13750 u32 pci_state_reg, grc_misc_cfg;
13755 /* Force memory write invalidate off. If we leave it on,
13756 * then on 5700_BX chips we have to enable a workaround.
13757 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13758 * to match the cacheline size. The Broadcom driver have this
13759 * workaround but turns MWI off all the times so never uses
13760 * it. This seems to suggest that the workaround is insufficient.
13762 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13763 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13764 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13766 /* Important! -- Make sure register accesses are byteswapped
13767 * correctly. Also, for those chips that require it, make
13768 * sure that indirect register accesses are enabled before
13769 * the first operation.
13771 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13773 tp->misc_host_ctrl |= (misc_ctrl_reg &
13774 MISC_HOST_CTRL_CHIPREV);
13775 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13776 tp->misc_host_ctrl);
13778 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13779 MISC_HOST_CTRL_CHIPREV_SHIFT);
13780 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13781 u32 prod_id_asic_rev;
13783 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13785 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13786 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13787 pci_read_config_dword(tp->pdev,
13788 TG3PCI_GEN2_PRODID_ASICREV,
13789 &prod_id_asic_rev);
13790 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13791 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13792 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13793 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13794 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13795 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13796 pci_read_config_dword(tp->pdev,
13797 TG3PCI_GEN15_PRODID_ASICREV,
13798 &prod_id_asic_rev);
13800 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13801 &prod_id_asic_rev);
13803 tp->pci_chip_rev_id = prod_id_asic_rev;
13806 /* Wrong chip ID in 5752 A0. This code can be removed later
13807 * as A0 is not in production.
13809 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13810 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13812 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13813 * we need to disable memory and use config. cycles
13814 * only to access all registers. The 5702/03 chips
13815 * can mistakenly decode the special cycles from the
13816 * ICH chipsets as memory write cycles, causing corruption
13817 * of register and memory space. Only certain ICH bridges
13818 * will drive special cycles with non-zero data during the
13819 * address phase which can fall within the 5703's address
13820 * range. This is not an ICH bug as the PCI spec allows
13821 * non-zero address during special cycles. However, only
13822 * these ICH bridges are known to drive non-zero addresses
13823 * during special cycles.
13825 * Since special cycles do not cross PCI bridges, we only
13826 * enable this workaround if the 5703 is on the secondary
13827 * bus of these ICH bridges.
13829 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13830 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13831 static struct tg3_dev_id {
13835 } ich_chipsets[] = {
13836 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13838 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13840 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13842 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13846 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13847 struct pci_dev *bridge = NULL;
13849 while (pci_id->vendor != 0) {
13850 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13856 if (pci_id->rev != PCI_ANY_ID) {
13857 if (bridge->revision > pci_id->rev)
13860 if (bridge->subordinate &&
13861 (bridge->subordinate->number ==
13862 tp->pdev->bus->number)) {
13863 tg3_flag_set(tp, ICH_WORKAROUND);
13864 pci_dev_put(bridge);
13870 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13871 static struct tg3_dev_id {
13874 } bridge_chipsets[] = {
13875 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13876 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13879 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13880 struct pci_dev *bridge = NULL;
13882 while (pci_id->vendor != 0) {
13883 bridge = pci_get_device(pci_id->vendor,
13890 if (bridge->subordinate &&
13891 (bridge->subordinate->number <=
13892 tp->pdev->bus->number) &&
13893 (bridge->subordinate->subordinate >=
13894 tp->pdev->bus->number)) {
13895 tg3_flag_set(tp, 5701_DMA_BUG);
13896 pci_dev_put(bridge);
13902 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13903 * DMA addresses > 40-bit. This bridge may have other additional
13904 * 57xx devices behind it in some 4-port NIC designs for example.
13905 * Any tg3 device found behind the bridge will also need the 40-bit
13908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13909 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13910 tg3_flag_set(tp, 5780_CLASS);
13911 tg3_flag_set(tp, 40BIT_DMA_BUG);
13912 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13914 struct pci_dev *bridge = NULL;
13917 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13918 PCI_DEVICE_ID_SERVERWORKS_EPB,
13920 if (bridge && bridge->subordinate &&
13921 (bridge->subordinate->number <=
13922 tp->pdev->bus->number) &&
13923 (bridge->subordinate->subordinate >=
13924 tp->pdev->bus->number)) {
13925 tg3_flag_set(tp, 40BIT_DMA_BUG);
13926 pci_dev_put(bridge);
13932 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13934 tp->pdev_peer = tg3_find_peer(tp);
13936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13939 tg3_flag_set(tp, 5717_PLUS);
13941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13942 tg3_flag(tp, 5717_PLUS))
13943 tg3_flag_set(tp, 57765_PLUS);
13945 /* Intentionally exclude ASIC_REV_5906 */
13946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13952 tg3_flag(tp, 57765_PLUS))
13953 tg3_flag_set(tp, 5755_PLUS);
13955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13958 tg3_flag(tp, 5755_PLUS) ||
13959 tg3_flag(tp, 5780_CLASS))
13960 tg3_flag_set(tp, 5750_PLUS);
13962 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13963 tg3_flag(tp, 5750_PLUS))
13964 tg3_flag_set(tp, 5705_PLUS);
13966 /* Determine TSO capabilities */
13967 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13968 ; /* Do nothing. HW bug. */
13969 else if (tg3_flag(tp, 57765_PLUS))
13970 tg3_flag_set(tp, HW_TSO_3);
13971 else if (tg3_flag(tp, 5755_PLUS) ||
13972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13973 tg3_flag_set(tp, HW_TSO_2);
13974 else if (tg3_flag(tp, 5750_PLUS)) {
13975 tg3_flag_set(tp, HW_TSO_1);
13976 tg3_flag_set(tp, TSO_BUG);
13977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13978 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13979 tg3_flag_clear(tp, TSO_BUG);
13980 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13981 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13982 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13983 tg3_flag_set(tp, TSO_BUG);
13984 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13985 tp->fw_needed = FIRMWARE_TG3TSO5;
13987 tp->fw_needed = FIRMWARE_TG3TSO;
13990 /* Selectively allow TSO based on operating conditions */
13991 if (tg3_flag(tp, HW_TSO_1) ||
13992 tg3_flag(tp, HW_TSO_2) ||
13993 tg3_flag(tp, HW_TSO_3) ||
13994 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13995 tg3_flag_set(tp, TSO_CAPABLE);
13997 tg3_flag_clear(tp, TSO_CAPABLE);
13998 tg3_flag_clear(tp, TSO_BUG);
13999 tp->fw_needed = NULL;
14002 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14003 tp->fw_needed = FIRMWARE_TG3;
14007 if (tg3_flag(tp, 5750_PLUS)) {
14008 tg3_flag_set(tp, SUPPORT_MSI);
14009 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14010 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14011 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14012 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14013 tp->pdev_peer == tp->pdev))
14014 tg3_flag_clear(tp, SUPPORT_MSI);
14016 if (tg3_flag(tp, 5755_PLUS) ||
14017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14018 tg3_flag_set(tp, 1SHOT_MSI);
14021 if (tg3_flag(tp, 57765_PLUS)) {
14022 tg3_flag_set(tp, SUPPORT_MSIX);
14023 tp->irq_max = TG3_IRQ_MAX_VECS;
14027 if (tg3_flag(tp, 5755_PLUS))
14028 tg3_flag_set(tp, SHORT_DMA_BUG);
14030 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14031 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14033 if (tg3_flag(tp, 5717_PLUS))
14034 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14036 if (tg3_flag(tp, 57765_PLUS) &&
14037 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14038 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14040 if (!tg3_flag(tp, 5705_PLUS) ||
14041 tg3_flag(tp, 5780_CLASS) ||
14042 tg3_flag(tp, USE_JUMBO_BDFLAG))
14043 tg3_flag_set(tp, JUMBO_CAPABLE);
14045 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14048 if (pci_is_pcie(tp->pdev)) {
14051 tg3_flag_set(tp, PCI_EXPRESS);
14053 tp->pcie_readrq = 4096;
14054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14056 tp->pcie_readrq = 2048;
14058 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14060 pci_read_config_word(tp->pdev,
14061 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14063 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14064 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14066 tg3_flag_clear(tp, HW_TSO_2);
14067 tg3_flag_clear(tp, TSO_CAPABLE);
14069 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14070 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14071 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14072 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14073 tg3_flag_set(tp, CLKREQ_BUG);
14074 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14075 tg3_flag_set(tp, L1PLLPD_EN);
14077 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14078 /* BCM5785 devices are effectively PCIe devices, and should
14079 * follow PCIe codepaths, but do not have a PCIe capabilities
14082 tg3_flag_set(tp, PCI_EXPRESS);
14083 } else if (!tg3_flag(tp, 5705_PLUS) ||
14084 tg3_flag(tp, 5780_CLASS)) {
14085 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14086 if (!tp->pcix_cap) {
14087 dev_err(&tp->pdev->dev,
14088 "Cannot find PCI-X capability, aborting\n");
14092 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14093 tg3_flag_set(tp, PCIX_MODE);
14096 /* If we have an AMD 762 or VIA K8T800 chipset, write
14097 * reordering to the mailbox registers done by the host
14098 * controller can cause major troubles. We read back from
14099 * every mailbox register write to force the writes to be
14100 * posted to the chip in order.
14102 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14103 !tg3_flag(tp, PCI_EXPRESS))
14104 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14106 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14107 &tp->pci_cacheline_sz);
14108 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14109 &tp->pci_lat_timer);
14110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14111 tp->pci_lat_timer < 64) {
14112 tp->pci_lat_timer = 64;
14113 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14114 tp->pci_lat_timer);
14117 /* Important! -- It is critical that the PCI-X hw workaround
14118 * situation is decided before the first MMIO register access.
14120 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14121 /* 5700 BX chips need to have their TX producer index
14122 * mailboxes written twice to workaround a bug.
14124 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14126 /* If we are in PCI-X mode, enable register write workaround.
14128 * The workaround is to use indirect register accesses
14129 * for all chip writes not to mailbox registers.
14131 if (tg3_flag(tp, PCIX_MODE)) {
14134 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14136 /* The chip can have it's power management PCI config
14137 * space registers clobbered due to this bug.
14138 * So explicitly force the chip into D0 here.
14140 pci_read_config_dword(tp->pdev,
14141 tp->pm_cap + PCI_PM_CTRL,
14143 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14144 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14145 pci_write_config_dword(tp->pdev,
14146 tp->pm_cap + PCI_PM_CTRL,
14149 /* Also, force SERR#/PERR# in PCI command. */
14150 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14151 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14152 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14156 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14157 tg3_flag_set(tp, PCI_HIGH_SPEED);
14158 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14159 tg3_flag_set(tp, PCI_32BIT);
14161 /* Chip-specific fixup from Broadcom driver */
14162 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14163 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14164 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14165 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14168 /* Default fast path register access methods */
14169 tp->read32 = tg3_read32;
14170 tp->write32 = tg3_write32;
14171 tp->read32_mbox = tg3_read32;
14172 tp->write32_mbox = tg3_write32;
14173 tp->write32_tx_mbox = tg3_write32;
14174 tp->write32_rx_mbox = tg3_write32;
14176 /* Various workaround register access methods */
14177 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14178 tp->write32 = tg3_write_indirect_reg32;
14179 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14180 (tg3_flag(tp, PCI_EXPRESS) &&
14181 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14183 * Back to back register writes can cause problems on these
14184 * chips, the workaround is to read back all reg writes
14185 * except those to mailbox regs.
14187 * See tg3_write_indirect_reg32().
14189 tp->write32 = tg3_write_flush_reg32;
14192 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14193 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14194 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14195 tp->write32_rx_mbox = tg3_write_flush_reg32;
14198 if (tg3_flag(tp, ICH_WORKAROUND)) {
14199 tp->read32 = tg3_read_indirect_reg32;
14200 tp->write32 = tg3_write_indirect_reg32;
14201 tp->read32_mbox = tg3_read_indirect_mbox;
14202 tp->write32_mbox = tg3_write_indirect_mbox;
14203 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14204 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14209 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14210 pci_cmd &= ~PCI_COMMAND_MEMORY;
14211 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14214 tp->read32_mbox = tg3_read32_mbox_5906;
14215 tp->write32_mbox = tg3_write32_mbox_5906;
14216 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14217 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14220 if (tp->write32 == tg3_write_indirect_reg32 ||
14221 (tg3_flag(tp, PCIX_MODE) &&
14222 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14224 tg3_flag_set(tp, SRAM_USE_CONFIG);
14226 /* The memory arbiter has to be enabled in order for SRAM accesses
14227 * to succeed. Normally on powerup the tg3 chip firmware will make
14228 * sure it is enabled, but other entities such as system netboot
14229 * code might disable it.
14231 val = tr32(MEMARB_MODE);
14232 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14234 if (tg3_flag(tp, PCIX_MODE)) {
14235 pci_read_config_dword(tp->pdev,
14236 tp->pcix_cap + PCI_X_STATUS, &val);
14237 tp->pci_fn = val & 0x7;
14239 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14242 /* Get eeprom hw config before calling tg3_set_power_state().
14243 * In particular, the TG3_FLAG_IS_NIC flag must be
14244 * determined before calling tg3_set_power_state() so that
14245 * we know whether or not to switch out of Vaux power.
14246 * When the flag is set, it means that GPIO1 is used for eeprom
14247 * write protect and also implies that it is a LOM where GPIOs
14248 * are not used to switch power.
14250 tg3_get_eeprom_hw_cfg(tp);
14252 if (tg3_flag(tp, ENABLE_APE)) {
14253 /* Allow reads and writes to the
14254 * APE register and memory space.
14256 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14257 PCISTATE_ALLOW_APE_SHMEM_WR |
14258 PCISTATE_ALLOW_APE_PSPACE_WR;
14259 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14262 tg3_ape_lock_init(tp);
14265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14269 tg3_flag(tp, 57765_PLUS))
14270 tg3_flag_set(tp, CPMU_PRESENT);
14272 /* Set up tp->grc_local_ctrl before calling
14273 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14274 * will bring 5700's external PHY out of reset.
14275 * It is also used as eeprom write protect on LOMs.
14277 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14279 tg3_flag(tp, EEPROM_WRITE_PROT))
14280 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14281 GRC_LCLCTRL_GPIO_OUTPUT1);
14282 /* Unused GPIO3 must be driven as output on 5752 because there
14283 * are no pull-up resistors on unused GPIO pins.
14285 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14286 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14291 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14293 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14294 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14295 /* Turn off the debug UART. */
14296 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14297 if (tg3_flag(tp, IS_NIC))
14298 /* Keep VMain power. */
14299 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14300 GRC_LCLCTRL_GPIO_OUTPUT0;
14303 /* Switch out of Vaux if it is a NIC */
14304 tg3_pwrsrc_switch_to_vmain(tp);
14306 /* Derive initial jumbo mode from MTU assigned in
14307 * ether_setup() via the alloc_etherdev() call
14309 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14310 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14312 /* Determine WakeOnLan speed to use. */
14313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14314 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14315 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14316 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14317 tg3_flag_clear(tp, WOL_SPEED_100MB);
14319 tg3_flag_set(tp, WOL_SPEED_100MB);
14322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14323 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14325 /* A few boards don't want Ethernet@WireSpeed phy feature */
14326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14327 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14328 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14329 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14330 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14331 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14332 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14334 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14335 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14336 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14337 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14338 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14340 if (tg3_flag(tp, 5705_PLUS) &&
14341 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14342 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14343 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14344 !tg3_flag(tp, 57765_PLUS)) {
14345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14348 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14349 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14350 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14351 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14352 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14353 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14355 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14359 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14360 tp->phy_otp = tg3_read_otp_phycfg(tp);
14361 if (tp->phy_otp == 0)
14362 tp->phy_otp = TG3_OTP_DEFAULT;
14365 if (tg3_flag(tp, CPMU_PRESENT))
14366 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14368 tp->mi_mode = MAC_MI_MODE_BASE;
14370 tp->coalesce_mode = 0;
14371 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14372 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14373 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14375 /* Set these bits to enable statistics workaround. */
14376 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14377 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14378 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14379 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14380 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14385 tg3_flag_set(tp, USE_PHYLIB);
14387 err = tg3_mdio_init(tp);
14391 /* Initialize data/descriptor byte/word swapping. */
14392 val = tr32(GRC_MODE);
14393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14394 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14395 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14396 GRC_MODE_B2HRX_ENABLE |
14397 GRC_MODE_HTX2B_ENABLE |
14398 GRC_MODE_HOST_STACKUP);
14400 val &= GRC_MODE_HOST_STACKUP;
14402 tw32(GRC_MODE, val | tp->grc_mode);
14404 tg3_switch_clocks(tp);
14406 /* Clear this out for sanity. */
14407 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14409 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14411 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14412 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14413 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14415 if (chiprevid == CHIPREV_ID_5701_A0 ||
14416 chiprevid == CHIPREV_ID_5701_B0 ||
14417 chiprevid == CHIPREV_ID_5701_B2 ||
14418 chiprevid == CHIPREV_ID_5701_B5) {
14419 void __iomem *sram_base;
14421 /* Write some dummy words into the SRAM status block
14422 * area, see if it reads back correctly. If the return
14423 * value is bad, force enable the PCIX workaround.
14425 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14427 writel(0x00000000, sram_base);
14428 writel(0x00000000, sram_base + 4);
14429 writel(0xffffffff, sram_base + 4);
14430 if (readl(sram_base) != 0x00000000)
14431 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14436 tg3_nvram_init(tp);
14438 grc_misc_cfg = tr32(GRC_MISC_CFG);
14439 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14441 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14442 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14443 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14444 tg3_flag_set(tp, IS_5788);
14446 if (!tg3_flag(tp, IS_5788) &&
14447 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14448 tg3_flag_set(tp, TAGGED_STATUS);
14449 if (tg3_flag(tp, TAGGED_STATUS)) {
14450 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14451 HOSTCC_MODE_CLRTICK_TXBD);
14453 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14454 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14455 tp->misc_host_ctrl);
14458 /* Preserve the APE MAC_MODE bits */
14459 if (tg3_flag(tp, ENABLE_APE))
14460 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14464 /* these are limited to 10/100 only */
14465 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14466 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14467 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14468 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14469 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14470 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14471 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14472 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14473 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14474 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14475 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14476 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14477 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14478 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14479 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14480 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14482 err = tg3_phy_probe(tp);
14484 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14485 /* ... but do not return immediately ... */
14490 tg3_read_fw_ver(tp);
14492 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14493 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14496 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14498 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14501 /* 5700 {AX,BX} chips have a broken status block link
14502 * change bit implementation, so we must use the
14503 * status register in those cases.
14505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14506 tg3_flag_set(tp, USE_LINKCHG_REG);
14508 tg3_flag_clear(tp, USE_LINKCHG_REG);
14510 /* The led_ctrl is set during tg3_phy_probe, here we might
14511 * have to force the link status polling mechanism based
14512 * upon subsystem IDs.
14514 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14516 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14517 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14518 tg3_flag_set(tp, USE_LINKCHG_REG);
14521 /* For all SERDES we poll the MAC status register. */
14522 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14523 tg3_flag_set(tp, POLL_SERDES);
14525 tg3_flag_clear(tp, POLL_SERDES);
14527 tp->rx_offset = NET_IP_ALIGN;
14528 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14530 tg3_flag(tp, PCIX_MODE)) {
14532 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14533 tp->rx_copy_thresh = ~(u16)0;
14537 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14538 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14539 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14541 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14543 /* Increment the rx prod index on the rx std ring by at most
14544 * 8 for these chips to workaround hw errata.
14546 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14547 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14549 tp->rx_std_max_post = 8;
14551 if (tg3_flag(tp, ASPM_WORKAROUND))
14552 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14553 PCIE_PWR_MGMT_L1_THRESH_MSK;
14558 #ifdef CONFIG_SPARC
14559 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14561 struct net_device *dev = tp->dev;
14562 struct pci_dev *pdev = tp->pdev;
14563 struct device_node *dp = pci_device_to_OF_node(pdev);
14564 const unsigned char *addr;
14567 addr = of_get_property(dp, "local-mac-address", &len);
14568 if (addr && len == 6) {
14569 memcpy(dev->dev_addr, addr, 6);
14570 memcpy(dev->perm_addr, dev->dev_addr, 6);
14576 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14578 struct net_device *dev = tp->dev;
14580 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14581 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14586 static int __devinit tg3_get_device_address(struct tg3 *tp)
14588 struct net_device *dev = tp->dev;
14589 u32 hi, lo, mac_offset;
14592 #ifdef CONFIG_SPARC
14593 if (!tg3_get_macaddr_sparc(tp))
14598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14599 tg3_flag(tp, 5780_CLASS)) {
14600 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14602 if (tg3_nvram_lock(tp))
14603 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14605 tg3_nvram_unlock(tp);
14606 } else if (tg3_flag(tp, 5717_PLUS)) {
14607 if (tp->pci_fn & 1)
14609 if (tp->pci_fn > 1)
14610 mac_offset += 0x18c;
14611 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14614 /* First try to get it from MAC address mailbox. */
14615 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14616 if ((hi >> 16) == 0x484b) {
14617 dev->dev_addr[0] = (hi >> 8) & 0xff;
14618 dev->dev_addr[1] = (hi >> 0) & 0xff;
14620 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14621 dev->dev_addr[2] = (lo >> 24) & 0xff;
14622 dev->dev_addr[3] = (lo >> 16) & 0xff;
14623 dev->dev_addr[4] = (lo >> 8) & 0xff;
14624 dev->dev_addr[5] = (lo >> 0) & 0xff;
14626 /* Some old bootcode may report a 0 MAC address in SRAM */
14627 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14630 /* Next, try NVRAM. */
14631 if (!tg3_flag(tp, NO_NVRAM) &&
14632 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14633 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14634 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14635 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14637 /* Finally just fetch it out of the MAC control regs. */
14639 hi = tr32(MAC_ADDR_0_HIGH);
14640 lo = tr32(MAC_ADDR_0_LOW);
14642 dev->dev_addr[5] = lo & 0xff;
14643 dev->dev_addr[4] = (lo >> 8) & 0xff;
14644 dev->dev_addr[3] = (lo >> 16) & 0xff;
14645 dev->dev_addr[2] = (lo >> 24) & 0xff;
14646 dev->dev_addr[1] = hi & 0xff;
14647 dev->dev_addr[0] = (hi >> 8) & 0xff;
14651 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14652 #ifdef CONFIG_SPARC
14653 if (!tg3_get_default_macaddr_sparc(tp))
14658 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14662 #define BOUNDARY_SINGLE_CACHELINE 1
14663 #define BOUNDARY_MULTI_CACHELINE 2
14665 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14667 int cacheline_size;
14671 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14673 cacheline_size = 1024;
14675 cacheline_size = (int) byte * 4;
14677 /* On 5703 and later chips, the boundary bits have no
14680 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14681 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14682 !tg3_flag(tp, PCI_EXPRESS))
14685 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14686 goal = BOUNDARY_MULTI_CACHELINE;
14688 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14689 goal = BOUNDARY_SINGLE_CACHELINE;
14695 if (tg3_flag(tp, 57765_PLUS)) {
14696 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14703 /* PCI controllers on most RISC systems tend to disconnect
14704 * when a device tries to burst across a cache-line boundary.
14705 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14707 * Unfortunately, for PCI-E there are only limited
14708 * write-side controls for this, and thus for reads
14709 * we will still get the disconnects. We'll also waste
14710 * these PCI cycles for both read and write for chips
14711 * other than 5700 and 5701 which do not implement the
14714 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14715 switch (cacheline_size) {
14720 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14721 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14722 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14724 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14725 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14730 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14731 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14735 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14736 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14739 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14740 switch (cacheline_size) {
14744 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14745 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14746 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14752 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14753 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14757 switch (cacheline_size) {
14759 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14760 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14761 DMA_RWCTRL_WRITE_BNDRY_16);
14766 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14767 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14768 DMA_RWCTRL_WRITE_BNDRY_32);
14773 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14774 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14775 DMA_RWCTRL_WRITE_BNDRY_64);
14780 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14781 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14782 DMA_RWCTRL_WRITE_BNDRY_128);
14787 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14788 DMA_RWCTRL_WRITE_BNDRY_256);
14791 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14792 DMA_RWCTRL_WRITE_BNDRY_512);
14796 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14797 DMA_RWCTRL_WRITE_BNDRY_1024);
14806 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14808 struct tg3_internal_buffer_desc test_desc;
14809 u32 sram_dma_descs;
14812 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14814 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14815 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14816 tw32(RDMAC_STATUS, 0);
14817 tw32(WDMAC_STATUS, 0);
14819 tw32(BUFMGR_MODE, 0);
14820 tw32(FTQ_RESET, 0);
14822 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14823 test_desc.addr_lo = buf_dma & 0xffffffff;
14824 test_desc.nic_mbuf = 0x00002100;
14825 test_desc.len = size;
14828 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14829 * the *second* time the tg3 driver was getting loaded after an
14832 * Broadcom tells me:
14833 * ...the DMA engine is connected to the GRC block and a DMA
14834 * reset may affect the GRC block in some unpredictable way...
14835 * The behavior of resets to individual blocks has not been tested.
14837 * Broadcom noted the GRC reset will also reset all sub-components.
14840 test_desc.cqid_sqid = (13 << 8) | 2;
14842 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14845 test_desc.cqid_sqid = (16 << 8) | 7;
14847 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14850 test_desc.flags = 0x00000005;
14852 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14855 val = *(((u32 *)&test_desc) + i);
14856 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14857 sram_dma_descs + (i * sizeof(u32)));
14858 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14860 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14863 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14865 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14868 for (i = 0; i < 40; i++) {
14872 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14874 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14875 if ((val & 0xffff) == sram_dma_descs) {
14886 #define TEST_BUFFER_SIZE 0x2000
14888 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14889 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14893 static int __devinit tg3_test_dma(struct tg3 *tp)
14895 dma_addr_t buf_dma;
14896 u32 *buf, saved_dma_rwctrl;
14899 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14900 &buf_dma, GFP_KERNEL);
14906 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14907 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14909 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14911 if (tg3_flag(tp, 57765_PLUS))
14914 if (tg3_flag(tp, PCI_EXPRESS)) {
14915 /* DMA read watermark not used on PCIE */
14916 tp->dma_rwctrl |= 0x00180000;
14917 } else if (!tg3_flag(tp, PCIX_MODE)) {
14918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14919 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14920 tp->dma_rwctrl |= 0x003f0000;
14922 tp->dma_rwctrl |= 0x003f000f;
14924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14926 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14927 u32 read_water = 0x7;
14929 /* If the 5704 is behind the EPB bridge, we can
14930 * do the less restrictive ONE_DMA workaround for
14931 * better performance.
14933 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14935 tp->dma_rwctrl |= 0x8000;
14936 else if (ccval == 0x6 || ccval == 0x7)
14937 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14941 /* Set bit 23 to enable PCIX hw bug fix */
14943 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14944 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14946 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14947 /* 5780 always in PCIX mode */
14948 tp->dma_rwctrl |= 0x00144000;
14949 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14950 /* 5714 always in PCIX mode */
14951 tp->dma_rwctrl |= 0x00148000;
14953 tp->dma_rwctrl |= 0x001b000f;
14957 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14959 tp->dma_rwctrl &= 0xfffffff0;
14961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14963 /* Remove this if it causes problems for some boards. */
14964 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14966 /* On 5700/5701 chips, we need to set this bit.
14967 * Otherwise the chip will issue cacheline transactions
14968 * to streamable DMA memory with not all the byte
14969 * enables turned on. This is an error on several
14970 * RISC PCI controllers, in particular sparc64.
14972 * On 5703/5704 chips, this bit has been reassigned
14973 * a different meaning. In particular, it is used
14974 * on those chips to enable a PCI-X workaround.
14976 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14979 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14982 /* Unneeded, already done by tg3_get_invariants. */
14983 tg3_switch_clocks(tp);
14986 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14987 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14990 /* It is best to perform DMA test with maximum write burst size
14991 * to expose the 5700/5701 write DMA bug.
14993 saved_dma_rwctrl = tp->dma_rwctrl;
14994 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14995 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15000 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15003 /* Send the buffer to the chip. */
15004 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15006 dev_err(&tp->pdev->dev,
15007 "%s: Buffer write failed. err = %d\n",
15013 /* validate data reached card RAM correctly. */
15014 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15016 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15017 if (le32_to_cpu(val) != p[i]) {
15018 dev_err(&tp->pdev->dev,
15019 "%s: Buffer corrupted on device! "
15020 "(%d != %d)\n", __func__, val, i);
15021 /* ret = -ENODEV here? */
15026 /* Now read it back. */
15027 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15029 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15030 "err = %d\n", __func__, ret);
15035 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15039 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15040 DMA_RWCTRL_WRITE_BNDRY_16) {
15041 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15042 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15043 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15046 dev_err(&tp->pdev->dev,
15047 "%s: Buffer corrupted on read back! "
15048 "(%d != %d)\n", __func__, p[i], i);
15054 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15060 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15061 DMA_RWCTRL_WRITE_BNDRY_16) {
15062 /* DMA test passed without adjusting DMA boundary,
15063 * now look for chipsets that are known to expose the
15064 * DMA bug without failing the test.
15066 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15067 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15068 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15070 /* Safe to use the calculated DMA boundary. */
15071 tp->dma_rwctrl = saved_dma_rwctrl;
15074 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15078 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15083 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15085 if (tg3_flag(tp, 57765_PLUS)) {
15086 tp->bufmgr_config.mbuf_read_dma_low_water =
15087 DEFAULT_MB_RDMA_LOW_WATER_5705;
15088 tp->bufmgr_config.mbuf_mac_rx_low_water =
15089 DEFAULT_MB_MACRX_LOW_WATER_57765;
15090 tp->bufmgr_config.mbuf_high_water =
15091 DEFAULT_MB_HIGH_WATER_57765;
15093 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15094 DEFAULT_MB_RDMA_LOW_WATER_5705;
15095 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15096 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15097 tp->bufmgr_config.mbuf_high_water_jumbo =
15098 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15099 } else if (tg3_flag(tp, 5705_PLUS)) {
15100 tp->bufmgr_config.mbuf_read_dma_low_water =
15101 DEFAULT_MB_RDMA_LOW_WATER_5705;
15102 tp->bufmgr_config.mbuf_mac_rx_low_water =
15103 DEFAULT_MB_MACRX_LOW_WATER_5705;
15104 tp->bufmgr_config.mbuf_high_water =
15105 DEFAULT_MB_HIGH_WATER_5705;
15106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15107 tp->bufmgr_config.mbuf_mac_rx_low_water =
15108 DEFAULT_MB_MACRX_LOW_WATER_5906;
15109 tp->bufmgr_config.mbuf_high_water =
15110 DEFAULT_MB_HIGH_WATER_5906;
15113 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15114 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15115 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15116 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15117 tp->bufmgr_config.mbuf_high_water_jumbo =
15118 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15120 tp->bufmgr_config.mbuf_read_dma_low_water =
15121 DEFAULT_MB_RDMA_LOW_WATER;
15122 tp->bufmgr_config.mbuf_mac_rx_low_water =
15123 DEFAULT_MB_MACRX_LOW_WATER;
15124 tp->bufmgr_config.mbuf_high_water =
15125 DEFAULT_MB_HIGH_WATER;
15127 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15128 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15129 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15130 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15131 tp->bufmgr_config.mbuf_high_water_jumbo =
15132 DEFAULT_MB_HIGH_WATER_JUMBO;
15135 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15136 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15139 static char * __devinit tg3_phy_string(struct tg3 *tp)
15141 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15142 case TG3_PHY_ID_BCM5400: return "5400";
15143 case TG3_PHY_ID_BCM5401: return "5401";
15144 case TG3_PHY_ID_BCM5411: return "5411";
15145 case TG3_PHY_ID_BCM5701: return "5701";
15146 case TG3_PHY_ID_BCM5703: return "5703";
15147 case TG3_PHY_ID_BCM5704: return "5704";
15148 case TG3_PHY_ID_BCM5705: return "5705";
15149 case TG3_PHY_ID_BCM5750: return "5750";
15150 case TG3_PHY_ID_BCM5752: return "5752";
15151 case TG3_PHY_ID_BCM5714: return "5714";
15152 case TG3_PHY_ID_BCM5780: return "5780";
15153 case TG3_PHY_ID_BCM5755: return "5755";
15154 case TG3_PHY_ID_BCM5787: return "5787";
15155 case TG3_PHY_ID_BCM5784: return "5784";
15156 case TG3_PHY_ID_BCM5756: return "5722/5756";
15157 case TG3_PHY_ID_BCM5906: return "5906";
15158 case TG3_PHY_ID_BCM5761: return "5761";
15159 case TG3_PHY_ID_BCM5718C: return "5718C";
15160 case TG3_PHY_ID_BCM5718S: return "5718S";
15161 case TG3_PHY_ID_BCM57765: return "57765";
15162 case TG3_PHY_ID_BCM5719C: return "5719C";
15163 case TG3_PHY_ID_BCM5720C: return "5720C";
15164 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15165 case 0: return "serdes";
15166 default: return "unknown";
15170 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15172 if (tg3_flag(tp, PCI_EXPRESS)) {
15173 strcpy(str, "PCI Express");
15175 } else if (tg3_flag(tp, PCIX_MODE)) {
15176 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15178 strcpy(str, "PCIX:");
15180 if ((clock_ctrl == 7) ||
15181 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15182 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15183 strcat(str, "133MHz");
15184 else if (clock_ctrl == 0)
15185 strcat(str, "33MHz");
15186 else if (clock_ctrl == 2)
15187 strcat(str, "50MHz");
15188 else if (clock_ctrl == 4)
15189 strcat(str, "66MHz");
15190 else if (clock_ctrl == 6)
15191 strcat(str, "100MHz");
15193 strcpy(str, "PCI:");
15194 if (tg3_flag(tp, PCI_HIGH_SPEED))
15195 strcat(str, "66MHz");
15197 strcat(str, "33MHz");
15199 if (tg3_flag(tp, PCI_32BIT))
15200 strcat(str, ":32-bit");
15202 strcat(str, ":64-bit");
15206 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15208 struct pci_dev *peer;
15209 unsigned int func, devnr = tp->pdev->devfn & ~7;
15211 for (func = 0; func < 8; func++) {
15212 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15213 if (peer && peer != tp->pdev)
15217 /* 5704 can be configured in single-port mode, set peer to
15218 * tp->pdev in that case.
15226 * We don't need to keep the refcount elevated; there's no way
15227 * to remove one half of this device without removing the other
15234 static void __devinit tg3_init_coal(struct tg3 *tp)
15236 struct ethtool_coalesce *ec = &tp->coal;
15238 memset(ec, 0, sizeof(*ec));
15239 ec->cmd = ETHTOOL_GCOALESCE;
15240 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15241 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15242 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15243 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15244 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15245 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15246 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15247 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15248 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15250 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15251 HOSTCC_MODE_CLRTICK_TXBD)) {
15252 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15253 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15254 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15255 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15258 if (tg3_flag(tp, 5705_PLUS)) {
15259 ec->rx_coalesce_usecs_irq = 0;
15260 ec->tx_coalesce_usecs_irq = 0;
15261 ec->stats_block_coalesce_usecs = 0;
15265 static const struct net_device_ops tg3_netdev_ops = {
15266 .ndo_open = tg3_open,
15267 .ndo_stop = tg3_close,
15268 .ndo_start_xmit = tg3_start_xmit,
15269 .ndo_get_stats64 = tg3_get_stats64,
15270 .ndo_validate_addr = eth_validate_addr,
15271 .ndo_set_rx_mode = tg3_set_rx_mode,
15272 .ndo_set_mac_address = tg3_set_mac_addr,
15273 .ndo_do_ioctl = tg3_ioctl,
15274 .ndo_tx_timeout = tg3_tx_timeout,
15275 .ndo_change_mtu = tg3_change_mtu,
15276 .ndo_fix_features = tg3_fix_features,
15277 .ndo_set_features = tg3_set_features,
15278 #ifdef CONFIG_NET_POLL_CONTROLLER
15279 .ndo_poll_controller = tg3_poll_controller,
15283 static int __devinit tg3_init_one(struct pci_dev *pdev,
15284 const struct pci_device_id *ent)
15286 struct net_device *dev;
15288 int i, err, pm_cap;
15289 u32 sndmbx, rcvmbx, intmbx;
15291 u64 dma_mask, persist_dma_mask;
15294 printk_once(KERN_INFO "%s\n", version);
15296 err = pci_enable_device(pdev);
15298 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15302 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15304 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15305 goto err_out_disable_pdev;
15308 pci_set_master(pdev);
15310 /* Find power-management capability. */
15311 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15313 dev_err(&pdev->dev,
15314 "Cannot find Power Management capability, aborting\n");
15316 goto err_out_free_res;
15319 err = pci_set_power_state(pdev, PCI_D0);
15321 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15322 goto err_out_free_res;
15325 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15327 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15329 goto err_out_power_down;
15332 SET_NETDEV_DEV(dev, &pdev->dev);
15334 tp = netdev_priv(dev);
15337 tp->pm_cap = pm_cap;
15338 tp->rx_mode = TG3_DEF_RX_MODE;
15339 tp->tx_mode = TG3_DEF_TX_MODE;
15342 tp->msg_enable = tg3_debug;
15344 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15346 /* The word/byte swap controls here control register access byte
15347 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15350 tp->misc_host_ctrl =
15351 MISC_HOST_CTRL_MASK_PCI_INT |
15352 MISC_HOST_CTRL_WORD_SWAP |
15353 MISC_HOST_CTRL_INDIR_ACCESS |
15354 MISC_HOST_CTRL_PCISTATE_RW;
15356 /* The NONFRM (non-frame) byte/word swap controls take effect
15357 * on descriptor entries, anything which isn't packet data.
15359 * The StrongARM chips on the board (one for tx, one for rx)
15360 * are running in big-endian mode.
15362 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15363 GRC_MODE_WSWAP_NONFRM_DATA);
15364 #ifdef __BIG_ENDIAN
15365 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15367 spin_lock_init(&tp->lock);
15368 spin_lock_init(&tp->indirect_lock);
15369 INIT_WORK(&tp->reset_task, tg3_reset_task);
15371 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15373 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15375 goto err_out_free_dev;
15378 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15379 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15380 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15381 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15382 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15383 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15384 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15385 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15386 tg3_flag_set(tp, ENABLE_APE);
15387 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15388 if (!tp->aperegs) {
15389 dev_err(&pdev->dev,
15390 "Cannot map APE registers, aborting\n");
15392 goto err_out_iounmap;
15396 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15397 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15399 dev->ethtool_ops = &tg3_ethtool_ops;
15400 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15401 dev->netdev_ops = &tg3_netdev_ops;
15402 dev->irq = pdev->irq;
15404 err = tg3_get_invariants(tp);
15406 dev_err(&pdev->dev,
15407 "Problem fetching invariants of chip, aborting\n");
15408 goto err_out_apeunmap;
15411 /* The EPB bridge inside 5714, 5715, and 5780 and any
15412 * device behind the EPB cannot support DMA addresses > 40-bit.
15413 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15414 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15415 * do DMA address check in tg3_start_xmit().
15417 if (tg3_flag(tp, IS_5788))
15418 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15419 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15420 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15421 #ifdef CONFIG_HIGHMEM
15422 dma_mask = DMA_BIT_MASK(64);
15425 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15427 /* Configure DMA attributes. */
15428 if (dma_mask > DMA_BIT_MASK(32)) {
15429 err = pci_set_dma_mask(pdev, dma_mask);
15431 features |= NETIF_F_HIGHDMA;
15432 err = pci_set_consistent_dma_mask(pdev,
15435 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15436 "DMA for consistent allocations\n");
15437 goto err_out_apeunmap;
15441 if (err || dma_mask == DMA_BIT_MASK(32)) {
15442 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15444 dev_err(&pdev->dev,
15445 "No usable DMA configuration, aborting\n");
15446 goto err_out_apeunmap;
15450 tg3_init_bufmgr_config(tp);
15452 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15454 /* 5700 B0 chips do not support checksumming correctly due
15455 * to hardware bugs.
15457 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15458 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15460 if (tg3_flag(tp, 5755_PLUS))
15461 features |= NETIF_F_IPV6_CSUM;
15464 /* TSO is on by default on chips that support hardware TSO.
15465 * Firmware TSO on older chips gives lower performance, so it
15466 * is off by default, but can be enabled using ethtool.
15468 if ((tg3_flag(tp, HW_TSO_1) ||
15469 tg3_flag(tp, HW_TSO_2) ||
15470 tg3_flag(tp, HW_TSO_3)) &&
15471 (features & NETIF_F_IP_CSUM))
15472 features |= NETIF_F_TSO;
15473 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15474 if (features & NETIF_F_IPV6_CSUM)
15475 features |= NETIF_F_TSO6;
15476 if (tg3_flag(tp, HW_TSO_3) ||
15477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15478 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15479 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15480 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15482 features |= NETIF_F_TSO_ECN;
15485 dev->features |= features;
15486 dev->vlan_features |= features;
15489 * Add loopback capability only for a subset of devices that support
15490 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15491 * loopback for the remaining devices.
15493 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15494 !tg3_flag(tp, CPMU_PRESENT))
15495 /* Add the loopback capability */
15496 features |= NETIF_F_LOOPBACK;
15498 dev->hw_features |= features;
15500 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15501 !tg3_flag(tp, TSO_CAPABLE) &&
15502 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15503 tg3_flag_set(tp, MAX_RXPEND_64);
15504 tp->rx_pending = 63;
15507 err = tg3_get_device_address(tp);
15509 dev_err(&pdev->dev,
15510 "Could not obtain valid ethernet address, aborting\n");
15511 goto err_out_apeunmap;
15515 * Reset chip in case UNDI or EFI driver did not shutdown
15516 * DMA self test will enable WDMAC and we'll see (spurious)
15517 * pending DMA on the PCI bus at that point.
15519 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15520 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15521 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15522 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15525 err = tg3_test_dma(tp);
15527 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15528 goto err_out_apeunmap;
15531 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15532 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15533 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15534 for (i = 0; i < tp->irq_max; i++) {
15535 struct tg3_napi *tnapi = &tp->napi[i];
15538 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15540 tnapi->int_mbox = intmbx;
15546 tnapi->consmbox = rcvmbx;
15547 tnapi->prodmbox = sndmbx;
15550 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15552 tnapi->coal_now = HOSTCC_MODE_NOW;
15554 if (!tg3_flag(tp, SUPPORT_MSIX))
15558 * If we support MSIX, we'll be using RSS. If we're using
15559 * RSS, the first vector only handles link interrupts and the
15560 * remaining vectors handle rx and tx interrupts. Reuse the
15561 * mailbox values for the next iteration. The values we setup
15562 * above are still useful for the single vectored mode.
15577 pci_set_drvdata(pdev, dev);
15579 if (tg3_flag(tp, 5717_PLUS)) {
15580 /* Resume a low-power mode */
15581 tg3_frob_aux_power(tp, false);
15584 err = register_netdev(dev);
15586 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15587 goto err_out_apeunmap;
15590 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15591 tp->board_part_number,
15592 tp->pci_chip_rev_id,
15593 tg3_bus_string(tp, str),
15596 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15597 struct phy_device *phydev;
15598 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15600 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15601 phydev->drv->name, dev_name(&phydev->dev));
15605 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15606 ethtype = "10/100Base-TX";
15607 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15608 ethtype = "1000Base-SX";
15610 ethtype = "10/100/1000Base-T";
15612 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15613 "(WireSpeed[%d], EEE[%d])\n",
15614 tg3_phy_string(tp), ethtype,
15615 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15616 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15619 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15620 (dev->features & NETIF_F_RXCSUM) != 0,
15621 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15622 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15623 tg3_flag(tp, ENABLE_ASF) != 0,
15624 tg3_flag(tp, TSO_CAPABLE) != 0);
15625 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15627 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15628 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15630 pci_save_state(pdev);
15636 iounmap(tp->aperegs);
15637 tp->aperegs = NULL;
15649 err_out_power_down:
15650 pci_set_power_state(pdev, PCI_D3hot);
15653 pci_release_regions(pdev);
15655 err_out_disable_pdev:
15656 pci_disable_device(pdev);
15657 pci_set_drvdata(pdev, NULL);
15661 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15663 struct net_device *dev = pci_get_drvdata(pdev);
15666 struct tg3 *tp = netdev_priv(dev);
15669 release_firmware(tp->fw);
15671 cancel_work_sync(&tp->reset_task);
15673 if (!tg3_flag(tp, USE_PHYLIB)) {
15678 unregister_netdev(dev);
15680 iounmap(tp->aperegs);
15681 tp->aperegs = NULL;
15688 pci_release_regions(pdev);
15689 pci_disable_device(pdev);
15690 pci_set_drvdata(pdev, NULL);
15694 #ifdef CONFIG_PM_SLEEP
15695 static int tg3_suspend(struct device *device)
15697 struct pci_dev *pdev = to_pci_dev(device);
15698 struct net_device *dev = pci_get_drvdata(pdev);
15699 struct tg3 *tp = netdev_priv(dev);
15702 if (!netif_running(dev))
15705 flush_work_sync(&tp->reset_task);
15707 tg3_netif_stop(tp);
15709 del_timer_sync(&tp->timer);
15711 tg3_full_lock(tp, 1);
15712 tg3_disable_ints(tp);
15713 tg3_full_unlock(tp);
15715 netif_device_detach(dev);
15717 tg3_full_lock(tp, 0);
15718 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15719 tg3_flag_clear(tp, INIT_COMPLETE);
15720 tg3_full_unlock(tp);
15722 err = tg3_power_down_prepare(tp);
15726 tg3_full_lock(tp, 0);
15728 tg3_flag_set(tp, INIT_COMPLETE);
15729 err2 = tg3_restart_hw(tp, 1);
15733 tp->timer.expires = jiffies + tp->timer_offset;
15734 add_timer(&tp->timer);
15736 netif_device_attach(dev);
15737 tg3_netif_start(tp);
15740 tg3_full_unlock(tp);
15749 static int tg3_resume(struct device *device)
15751 struct pci_dev *pdev = to_pci_dev(device);
15752 struct net_device *dev = pci_get_drvdata(pdev);
15753 struct tg3 *tp = netdev_priv(dev);
15756 if (!netif_running(dev))
15759 netif_device_attach(dev);
15761 tg3_full_lock(tp, 0);
15763 tg3_flag_set(tp, INIT_COMPLETE);
15764 err = tg3_restart_hw(tp, 1);
15768 tp->timer.expires = jiffies + tp->timer_offset;
15769 add_timer(&tp->timer);
15771 tg3_netif_start(tp);
15774 tg3_full_unlock(tp);
15782 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15783 #define TG3_PM_OPS (&tg3_pm_ops)
15787 #define TG3_PM_OPS NULL
15789 #endif /* CONFIG_PM_SLEEP */
15792 * tg3_io_error_detected - called when PCI error is detected
15793 * @pdev: Pointer to PCI device
15794 * @state: The current pci connection state
15796 * This function is called after a PCI bus error affecting
15797 * this device has been detected.
15799 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15800 pci_channel_state_t state)
15802 struct net_device *netdev = pci_get_drvdata(pdev);
15803 struct tg3 *tp = netdev_priv(netdev);
15804 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15806 netdev_info(netdev, "PCI I/O error detected\n");
15810 if (!netif_running(netdev))
15815 tg3_netif_stop(tp);
15817 del_timer_sync(&tp->timer);
15818 tg3_flag_clear(tp, RESTART_TIMER);
15820 /* Want to make sure that the reset task doesn't run */
15821 cancel_work_sync(&tp->reset_task);
15822 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15823 tg3_flag_clear(tp, RESTART_TIMER);
15825 netif_device_detach(netdev);
15827 /* Clean up software state, even if MMIO is blocked */
15828 tg3_full_lock(tp, 0);
15829 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15830 tg3_full_unlock(tp);
15833 if (state == pci_channel_io_perm_failure)
15834 err = PCI_ERS_RESULT_DISCONNECT;
15836 pci_disable_device(pdev);
15844 * tg3_io_slot_reset - called after the pci bus has been reset.
15845 * @pdev: Pointer to PCI device
15847 * Restart the card from scratch, as if from a cold-boot.
15848 * At this point, the card has exprienced a hard reset,
15849 * followed by fixups by BIOS, and has its config space
15850 * set up identically to what it was at cold boot.
15852 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15854 struct net_device *netdev = pci_get_drvdata(pdev);
15855 struct tg3 *tp = netdev_priv(netdev);
15856 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15861 if (pci_enable_device(pdev)) {
15862 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15866 pci_set_master(pdev);
15867 pci_restore_state(pdev);
15868 pci_save_state(pdev);
15870 if (!netif_running(netdev)) {
15871 rc = PCI_ERS_RESULT_RECOVERED;
15875 err = tg3_power_up(tp);
15879 rc = PCI_ERS_RESULT_RECOVERED;
15888 * tg3_io_resume - called when traffic can start flowing again.
15889 * @pdev: Pointer to PCI device
15891 * This callback is called when the error recovery driver tells
15892 * us that its OK to resume normal operation.
15894 static void tg3_io_resume(struct pci_dev *pdev)
15896 struct net_device *netdev = pci_get_drvdata(pdev);
15897 struct tg3 *tp = netdev_priv(netdev);
15902 if (!netif_running(netdev))
15905 tg3_full_lock(tp, 0);
15906 tg3_flag_set(tp, INIT_COMPLETE);
15907 err = tg3_restart_hw(tp, 1);
15908 tg3_full_unlock(tp);
15910 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15914 netif_device_attach(netdev);
15916 tp->timer.expires = jiffies + tp->timer_offset;
15917 add_timer(&tp->timer);
15919 tg3_netif_start(tp);
15927 static struct pci_error_handlers tg3_err_handler = {
15928 .error_detected = tg3_io_error_detected,
15929 .slot_reset = tg3_io_slot_reset,
15930 .resume = tg3_io_resume
15933 static struct pci_driver tg3_driver = {
15934 .name = DRV_MODULE_NAME,
15935 .id_table = tg3_pci_tbl,
15936 .probe = tg3_init_one,
15937 .remove = __devexit_p(tg3_remove_one),
15938 .err_handler = &tg3_err_handler,
15939 .driver.pm = TG3_PM_OPS,
15942 static int __init tg3_init(void)
15944 return pci_register_driver(&tg3_driver);
15947 static void __exit tg3_cleanup(void)
15949 pci_unregister_driver(&tg3_driver);
15952 module_init(tg3_init);
15953 module_exit(tg3_cleanup);