/*
 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
 *
 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
 * Copyright (c) a lot of people too. Please respect their work.
 *
 * See MAINTAINERS file for support contact information.
 */
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
102 RTL_GIGA_MAC_VER_01 = 0,
143 RTL_GIGA_MAC_NONE = 0xff,
146 enum rtl_tx_desc_version {
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
157 #define _R(NAME,TD,FW,SZ,B) { \
165 static const struct {
167 enum rtl_tx_desc_version txd_version;
171 } rtl_chip_infos[] = {
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
278 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
283 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
284 { PCI_VENDOR_ID_DLINK, 0x4300,
285 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
286 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
287 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
289 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_LINKSYS, 0x1032,
291 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
293 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
299 static int rx_buf_sz = 16383;
306 MAC0 = 0, /* Ethernet hardware address. */
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
369 enum rtl8110_registers {
375 enum rtl8168_8101_registers {
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
394 #define PFM_EN (1 << 6)
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
416 enum rtl8168_registers {
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
455 enum rtl_register_content {
456 /* InterruptStatusBits */
460 TxDescUnavail = 0x0080,
484 /* TXPoll register p.5 */
485 HPQ = 0x80, /* Poll cmd on the high prio queue */
486 NPQ = 0x40, /* Poll cmd on the low prio queue */
487 FSWInt = 0x01, /* Forced software interrupt */
491 Cfg9346_Unlock = 0xc0,
496 AcceptBroadcast = 0x08,
497 AcceptMulticast = 0x04,
499 AcceptAllPhys = 0x01,
500 #define RX_CONFIG_ACCEPT_MASK 0x3f
503 TxInterFrameGapShift = 24,
504 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
506 /* Config1 register p.24 */
509 Speed_down = (1 << 4),
513 PMEnable = (1 << 0), /* Power Management Enable */
515 /* Config2 register p. 25 */
516 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
517 PCI_Clock_66MHz = 0x01,
518 PCI_Clock_33MHz = 0x00,
520 /* Config3 register p.25 */
521 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
522 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
523 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
524 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
526 /* Config4 register */
527 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
529 /* Config5 register p.27 */
530 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
531 MWF = (1 << 5), /* Accept Multicast wakeup frame */
532 UWF = (1 << 4), /* Accept Unicast wakeup frame */
534 LanWake = (1 << 1), /* LanWake enable/disable */
535 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 TBIReset = 0x80000000,
539 TBILoopback = 0x40000000,
540 TBINwEnable = 0x20000000,
541 TBINwRestart = 0x10000000,
542 TBILinkOk = 0x02000000,
543 TBINwComplete = 0x01000000,
546 EnableBist = (1 << 15), // 8168 8101
547 Mac_dbgo_oe = (1 << 14), // 8168 8101
548 Normal_mode = (1 << 13), // unused
549 Force_half_dup = (1 << 12), // 8168 8101
550 Force_rxflow_en = (1 << 11), // 8168 8101
551 Force_txflow_en = (1 << 10), // 8168 8101
552 Cxpl_dbg_sel = (1 << 9), // 8168 8101
553 ASF = (1 << 8), // 8168 8101
554 PktCntrDisable = (1 << 7), // 8168 8101
555 Mac_dbgo_sel = 0x001c, // 8168
560 INTT_0 = 0x0000, // 8168
561 INTT_1 = 0x0001, // 8168
562 INTT_2 = 0x0002, // 8168
563 INTT_3 = 0x0003, // 8168
565 /* rtl8169_PHYstatus */
576 TBILinkOK = 0x02000000,
578 /* DumpCounterCommand */
583 /* First doubleword. */
584 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
585 RingEnd = (1 << 30), /* End of descriptor ring */
586 FirstFrag = (1 << 29), /* First segment of a packet */
587 LastFrag = (1 << 28), /* Final segment of a packet */
591 enum rtl_tx_desc_bit {
592 /* First doubleword. */
593 TD_LSO = (1 << 27), /* Large Send Offload */
594 #define TD_MSS_MAX 0x07ffu /* MSS value */
596 /* Second doubleword. */
597 TxVlanTag = (1 << 17), /* Add VLAN tag */
600 /* 8169, 8168b and 810x except 8102e. */
601 enum rtl_tx_desc_bit_0 {
602 /* First doubleword. */
603 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
604 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
605 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
606 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
609 /* 8102e, 8168c and beyond. */
610 enum rtl_tx_desc_bit_1 {
611 /* Second doubleword. */
612 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
613 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
614 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
615 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
618 static const struct rtl_tx_desc_info {
625 } tx_desc_info [] = {
628 .udp = TD0_IP_CS | TD0_UDP_CS,
629 .tcp = TD0_IP_CS | TD0_TCP_CS
631 .mss_shift = TD0_MSS_SHIFT,
636 .udp = TD1_IP_CS | TD1_UDP_CS,
637 .tcp = TD1_IP_CS | TD1_TCP_CS
639 .mss_shift = TD1_MSS_SHIFT,
644 enum rtl_rx_desc_bit {
646 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
647 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
649 #define RxProtoUDP (PID1)
650 #define RxProtoTCP (PID0)
651 #define RxProtoIP (PID1 | PID0)
652 #define RxProtoMask RxProtoIP
654 IPFail = (1 << 16), /* IP checksum failed */
655 UDPFail = (1 << 15), /* UDP/IP checksum failed */
656 TCPFail = (1 << 14), /* TCP/IP checksum failed */
657 RxVlanTag = (1 << 16), /* VLAN tag available */
660 #define RsvdMask 0x3fffc000
677 u8 __pad[sizeof(void *) - sizeof(u32)];
681 RTL_FEATURE_WOL = (1 << 0),
682 RTL_FEATURE_MSI = (1 << 1),
683 RTL_FEATURE_GMII = (1 << 2),
686 struct rtl8169_counters {
693 __le32 tx_one_collision;
694 __le32 tx_multi_collision;
703 RTL_FLAG_TASK_ENABLED,
704 RTL_FLAG_TASK_SLOW_PENDING,
705 RTL_FLAG_TASK_RESET_PENDING,
706 RTL_FLAG_TASK_PHY_PENDING,
710 struct rtl8169_stats {
713 struct u64_stats_sync syncp;
716 struct rtl8169_private {
717 void __iomem *mmio_addr; /* memory map physical address */
718 struct pci_dev *pci_dev;
719 struct net_device *dev;
720 struct napi_struct napi;
724 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
725 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
728 struct rtl8169_stats rx_stats;
729 struct rtl8169_stats tx_stats;
730 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
731 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
732 dma_addr_t TxPhyAddr;
733 dma_addr_t RxPhyAddr;
734 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
735 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
736 struct timer_list timer;
742 void (*write)(struct rtl8169_private *, int, int);
743 int (*read)(struct rtl8169_private *, int);
746 struct pll_power_ops {
747 void (*down)(struct rtl8169_private *);
748 void (*up)(struct rtl8169_private *);
752 void (*enable)(struct rtl8169_private *);
753 void (*disable)(struct rtl8169_private *);
757 void (*write)(struct rtl8169_private *, int, int);
758 u32 (*read)(struct rtl8169_private *, int);
761 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
762 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
763 void (*phy_reset_enable)(struct rtl8169_private *tp);
764 void (*hw_start)(struct net_device *);
765 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
766 unsigned int (*link_ok)(void __iomem *);
767 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
770 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
772 struct work_struct work;
777 struct mii_if_info mii;
778 struct rtl8169_counters counters;
783 const struct firmware *fw;
785 #define RTL_VER_SIZE 32
787 char version[RTL_VER_SIZE];
789 struct rtl_fw_phy_action {
794 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
799 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
800 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
801 module_param(use_dac, int, 0);
802 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
803 module_param_named(debug, debug.msg_enable, int, 0);
804 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
805 MODULE_LICENSE("GPL");
806 MODULE_VERSION(RTL8169_VERSION);
807 MODULE_FIRMWARE(FIRMWARE_8168D_1);
808 MODULE_FIRMWARE(FIRMWARE_8168D_2);
809 MODULE_FIRMWARE(FIRMWARE_8168E_1);
810 MODULE_FIRMWARE(FIRMWARE_8168E_2);
811 MODULE_FIRMWARE(FIRMWARE_8168E_3);
812 MODULE_FIRMWARE(FIRMWARE_8105E_1);
813 MODULE_FIRMWARE(FIRMWARE_8168F_1);
814 MODULE_FIRMWARE(FIRMWARE_8168F_2);
815 MODULE_FIRMWARE(FIRMWARE_8402_1);
816 MODULE_FIRMWARE(FIRMWARE_8411_1);
817 MODULE_FIRMWARE(FIRMWARE_8106E_1);
818 MODULE_FIRMWARE(FIRMWARE_8168G_1);
820 static void rtl_lock_work(struct rtl8169_private *tp)
822 mutex_lock(&tp->wk.mutex);
825 static void rtl_unlock_work(struct rtl8169_private *tp)
827 mutex_unlock(&tp->wk.mutex);
830 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
832 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
833 PCI_EXP_DEVCTL_READRQ, force);
837 bool (*check)(struct rtl8169_private *);
841 static void rtl_udelay(unsigned int d)
846 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
847 void (*delay)(unsigned int), unsigned int d, int n,
852 for (i = 0; i < n; i++) {
854 if (c->check(tp) == high)
857 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
858 c->msg, !high, n, d);
862 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
863 const struct rtl_cond *c,
864 unsigned int d, int n)
866 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
869 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
870 const struct rtl_cond *c,
871 unsigned int d, int n)
873 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
876 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
877 const struct rtl_cond *c,
878 unsigned int d, int n)
880 return rtl_loop_wait(tp, c, msleep, d, n, true);
883 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
884 const struct rtl_cond *c,
885 unsigned int d, int n)
887 return rtl_loop_wait(tp, c, msleep, d, n, false);
/*
 * Declare a named polling condition: forward-declare the check function,
 * bundle it with its stringified name (used by rtl_loop_wait() diagnostics
 * via c->msg) in a const struct rtl_cond, then open the check function's
 * definition — the user supplies the body right after the macro.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
900 DECLARE_RTL_COND(rtl_ocpar_cond)
902 void __iomem *ioaddr = tp->mmio_addr;
904 return RTL_R32(OCPAR) & OCPAR_FLAG;
907 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
909 void __iomem *ioaddr = tp->mmio_addr;
911 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
913 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
917 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
919 void __iomem *ioaddr = tp->mmio_addr;
921 RTL_W32(OCPDR, data);
922 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
924 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
927 DECLARE_RTL_COND(rtl_eriar_cond)
929 void __iomem *ioaddr = tp->mmio_addr;
931 return RTL_R32(ERIAR) & ERIAR_FLAG;
934 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
936 void __iomem *ioaddr = tp->mmio_addr;
939 RTL_W32(ERIAR, 0x800010e8);
942 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
945 ocp_write(tp, 0x1, 0x30, 0x00000001);
948 #define OOB_CMD_RESET 0x00
949 #define OOB_CMD_DRIVER_START 0x05
950 #define OOB_CMD_DRIVER_STOP 0x06
952 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
954 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
957 DECLARE_RTL_COND(rtl_ocp_read_cond)
961 reg = rtl8168_get_ocp_reg(tp);
963 return ocp_read(tp, 0x0f, reg) & 0x00000800;
966 static void rtl8168_driver_start(struct rtl8169_private *tp)
968 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
970 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
973 static void rtl8168_driver_stop(struct rtl8169_private *tp)
975 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
977 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
980 static int r8168dp_check_dash(struct rtl8169_private *tp)
982 u16 reg = rtl8168_get_ocp_reg(tp);
984 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
987 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
989 if (reg & 0xffff0001) {
990 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
996 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
998 void __iomem *ioaddr = tp->mmio_addr;
1000 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
1003 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1005 void __iomem *ioaddr = tp->mmio_addr;
1007 if (rtl_ocp_reg_failure(tp, reg))
1010 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1012 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
1015 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1017 void __iomem *ioaddr = tp->mmio_addr;
1019 if (rtl_ocp_reg_failure(tp, reg))
1022 RTL_W32(GPHY_OCP, reg << 15);
1024 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1025 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1028 static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1032 val = r8168_phy_ocp_read(tp, reg);
1033 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1036 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1038 void __iomem *ioaddr = tp->mmio_addr;
1040 if (rtl_ocp_reg_failure(tp, reg))
1043 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
1046 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1048 void __iomem *ioaddr = tp->mmio_addr;
1050 if (rtl_ocp_reg_failure(tp, reg))
1053 RTL_W32(OCPDR, reg << 15);
1055 return RTL_R32(OCPDR);
1058 #define OCP_STD_PHY_BASE 0xa400
1060 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1063 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1067 if (tp->ocp_base != OCP_STD_PHY_BASE)
1070 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1073 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1075 if (tp->ocp_base != OCP_STD_PHY_BASE)
1078 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1081 DECLARE_RTL_COND(rtl_phyar_cond)
1083 void __iomem *ioaddr = tp->mmio_addr;
1085 return RTL_R32(PHYAR) & 0x80000000;
1088 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1090 void __iomem *ioaddr = tp->mmio_addr;
1092 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1094 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1096 * According to hardware specs a 20us delay is required after write
1097 * complete indication, but before sending next command.
1102 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1104 void __iomem *ioaddr = tp->mmio_addr;
1107 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1109 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1110 RTL_R32(PHYAR) & 0xffff : ~0;
1113 * According to hardware specs a 20us delay is required after read
1114 * complete indication, but before sending next command.
1121 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1123 void __iomem *ioaddr = tp->mmio_addr;
1125 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1126 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1127 RTL_W32(EPHY_RXER_NUM, 0);
1129 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1132 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1134 r8168dp_1_mdio_access(tp, reg,
1135 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1138 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1140 void __iomem *ioaddr = tp->mmio_addr;
1142 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1145 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1146 RTL_W32(EPHY_RXER_NUM, 0);
1148 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1149 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
1152 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1154 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1156 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1159 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1161 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1164 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1166 void __iomem *ioaddr = tp->mmio_addr;
1168 r8168dp_2_mdio_start(ioaddr);
1170 r8169_mdio_write(tp, reg, value);
1172 r8168dp_2_mdio_stop(ioaddr);
1175 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1177 void __iomem *ioaddr = tp->mmio_addr;
1180 r8168dp_2_mdio_start(ioaddr);
1182 value = r8169_mdio_read(tp, reg);
1184 r8168dp_2_mdio_stop(ioaddr);
1189 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1191 tp->mdio_ops.write(tp, location, val);
1194 static int rtl_readphy(struct rtl8169_private *tp, int location)
1196 return tp->mdio_ops.read(tp, location);
/* OR @value into a PHY register (read-modify-write, set only). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}
/* Read-modify-write a PHY register: set bits @p, clear bits @m. */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val;

	val = rtl_readphy(tp, reg_addr);
	rtl_writephy(tp, reg_addr, (val | p) & ~m);
}
/* mii_if_info write hook; phy_id is unused (single internal PHY). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
/* mii_if_info read hook; phy_id is unused (single internal PHY). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1227 DECLARE_RTL_COND(rtl_ephyar_cond)
1229 void __iomem *ioaddr = tp->mmio_addr;
1231 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
1234 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1236 void __iomem *ioaddr = tp->mmio_addr;
1238 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1239 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1241 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1246 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1248 void __iomem *ioaddr = tp->mmio_addr;
1250 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1252 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1253 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
1256 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1259 void __iomem *ioaddr = tp->mmio_addr;
1261 BUG_ON((addr & 3) || (mask == 0));
1262 RTL_W32(ERIDR, val);
1263 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1265 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1268 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1270 void __iomem *ioaddr = tp->mmio_addr;
1272 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1274 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1275 RTL_R32(ERIDR) : ~0;
1278 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1283 val = rtl_eri_read(tp, addr, type);
1284 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1293 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1294 const struct exgmac_reg *r, int len)
1297 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1302 DECLARE_RTL_COND(rtl_efusear_cond)
1304 void __iomem *ioaddr = tp->mmio_addr;
1306 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1309 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1311 void __iomem *ioaddr = tp->mmio_addr;
1313 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1315 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1316 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1319 static u16 rtl_get_events(struct rtl8169_private *tp)
1321 void __iomem *ioaddr = tp->mmio_addr;
1323 return RTL_R16(IntrStatus);
1326 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1328 void __iomem *ioaddr = tp->mmio_addr;
1330 RTL_W16(IntrStatus, bits);
1334 static void rtl_irq_disable(struct rtl8169_private *tp)
1336 void __iomem *ioaddr = tp->mmio_addr;
1338 RTL_W16(IntrMask, 0);
1342 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1344 void __iomem *ioaddr = tp->mmio_addr;
1346 RTL_W16(IntrMask, bits);
1349 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1350 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1351 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1353 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1355 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1358 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1360 void __iomem *ioaddr = tp->mmio_addr;
1362 rtl_irq_disable(tp);
1363 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
1367 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1369 void __iomem *ioaddr = tp->mmio_addr;
1371 return RTL_R32(TBICSR) & TBIReset;
1374 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1376 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1379 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1381 return RTL_R32(TBICSR) & TBILinkOk;
1384 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1386 return RTL_R8(PHYstatus) & LinkStatus;
1389 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1391 void __iomem *ioaddr = tp->mmio_addr;
1393 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1396 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1400 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1401 rtl_writephy(tp, MII_BMCR, val & 0xffff);
/*
 * Apply chip-specific ERI register fixups that must follow a link
 * state change.  The register values depend on both the MAC variant
 * and the speed currently reported by PHYstatus.
 */
1404 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1406 void __iomem *ioaddr = tp->mmio_addr;
1407 struct net_device *dev = tp->dev;
/* Nothing to patch while the interface is down. */
1409 if (!netif_running(dev))
/* 8168E-VL / 8411: tune 0x1bc/0x1dc per negotiated speed. */
1412 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1413 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1414 if (RTL_R8(PHYstatus) & _1000bpsF) {
1415 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1417 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1419 } else if (RTL_R8(PHYstatus) & _100bps) {
1420 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1422 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1425 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1427 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1430 /* Reset packet filter */
1431 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1433 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
/* 8168F family: same registers, only 1000F vs everything else. */
1435 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1436 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1437 if (RTL_R8(PHYstatus) & _1000bpsF) {
1438 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1440 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1443 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1445 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
/* 8402: special handling only for 10Mbps links. */
1448 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1449 if (RTL_R8(PHYstatus) & _10bps) {
1450 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1452 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1455 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
/*
 * Sync the netdev carrier state with the PHY link state and log the
 * transition.  @pm: when true, runtime PM is nudged as well
 * (resume on link-up, delayed suspend on link-down) -- NOTE(review):
 * the conditional use of @pm falls on lines not visible here; confirm
 * against the full source.
 */
1461 static void __rtl8169_check_link_status(struct net_device *dev,
1462 struct rtl8169_private *tp,
1463 void __iomem *ioaddr, bool pm)
1465 if (tp->link_ok(ioaddr)) {
/* Chip-specific ERI tweaks must follow every link change. */
1466 rtl_link_chg_patch(tp);
1467 /* This is to cancel a scheduled suspend if there's one. */
1469 pm_request_resume(&tp->pci_dev->dev);
1470 netif_carrier_on(dev);
1471 if (net_ratelimit())
1472 netif_info(tp, ifup, dev, "link up\n");
1474 netif_carrier_off(dev);
1475 netif_info(tp, ifdown, dev, "link down\n");
/* Give the link 5 s to come back before runtime-suspending. */
1477 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1481 static void rtl8169_check_link_status(struct net_device *dev,
1482 struct rtl8169_private *tp,
1483 void __iomem *ioaddr)
1485 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1488 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
/*
 * Read the chip's Wake-on-LAN configuration back into WAKE_* flags.
 * Returns 0 when PME is not enabled in Config1 (no wake capability).
 */
1490 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1492 void __iomem *ioaddr = tp->mmio_addr;
1496 options = RTL_R8(Config1);
1497 if (!(options & PMEnable))
/* Config3 holds the link-change and magic-packet wake bits. */
1500 options = RTL_R8(Config3);
1501 if (options & LinkUp)
1502 wolopts |= WAKE_PHY;
1503 if (options & MagicPacket)
1504 wolopts |= WAKE_MAGIC;
/* Config5 holds the unicast/broadcast/multicast frame wake bits. */
1506 options = RTL_R8(Config5);
1508 wolopts |= WAKE_UCAST;
1510 wolopts |= WAKE_BCAST;
1512 wolopts |= WAKE_MCAST;
/* ethtool .get_wol: report supported and currently enabled WoL modes. */
1517 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1519 struct rtl8169_private *tp = netdev_priv(dev);
1523 wol->supported = WAKE_ANY;
1524 wol->wolopts = __rtl8169_get_wol(tp);
1526 rtl_unlock_work(tp);
/*
 * Program the chip's wake-up sources from WAKE_* flags.  The Config
 * registers are write-protected, hence the Cfg9346 unlock/lock pair.
 */
1529 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1531 void __iomem *ioaddr = tp->mmio_addr;
/* Table mapping each WAKE_* option to its config register and bit. */
1533 static const struct {
1538 { WAKE_PHY, Config3, LinkUp },
1539 { WAKE_MAGIC, Config3, MagicPacket },
1540 { WAKE_UCAST, Config5, UWF },
1541 { WAKE_BCAST, Config5, BWF },
1542 { WAKE_MCAST, Config5, MWF },
1543 { WAKE_ANY, Config5, LanWake }
1547 RTL_W8(Cfg9346, Cfg9346_Unlock);
/* Set or clear each wake bit according to the requested options. */
1549 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1550 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1551 if (wolopts & cfg[i].opt)
1552 options |= cfg[i].mask;
1553 RTL_W8(cfg[i].reg, options);
/* The PME enable bit lives in different registers per generation. */
1556 switch (tp->mac_version) {
1557 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1558 options = RTL_R8(Config1) & ~PMEnable;
1560 options |= PMEnable;
1561 RTL_W8(Config1, options);
1564 options = RTL_R8(Config2) & ~PME_SIGNAL;
1566 options |= PME_SIGNAL;
1567 RTL_W8(Config2, options);
1571 RTL_W8(Cfg9346, Cfg9346_Lock);
/* ethtool .set_wol: record the WoL feature flag and program the chip. */
1574 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1576 struct rtl8169_private *tp = netdev_priv(dev);
1581 tp->features |= RTL_FEATURE_WOL;
1583 tp->features &= ~RTL_FEATURE_WOL;
1584 __rtl8169_set_wol(tp, wol->wolopts);
1586 rtl_unlock_work(tp);
/* Tell the PM core whether this device may wake the system. */
1588 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1593 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1595 return rtl_chip_infos[tp->mac_version].fw_name;
/* ethtool .get_drvinfo: driver name/version, bus id, firmware revision. */
1598 static void rtl8169_get_drvinfo(struct net_device *dev,
1599 struct ethtool_drvinfo *info)
1601 struct rtl8169_private *tp = netdev_priv(dev);
1602 struct rtl_fw *rtl_fw = tp->rtl_fw;
1604 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1605 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1606 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1607 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
/* tp->rtl_fw may hold an ERR_PTR sentinel while no firmware is loaded. */
1608 if (!IS_ERR_OR_NULL(rtl_fw))
1609 strlcpy(info->fw_version, rtl_fw->version,
1610 sizeof(info->fw_version));
1613 static int rtl8169_get_regs_len(struct net_device *dev)
1615 return R8169_REGS_SIZE;
/*
 * Speed setup for TBI (fiber) chips: only autonegotiation or forced
 * 1000/full are meaningful; any other request is refused with a
 * warning.  @ignored keeps the signature shared with the XMII path.
 */
1618 static int rtl8169_set_speed_tbi(struct net_device *dev,
1619 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1621 struct rtl8169_private *tp = netdev_priv(dev);
1622 void __iomem *ioaddr = tp->mmio_addr;
1626 reg = RTL_R32(TBICSR);
1627 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1628 (duplex == DUPLEX_FULL)) {
1629 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1630 } else if (autoneg == AUTONEG_ENABLE)
1631 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1633 netif_warn(tp, link, dev,
1634 "incorrect speed setting refused in TBI mode\n");
/*
 * Speed setup for copper (XMII) chips via the MII registers.
 * Supports autonegotiation with an advertisement mask (@adv), or a
 * forced 10/100 speed/duplex.  Returns 0 on success, negative errno
 * on an invalid request.
 */
1641 static int rtl8169_set_speed_xmii(struct net_device *dev,
1642 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1644 struct rtl8169_private *tp = netdev_priv(dev);
1645 int giga_ctrl, bmcr;
/* Select PHY page 0 before touching the standard MII registers. */
1648 rtl_writephy(tp, 0x1f, 0x0000);
1650 if (autoneg == AUTONEG_ENABLE) {
/* Build the 10/100 advertisement word from the requested modes. */
1653 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1654 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1655 ADVERTISE_100HALF | ADVERTISE_100FULL);
1657 if (adv & ADVERTISED_10baseT_Half)
1658 auto_nego |= ADVERTISE_10HALF;
1659 if (adv & ADVERTISED_10baseT_Full)
1660 auto_nego |= ADVERTISE_10FULL;
1661 if (adv & ADVERTISED_100baseT_Half)
1662 auto_nego |= ADVERTISE_100HALF;
1663 if (adv & ADVERTISED_100baseT_Full)
1664 auto_nego |= ADVERTISE_100FULL;
1666 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
/* Gigabit advertisement lives in MII_CTRL1000. */
1668 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1669 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1671 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1672 if (tp->mii.supports_gmii) {
1673 if (adv & ADVERTISED_1000baseT_Half)
1674 giga_ctrl |= ADVERTISE_1000HALF;
1675 if (adv & ADVERTISED_1000baseT_Full)
1676 giga_ctrl |= ADVERTISE_1000FULL;
1677 } else if (adv & (ADVERTISED_1000baseT_Half |
1678 ADVERTISED_1000baseT_Full)) {
1679 netif_info(tp, link, dev,
1680 "PHY does not support 1000Mbps\n");
1684 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1686 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1687 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
/* Forced-speed path: build BMCR directly. */
1691 if (speed == SPEED_10)
1693 else if (speed == SPEED_100)
1694 bmcr = BMCR_SPEED100;
1698 if (duplex == DUPLEX_FULL)
1699 bmcr |= BMCR_FULLDPLX;
1702 rtl_writephy(tp, MII_BMCR, bmcr);
/* 8169s/8110s quirk: extra vendor PHY writes for forced 100Mbps. */
1704 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1705 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1706 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1707 rtl_writephy(tp, 0x17, 0x2138);
1708 rtl_writephy(tp, 0x0e, 0x0260);
1710 rtl_writephy(tp, 0x17, 0x2108);
1711 rtl_writephy(tp, 0x0e, 0x0000);
/*
 * Common speed-setting entry point; dispatches to the TBI or XMII
 * handler via the tp->set_speed function pointer.
 */
1720 static int rtl8169_set_speed(struct net_device *dev,
1721 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1723 struct rtl8169_private *tp = netdev_priv(dev);
1726 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
/* Re-arm the PHY watchdog timer when 1000FULL is being negotiated. */
1730 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1731 (advertising & ADVERTISED_1000baseT_Full)) {
1732 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
/* ethtool .set_settings: stop the PHY timer, then apply the request. */
1738 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1740 struct rtl8169_private *tp = netdev_priv(dev);
1743 del_timer_sync(&tp->timer);
1746 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1747 cmd->duplex, cmd->advertising);
1748 rtl_unlock_work(tp);
/*
 * .ndo_fix_features: drop features the hardware cannot honour at the
 * current MTU -- no TSO above TD_MSS_MAX, and no Tx checksum on
 * jumbo frames unless the chip variant supports it.
 */
1753 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1754 netdev_features_t features)
1756 struct rtl8169_private *tp = netdev_priv(dev);
1758 if (dev->mtu > TD_MSS_MAX)
1759 features &= ~NETIF_F_ALL_TSO;
1761 if (dev->mtu > JUMBO_1K &&
1762 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1763 features &= ~NETIF_F_IP_CSUM;
1768 static void __rtl8169_set_features(struct net_device *dev,
1769 netdev_features_t features)
1771 struct rtl8169_private *tp = netdev_priv(dev);
1772 netdev_features_t changed = features ^ dev->features;
1773 void __iomem *ioaddr = tp->mmio_addr;
1775 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1778 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1779 if (features & NETIF_F_RXCSUM)
1780 tp->cp_cmd |= RxChkSum;
1782 tp->cp_cmd &= ~RxChkSum;
1784 if (dev->features & NETIF_F_HW_VLAN_RX)
1785 tp->cp_cmd |= RxVlan;
1787 tp->cp_cmd &= ~RxVlan;
1789 RTL_W16(CPlusCmd, tp->cp_cmd);
1792 if (changed & NETIF_F_RXALL) {
1793 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1794 if (features & NETIF_F_RXALL)
1795 tmp |= (AcceptErr | AcceptRunt);
1796 RTL_W32(RxConfig, tmp);
/* .ndo_set_features: serialized wrapper around __rtl8169_set_features. */
1800 static int rtl8169_set_features(struct net_device *dev,
1801 netdev_features_t features)
1803 struct rtl8169_private *tp = netdev_priv(dev);
1806 __rtl8169_set_features(dev, features);
1807 rtl_unlock_work(tp);
1813 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1815 return (vlan_tx_tag_present(skb)) ?
1816 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1819 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1821 u32 opts2 = le32_to_cpu(desc->opts2);
1823 if (opts2 & RxVlanTag)
1824 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
/* ethtool get_settings for TBI (fiber) chips: always 1000/full. */
1827 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1829 struct rtl8169_private *tp = netdev_priv(dev);
1830 void __iomem *ioaddr = tp->mmio_addr;
1834 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1835 cmd->port = PORT_FIBRE;
1836 cmd->transceiver = XCVR_INTERNAL;
/* Autoneg state is reflected by the TBINwEnable bit in TBICSR. */
1838 status = RTL_R32(TBICSR);
1839 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1840 cmd->autoneg = !!(status & TBINwEnable);
1842 ethtool_cmd_speed_set(cmd, SPEED_1000);
1843 cmd->duplex = DUPLEX_FULL; /* Always set */
1848 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1850 struct rtl8169_private *tp = netdev_priv(dev);
1852 return mii_ethtool_gset(&tp->mii, cmd);
/* ethtool .get_settings: dispatch to the TBI or XMII handler. */
1855 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1857 struct rtl8169_private *tp = netdev_priv(dev);
1861 rc = tp->get_settings(dev, cmd);
1862 rtl_unlock_work(tp);
/* ethtool .get_regs: snapshot the MMIO register window into @p. */
1867 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1870 struct rtl8169_private *tp = netdev_priv(dev);
/* Clamp the request to the actual register window size. */
1872 if (regs->len > R8169_REGS_SIZE)
1873 regs->len = R8169_REGS_SIZE;
1876 memcpy_fromio(p, tp->mmio_addr, regs->len);
1877 rtl_unlock_work(tp);
1880 static u32 rtl8169_get_msglevel(struct net_device *dev)
1882 struct rtl8169_private *tp = netdev_priv(dev);
1884 return tp->msg_enable;
1887 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1889 struct rtl8169_private *tp = netdev_priv(dev);
1891 tp->msg_enable = value;
/* Statistic names; order must match rtl8169_get_ethtool_stats(). */
1894 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1901 "tx_single_collisions",
1902 "tx_multi_collisions",
/* ethtool .get_sset_count: number of statistic strings exported. */
1910 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1914 return ARRAY_SIZE(rtl8169_gstrings);
1920 DECLARE_RTL_COND(rtl_counters_cond)
1922 void __iomem *ioaddr = tp->mmio_addr;
1924 return RTL_R32(CounterAddrLow) & CounterDump;
/*
 * Ask the chip to DMA its hardware tally counters into a temporary
 * coherent buffer and cache the result in tp->counters.
 */
1927 static void rtl8169_update_counters(struct net_device *dev)
1929 struct rtl8169_private *tp = netdev_priv(dev);
1930 void __iomem *ioaddr = tp->mmio_addr;
1931 struct device *d = &tp->pci_dev->dev;
1932 struct rtl8169_counters *counters;
1937 * Some chips are unable to dump tally counters when the receiver
1940 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1943 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
/* Hand the chip the buffer address and trigger the dump. */
1947 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1948 cmd = (u64)paddr & DMA_BIT_MASK(32);
1949 RTL_W32(CounterAddrLow, cmd);
1950 RTL_W32(CounterAddrLow, cmd | CounterDump);
/* CounterDump self-clears when the DMA completes; cache on success. */
1952 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1953 memcpy(&tp->counters, counters, sizeof(*counters));
1955 RTL_W32(CounterAddrLow, 0);
1956 RTL_W32(CounterAddrHigh, 0);
1958 dma_free_coherent(d, sizeof(*counters), counters, paddr);
/*
 * ethtool .get_ethtool_stats: refresh the HW tally counters and
 * export them in the order matching rtl8169_gstrings.
 */
1961 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1962 struct ethtool_stats *stats, u64 *data)
1964 struct rtl8169_private *tp = netdev_priv(dev);
1968 rtl8169_update_counters(dev);
1970 data[0] = le64_to_cpu(tp->counters.tx_packets);
1971 data[1] = le64_to_cpu(tp->counters.rx_packets);
1972 data[2] = le64_to_cpu(tp->counters.tx_errors);
1973 data[3] = le32_to_cpu(tp->counters.rx_errors);
1974 data[4] = le16_to_cpu(tp->counters.rx_missed);
1975 data[5] = le16_to_cpu(tp->counters.align_errors);
1976 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1977 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1978 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1979 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1980 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1981 data[11] = le16_to_cpu(tp->counters.tx_aborted);
/* "tx_underun" matches the (misspelled) hardware counter field name. */
1982 data[12] = le16_to_cpu(tp->counters.tx_underun);
/* ethtool .get_strings: copy out the statistics name table. */
1985 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1989 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
/* ethtool entry points exported by this driver. */
1994 static const struct ethtool_ops rtl8169_ethtool_ops = {
1995 .get_drvinfo = rtl8169_get_drvinfo,
1996 .get_regs_len = rtl8169_get_regs_len,
1997 .get_link = ethtool_op_get_link,
1998 .get_settings = rtl8169_get_settings,
1999 .set_settings = rtl8169_set_settings,
2000 .get_msglevel = rtl8169_get_msglevel,
2001 .set_msglevel = rtl8169_set_msglevel,
2002 .get_regs = rtl8169_get_regs,
2003 .get_wol = rtl8169_get_wol,
2004 .set_wol = rtl8169_set_wol,
2005 .get_strings = rtl8169_get_strings,
2006 .get_sset_count = rtl8169_get_sset_count,
2007 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2008 .get_ts_info = ethtool_op_get_ts_info,
/*
 * Identify the chip variant from the TxConfig ID bits and store it in
 * tp->mac_version.  Unknown IDs fall back to @default_version (a
 * family default chosen by the probe code).
 */
2011 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2012 struct net_device *dev, u8 default_version)
2014 void __iomem *ioaddr = tp->mmio_addr;
2016 * The driver currently handles the 8168Bf and the 8168Be identically
2017 * but they can be identified more specifically through the test below
2020 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2022 * Same thing for the 8101Eb and the 8101Ec:
2024 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2026 static const struct rtl_mac_info {
2032 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2033 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2036 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2037 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2038 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
2041 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2042 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2043 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2044 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
2047 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2048 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2049 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2051 /* 8168DP family. */
2052 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2053 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2054 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
2057 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2058 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2059 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2060 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2061 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2062 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2063 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2064 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2065 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
2068 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2069 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2070 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2071 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
2074 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2075 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2076 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2077 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2078 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2079 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2080 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2081 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2082 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2083 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2084 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2085 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2086 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2087 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2088 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2089 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2090 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2091 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2092 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2093 /* FIXME: where did these entries come from ? -- FR */
2094 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2095 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
2098 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2099 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2100 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2101 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2102 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2103 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
2106 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
/* First match wins: the catch-all NONE entry terminates the scan. */
2108 const struct rtl_mac_info *p = mac_info;
2111 reg = RTL_R32(TxConfig);
2112 while ((reg & p->mask) != p->val)
2114 tp->mac_version = p->mac_version;
2116 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2117 netif_notice(tp, probe, dev,
2118 "unknown MAC, using family default\n");
2119 tp->mac_version = default_version;
2123 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2125 dprintk("mac_version = 0x%02x\n", tp->mac_version);
2133 static void rtl_writephy_batch(struct rtl8169_private *tp,
2134 const struct phy_reg *regs, int len)
2137 rtl_writephy(tp, regs->reg, regs->val);
2142 #define PHY_READ 0x00000000
2143 #define PHY_DATA_OR 0x10000000
2144 #define PHY_DATA_AND 0x20000000
2145 #define PHY_BJMPN 0x30000000
2146 #define PHY_READ_EFUSE 0x40000000
2147 #define PHY_READ_MAC_BYTE 0x50000000
2148 #define PHY_WRITE_MAC_BYTE 0x60000000
2149 #define PHY_CLEAR_READCOUNT 0x70000000
2150 #define PHY_WRITE 0x80000000
2151 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2152 #define PHY_COMP_EQ_SKIPN 0xa0000000
2153 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2154 #define PHY_WRITE_PREVIOUS 0xc0000000
2155 #define PHY_SKIPN 0xd0000000
2156 #define PHY_DELAY_MS 0xe0000000
2157 #define PHY_WRITE_ERI_WORD 0xf0000000
2161 char version[RTL_VER_SIZE];
2167 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
/*
 * Validate a loaded firmware image -- either the self-describing
 * format (fw_info header with checksum, version string and an offset/
 * length for the opcode array) or a legacy bare opcode blob -- and
 * point rtl_fw->phy_action at the opcode stream.  Returns true when
 * the layout checks pass.
 */
2169 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2171 const struct firmware *fw = rtl_fw->fw;
2172 struct fw_info *fw_info = (struct fw_info *)fw->data;
2173 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2174 char *version = rtl_fw->version;
2177 if (fw->size < FW_OPCODE_SIZE)
/* New format is signalled by a zero magic field. */
2180 if (!fw_info->magic) {
2181 size_t i, size, start;
2184 if (fw->size < sizeof(*fw_info))
/* Whole-file additive checksum must come out right. */
2187 for (i = 0; i < fw->size; i++)
2188 checksum += fw->data[i];
/* Opcode array bounds must lie within the file. */
2192 start = le32_to_cpu(fw_info->fw_start);
2193 if (start > fw->size)
2196 size = le32_to_cpu(fw_info->fw_len);
2197 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2200 memcpy(version, fw_info->version, RTL_VER_SIZE);
2202 pa->code = (__le32 *)(fw->data + start);
/* Legacy format: the file is the raw opcode array itself. */
2205 if (fw->size % FW_OPCODE_SIZE)
2208 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2210 pa->code = (__le32 *)fw->data;
2211 pa->size = fw->size / FW_OPCODE_SIZE;
2213 version[RTL_VER_SIZE - 1] = 0;
/*
 * Static sanity check of the firmware opcode stream: verify that
 * branch/skip targets stay inside the array and that no unimplemented
 * opcode appears.  Returns true when every instruction is acceptable.
 */
2220 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2221 struct rtl_fw_phy_action *pa)
/* Each 32-bit word: 4-bit opcode, 12-bit register index, 16-bit data. */
2226 for (index = 0; index < pa->size; index++) {
2227 u32 action = le32_to_cpu(pa->code[index]);
2228 u32 regno = (action & 0x0fff0000) >> 16;
2230 switch(action & 0xf0000000) {
2234 case PHY_READ_EFUSE:
2235 case PHY_CLEAR_READCOUNT:
2237 case PHY_WRITE_PREVIOUS:
/* Backward jumps must not reach before the start of the stream. */
2242 if (regno > index) {
2243 netif_err(tp, ifup, tp->dev,
2244 "Out of range of firmware\n");
2248 case PHY_READCOUNT_EQ_SKIP:
2249 if (index + 2 >= pa->size) {
2250 netif_err(tp, ifup, tp->dev,
2251 "Out of range of firmware\n");
2255 case PHY_COMP_EQ_SKIPN:
2256 case PHY_COMP_NEQ_SKIPN:
/* Forward skips must land inside the stream. */
2258 if (index + 1 + regno >= pa->size) {
2259 netif_err(tp, ifup, tp->dev,
2260 "Out of range of firmware\n");
/* These opcodes are defined by the format but not implemented here. */
2265 case PHY_READ_MAC_BYTE:
2266 case PHY_WRITE_MAC_BYTE:
2267 case PHY_WRITE_ERI_WORD:
2269 netif_err(tp, ifup, tp->dev,
2270 "Invalid action 0x%08x\n", action);
2279 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2281 struct net_device *dev = tp->dev;
2284 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2285 netif_err(tp, ifup, dev, "invalid firwmare\n");
2289 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
/*
 * Interpreter for the firmware PHY opcode stream: each 32-bit word
 * holds a 4-bit opcode, a 12-bit register index and a 16-bit
 * immediate.  "predata" caches the last PHY/efuse read for the
 * compare and write-previous opcodes; "count" backs the readcount
 * opcodes.
 */
2295 static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2297 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2301 predata = count = 0;
2303 for (index = 0; index < pa->size; ) {
2304 u32 action = le32_to_cpu(pa->code[index]);
2305 u32 data = action & 0x0000ffff;
2306 u32 regno = (action & 0x0fff0000) >> 16;
2311 switch(action & 0xf0000000) {
2313 predata = rtl_readphy(tp, regno);
2328 case PHY_READ_EFUSE:
2329 predata = rtl8168d_efuse_read(tp, regno);
2332 case PHY_CLEAR_READCOUNT:
2337 rtl_writephy(tp, regno, data);
/* Conditional skips adjust the instruction index directly. */
2340 case PHY_READCOUNT_EQ_SKIP:
2341 index += (count == data) ? 2 : 1;
2343 case PHY_COMP_EQ_SKIPN:
2344 if (predata == data)
2348 case PHY_COMP_NEQ_SKIPN:
2349 if (predata != data)
2353 case PHY_WRITE_PREVIOUS:
2354 rtl_writephy(tp, regno, predata);
/* Unimplemented opcodes were already rejected by rtl_fw_data_ok(). */
2365 case PHY_READ_MAC_BYTE:
2366 case PHY_WRITE_MAC_BYTE:
2367 case PHY_WRITE_ERI_WORD:
2374 static void rtl_release_firmware(struct rtl8169_private *tp)
2376 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2377 release_firmware(tp->rtl_fw->fw);
2380 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2383 static void rtl_apply_firmware(struct rtl8169_private *tp)
2385 struct rtl_fw *rtl_fw = tp->rtl_fw;
2387 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2388 if (!IS_ERR_OR_NULL(rtl_fw))
2389 rtl_phy_write_fw(tp, rtl_fw);
2392 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2394 if (rtl_readphy(tp, reg) != val)
2395 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2397 rtl_apply_firmware(tp);
/* Vendor PHY init table for the 8169s. */
2400 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2402 static const struct phy_reg phy_reg_init[] = {
2464 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Vendor PHY init table for the 8169sb. */
2467 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2469 static const struct phy_reg phy_reg_init[] = {
2475 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2478 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2480 struct pci_dev *pdev = tp->pci_dev;
2482 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2483 (pdev->subsystem_device != 0xe000))
2486 rtl_writephy(tp, 0x1f, 0x0001);
2487 rtl_writephy(tp, 0x10, 0xf01b);
2488 rtl_writephy(tp, 0x1f, 0x0000);
/* Vendor PHY init table for the 8169scd, plus the Gigabyte board quirk. */
2491 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2493 static const struct phy_reg phy_reg_init[] = {
2533 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2535 rtl8169scd_hw_phy_config_quirk(tp);
/* Vendor PHY init table for the 8169sce. */
2538 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2540 static const struct phy_reg phy_reg_init[] = {
2588 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Vendor PHY init for the 8168bb: one patched bit plus a write table. */
2591 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2593 static const struct phy_reg phy_reg_init[] = {
2598 rtl_writephy(tp, 0x1f, 0x0001);
2599 rtl_patchphy(tp, 0x16, 1 << 0);
2601 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Vendor PHY init table for the 8168be/bf. */
2604 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2606 static const struct phy_reg phy_reg_init[] = {
2612 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Vendor PHY init table for the 8168cp (variant 1). */
2615 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2617 static const struct phy_reg phy_reg_init[] = {
2625 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Vendor PHY init for the 8168cp (variant 2): patched bits + table. */
2628 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2630 static const struct phy_reg phy_reg_init[] = {
2636 rtl_writephy(tp, 0x1f, 0x0000);
2637 rtl_patchphy(tp, 0x14, 1 << 5);
2638 rtl_patchphy(tp, 0x0d, 1 << 5);
2640 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Vendor PHY init for the 8168c (variant 1). */
2643 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2645 static const struct phy_reg phy_reg_init[] = {
2665 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2667 rtl_patchphy(tp, 0x14, 1 << 5);
2668 rtl_patchphy(tp, 0x0d, 1 << 5);
2669 rtl_writephy(tp, 0x1f, 0x0000);
/* Vendor PHY init for the 8168c (variant 2). */
2672 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2674 static const struct phy_reg phy_reg_init[] = {
2692 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2694 rtl_patchphy(tp, 0x16, 1 << 0);
2695 rtl_patchphy(tp, 0x14, 1 << 5);
2696 rtl_patchphy(tp, 0x0d, 1 << 5);
2697 rtl_writephy(tp, 0x1f, 0x0000);
/* Vendor PHY init for the 8168c (variant 3); variant 4 reuses this. */
2700 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2702 static const struct phy_reg phy_reg_init[] = {
2714 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2716 rtl_patchphy(tp, 0x16, 1 << 0);
2717 rtl_patchphy(tp, 0x14, 1 << 5);
2718 rtl_patchphy(tp, 0x0d, 1 << 5);
2719 rtl_writephy(tp, 0x1f, 0x0000);
/* 8168c variant 4 uses exactly the same PHY setup as variant 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
/*
 * Vendor PHY setup for the 8168d (variant 1).  An efuse byte selects
 * one of two tuning tables; finishes with a conditional firmware load.
 */
2727 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2729 static const struct phy_reg phy_reg_init_0[] = {
2730 /* Channel Estimation */
2751 * Enhance line driver power
2760 * Can not link to 1Gbps with bad cable
2761 * Decrease SNR threshold form 21.07dB to 19.04dB
2770 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2774 * Fine Tune Switching regulator parameter
2776 rtl_writephy(tp, 0x1f, 0x0002);
2777 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2778 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
/* Efuse byte 0x01 distinguishes the two board tunings. */
2780 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2781 static const struct phy_reg phy_reg_init[] = {
2791 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2793 val = rtl_readphy(tp, 0x0d);
2795 if ((val & 0x00ff) != 0x006c) {
2796 static const u32 set[] = {
2797 0x0065, 0x0066, 0x0067, 0x0068,
2798 0x0069, 0x006a, 0x006b, 0x006c
2802 rtl_writephy(tp, 0x1f, 0x0002);
2805 for (i = 0; i < ARRAY_SIZE(set); i++)
2806 rtl_writephy(tp, 0x0d, val | set[i]);
2809 static const struct phy_reg phy_reg_init[] = {
2817 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2820 /* RSET couple improve */
2821 rtl_writephy(tp, 0x1f, 0x0002);
2822 rtl_patchphy(tp, 0x0d, 0x0300);
2823 rtl_patchphy(tp, 0x0f, 0x0010);
2825 /* Fine tune PLL performance */
2826 rtl_writephy(tp, 0x1f, 0x0002);
2827 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2828 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2830 rtl_writephy(tp, 0x1f, 0x0005);
2831 rtl_writephy(tp, 0x05, 0x001b);
/* Load firmware only when the PHY reports the expected revision. */
2833 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2835 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * Vendor PHY setup for the 8168d (variant 2); structured like
 * variant 1 but with its own tuning values and firmware revision.
 */
2838 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2840 static const struct phy_reg phy_reg_init_0[] = {
2841 /* Channel Estimation */
2862 * Enhance line driver power
2871 * Can not link to 1Gbps with bad cable
2872 * Decrease SNR threshold form 21.07dB to 19.04dB
2881 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
/* Efuse byte 0x01 distinguishes the two board tunings. */
2883 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2884 static const struct phy_reg phy_reg_init[] = {
2895 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2897 val = rtl_readphy(tp, 0x0d);
2898 if ((val & 0x00ff) != 0x006c) {
2899 static const u32 set[] = {
2900 0x0065, 0x0066, 0x0067, 0x0068,
2901 0x0069, 0x006a, 0x006b, 0x006c
2905 rtl_writephy(tp, 0x1f, 0x0002);
2908 for (i = 0; i < ARRAY_SIZE(set); i++)
2909 rtl_writephy(tp, 0x0d, val | set[i]);
2912 static const struct phy_reg phy_reg_init[] = {
2920 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2923 /* Fine tune PLL performance */
2924 rtl_writephy(tp, 0x1f, 0x0002);
2925 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2926 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2928 /* Switching regulator Slew rate */
2929 rtl_writephy(tp, 0x1f, 0x0002);
2930 rtl_patchphy(tp, 0x0f, 0x0017);
2932 rtl_writephy(tp, 0x1f, 0x0005);
2933 rtl_writephy(tp, 0x05, 0x001b);
/* Load firmware only when the PHY reports the expected revision. */
2935 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2937 rtl_writephy(tp, 0x1f, 0x0000);
/* Vendor PHY init table for the 8168d (variant 3). */
2940 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2942 static const struct phy_reg phy_reg_init[] = {
2998 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Vendor PHY init for the 8168d (variant 4): table plus one patched bit. */
3001 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3003 static const struct phy_reg phy_reg_init[] = {
3013 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3014 rtl_patchphy(tp, 0x0d, 1 << 5);
/*
 * Vendor PHY setup for the 8168e (variant 1): firmware load, a write
 * table, then a series of read-modify-write tweaks (power, impedance,
 * auto speed-down, EEE).
 */
3017 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3019 static const struct phy_reg phy_reg_init[] = {
3020 /* Enable Delay cap */
3026 /* Channel estimation fine tune */
3035 /* Update PFM & 10M TX idle timer */
3047 rtl_apply_firmware(tp);
3049 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3051 /* DCO enable for 10M IDLE Power */
3052 rtl_writephy(tp, 0x1f, 0x0007);
3053 rtl_writephy(tp, 0x1e, 0x0023);
3054 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3055 rtl_writephy(tp, 0x1f, 0x0000);
3057 /* For impedance matching */
3058 rtl_writephy(tp, 0x1f, 0x0002);
3059 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3060 rtl_writephy(tp, 0x1f, 0x0000);
3062 /* PHY auto speed down */
3063 rtl_writephy(tp, 0x1f, 0x0007);
3064 rtl_writephy(tp, 0x1e, 0x002d);
3065 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3066 rtl_writephy(tp, 0x1f, 0x0000);
3067 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3069 rtl_writephy(tp, 0x1f, 0x0005);
3070 rtl_writephy(tp, 0x05, 0x8b86);
3071 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3072 rtl_writephy(tp, 0x1f, 0x0000);
3074 rtl_writephy(tp, 0x1f, 0x0005);
3075 rtl_writephy(tp, 0x05, 0x8b85);
3076 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3077 rtl_writephy(tp, 0x1f, 0x0007);
3078 rtl_writephy(tp, 0x1e, 0x0020);
3079 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3080 rtl_writephy(tp, 0x1f, 0x0006);
3081 rtl_writephy(tp, 0x00, 0x5a00);
3082 rtl_writephy(tp, 0x1f, 0x0000);
3083 rtl_writephy(tp, 0x0d, 0x0007);
3084 rtl_writephy(tp, 0x0e, 0x003c);
3085 rtl_writephy(tp, 0x0d, 0x4007);
3086 rtl_writephy(tp, 0x0e, 0x0000);
3087 rtl_writephy(tp, 0x0d, 0x0000);
3090 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3093 addr[0] | (addr[1] << 8),
3094 addr[2] | (addr[3] << 8),
3095 addr[4] | (addr[5] << 8)
3097 const struct exgmac_reg e[] = {
3098 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3099 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3100 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3101 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3104 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
/*
 * PHY setup for the RTL8168E-VL (RTL_GIGA_MAC_VER_34): load the PHY
 * firmware, apply the static register table, then a sequence of paged
 * rtl_writephy/rtl_w1w0_phy tweaks.  Register addresses and values come
 * from Realtek; the page is selected by writing reg 0x1f.
 * NOTE(review): the phy_reg_init[] entries are not visible in this
 * extract — only the section comments survive; confirm upstream.
 */
3107 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3109 static const struct phy_reg phy_reg_init[] = {
3110 /* Enable Delay cap */
3119 /* Channel estimation fine tune */
3136 rtl_apply_firmware(tp);
3138 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3140 /* For 4-corner performance improve */
3141 rtl_writephy(tp, 0x1f, 0x0005);
3142 rtl_writephy(tp, 0x05, 0x8b80);
3143 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3144 rtl_writephy(tp, 0x1f, 0x0000);
3146 /* PHY auto speed down */
3147 rtl_writephy(tp, 0x1f, 0x0004);
3148 rtl_writephy(tp, 0x1f, 0x0007);
3149 rtl_writephy(tp, 0x1e, 0x002d);
3150 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3151 rtl_writephy(tp, 0x1f, 0x0002);
3152 rtl_writephy(tp, 0x1f, 0x0000);
3153 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3155 /* improve 10M EEE waveform */
3156 rtl_writephy(tp, 0x1f, 0x0005);
3157 rtl_writephy(tp, 0x05, 0x8b86);
3158 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3159 rtl_writephy(tp, 0x1f, 0x0000);
3161 /* Improve 2-pair detection performance */
3162 rtl_writephy(tp, 0x1f, 0x0005);
3163 rtl_writephy(tp, 0x05, 0x8b85);
3164 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3165 rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting: clear bits 0-1 of ERI 0x1b0, then the PHY-side EEE regs */
3168 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3169 rtl_writephy(tp, 0x1f, 0x0005);
3170 rtl_writephy(tp, 0x05, 0x8b85);
3171 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3172 rtl_writephy(tp, 0x1f, 0x0004);
3173 rtl_writephy(tp, 0x1f, 0x0007);
3174 rtl_writephy(tp, 0x1e, 0x0020);
3175 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3176 rtl_writephy(tp, 0x1f, 0x0002);
3177 rtl_writephy(tp, 0x1f, 0x0000);
3178 rtl_writephy(tp, 0x0d, 0x0007);
3179 rtl_writephy(tp, 0x0e, 0x003c);
3180 rtl_writephy(tp, 0x0d, 0x4007);
3181 rtl_writephy(tp, 0x0e, 0x0000);
3182 rtl_writephy(tp, 0x0d, 0x0000);
/* Green feature: clear bit0 of reg 0x19 / bit10 of reg 0x10 on page 3 */
3185 rtl_writephy(tp, 0x1f, 0x0003);
3186 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3187 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3188 rtl_writephy(tp, 0x1f, 0x0000);
3190 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3191 rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
/*
 * Common PHY tweaks shared by all RTL8168F variants (and 8411): 4-corner
 * performance, PHY auto speed down, and the 10M EEE waveform fix.
 * Called from the per-variant *_1/_2 config routines below.
 */
3194 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3196 /* For 4-corner performance improve */
3197 rtl_writephy(tp, 0x1f, 0x0005);
3198 rtl_writephy(tp, 0x05, 0x8b80);
3199 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3200 rtl_writephy(tp, 0x1f, 0x0000);
3202 /* PHY auto speed down */
3203 rtl_writephy(tp, 0x1f, 0x0007);
3204 rtl_writephy(tp, 0x1e, 0x002d);
3205 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3206 rtl_writephy(tp, 0x1f, 0x0000);
3207 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3209 /* Improve 10M EEE waveform */
3210 rtl_writephy(tp, 0x1f, 0x0005);
3211 rtl_writephy(tp, 0x05, 0x8b86);
3212 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3213 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for RTL8168F rev.1 (RTL_GIGA_MAC_VER_35): firmware load,
 * static register table, shared 8168f tweaks, then a 2-pair detection
 * improvement.
 * NOTE(review): phy_reg_init[] entries were dropped by this extract —
 * only the section comments remain; confirm values upstream.
 */
3216 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3218 static const struct phy_reg phy_reg_init[] = {
3219 /* Channel estimation fine tune */
3224 /* Modify green table for giga & fnet */
3241 /* Modify green table for 10M */
3247 /* Disable hiimpedance detection (RTCT) */
3253 rtl_apply_firmware(tp);
3255 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3257 rtl8168f_hw_phy_config(tp);
3259 /* Improve 2-pair detection performance */
3260 rtl_writephy(tp, 0x1f, 0x0005);
3261 rtl_writephy(tp, 0x05, 0x8b85);
3262 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3263 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for RTL8168F rev.2 (RTL_GIGA_MAC_VER_36): just the firmware
 * load plus the shared 8168f tweaks — no extra register table needed.
 */
3266 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3268 rtl_apply_firmware(tp);
3270 rtl8168f_hw_phy_config(tp);
/*
 * PHY setup for RTL8411 (RTL_GIGA_MAC_VER_38): firmware, shared 8168f
 * tweaks, 2-pair detection, the static register table, green-table
 * adjustments, the uC same-seed workaround, EEE setup, and the green
 * feature toggle.
 * NOTE(review): phy_reg_init[] entries are missing from this extract;
 * confirm the table contents upstream.
 */
3273 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3275 static const struct phy_reg phy_reg_init[] = {
3276 /* Channel estimation fine tune */
3281 /* Modify green table for giga & fnet */
3298 /* Modify green table for 10M */
3304 /* Disable hiimpedance detection (RTCT) */
3311 rtl_apply_firmware(tp);
3313 rtl8168f_hw_phy_config(tp);
3315 /* Improve 2-pair detection performance */
3316 rtl_writephy(tp, 0x1f, 0x0005);
3317 rtl_writephy(tp, 0x05, 0x8b85);
3318 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3319 rtl_writephy(tp, 0x1f, 0x0000);
3321 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3323 /* Modify green table for giga */
3324 rtl_writephy(tp, 0x1f, 0x0005);
3325 rtl_writephy(tp, 0x05, 0x8b54);
3326 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3327 rtl_writephy(tp, 0x05, 0x8b5d);
3328 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3329 rtl_writephy(tp, 0x05, 0x8a7c);
3330 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3331 rtl_writephy(tp, 0x05, 0x8a7f);
3332 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3333 rtl_writephy(tp, 0x05, 0x8a82);
3334 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3335 rtl_writephy(tp, 0x05, 0x8a85);
3336 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3337 rtl_writephy(tp, 0x05, 0x8a88);
3338 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3339 rtl_writephy(tp, 0x1f, 0x0000);
3341 /* uc same-seed solution */
3342 rtl_writephy(tp, 0x1f, 0x0005);
3343 rtl_writephy(tp, 0x05, 0x8b85);
3344 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3345 rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting: clear low bits of ERI 0x1b0, then the PHY-side EEE regs */
3348 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3349 rtl_writephy(tp, 0x1f, 0x0005);
3350 rtl_writephy(tp, 0x05, 0x8b85);
3351 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3352 rtl_writephy(tp, 0x1f, 0x0004);
3353 rtl_writephy(tp, 0x1f, 0x0007);
3354 rtl_writephy(tp, 0x1e, 0x0020);
3355 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3356 rtl_writephy(tp, 0x1f, 0x0000);
3357 rtl_writephy(tp, 0x0d, 0x0007);
3358 rtl_writephy(tp, 0x0e, 0x003c);
3359 rtl_writephy(tp, 0x0d, 0x4007);
3360 rtl_writephy(tp, 0x0e, 0x0000);
3361 rtl_writephy(tp, 0x0d, 0x0000);
/* Green feature: clear bit0 of reg 0x19 / bit10 of reg 0x10 on page 3 */
3364 rtl_writephy(tp, 0x1f, 0x0003);
3365 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3366 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3367 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for RTL8168G (RTL_GIGA_MAC_VER_40): upload a MAC OCP patch
 * for GPHY reset, arm it via 0xfc26/0xfc28, load the PHY firmware, then
 * perform conditional OCP register fixups.
 * NOTE(review): some patch words and the declaration of the loop index i
 * are not visible in this extract — confirm upstream.
 */
3370 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3372 static const u16 mac_ocp_patch[] = {
3373 0xe008, 0xe01b, 0xe01d, 0xe01f,
3374 0xe021, 0xe023, 0xe025, 0xe027,
3375 0x49d2, 0xf10d, 0x766c, 0x49e2,
3376 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3378 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3379 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3380 0xbe00, 0xb416, 0x0076, 0xe86c,
3381 0xc602, 0xbe00, 0x0000, 0xc602,
3383 0xbe00, 0x0000, 0xc602, 0xbe00,
3384 0x0000, 0xc602, 0xbe00, 0x0000,
3385 0xc602, 0xbe00, 0x0000, 0xc602,
3386 0xbe00, 0x0000, 0xc602, 0xbe00,
3388 0x0000, 0x0000, 0x0000, 0x0000
3392 /* Patch code for GPHY reset */
3393 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3394 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
/* Arm the patch: 0xfc26 = enable, 0xfc28 = patch entry point */
3395 r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3396 r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3398 rtl_apply_firmware(tp);
/* Mirror bit 8 of OCP 0xa460 into bit 15 of OCP 0xbcc4 */
3400 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3401 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3403 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
/* Mirror bit 8 of OCP 0xa466 into 0xc41a bit 1 / 0xbcc4 bit 1 */
3405 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3406 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3408 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3410 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3411 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3413 r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3414 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3416 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
/*
 * PHY setup for RTL8102E family (MAC_VER_07..09): set three individual
 * PHY bits on page 0, then apply the static register table.
 * NOTE(review): phy_reg_init[] entries are missing from this extract.
 */
3419 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3421 static const struct phy_reg phy_reg_init[] = {
3428 rtl_writephy(tp, 0x1f, 0x0000);
3429 rtl_patchphy(tp, 0x11, 1 << 12);
3430 rtl_patchphy(tp, 0x19, 1 << 13);
3431 rtl_patchphy(tp, 0x10, 1 << 15);
3433 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for RTL8105E (MAC_VER_29/30): disable ALDPS, load the PHY
 * ram code (firmware), then apply the static register table.
 * NOTE(review): phy_reg_init[] entries are missing from this extract.
 */
3436 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3438 static const struct phy_reg phy_reg_init[] = {
3452 /* Disable ALDPS before ram code */
3453 rtl_writephy(tp, 0x1f, 0x0000);
3454 rtl_writephy(tp, 0x18, 0x0310);
3457 rtl_apply_firmware(tp);
3459 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY setup for RTL8402 (MAC_VER_37): disable ALDPS, load firmware,
 * disable EEE via ERI 0x1b0, then set the EEE-related LED config.
 */
3462 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3464 /* Disable ALDPS before setting firmware */
3465 rtl_writephy(tp, 0x1f, 0x0000);
3466 rtl_writephy(tp, 0x18, 0x0310);
3469 rtl_apply_firmware(tp);
/* EEE setting */
3472 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3473 rtl_writephy(tp, 0x1f, 0x0004);
3474 rtl_writephy(tp, 0x10, 0x401f);
3475 rtl_writephy(tp, 0x19, 0x7030);
3476 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for RTL8106E (MAC_VER_39): disable ALDPS, load the ram
 * code, clear ERI 0x1b0, apply the register table, clear ERI 0x1d0.
 * NOTE(review): phy_reg_init[] entries are missing from this extract.
 */
3479 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3481 static const struct phy_reg phy_reg_init[] = {
3488 /* Disable ALDPS before ram code */
3489 rtl_writephy(tp, 0x1f, 0x0000);
3490 rtl_writephy(tp, 0x18, 0x0310);
3493 rtl_apply_firmware(tp);
3495 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3496 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3498 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/*
 * Dispatch to the chip-specific PHY configuration routine based on the
 * detected mac_version.  VER_01 and VER_31 need no PHY config; VER_41
 * has no handler yet at this point in the driver's history.
 * NOTE(review): the break statements between cases were dropped by this
 * extract — upstream each case (or case group) ends with a break.
 */
3501 static void rtl_hw_phy_config(struct net_device *dev)
3503 struct rtl8169_private *tp = netdev_priv(dev);
3505 rtl8169_print_mac_version(tp);
3507 switch (tp->mac_version) {
3508 case RTL_GIGA_MAC_VER_01:
3510 case RTL_GIGA_MAC_VER_02:
3511 case RTL_GIGA_MAC_VER_03:
3512 rtl8169s_hw_phy_config(tp);
3514 case RTL_GIGA_MAC_VER_04:
3515 rtl8169sb_hw_phy_config(tp);
3517 case RTL_GIGA_MAC_VER_05:
3518 rtl8169scd_hw_phy_config(tp);
3520 case RTL_GIGA_MAC_VER_06:
3521 rtl8169sce_hw_phy_config(tp);
3523 case RTL_GIGA_MAC_VER_07:
3524 case RTL_GIGA_MAC_VER_08:
3525 case RTL_GIGA_MAC_VER_09:
3526 rtl8102e_hw_phy_config(tp);
3528 case RTL_GIGA_MAC_VER_11:
3529 rtl8168bb_hw_phy_config(tp);
3531 case RTL_GIGA_MAC_VER_12:
3532 rtl8168bef_hw_phy_config(tp);
3534 case RTL_GIGA_MAC_VER_17:
3535 rtl8168bef_hw_phy_config(tp);
3537 case RTL_GIGA_MAC_VER_18:
3538 rtl8168cp_1_hw_phy_config(tp);
3540 case RTL_GIGA_MAC_VER_19:
3541 rtl8168c_1_hw_phy_config(tp);
3543 case RTL_GIGA_MAC_VER_20:
3544 rtl8168c_2_hw_phy_config(tp);
3546 case RTL_GIGA_MAC_VER_21:
3547 rtl8168c_3_hw_phy_config(tp);
3549 case RTL_GIGA_MAC_VER_22:
3550 rtl8168c_4_hw_phy_config(tp);
3552 case RTL_GIGA_MAC_VER_23:
3553 case RTL_GIGA_MAC_VER_24:
3554 rtl8168cp_2_hw_phy_config(tp);
3556 case RTL_GIGA_MAC_VER_25:
3557 rtl8168d_1_hw_phy_config(tp);
3559 case RTL_GIGA_MAC_VER_26:
3560 rtl8168d_2_hw_phy_config(tp);
3562 case RTL_GIGA_MAC_VER_27:
3563 rtl8168d_3_hw_phy_config(tp);
3565 case RTL_GIGA_MAC_VER_28:
3566 rtl8168d_4_hw_phy_config(tp);
3568 case RTL_GIGA_MAC_VER_29:
3569 case RTL_GIGA_MAC_VER_30:
3570 rtl8105e_hw_phy_config(tp);
3572 case RTL_GIGA_MAC_VER_31:
3575 case RTL_GIGA_MAC_VER_32:
3576 case RTL_GIGA_MAC_VER_33:
3577 rtl8168e_1_hw_phy_config(tp);
3579 case RTL_GIGA_MAC_VER_34:
3580 rtl8168e_2_hw_phy_config(tp);
3582 case RTL_GIGA_MAC_VER_35:
3583 rtl8168f_1_hw_phy_config(tp);
3585 case RTL_GIGA_MAC_VER_36:
3586 rtl8168f_2_hw_phy_config(tp);
3589 case RTL_GIGA_MAC_VER_37:
3590 rtl8402_hw_phy_config(tp);
3593 case RTL_GIGA_MAC_VER_38:
3594 rtl8411_hw_phy_config(tp);
3597 case RTL_GIGA_MAC_VER_39:
3598 rtl8106e_hw_phy_config(tp);
3601 case RTL_GIGA_MAC_VER_40:
3602 rtl8168g_1_hw_phy_config(tp);
3605 case RTL_GIGA_MAC_VER_41:
/*
 * Periodic PHY monitor (runs from the work queue): while a PHY reset is
 * pending, poll with a short delay; once the reset completes, check the
 * link and, if still down, trigger another PHY reset.  Re-arms tp->timer
 * before returning.
 * NOTE(review): the lines adjusting `timeout` inside the branches were
 * dropped by this extract — confirm the exact values upstream.
 */
3611 static void rtl_phy_work(struct rtl8169_private *tp)
3613 struct timer_list *timer = &tp->timer;
3614 void __iomem *ioaddr = tp->mmio_addr;
3615 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3617 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3619 if (tp->phy_reset_pending(tp)) {
3621 * A busy loop could burn quite a few cycles on nowadays CPU.
3622 * Let's delay the execution of the timer for a few ticks.
3628 if (tp->link_ok(ioaddr))
3631 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3633 tp->phy_reset_enable(tp);
3636 mod_timer(timer, jiffies + timeout);
/*
 * Queue the driver's deferred-work handler for the given flag.  The
 * test_and_set_bit() makes scheduling idempotent: the work item is only
 * queued if the flag was not already pending.
 */
3639 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3641 if (!test_and_set_bit(flag, tp->wk.flags))
3642 schedule_work(&tp->wk.work);
/*
 * Timer callback for tp->timer: defers the actual PHY work to process
 * context by scheduling the PHY_PENDING task.
 */
3645 static void rtl8169_phy_timer(unsigned long __opaque)
3647 struct net_device *dev = (struct net_device *)__opaque;
3648 struct rtl8169_private *tp = netdev_priv(dev);
3650 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
/*
 * Undo probe-time PCI setup: release BAR regions, clear MWI, disable
 * the device.  NOTE(review): the iounmap(ioaddr)/free_netdev(dev) calls
 * expected here are not visible in this extract — confirm upstream.
 */
3653 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3654 void __iomem *ioaddr)
3657 pci_release_regions(pdev);
3658 pci_clear_mwi(pdev);
3659 pci_disable_device(pdev);
/* Poll condition: true while a PHY reset is still in progress. */
3663 DECLARE_RTL_COND(rtl_phy_reset_cond)
3665 return tp->phy_reset_pending(tp);
/*
 * Trigger a PHY reset and wait (1 ms steps, up to 100 iterations) for
 * the reset-pending condition to clear.
 */
3668 static void rtl8169_phy_reset(struct net_device *dev,
3669 struct rtl8169_private *tp)
3671 tp->phy_reset_enable(tp);
3672 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
/*
 * TBI (ten-bit interface / fiber) is only possible on the original 8169
 * (MAC_VER_01) and is signalled by the TBI_Enable bit in PHYstatus.
 */
3675 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3677 void __iomem *ioaddr = tp->mmio_addr;
3679 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3680 (RTL_R8(PHYstatus) & TBI_Enable);
/*
 * Full PHY bring-up at open time: chip-specific config, PCI latency /
 * cache-line tuning for old chips, the VER_02 register quirk, a PHY
 * reset, then advertise all supported speeds with autonegotiation
 * (gigabit modes only when the PHY supports GMII).
 * NOTE(review): the RTL_W8(0x82, 0x01) writes implied by the dprintk
 * messages were dropped by this extract — confirm upstream.
 */
3683 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3685 void __iomem *ioaddr = tp->mmio_addr;
3687 rtl_hw_phy_config(dev);
3689 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3690 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3694 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3696 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3697 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
3699 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
3700 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n")ÿ;
3702 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3703 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
3706 rtl8169_phy_reset(dev, tp);
3708 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3709 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3710 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3711 (tp->mii.supports_gmii ?
3712 ADVERTISED_1000baseT_Half |
3713 ADVERTISED_1000baseT_Full : 0));
3715 if (rtl_tbi_enabled(tp))
3716 netif_info(tp, link, dev, "TBI auto-negotiating\n");
/*
 * Program the receive address (MAC) registers MAC0/MAC4 under the
 * Cfg9346 unlock, mirroring into ExGMAC on VER_34.  Writes are paired
 * with rtl_unlock_work(); NOTE(review): the matching rtl_lock_work()
 * at the top of the function was dropped by this extract.
 */
3719 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3721 void __iomem *ioaddr = tp->mmio_addr;
3725 RTL_W8(Cfg9346, Cfg9346_Unlock);
3727 RTL_W32(MAC4, addr[4] | addr[5] << 8);
3730 RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
3733 if (tp->mac_version == RTL_GIGA_MAC_VER_34)
3734 rtl_rar_exgmac_set(tp, addr);
3736 RTL_W8(Cfg9346, Cfg9346_Lock);
3738 rtl_unlock_work(tp);
/*
 * .ndo_set_mac_address handler: validate the new address, copy it into
 * dev->dev_addr, and push it to the hardware.  Returns -EADDRNOTAVAIL
 * for an invalid ethernet address.
 */
3741 static int rtl_set_mac_address(struct net_device *dev, void *p)
3743 struct rtl8169_private *tp = netdev_priv(dev);
3744 struct sockaddr *addr = p;
3746 if (!is_valid_ether_addr(addr->sa_data))
3747 return -EADDRNOTAVAIL;
3749 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3751 rtl_rar_set(tp, dev->dev_addr);
/*
 * .ndo_do_ioctl handler: forward MII ioctls to the chip-specific
 * do_ioctl hook; reject with -ENODEV if the interface is down.
 */
3756 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3758 struct rtl8169_private *tp = netdev_priv(dev);
3759 struct mii_ioctl_data *data = if_mii(ifr);
3761 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
/*
 * MII ioctl backend for xMII chips: report phy id 32 (internal PHY),
 * and service register read/write requests via rtl_readphy/rtl_writephy.
 * NOTE(review): the switch (cmd) / case SIOCGMIIPHY..SIOCSMIIREG
 * scaffolding was dropped by this extract — confirm upstream.
 */
3764 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3765 struct mii_ioctl_data *data, int cmd)
3769 data->phy_id = 32; /* Internal PHY */
3773 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3777 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
/*
 * MII ioctl backend for TBI chips — no MII registers to expose.
 * NOTE(review): the body (presumably `return -EOPNOTSUPP;`) was dropped
 * by this extract — confirm upstream.
 */
3783 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
/*
 * Disable MSI if it was enabled at probe time, and clear the feature
 * flag so teardown paths don't disable it twice.
 */
3788 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3790 if (tp->features & RTL_FEATURE_MSI) {
3791 pci_disable_msi(pdev);
3792 tp->features &= ~RTL_FEATURE_MSI;
/*
 * Select the MDIO accessor pair for this chip generation: 8168DP rev 1,
 * 8168DP rev 2 (+VER_31), 8168G (OCP-based), or the plain 8169 path as
 * the default.
 * NOTE(review): break statements and the default: label were dropped by
 * this extract — confirm upstream.
 */
3796 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3798 struct mdio_ops *ops = &tp->mdio_ops;
3800 switch (tp->mac_version) {
3801 case RTL_GIGA_MAC_VER_27:
3802 ops->write = r8168dp_1_mdio_write;
3803 ops->read = r8168dp_1_mdio_read;
3805 case RTL_GIGA_MAC_VER_28:
3806 case RTL_GIGA_MAC_VER_31:
3807 ops->write = r8168dp_2_mdio_write;
3808 ops->read = r8168dp_2_mdio_read;
3810 case RTL_GIGA_MAC_VER_40:
3811 case RTL_GIGA_MAC_VER_41:
3812 ops->write = r8168g_mdio_write;
3813 ops->read = r8168g_mdio_read;
3816 ops->write = r8169_mdio_write;
3817 ops->read = r8169_mdio_read;
/*
 * WoL suspend quirk: on the listed chip generations the receiver must
 * keep accepting broadcast/multicast/unicast frames while suspended so
 * that wake packets are seen.
 */
3822 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3824 void __iomem *ioaddr = tp->mmio_addr;
3826 switch (tp->mac_version) {
3827 case RTL_GIGA_MAC_VER_25:
3828 case RTL_GIGA_MAC_VER_26:
3829 case RTL_GIGA_MAC_VER_29:
3830 case RTL_GIGA_MAC_VER_30:
3831 case RTL_GIGA_MAC_VER_32:
3832 case RTL_GIGA_MAC_VER_33:
3833 case RTL_GIGA_MAC_VER_34:
3834 case RTL_GIGA_MAC_VER_37:
3835 case RTL_GIGA_MAC_VER_38:
3836 case RTL_GIGA_MAC_VER_39:
3837 case RTL_GIGA_MAC_VER_40:
3838 case RTL_GIGA_MAC_VER_41:
3839 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3840 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
/*
 * If any WoL source is armed, prepare for a WoL-capable power-down:
 * reset the PHY to page 0 / BMCR defaults and apply the suspend quirk.
 * Returns false when no wake source is set (caller does a full
 * power-down instead).  NOTE(review): the `return false;`/`return true;`
 * lines were dropped by this extract — confirm upstream.
 */
3847 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3849 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3852 rtl_writephy(tp, 0x1f, 0x0000);
3853 rtl_writephy(tp, MII_BMCR, 0x0000);
3855 rtl_wol_suspend_quirk(tp);
/* Power the 810x PHY down: page 0, BMCR power-down bit. */
3860 static void r810x_phy_power_down(struct rtl8169_private *tp)
3862 rtl_writephy(tp, 0x1f, 0x0000);
3863 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Power the 810x PHY up: page 0, re-enable autonegotiation. */
3866 static void r810x_phy_power_up(struct rtl8169_private *tp)
3868 rtl_writephy(tp, 0x1f, 0x0000);
3869 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * PLL power-down for 810x-class chips: skip if a WoL wake source keeps
 * the PHY alive; otherwise power the PHY down and, on the listed MAC
 * versions, clear the PMCH power bit as well.
 */
3872 static void r810x_pll_power_down(struct rtl8169_private *tp)
3874 void __iomem *ioaddr = tp->mmio_addr;
3876 if (rtl_wol_pll_power_down(tp))
3879 r810x_phy_power_down(tp);
3881 switch (tp->mac_version) {
3882 case RTL_GIGA_MAC_VER_07:
3883 case RTL_GIGA_MAC_VER_08:
3884 case RTL_GIGA_MAC_VER_09:
3885 case RTL_GIGA_MAC_VER_10:
3886 case RTL_GIGA_MAC_VER_13:
3887 case RTL_GIGA_MAC_VER_16:
3890 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
/*
 * PLL power-up for 810x-class chips: power the PHY up, then set the
 * PMCH power bit on the MAC versions that have it.
 */
3895 static void r810x_pll_power_up(struct rtl8169_private *tp)
3897 void __iomem *ioaddr = tp->mmio_addr;
3899 r810x_phy_power_up(tp);
3901 switch (tp->mac_version) {
3902 case RTL_GIGA_MAC_VER_07:
3903 case RTL_GIGA_MAC_VER_08:
3904 case RTL_GIGA_MAC_VER_09:
3905 case RTL_GIGA_MAC_VER_10:
3906 case RTL_GIGA_MAC_VER_13:
3907 case RTL_GIGA_MAC_VER_16:
3910 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
/*
 * Power the 8168 PHY up: on the listed generations also clear reg 0x0e
 * (undoes the 0x0200 written by the power-down path), then re-enable
 * autonegotiation via BMCR.
 */
3915 static void r8168_phy_power_up(struct rtl8169_private *tp)
3917 rtl_writephy(tp, 0x1f, 0x0000);
3918 switch (tp->mac_version) {
3919 case RTL_GIGA_MAC_VER_11:
3920 case RTL_GIGA_MAC_VER_12:
3921 case RTL_GIGA_MAC_VER_17:
3922 case RTL_GIGA_MAC_VER_18:
3923 case RTL_GIGA_MAC_VER_19:
3924 case RTL_GIGA_MAC_VER_20:
3925 case RTL_GIGA_MAC_VER_21:
3926 case RTL_GIGA_MAC_VER_22:
3927 case RTL_GIGA_MAC_VER_23:
3928 case RTL_GIGA_MAC_VER_24:
3929 case RTL_GIGA_MAC_VER_25:
3930 case RTL_GIGA_MAC_VER_26:
3931 case RTL_GIGA_MAC_VER_27:
3932 case RTL_GIGA_MAC_VER_28:
3933 case RTL_GIGA_MAC_VER_31:
3934 rtl_writephy(tp, 0x0e, 0x0000);
3939 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * Power the 8168 PHY down.  VER_32/33 keep autonegotiation enabled in
 * BMCR together with PDOWN; the middle group first writes 0x0200 to
 * reg 0x0e, then all non-VER_32/33 paths fall through to plain
 * BMCR_PDOWN.
 */
3942 static void r8168_phy_power_down(struct rtl8169_private *tp)
3944 rtl_writephy(tp, 0x1f, 0x0000);
3945 switch (tp->mac_version) {
3946 case RTL_GIGA_MAC_VER_32:
3947 case RTL_GIGA_MAC_VER_33:
3948 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3951 case RTL_GIGA_MAC_VER_11:
3952 case RTL_GIGA_MAC_VER_12:
3953 case RTL_GIGA_MAC_VER_17:
3954 case RTL_GIGA_MAC_VER_18:
3955 case RTL_GIGA_MAC_VER_19:
3956 case RTL_GIGA_MAC_VER_20:
3957 case RTL_GIGA_MAC_VER_21:
3958 case RTL_GIGA_MAC_VER_22:
3959 case RTL_GIGA_MAC_VER_23:
3960 case RTL_GIGA_MAC_VER_24:
3961 case RTL_GIGA_MAC_VER_25:
3962 case RTL_GIGA_MAC_VER_26:
3963 case RTL_GIGA_MAC_VER_27:
3964 case RTL_GIGA_MAC_VER_28:
3965 case RTL_GIGA_MAC_VER_31:
3966 rtl_writephy(tp, 0x0e, 0x0200);
3968 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/*
 * PLL power-down for 8168-class chips.  Bails out early when a DASH
 * management agent is active (8168DP family) or when ASF is enabled
 * (VER_23/24).  VER_32/33 need an EPHY write first.  A pending WoL
 * source short-circuits to the WoL power-down path; otherwise the PHY
 * is powered down and, on the listed versions, the PMCH bit cleared.
 */
3973 static void r8168_pll_power_down(struct rtl8169_private *tp)
3975 void __iomem *ioaddr = tp->mmio_addr;
3977 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3978 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3979 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3980 r8168dp_check_dash(tp)) {
3984 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
3985 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
3986 (RTL_R16(CPlusCmd) & ASF)) {
3990 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3991 tp->mac_version == RTL_GIGA_MAC_VER_33)
3992 rtl_ephy_write(tp, 0x19, 0xff64);
3994 if (rtl_wol_pll_power_down(tp))
3997 r8168_phy_power_down(tp);
3999 switch (tp->mac_version) {
4000 case RTL_GIGA_MAC_VER_25:
4001 case RTL_GIGA_MAC_VER_26:
4002 case RTL_GIGA_MAC_VER_27:
4003 case RTL_GIGA_MAC_VER_28:
4004 case RTL_GIGA_MAC_VER_31:
4005 case RTL_GIGA_MAC_VER_32:
4006 case RTL_GIGA_MAC_VER_33:
4007 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
/*
 * PLL power-up for 8168-class chips: set the PMCH power bit on the
 * versions that have it, then power the PHY back up.
 */
4012 static void r8168_pll_power_up(struct rtl8169_private *tp)
4014 void __iomem *ioaddr = tp->mmio_addr;
4016 switch (tp->mac_version) {
4017 case RTL_GIGA_MAC_VER_25:
4018 case RTL_GIGA_MAC_VER_26:
4019 case RTL_GIGA_MAC_VER_27:
4020 case RTL_GIGA_MAC_VER_28:
4021 case RTL_GIGA_MAC_VER_31:
4022 case RTL_GIGA_MAC_VER_32:
4023 case RTL_GIGA_MAC_VER_33:
4024 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4028 r8168_phy_power_up(tp);
/*
 * Invoke an optional chip-specific operation.
 * NOTE(review): the body (presumably `if (op) op(tp);`) was dropped by
 * this extract — confirm upstream.
 */
4031 static void rtl_generic_op(struct rtl8169_private *tp,
4032 void (*op)(struct rtl8169_private *))
/* Dispatch to the chip-specific PLL power-down hook (may be NULL). */
4038 static void rtl_pll_power_down(struct rtl8169_private *tp)
4040 rtl_generic_op(tp, tp->pll_power_ops.down);
/* Dispatch to the chip-specific PLL power-up hook (may be NULL). */
4043 static void rtl_pll_power_up(struct rtl8169_private *tp)
4045 rtl_generic_op(tp, tp->pll_power_ops.up);
/*
 * Bind the PLL power up/down hooks: r810x_* for the 10/100 family,
 * r8168_* for the gigabit 8168 family.
 * NOTE(review): break statements and the default case (which presumably
 * leaves both hooks NULL) were dropped by this extract.
 */
4048 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4050 struct pll_power_ops *ops = &tp->pll_power_ops;
4052 switch (tp->mac_version) {
4053 case RTL_GIGA_MAC_VER_07:
4054 case RTL_GIGA_MAC_VER_08:
4055 case RTL_GIGA_MAC_VER_09:
4056 case RTL_GIGA_MAC_VER_10:
4057 case RTL_GIGA_MAC_VER_16:
4058 case RTL_GIGA_MAC_VER_29:
4059 case RTL_GIGA_MAC_VER_30:
4060 case RTL_GIGA_MAC_VER_37:
4061 case RTL_GIGA_MAC_VER_39:
4062 ops->down = r810x_pll_power_down;
4063 ops->up = r810x_pll_power_up;
4066 case RTL_GIGA_MAC_VER_11:
4067 case RTL_GIGA_MAC_VER_12:
4068 case RTL_GIGA_MAC_VER_17:
4069 case RTL_GIGA_MAC_VER_18:
4070 case RTL_GIGA_MAC_VER_19:
4071 case RTL_GIGA_MAC_VER_20:
4072 case RTL_GIGA_MAC_VER_21:
4073 case RTL_GIGA_MAC_VER_22:
4074 case RTL_GIGA_MAC_VER_23:
4075 case RTL_GIGA_MAC_VER_24:
4076 case RTL_GIGA_MAC_VER_25:
4077 case RTL_GIGA_MAC_VER_26:
4078 case RTL_GIGA_MAC_VER_27:
4079 case RTL_GIGA_MAC_VER_28:
4080 case RTL_GIGA_MAC_VER_31:
4081 case RTL_GIGA_MAC_VER_32:
4082 case RTL_GIGA_MAC_VER_33:
4083 case RTL_GIGA_MAC_VER_34:
4084 case RTL_GIGA_MAC_VER_35:
4085 case RTL_GIGA_MAC_VER_36:
4086 case RTL_GIGA_MAC_VER_38:
4087 case RTL_GIGA_MAC_VER_40:
4088 case RTL_GIGA_MAC_VER_41:
4089 ops->down = r8168_pll_power_down;
4090 ops->up = r8168_pll_power_up;
/*
 * Program the baseline RxConfig for this chip generation: legacy chips
 * get the RX FIFO threshold, 8168C-era chips add multi-fragment
 * support, everything else gets the 128-int variant.
 */
4100 static void rtl_init_rxcfg(struct rtl8169_private *tp)
4102 void __iomem *ioaddr = tp->mmio_addr;
4104 switch (tp->mac_version) {
4105 case RTL_GIGA_MAC_VER_01:
4106 case RTL_GIGA_MAC_VER_02:
4107 case RTL_GIGA_MAC_VER_03:
4108 case RTL_GIGA_MAC_VER_04:
4109 case RTL_GIGA_MAC_VER_05:
4110 case RTL_GIGA_MAC_VER_06:
4111 case RTL_GIGA_MAC_VER_10:
4112 case RTL_GIGA_MAC_VER_11:
4113 case RTL_GIGA_MAC_VER_12:
4114 case RTL_GIGA_MAC_VER_13:
4115 case RTL_GIGA_MAC_VER_14:
4116 case RTL_GIGA_MAC_VER_15:
4117 case RTL_GIGA_MAC_VER_16:
4118 case RTL_GIGA_MAC_VER_17:
4119 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
4121 case RTL_GIGA_MAC_VER_18:
4122 case RTL_GIGA_MAC_VER_19:
4123 case RTL_GIGA_MAC_VER_20:
4124 case RTL_GIGA_MAC_VER_21:
4125 case RTL_GIGA_MAC_VER_22:
4126 case RTL_GIGA_MAC_VER_23:
4127 case RTL_GIGA_MAC_VER_24:
4128 case RTL_GIGA_MAC_VER_34:
4129 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4132 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
/* Reset all TX/RX ring cursors to the beginning of the rings. */
4137 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4139 tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
/* Enable jumbo frames via the chip hook, under the Cfg9346 unlock. */
4142 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
4144 void __iomem *ioaddr = tp->mmio_addr;
4146 RTL_W8(Cfg9346, Cfg9346_Unlock);
4147 rtl_generic_op(tp, tp->jumbo_ops.enable);
4148 RTL_W8(Cfg9346, Cfg9346_Lock);
/* Disable jumbo frames via the chip hook, under the Cfg9346 unlock. */
4151 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4153 void __iomem *ioaddr = tp->mmio_addr;
4155 RTL_W8(Cfg9346, Cfg9346_Unlock);
4156 rtl_generic_op(tp, tp->jumbo_ops.disable);
4157 RTL_W8(Cfg9346, Cfg9346_Lock);
/*
 * 8168C jumbo enable: set both jumbo bits and reduce the PCIe max read
 * request size (0x2 encoding) while jumbo frames are active.
 */
4160 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4162 void __iomem *ioaddr = tp->mmio_addr;
4164 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4165 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4166 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/*
 * 8168C jumbo disable: clear both jumbo bits and restore the larger
 * PCIe max read request size (0x5 encoding).
 */
4169 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
4171 void __iomem *ioaddr = tp->mmio_addr;
4173 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4174 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
4175 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168DP jumbo enable: only the Config3 jumbo bit is needed. */
4178 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
4180 void __iomem *ioaddr = tp->mmio_addr;
4182 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
4185 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
4187 void __iomem *ioaddr = tp->mmio_addr;
4189 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
/*
 * 8168E jumbo enable: raise the max TX packet size, set the jumbo bits
 * in Config3/Config4, and shrink the PCIe max read request size.
 */
4192 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4194 void __iomem *ioaddr = tp->mmio_addr;
4196 RTL_W8(MaxTxPacketSize, 0x3f);
4197 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4198 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4199 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/*
 * 8168E jumbo disable: restore the normal max TX packet size, clear the
 * jumbo bits, and restore the larger PCIe max read request size.
 */
4202 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4204 void __iomem *ioaddr = tp->mmio_addr;
4206 RTL_W8(MaxTxPacketSize, 0x0c);
4207 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4208 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4209 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168B rev.0 jumbo enable: PCIe tweak only (no-snoop + small MRRS). */
4212 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4214 rtl_tx_performance_tweak(tp->pci_dev,
4215 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B rev.0 jumbo disable: restore the larger MRRS, keep no-snoop. */
4218 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4220 rtl_tx_performance_tweak(tp->pci_dev,
4221 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B rev.1 jumbo enable: rev.0 tweak plus Config4 bit 0. */
4224 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4226 void __iomem *ioaddr = tp->mmio_addr;
4228 r8168b_0_hw_jumbo_enable(tp);
4230 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
/* 8168B rev.1 jumbo disable: rev.0 tweak plus clearing Config4 bit 0. */
4233 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4235 void __iomem *ioaddr = tp->mmio_addr;
4237 r8168b_0_hw_jumbo_disable(tp);
4239 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/*
 * Bind the jumbo-frame enable/disable hooks per chip generation; 8169
 * needs none and 810x/8168G have no jumbo support (disable = NULL).
 * NOTE(review): break statements and the `ops->enable = NULL;` style
 * counterparts were dropped by this extract — confirm upstream.
 */
4242 static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4244 struct jumbo_ops *ops = &tp->jumbo_ops;
4246 switch (tp->mac_version) {
4247 case RTL_GIGA_MAC_VER_11:
4248 ops->disable = r8168b_0_hw_jumbo_disable;
4249 ops->enable = r8168b_0_hw_jumbo_enable;
4251 case RTL_GIGA_MAC_VER_12:
4252 case RTL_GIGA_MAC_VER_17:
4253 ops->disable = r8168b_1_hw_jumbo_disable;
4254 ops->enable = r8168b_1_hw_jumbo_enable;
4256 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
4257 case RTL_GIGA_MAC_VER_19:
4258 case RTL_GIGA_MAC_VER_20:
4259 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
4260 case RTL_GIGA_MAC_VER_22:
4261 case RTL_GIGA_MAC_VER_23:
4262 case RTL_GIGA_MAC_VER_24:
4263 case RTL_GIGA_MAC_VER_25:
4264 case RTL_GIGA_MAC_VER_26:
4265 ops->disable = r8168c_hw_jumbo_disable;
4266 ops->enable = r8168c_hw_jumbo_enable;
4268 case RTL_GIGA_MAC_VER_27:
4269 case RTL_GIGA_MAC_VER_28:
4270 ops->disable = r8168dp_hw_jumbo_disable;
4271 ops->enable = r8168dp_hw_jumbo_enable;
4273 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
4274 case RTL_GIGA_MAC_VER_32:
4275 case RTL_GIGA_MAC_VER_33:
4276 case RTL_GIGA_MAC_VER_34:
4277 ops->disable = r8168e_hw_jumbo_disable;
4278 ops->enable = r8168e_hw_jumbo_enable;
4282 * No action needed for jumbo frames with 8169.
4283 * No jumbo for 810x at all.
4285 case RTL_GIGA_MAC_VER_40:
4286 case RTL_GIGA_MAC_VER_41:
4288 ops->disable = NULL;
/* Poll condition: true while the chip reset bit is still set. */
4294 DECLARE_RTL_COND(rtl_chipcmd_cond)
4296 void __iomem *ioaddr = tp->mmio_addr;
4298 return RTL_R8(ChipCmd) & CmdReset;
/*
 * Issue a software chip reset and wait (100 us steps, up to 100
 * iterations) for the CmdReset bit to self-clear.
 */
4301 static void rtl_hw_reset(struct rtl8169_private *tp)
4303 void __iomem *ioaddr = tp->mmio_addr;
4305 RTL_W8(ChipCmd, CmdReset);
4307 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
/*
 * Load and validate the chip's firmware patch, caching the result in
 * tp->rtl_fw.  Failure is non-fatal: a warning is logged and the driver
 * continues without the patch.
 * NOTE(review): several error-path lines (rc checks, the out_no_firmware
 * assignment and return) were dropped by this extract.
 */
4310 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4312 struct rtl_fw *rtl_fw;
4316 name = rtl_lookup_firmware_name(tp);
4318 goto out_no_firmware;
4320 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4324 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4328 rc = rtl_check_firmware(tp, rtl_fw);
4330 goto err_release_firmware;
4332 tp->rtl_fw = rtl_fw;
4336 err_release_firmware:
4337 release_firmware(rtl_fw->fw);
4341 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
/*
 * Fetch the firmware only on first use: tp->rtl_fw starts as an ERR_PTR
 * sentinel, so IS_ERR() distinguishes "never loaded" from a cached blob.
 */
4348 static void rtl_request_firmware(struct rtl8169_private *tp)
4350 if (IS_ERR(tp->rtl_fw))
4351 rtl_request_uncached_firmware(tp);
/* Stop accepting any packets by clearing the RxConfig accept bits. */
4354 static void rtl_rx_close(struct rtl8169_private *tp)
4356 void __iomem *ioaddr = tp->mmio_addr;
4358 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
/* Poll condition: true while the normal-priority TX queue is polling. */
4361 DECLARE_RTL_COND(rtl_npq_cond)
4363 void __iomem *ioaddr = tp->mmio_addr;
4365 return RTL_R8(TxPoll) & NPQ;
/* Poll condition: true once the TX FIFO reports empty. */
4368 DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4370 void __iomem *ioaddr = tp->mmio_addr;
4372 return RTL_R32(TxConfig) & TXCFG_EMPTY;
/*
 * Quiesce the NIC before a reset: mask+ack interrupts, stop RX, then
 * drain TX in the chip-appropriate way — 8168DP waits for the NPQ bit
 * to drop, newer chips assert StopReq and wait for the TX FIFO to
 * empty, everything else asserts StopReq with a fixed delay.
 * NOTE(review): the rtl_rx_close() call and the final rtl_hw_reset()
 * implied by the surrounding code were dropped by this extract.
 */
4375 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4377 void __iomem *ioaddr = tp->mmio_addr;
4379 /* Disable interrupts */
4380 rtl8169_irq_mask_and_ack(tp);
4384 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4385 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4386 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4387 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4388 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4389 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4390 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4391 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4392 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4393 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4394 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4395 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4396 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4398 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
/* Program TxConfig with the DMA burst size and interframe gap. */
4405 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4407 void __iomem *ioaddr = tp->mmio_addr;
4409 /* Set DMA burst size and Interframe Gap Time */
4410 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4411 (InterFrameGap << TxInterFrameGapShift));
/*
 * Generic hardware start: run the chip-specific hw_start hook, then
 * enable all interrupts.  NOTE(review): the tp->hw_start(dev) call
 * expected between these lines was dropped by this extract.
 */
4414 static void rtl_hw_start(struct net_device *dev)
4416 struct rtl8169_private *tp = netdev_priv(dev);
4420 rtl_irq_enable_all(tp);
/*
 * Write the 64-bit TX/RX descriptor ring base addresses, high half
 * first (an iop3xx quirk — see the comment below).
 */
4423 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4424 void __iomem *ioaddr)
4427 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4428 * register to be written before TxDescAddrLow to work.
4429 * Switching from MMIO to I/O access fixes the issue as well.
4431 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4432 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4433 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4434 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
/*
 * Read CPlusCmd and write the value straight back (read-modify-write
 * latch), returning the value read.  NOTE(review): the `return cmd;`
 * line was dropped by this extract — confirm upstream.
 */
4437 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4441 cmd = RTL_R16(CPlusCmd);
4442 RTL_W16(CPlusCmd, cmd);
/*
 * Set the RX size filter one byte above the buffer size — effectively
 * disabling it, since a tight limit hurts (see comment).
 */
4446 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4448 /* Low hurts. Let's disable the filtering. */
4449 RTL_W16(RxMaxSize, rx_buf_sz + 1);
/*
 * 8110SCd/SCe quirk: write a clock-speed-dependent magic value to
 * register 0x7c, selected by mac_version and the PCI clock bit in
 * Config2.  NOTE(review): the struct field declarations and the break
 * after the match were dropped by this extract.
 */
4452 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4454 static const struct rtl_cfg2_info {
4459 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4460 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4461 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4462 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4464 const struct rtl_cfg2_info *p = cfg2_info;
4468 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4469 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4470 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4471 RTL_W32(0x7c, p->val);
/*
 * .ndo_set_rx_mode handler: compute the accept bits and the 64-bit
 * multicast hash filter from the device flags and mc list, swap the
 * filter words on PCIe chips (>VER_06), force accept-all-multicast on
 * VER_35, and program MAR0/MAR4 plus RxConfig.
 */
4477 static void rtl_set_rx_mode(struct net_device *dev)
4479 struct rtl8169_private *tp = netdev_priv(dev);
4480 void __iomem *ioaddr = tp->mmio_addr;
4481 u32 mc_filter[2]; /* Multicast hash filter */
4485 if (dev->flags & IFF_PROMISC) {
4486 /* Unconditionally log net taps. */
4487 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4489 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4491 mc_filter[1] = mc_filter[0] = 0xffffffff;
4492 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4493 (dev->flags & IFF_ALLMULTI)) {
4494 /* Too many to filter perfectly -- accept all multicasts. */
4495 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4496 mc_filter[1] = mc_filter[0] = 0xffffffff;
4498 struct netdev_hw_addr *ha;
4500 rx_mode = AcceptBroadcast | AcceptMyPhys;
4501 mc_filter[1] = mc_filter[0] = 0;
4502 netdev_for_each_mc_addr(ha, dev) {
/* Top 6 CRC bits select one of 64 hash-filter positions */
4503 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4504 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4505 rx_mode |= AcceptMulticast;
4509 if (dev->features & NETIF_F_RXALL)
4510 rx_mode |= (AcceptErr | AcceptRunt);
4512 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
4514 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4515 u32 data = mc_filter[0];
/* PCIe chips expect the hash words byte-swapped and exchanged */
4517 mc_filter[0] = swab32(mc_filter[1]);
4518 mc_filter[1] = swab32(data);
4521 if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4522 mc_filter[1] = mc_filter[0] = 0xffffffff;
4524 RTL_W32(MAR0 + 4, mc_filter[1]);
4525 RTL_W32(MAR0 + 0, mc_filter[0]);
4527 RTL_W32(RxConfig, tmp);
/* Full hardware bring-up for the original 8169/8110 family: unlock config
 * registers, order Tx/Rx enable around ring setup per MAC version, program
 * C+ command quirks, interrupt mitigation, descriptor rings and the Rx
 * filter, then relock. */
4530 static void rtl_hw_start_8169(struct net_device *dev)
4532 struct rtl8169_private *tp = netdev_priv(dev);
4533 void __iomem *ioaddr = tp->mmio_addr;
4534 struct pci_dev *pdev = tp->pci_dev;
4536 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4537 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4538 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4541 RTL_W8(Cfg9346, Cfg9346_Unlock);
/* VER_01..04 must enable Tx/Rx *before* the rings are programmed ... */
4542 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4543 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4544 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4545 tp->mac_version == RTL_GIGA_MAC_VER_04)
4546 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4550 RTL_W8(EarlyTxThres, NoEarlyTx);
4552 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4554 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4555 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4556 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4557 tp->mac_version == RTL_GIGA_MAC_VER_04)
4558 rtl_set_rx_tx_config_registers(tp);
4560 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4562 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4563 tp->mac_version == RTL_GIGA_MAC_VER_03) {
4564 dprintk("Set MAC Reg C+CR Offset 0xE0. "
4565 "Bit-3 and bit-14 MUST be 1\n");
4566 tp->cp_cmd |= (1 << 14);
4569 RTL_W16(CPlusCmd, tp->cp_cmd);
4571 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4574 * Undocumented corner. Supposedly:
4575 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4577 RTL_W16(IntrMitigate, 0x0000);
4579 rtl_set_rx_tx_desc_registers(tp, ioaddr);
/* ... every other version enables Tx/Rx *after* the rings are set up. */
4581 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4582 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4583 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4584 tp->mac_version != RTL_GIGA_MAC_VER_04) {
4585 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4586 rtl_set_rx_tx_config_registers(tp);
4589 RTL_W8(Cfg9346, Cfg9346_Lock);
4591 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4594 RTL_W32(RxMissed, 0);
4596 rtl_set_rx_mode(dev);
4598 /* no early-rx interrupts */
4599 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* CSI write dispatcher: no-op on chips without a CSI write op. */
4602 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4604 if (tp->csi_ops.write)
4605 tp->csi_ops.write(tp, addr, value);
/* CSI read dispatcher: returns all-ones on chips without a CSI read op. */
4608 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4610 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/* Set the top byte of CSI config dword 0x070c, preserving the low 24 bits. */
4613 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4617 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4618 rtl_csi_write(tp, 0x070c, csi | bits);
/* CSI access enable, variant 1 (top byte 0x17). */
4621 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4623 rtl_csi_access_enable(tp, 0x17000000);
/* CSI access enable, variant 2 (top byte 0x27). */
4626 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4628 rtl_csi_access_enable(tp, 0x27000000);
/* Poll condition: CSIAR busy/done flag, used by the udelay wait loops. */
4631 DECLARE_RTL_COND(rtl_csiar_cond)
4633 void __iomem *ioaddr = tp->mmio_addr;
4635 return RTL_R32(CSIAR) & CSIAR_FLAG;
/* Default CSI write: latch data, issue the write command, poll until the
 * flag clears (up to 100 x 10 us). */
4638 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4640 void __iomem *ioaddr = tp->mmio_addr;
4642 RTL_W32(CSIDR, value);
4643 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4644 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4646 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* Default CSI read: issue the read command, poll for completion and return
 * CSIDR, or all-ones on timeout. */
4649 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4651 void __iomem *ioaddr = tp->mmio_addr;
4653 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4654 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4656 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4657 RTL_R32(CSIDR) : ~0;
/* 8402-family CSI write: same as the default but selects the NIC PCI
 * function (the FUNC_NIC bit sits on a line hidden by a chunk gap). */
4660 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4662 void __iomem *ioaddr = tp->mmio_addr;
4664 RTL_W32(CSIDR, value);
4665 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4666 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4669 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* 8402-family CSI read: same as the default but addressed to the NIC
 * PCI function (CSIAR_FUNC_NIC). */
4672 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4674 void __iomem *ioaddr = tp->mmio_addr;
4676 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4677 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4679 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4680 RTL_R32(CSIDR) : ~0;
/* Select the CSI accessors for this chip: early MACs get none (so
 * rtl_csi_read() yields ~0 and rtl_csi_write() is a no-op), the 8402
 * family uses the FUNC_NIC variants, everything else the defaults. */
4683 static void rtl_init_csi_ops(struct rtl8169_private *tp)
4685 struct csi_ops *ops = &tp->csi_ops;
4687 switch (tp->mac_version) {
4688 case RTL_GIGA_MAC_VER_01:
4689 case RTL_GIGA_MAC_VER_02:
4690 case RTL_GIGA_MAC_VER_03:
4691 case RTL_GIGA_MAC_VER_04:
4692 case RTL_GIGA_MAC_VER_05:
4693 case RTL_GIGA_MAC_VER_06:
4694 case RTL_GIGA_MAC_VER_10:
4695 case RTL_GIGA_MAC_VER_11:
4696 case RTL_GIGA_MAC_VER_12:
4697 case RTL_GIGA_MAC_VER_13:
4698 case RTL_GIGA_MAC_VER_14:
4699 case RTL_GIGA_MAC_VER_15:
4700 case RTL_GIGA_MAC_VER_16:
4701 case RTL_GIGA_MAC_VER_17:
4706 case RTL_GIGA_MAC_VER_37:
4707 case RTL_GIGA_MAC_VER_38:
4708 ops->write = r8402_csi_write;
4709 ops->read = r8402_csi_read;
4713 ops->write = r8169_csi_write;
4714 ops->read = r8169_csi_read;
4720 unsigned int offset;
/* Apply a table of EPHY fixups: for each entry, clear the mask bits and
 * set the requested bits in the entry's own EPHY register. */
4725 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4731 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4732 rtl_ephy_write(tp, e->offset, w);
/* Clear the PCIe Link Control CLKREQ-enable bit. */
4737 static void rtl_disable_clock_request(struct pci_dev *pdev)
4739 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
4740 PCI_EXP_LNKCTL_CLKREQ_EN);
/* Set the PCIe Link Control CLKREQ-enable bit. */
4743 static void rtl_enable_clock_request(struct pci_dev *pdev)
4745 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
4746 PCI_EXP_LNKCTL_CLKREQ_EN);
4749 #define R8168_CPCMD_QUIRK_MASK (\
/* 8168B init: disable beacon, clear the C+ command quirk bits, and set
 * max read request size plus the no-snoop enable on the PCIe link. */
4760 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4762 void __iomem *ioaddr = tp->mmio_addr;
4763 struct pci_dev *pdev = tp->pci_dev;
4765 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4767 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4769 rtl_tx_performance_tweak(pdev,
4770 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B/E/F variant: base 8168bb init, then cap Tx packet size and clear
 * Config4 bit 0. */
4773 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4775 void __iomem *ioaddr = tp->mmio_addr;
4777 rtl_hw_start_8168bb(tp);
4779 RTL_W8(MaxTxPacketSize, TxPacketMax);
4781 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* Common 8168C+ init shared by the cp_1/c_* variants: speed-down enable,
 * beacon off, PCIe read-request tweak, CLKREQ off, C+ quirk bits cleared. */
4784 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4786 void __iomem *ioaddr = tp->mmio_addr;
4787 struct pci_dev *pdev = tp->pci_dev;
4789 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4791 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4793 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4795 rtl_disable_clock_request(pdev);
4797 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev. 1: EPHY fixups then the common 8168cp init. */
4800 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4802 static const struct ephy_info e_info_8168cp[] = {
4803 { 0x01, 0, 0x0001 },
4804 { 0x02, 0x0800, 0x1000 },
4805 { 0x03, 0, 0x0042 },
4806 { 0x06, 0x0080, 0x0000 },
4810 rtl_csi_access_enable_2(tp);
4812 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4814 __rtl_hw_start_8168cp(tp);
/* 8168CP rev. 2: no EPHY fixups; beacon off, PCIe tweak, clear quirks. */
4817 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4819 void __iomem *ioaddr = tp->mmio_addr;
4820 struct pci_dev *pdev = tp->pci_dev;
4822 rtl_csi_access_enable_2(tp);
4824 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4826 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4828 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev. 3: like rev. 2 plus a DBG_REG magic write and Tx size cap. */
4831 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4833 void __iomem *ioaddr = tp->mmio_addr;
4834 struct pci_dev *pdev = tp->pci_dev;
4836 rtl_csi_access_enable_2(tp);
4838 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4841 RTL_W8(DBG_REG, 0x20);
4843 RTL_W8(MaxTxPacketSize, TxPacketMax);
4845 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4847 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168C rev. 1: NAK workaround bits in DBG_REG, EPHY fixups, common init. */
4850 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4852 void __iomem *ioaddr = tp->mmio_addr;
4853 static const struct ephy_info e_info_8168c_1[] = {
4854 { 0x02, 0x0800, 0x1000 },
4855 { 0x03, 0, 0x0002 },
4856 { 0x06, 0x0080, 0x0000 }
4859 rtl_csi_access_enable_2(tp);
4861 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4863 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4865 __rtl_hw_start_8168cp(tp);
/* 8168C rev. 2: EPHY fixups then the common 8168cp init. */
4868 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4870 static const struct ephy_info e_info_8168c_2[] = {
4871 { 0x01, 0, 0x0001 },
4872 { 0x03, 0x0400, 0x0220 }
4875 rtl_csi_access_enable_2(tp);
4877 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4879 __rtl_hw_start_8168cp(tp);
/* 8168C rev. 3: identical to rev. 2. */
4882 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4884 rtl_hw_start_8168c_2(tp);
/* 8168C rev. 4: common 8168cp init only, no EPHY fixups. */
4887 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4889 rtl_csi_access_enable_2(tp);
4891 __rtl_hw_start_8168cp(tp);
/* 8168D init: CSI enable, CLKREQ off, Tx size cap, PCIe tweak, clear
 * the C+ command quirk bits. */
4894 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4896 void __iomem *ioaddr = tp->mmio_addr;
4897 struct pci_dev *pdev = tp->pci_dev;
4899 rtl_csi_access_enable_2(tp);
4901 rtl_disable_clock_request(pdev);
4903 RTL_W8(MaxTxPacketSize, TxPacketMax);
4905 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4907 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168DP init: CSI enable (variant 1), PCIe tweak, Tx size cap,
 * CLKREQ off. */
4910 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4912 void __iomem *ioaddr = tp->mmio_addr;
4913 struct pci_dev *pdev = tp->pci_dev;
4915 rtl_csi_access_enable_1(tp);
4917 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4919 RTL_W8(MaxTxPacketSize, TxPacketMax);
4921 rtl_disable_clock_request(pdev);
/* 8168D rev. 4 init: CSI enable, PCIe tweak, Tx size cap, chip-specific
 * EPHY fixup table, then re-enable CLKREQ. */
4924 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4926 void __iomem *ioaddr = tp->mmio_addr;
4927 struct pci_dev *pdev = tp->pci_dev;
4928 static const struct ephy_info e_info_8168d_4[] = {
4930 { 0x19, 0x20, 0x50 },
4935 rtl_csi_access_enable_1(tp);
4937 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4939 RTL_W8(MaxTxPacketSize, TxPacketMax);
4941 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4942 const struct ephy_info *e = e_info_8168d_4 + i;
4945 w = rtl_ephy_read(tp, e->offset);
/*
 * Bug fix: write each table entry back to its *own* EPHY register.
 * The previous code wrote every entry to hardcoded offset 0x03,
 * clobbering reg 0x03 and never touching the intended registers.
 * The (w & mask) | bits combine is deliberate for this chip and
 * differs from rtl_ephy_init()'s (w & ~mask) | bits.
 */
4946 rtl_ephy_write(tp, e->offset, (w & e->mask) | e->bits);
4949 rtl_enable_clock_request(pdev);
/* 8168E rev. 1 init: large EPHY fixup table, PCIe tweak, Tx size cap,
 * CLKREQ off, Tx FIFO pointer reset via TXPLA_RST pulse, SPI disabled. */
4952 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4954 void __iomem *ioaddr = tp->mmio_addr;
4955 struct pci_dev *pdev = tp->pci_dev;
4956 static const struct ephy_info e_info_8168e_1[] = {
4957 { 0x00, 0x0200, 0x0100 },
4958 { 0x00, 0x0000, 0x0004 },
4959 { 0x06, 0x0002, 0x0001 },
4960 { 0x06, 0x0000, 0x0030 },
4961 { 0x07, 0x0000, 0x2000 },
4962 { 0x00, 0x0000, 0x0020 },
4963 { 0x03, 0x5800, 0x2000 },
4964 { 0x03, 0x0000, 0x0001 },
4965 { 0x01, 0x0800, 0x1000 },
4966 { 0x07, 0x0000, 0x4000 },
4967 { 0x1e, 0x0000, 0x2000 },
4968 { 0x19, 0xffff, 0xfe6c },
4969 { 0x0a, 0x0000, 0x0040 }
4972 rtl_csi_access_enable_2(tp);
4974 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4976 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4978 RTL_W8(MaxTxPacketSize, TxPacketMax);
4980 rtl_disable_clock_request(pdev);
4982 /* Reset tx FIFO pointer */
4983 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
4984 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
4986 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168E rev. 2 init: EPHY fixups, ERI (EXGMAC) configuration, early-size
 * Tx cap, auto-FIFO Tx config, leave OOB mode, EEE LED frequency tweak,
 * PFM/PWM enables, SPI disabled. */
4989 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4991 void __iomem *ioaddr = tp->mmio_addr;
4992 struct pci_dev *pdev = tp->pci_dev;
4993 static const struct ephy_info e_info_8168e_2[] = {
4994 { 0x09, 0x0000, 0x0080 },
4995 { 0x19, 0x0000, 0x0224 }
4998 rtl_csi_access_enable_1(tp);
5000 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5002 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5004 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5005 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5006 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5007 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5008 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5009 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5010 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5011 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5013 RTL_W8(MaxTxPacketSize, EarlySize);
5015 rtl_disable_clock_request(pdev);
5017 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5018 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5020 /* Adjust EEE LED frequency */
5021 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5023 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5024 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5025 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* Common 8168F init shared by 8168f_1 and 8411: ERI configuration,
 * early-size Tx cap, CLKREQ off, auto-FIFO, leave OOB, PFM/PWM, SPI off. */
5028 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5030 void __iomem *ioaddr = tp->mmio_addr;
5031 struct pci_dev *pdev = tp->pci_dev;
5033 rtl_csi_access_enable_2(tp);
5035 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5037 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5038 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5039 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5040 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
/* Toggle ERI 0xdc bit 0 off then on (latch sequence). */
5041 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5042 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5043 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5044 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5045 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5046 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5048 RTL_W8(MaxTxPacketSize, EarlySize);
5050 rtl_disable_clock_request(pdev);
5052 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5053 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5054 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5055 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5056 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168F rev. 1: common 8168F init, EPHY fixups, ERI 0x0d4 tweak and EEE
 * LED frequency adjustment. */
5059 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5061 void __iomem *ioaddr = tp->mmio_addr;
5062 static const struct ephy_info e_info_8168f_1[] = {
5063 { 0x06, 0x00c0, 0x0020 },
5064 { 0x08, 0x0001, 0x0002 },
5065 { 0x09, 0x0000, 0x0080 },
5066 { 0x19, 0x0000, 0x0224 }
5069 rtl_hw_start_8168f(tp);
5071 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5073 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5075 /* Adjust EEE LED frequency */
5076 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
/* RTL8411: common 8168F init with its own EPHY table and a different
 * ERI 0x0d4 mask (no bits cleared, unlike 8168f_1). */
5079 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5081 static const struct ephy_info e_info_8168f_1[] = {
5082 { 0x06, 0x00c0, 0x0020 },
5083 { 0x0f, 0xffff, 0x5200 },
5084 { 0x1e, 0x0000, 0x4000 },
5085 { 0x19, 0x0000, 0x0224 }
5088 rtl_hw_start_8168f(tp);
5090 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5092 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
/* 8168G rev. 1 init: ERI configuration, PCIe tweak, ERI 0xdc toggle,
 * re-enable Tx/Rx with RXDV gating off, early-size cap, EEE LED tweak. */
5095 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5097 void __iomem *ioaddr = tp->mmio_addr;
5098 struct pci_dev *pdev = tp->pci_dev;
5100 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5101 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5102 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5103 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5105 rtl_csi_access_enable_1(tp);
5107 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5109 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5110 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5112 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5113 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5114 RTL_W8(MaxTxPacketSize, EarlySize);
5116 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5117 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5119 /* Adjust EEE LED frequency */
5120 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5122 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
/* Hardware bring-up for the whole 8168 family: common register setup
 * (unlock, Tx/Rx size, C+ command, interrupt mitigation, rings, Rx mode,
 * Tx config), a per-MAC-version dispatch to the chip-specific init above,
 * then enable Tx/Rx and relock. */
5125 static void rtl_hw_start_8168(struct net_device *dev)
5127 struct rtl8169_private *tp = netdev_priv(dev);
5128 void __iomem *ioaddr = tp->mmio_addr;
5130 RTL_W8(Cfg9346, Cfg9346_Unlock);
5132 RTL_W8(MaxTxPacketSize, TxPacketMax);
5134 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5136 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
5138 RTL_W16(CPlusCmd, tp->cp_cmd);
5140 RTL_W16(IntrMitigate, 0x5151);
5142 /* Work around for RxFIFO overflow. */
5143 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
5144 tp->event_slow |= RxFIFOOver | PCSTimeout;
5145 tp->event_slow &= ~RxOverflow;
5148 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5150 rtl_set_rx_mode(dev);
5152 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5153 (InterFrameGap << TxInterFrameGapShift));
5157 switch (tp->mac_version) {
5158 case RTL_GIGA_MAC_VER_11:
5159 rtl_hw_start_8168bb(tp);
5162 case RTL_GIGA_MAC_VER_12:
5163 case RTL_GIGA_MAC_VER_17:
5164 rtl_hw_start_8168bef(tp);
5167 case RTL_GIGA_MAC_VER_18:
5168 rtl_hw_start_8168cp_1(tp);
5171 case RTL_GIGA_MAC_VER_19:
5172 rtl_hw_start_8168c_1(tp);
5175 case RTL_GIGA_MAC_VER_20:
5176 rtl_hw_start_8168c_2(tp);
5179 case RTL_GIGA_MAC_VER_21:
5180 rtl_hw_start_8168c_3(tp);
5183 case RTL_GIGA_MAC_VER_22:
5184 rtl_hw_start_8168c_4(tp);
5187 case RTL_GIGA_MAC_VER_23:
5188 rtl_hw_start_8168cp_2(tp);
5191 case RTL_GIGA_MAC_VER_24:
5192 rtl_hw_start_8168cp_3(tp);
5195 case RTL_GIGA_MAC_VER_25:
5196 case RTL_GIGA_MAC_VER_26:
5197 case RTL_GIGA_MAC_VER_27:
5198 rtl_hw_start_8168d(tp);
5201 case RTL_GIGA_MAC_VER_28:
5202 rtl_hw_start_8168d_4(tp);
5205 case RTL_GIGA_MAC_VER_31:
5206 rtl_hw_start_8168dp(tp);
5209 case RTL_GIGA_MAC_VER_32:
5210 case RTL_GIGA_MAC_VER_33:
5211 rtl_hw_start_8168e_1(tp);
5213 case RTL_GIGA_MAC_VER_34:
5214 rtl_hw_start_8168e_2(tp);
5217 case RTL_GIGA_MAC_VER_35:
5218 case RTL_GIGA_MAC_VER_36:
5219 rtl_hw_start_8168f_1(tp);
5222 case RTL_GIGA_MAC_VER_38:
5223 rtl_hw_start_8411(tp);
5226 case RTL_GIGA_MAC_VER_40:
5227 case RTL_GIGA_MAC_VER_41:
5228 rtl_hw_start_8168g_1(tp);
/* Default case: an unexpected mac_version is a driver bug — log it. */
5232 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5233 dev->name, tp->mac_version);
5237 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5239 RTL_W8(Cfg9346, Cfg9346_Lock);
5241 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5244 #define R810X_CPCMD_QUIRK_MASK (\
/* 8102E rev. 1 init: NAK workaround, PCIe tweak, Config1/3 programming,
 * disable LEDS0 when both LED bits are set, then the EPHY fixup table. */
5255 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5257 void __iomem *ioaddr = tp->mmio_addr;
5258 struct pci_dev *pdev = tp->pci_dev;
5259 static const struct ephy_info e_info_8102e_1[] = {
5260 { 0x01, 0, 0x6e65 },
5261 { 0x02, 0, 0x091f },
5262 { 0x03, 0, 0xc2f9 },
5263 { 0x06, 0, 0xafb5 },
5264 { 0x07, 0, 0x0e00 },
5265 { 0x19, 0, 0xec80 },
5266 { 0x01, 0, 0x2e65 },
5271 rtl_csi_access_enable_2(tp);
5273 RTL_W8(DBG_REG, FIX_NAK_1);
5275 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5278 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5279 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5281 cfg1 = RTL_R8(Config1);
5282 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5283 RTL_W8(Config1, cfg1 & ~LEDS0);
5285 rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
/* 8102E rev. 2 init: PCIe tweak plus Config1/Config3 programming. */
5288 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5290 void __iomem *ioaddr = tp->mmio_addr;
5291 struct pci_dev *pdev = tp->pci_dev;
5293 rtl_csi_access_enable_2(tp);
5295 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5297 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5298 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* 8102E rev. 3: rev. 2 init plus one extra EPHY write. */
5301 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5303 rtl_hw_start_8102e_2(tp);
5305 rtl_ephy_write(tp, 0x03, 0xc2f9);
/* 8105E rev. 1 init: ASPM exit forcing, early tally counter off, MCU
 * OOB/NDP setup, PFM enable, then the EPHY fixup table. */
5308 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5310 void __iomem *ioaddr = tp->mmio_addr;
5311 static const struct ephy_info e_info_8105e_1[] = {
5312 { 0x07, 0, 0x4000 },
5313 { 0x19, 0, 0x0200 },
5314 { 0x19, 0, 0x0020 },
5315 { 0x1e, 0, 0x2000 },
5316 { 0x03, 0, 0x0001 },
5317 { 0x19, 0, 0x0100 },
5318 { 0x19, 0, 0x0004 },
5322 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5323 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5325 /* Disable Early Tally Counter */
5326 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5328 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5329 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5331 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
/* 8105E rev. 2: rev. 1 init plus setting bit 15 of EPHY reg 0x1e. */
5334 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5336 rtl_hw_start_8105e_1(tp);
5337 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
/* RTL8402 init: ASPM exit forcing, auto-FIFO, leave OOB, EPHY fixups,
 * PCIe tweak and ERI (EXGMAC) configuration. */
5340 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5342 void __iomem *ioaddr = tp->mmio_addr;
5343 static const struct ephy_info e_info_8402[] = {
5344 { 0x19, 0xffff, 0xff64 },
5348 rtl_csi_access_enable_2(tp);
5350 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5351 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5353 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5354 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5356 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5358 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5360 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5361 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
/* Toggle ERI 0xdc bit 0 off then on (latch sequence). */
5362 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5363 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5364 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5365 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5366 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
/* RTL8106 init: ASPM exit forcing, LAN disable + early tally off in MISC,
 * MCU OOB/NDP setup, PFM disabled. */
5369 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5371 void __iomem *ioaddr = tp->mmio_addr;
5373 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5374 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5376 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5377 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5378 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
/* Hardware bring-up for the 8101/810x family: event mask fixups, PCIe
 * no-snoop for VER_13/16, per-MAC-version dispatch, then the common
 * register setup (size caps, C+ command, rings, Tx/Rx enable, Rx mode). */
5381 static void rtl_hw_start_8101(struct net_device *dev)
5383 struct rtl8169_private *tp = netdev_priv(dev);
5384 void __iomem *ioaddr = tp->mmio_addr;
5385 struct pci_dev *pdev = tp->pci_dev;
5387 if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
5388 tp->event_slow &= ~RxFIFOOver;
5390 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5391 tp->mac_version == RTL_GIGA_MAC_VER_16)
5392 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
5393 PCI_EXP_DEVCTL_NOSNOOP_EN)
5395 RTL_W8(Cfg9346, Cfg9346_Unlock);
5397 switch (tp->mac_version) {
5398 case RTL_GIGA_MAC_VER_07:
5399 rtl_hw_start_8102e_1(tp);
5402 case RTL_GIGA_MAC_VER_08:
5403 rtl_hw_start_8102e_3(tp);
5406 case RTL_GIGA_MAC_VER_09:
5407 rtl_hw_start_8102e_2(tp);
5410 case RTL_GIGA_MAC_VER_29:
5411 rtl_hw_start_8105e_1(tp);
5413 case RTL_GIGA_MAC_VER_30:
5414 rtl_hw_start_8105e_2(tp);
5417 case RTL_GIGA_MAC_VER_37:
5418 rtl_hw_start_8402(tp);
5421 case RTL_GIGA_MAC_VER_39:
5422 rtl_hw_start_8106(tp);
5426 RTL_W8(Cfg9346, Cfg9346_Lock);
5428 RTL_W8(MaxTxPacketSize, TxPacketMax);
5430 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5432 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5433 RTL_W16(CPlusCmd, tp->cp_cmd);
5435 RTL_W16(IntrMitigate, 0x0000);
5437 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5439 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5440 rtl_set_rx_tx_config_registers(tp);
5444 rtl_set_rx_mode(dev);
5446 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
/* ndo_change_mtu: validate against the chip's jumbo limit, toggle jumbo
 * mode, and refresh offload features.  The error/success returns and the
 * dev->mtu assignment lie in chunk gaps — confirm against the full file. */
5449 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5451 struct rtl8169_private *tp = netdev_priv(dev);
5453 if (new_mtu < ETH_ZLEN ||
5454 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5457 if (new_mtu > ETH_DATA_LEN)
5458 rtl_hw_jumbo_enable(tp);
5460 rtl_hw_jumbo_disable(tp);
5463 netdev_update_features(dev);
/* Poison an Rx descriptor so the NIC will never DMA into it: recognizable
 * bogus address, ownership and size bits cleared. */
5468 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5470 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5471 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
/* Unmap an Rx buffer from the device and poison its descriptor (the
 * kfree/NULLing of *data_buff lies in a chunk gap). */
5474 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5475 void **data_buff, struct RxDesc *desc)
5477 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
5482 rtl8169_make_unusable_by_asic(desc);
/* Hand an Rx descriptor back to the NIC: set DescOwn and the buffer size
 * while preserving the RingEnd bit. */
5485 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5487 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5489 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
/* Store a DMA mapping in an Rx descriptor and give it to the NIC. */
5492 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5495 desc->addr = cpu_to_le64(mapping);
5497 rtl8169_mark_to_asic(desc, rx_buf_sz);
/*
 * Round a buffer pointer up to the next 16-byte boundary.  The NIC's
 * receive DMA requires 16-byte aligned buffers.
 */
static inline void *rtl8169_align(void *data)
{
	long addr = (long)data;

	return (void *)((addr + 15) & ~15L);
}
/* Allocate one Rx buffer on the device's NUMA node, ensure 16-byte
 * alignment (reallocating with 15 bytes of slack if kmalloc's result is
 * unaligned), DMA-map it and publish it in the descriptor.  Error unwind
 * labels/returns lie in chunk gaps. */
5505 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5506 struct RxDesc *desc)
5510 struct device *d = &tp->pci_dev->dev;
5511 struct net_device *dev = tp->dev;
5512 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
5514 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
5518 if (rtl8169_align(data) != data) {
5520 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5525 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
5527 if (unlikely(dma_mapping_error(d, mapping))) {
5528 if (net_ratelimit())
5529 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5533 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
/* Release every allocated Rx buffer and poison its descriptor. */
5541 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5545 for (i = 0; i < NUM_RX_DESC; i++) {
5546 if (tp->Rx_databuff[i]) {
5547 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5548 tp->RxDescArray + i);
/* Set the RingEnd bit so the NIC wraps back to the first descriptor. */
5553 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
5555 desc->opts1 |= cpu_to_le32(RingEnd);
/* Populate every empty Rx slot with a fresh buffer; on allocation failure
 * the error path (in a chunk gap) tears everything back down via
 * rtl8169_rx_clear(). */
5558 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5562 for (i = 0; i < NUM_RX_DESC; i++) {
5565 if (tp->Rx_databuff[i])
5568 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5570 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5573 tp->Rx_databuff[i] = data;
5576 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5580 rtl8169_rx_clear(tp);
/* Reset ring indexes, zero the Tx bookkeeping and Rx buffer arrays, then
 * fill the Rx ring. */
5584 static int rtl8169_init_ring(struct net_device *dev)
5586 struct rtl8169_private *tp = netdev_priv(dev);
5588 rtl8169_init_ring_indexes(tp);
5590 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5591 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5593 return rtl8169_rx_fill(tp);
/* DMA-unmap one Tx fragment; descriptor/bookkeeping scrubbing lines lie
 * in a chunk gap. */
5596 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5597 struct TxDesc *desc)
5599 unsigned int len = tx_skb->len;
5601 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
/* Unmap and drop n Tx entries starting at ring index start (modulo the
 * ring size), counting completed skbs as tx_dropped. */
5609 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5614 for (i = 0; i < n; i++) {
5615 unsigned int entry = (start + i) % NUM_TX_DESC;
5616 struct ring_info *tx_skb = tp->tx_skb + entry;
5617 unsigned int len = tx_skb->len;
5620 struct sk_buff *skb = tx_skb->skb;
5622 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5623 tp->TxDescArray + entry);
5625 tp->dev->stats.tx_dropped++;
/* Drop the whole Tx ring and reset the producer/consumer indexes. */
5633 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5635 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5636 tp->cur_tx = tp->dirty_tx = 0;
/* Full datapath reset: quiesce NAPI and the Tx queue, reset the chip,
 * hand all Rx descriptors back to the NIC, drop pending Tx, restart. */
5639 static void rtl_reset_work(struct rtl8169_private *tp)
5641 struct net_device *dev = tp->dev;
5644 napi_disable(&tp->napi);
5645 netif_stop_queue(dev);
/* Wait for in-flight softirq handlers before touching the rings. */
5646 synchronize_sched();
5648 rtl8169_hw_reset(tp);
5650 for (i = 0; i < NUM_RX_DESC; i++)
5651 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5653 rtl8169_tx_clear(tp);
5654 rtl8169_init_ring_indexes(tp);
5656 napi_enable(&tp->napi);
5658 netif_wake_queue(dev);
5659 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
/* ndo_tx_timeout: defer a full reset to the driver workqueue. */
5662 static void rtl8169_tx_timeout(struct net_device *dev)
5664 struct rtl8169_private *tp = netdev_priv(dev);
5666 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* Map each paged fragment of skb into consecutive Tx descriptors.  The
 * skb pointer is recorded only on the last fragment, which also gets the
 * LastFrag bit.  On mapping failure, already-filled entries are unwound
 * with rtl8169_tx_clear_range(). */
5669 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5672 struct skb_shared_info *info = skb_shinfo(skb);
5673 unsigned int cur_frag, entry;
5674 struct TxDesc * uninitialized_var(txd);
5675 struct device *d = &tp->pci_dev->dev;
5678 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
5679 const skb_frag_t *frag = info->frags + cur_frag;
5684 entry = (entry + 1) % NUM_TX_DESC;
5686 txd = tp->TxDescArray + entry;
5687 len = skb_frag_size(frag);
5688 addr = skb_frag_address(frag);
5689 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
5690 if (unlikely(dma_mapping_error(d, mapping))) {
5691 if (net_ratelimit())
5692 netif_err(tp, drv, tp->dev,
5693 "Failed to map TX fragments DMA!\n");
/* RingEnd is set only on the physically last descriptor of the ring. */
5697 /* Anti gcc 2.95.3 bugware (sic) */
5698 status = opts[0] | len |
5699 (RingEnd * !((entry + 1) % NUM_TX_DESC));
5701 txd->opts1 = cpu_to_le32(status);
5702 txd->opts2 = cpu_to_le32(opts[1]);
5703 txd->addr = cpu_to_le64(mapping);
5705 tp->tx_skb[entry].len = len;
5709 tp->tx_skb[entry].skb = skb;
5710 txd->opts1 |= cpu_to_le32(LastFrag);
5716 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
/* Fill the TSO/checksum-offload bits in the Tx option words, using the
 * descriptor layout (opts index and bit shifts) for this chip's Tx
 * descriptor version. */
5720 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5721 struct sk_buff *skb, u32 *opts)
5723 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5724 u32 mss = skb_shinfo(skb)->gso_size;
5725 int offset = info->opts_offset;
5729 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5730 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* Checksum offload path: pick the TCP or UDP checksum flag by the
 * IPv4 protocol field. */
5731 const struct iphdr *ip = ip_hdr(skb);
5733 if (ip->protocol == IPPROTO_TCP)
5734 opts[offset] |= info->checksum.tcp;
5735 else if (ip->protocol == IPPROTO_UDP)
5736 opts[offset] |= info->checksum.udp;
/* ndo_start_xmit: map the linear head, offload flags, map fragments,
 * publish the first descriptor last (DescOwn ordering), kick the NIC and
 * throttle the queue when the ring can no longer take a max-frag skb.
 * Error paths unmap/drop and return NETDEV_TX_OK or, for the ring-full
 * BUG case, NETDEV_TX_BUSY. */
5742 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5743 struct net_device *dev)
5745 struct rtl8169_private *tp = netdev_priv(dev);
5746 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5747 struct TxDesc *txd = tp->TxDescArray + entry;
5748 void __iomem *ioaddr = tp->mmio_addr;
5749 struct device *d = &tp->pci_dev->dev;
/* Queue was awake with a full ring: driver bug, log and refuse. */
5755 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5756 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5760 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5763 len = skb_headlen(skb);
5764 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5765 if (unlikely(dma_mapping_error(d, mapping))) {
5766 if (net_ratelimit())
5767 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5771 tp->tx_skb[entry].len = len;
5772 txd->addr = cpu_to_le64(mapping);
5774 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5777 rtl8169_tso_csum(tp, skb, opts);
5779 frags = rtl8169_xmit_frags(tp, skb, opts);
5783 opts[0] |= FirstFrag;
5785 opts[0] |= FirstFrag | LastFrag;
5786 tp->tx_skb[entry].skb = skb;
5789 txd->opts2 = cpu_to_le32(opts[1]);
5791 skb_tx_timestamp(skb);
/* opts1 (with DescOwn) is written after addr/opts2 so the NIC never
 * sees a half-initialized descriptor. */
5795 /* Anti gcc 2.95.3 bugware (sic) */
5796 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
5797 txd->opts1 = cpu_to_le32(status);
5799 tp->cur_tx += frags + 1;
5803 RTL_W8(TxPoll, NPQ);
5807 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5808 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5809 * not miss a ring update when it notices a stopped queue.
5812 netif_stop_queue(dev);
5813 /* Sync with rtl_tx:
5814 * - publish queue status and cur_tx ring index (write barrier)
5815 * - refresh dirty_tx ring index (read barrier).
5816 * May the current thread have a pessimistic view of the ring
5817 * status and forget to wake up queue, a racing rtl_tx thread
/* Re-check after the barrier: rtl_tx may have freed space meanwhile. */
5821 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5822 netif_wake_queue(dev);
5825 return NETDEV_TX_OK;
5828 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5831 dev->stats.tx_dropped++;
5832 return NETDEV_TX_OK;
5835 netif_stop_queue(dev);
5836 dev->stats.tx_dropped++;
5837 return NETDEV_TX_BUSY;
/* Handle a PCI system error (SYSErr): log PCI command/status, attempt an
 * empirical recovery of the PCI configuration, optionally disable PCI DAC
 * (64-bit addressing) if the error hit at boot, then schedule a full
 * chip reset from process context.
 */
5840 static void rtl8169_pcierr_interrupt(struct net_device *dev)
5842 struct rtl8169_private *tp = netdev_priv(dev);
5843 struct pci_dev *pdev = tp->pci_dev;
5844 u16 pci_status, pci_cmd;
5846 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5847 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5849 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5850 pci_cmd, pci_status);
5853 * The recovery sequence below admits a very elaborated explanation:
5854 * - it seems to work;
5855 * - I did not see what else could be done;
5856 * - it makes iop3xx happy.
5858 * Feel free to adjust to your needs.
/* On platforms with broken parity reporting, keep parity disabled. */
5860 if (pdev->broken_parity_status)
5861 pci_cmd &= ~PCI_COMMAND_PARITY;
5863 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5865 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
/* Write-1-to-clear the latched PCI error status bits. */
5867 pci_write_config_word(pdev, PCI_STATUS,
5868 pci_status & (PCI_STATUS_DETECTED_PARITY |
5869 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5870 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5872 /* The infamous DAC f*ckup only happens at boot time */
/* No Rx traffic seen yet => assume 64-bit DMA is the culprit and
 * fall back to 32-bit addressing. */
5873 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
5874 void __iomem *ioaddr = tp->mmio_addr;
5876 netif_info(tp, intr, dev, "disabling PCI DAC\n");
5877 tp->cp_cmd &= ~PCIDAC;
5878 RTL_W16(CPlusCmd, tp->cp_cmd);
5879 dev->features &= ~NETIF_F_HIGHDMA;
5882 rtl8169_hw_reset(tp);
/* Defer the ring/driver reset to the workqueue. */
5884 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* Reclaim completed Tx descriptors: walk from dirty_tx to cur_tx, unmap
 * DMA buffers, account stats and free skbs for descriptors the NIC has
 * released (DescOwn cleared). Wakes the queue if it was stopped and room
 * is available again. Runs from NAPI poll; races with start_xmit are
 * resolved with the barriers documented inline.
 */
5887 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5889 unsigned int dirty_tx, tx_left;
5891 dirty_tx = tp->dirty_tx;
5893 tx_left = tp->cur_tx - dirty_tx;
5895 while (tx_left > 0) {
5896 unsigned int entry = dirty_tx % NUM_TX_DESC;
5897 struct ring_info *tx_skb = tp->tx_skb + entry;
5901 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
/* Still owned by the NIC — stop reclaiming here. */
5902 if (status & DescOwn)
5905 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5906 tp->TxDescArray + entry);
/* Only the last fragment carries the skb; account and free it. */
5907 if (status & LastFrag) {
5908 u64_stats_update_begin(&tp->tx_stats.syncp);
5909 tp->tx_stats.packets++;
5910 tp->tx_stats.bytes += tx_skb->skb->len;
5911 u64_stats_update_end(&tp->tx_stats.syncp);
5912 dev_kfree_skb(tx_skb->skb);
5919 if (tp->dirty_tx != dirty_tx) {
5920 tp->dirty_tx = dirty_tx;
5921 /* Sync with rtl8169_start_xmit:
5922 * - publish dirty_tx ring index (write barrier)
5923 * - refresh cur_tx ring index and queue status (read barrier)
5924 * May the current thread miss the stopped queue condition,
5925 * a racing xmit thread can only have a right view of the
5929 if (netif_queue_stopped(dev) &&
5930 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5931 netif_wake_queue(dev);
5934 * 8168 hack: TxPoll requests are lost when the Tx packets are
5935 * too close. Let's kick an extra TxPoll request when a burst
5936 * of start_xmit activity is detected (if it is not detected,
5937 * it is slow enough). -- FR
5939 if (tp->cur_tx != dirty_tx) {
5940 void __iomem *ioaddr = tp->mmio_addr;
5942 RTL_W8(TxPoll, NPQ);
5947 static inline int rtl8169_fragmented_frame(u32 status)
5949 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
/* Propagate the NIC's Rx checksum verdict to the skb: mark the checksum
 * as already verified for TCP/UDP frames the hardware validated,
 * otherwise leave it for the stack to check.
 */
5952 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5954 u32 status = opts1 & RxProtoMask;
5956 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5957 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5958 skb->ip_summed = CHECKSUM_UNNECESSARY;
/* Fallback: no hardware verdict usable — software must checksum. */
5960 skb_checksum_none_assert(skb);
/* Copy a received frame out of the DMA buffer into a freshly allocated
 * skb so the original Rx buffer can stay mapped and be reused in place.
 * Returns the new skb, or NULL on allocation failure (the failure branch
 * is elided in this extract — confirm against the full source).
 */
5963 static struct sk_buff *rtl8169_try_rx_copy(void *data,
5964 struct rtl8169_private *tp,
5968 struct sk_buff *skb;
5969 struct device *d = &tp->pci_dev->dev;
5971 data = rtl8169_align(data);
/* Hand the buffer to the CPU for the copy, then back to the device. */
5972 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
5974 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
5976 memcpy(skb->data, data, pkt_size);
5977 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
/* NAPI Rx path: walk the Rx descriptor ring from cur_rx, up to @budget
 * packets. Error frames update stats (and may trigger a reset on FIFO
 * overflow); good frames are copied into new skbs, checksum/VLAN tagged
 * and fed to GRO. Each descriptor is handed back to the NIC afterwards.
 * Returns the number of packets processed.
 */
5982 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
5984 unsigned int cur_rx, rx_left;
5987 cur_rx = tp->cur_rx;
/* Number of descriptors available to process, capped by budget. */
5988 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
5989 rx_left = min(rx_left, budget);
5991 for (; rx_left > 0; rx_left--, cur_rx++) {
5992 unsigned int entry = cur_rx % NUM_RX_DESC;
5993 struct RxDesc *desc = tp->RxDescArray + entry;
5997 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
/* NIC still owns this descriptor — nothing more to receive. */
5999 if (status & DescOwn)
6001 if (unlikely(status & RxRES)) {
6002 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
6004 dev->stats.rx_errors++;
6005 if (status & (RxRWT | RxRUNT))
6006 dev->stats.rx_length_errors++;
6008 dev->stats.rx_crc_errors++;
/* FIFO overflow: stats alone are not enough, reset the chip. */
6009 if (status & RxFOVF) {
6010 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6011 dev->stats.rx_fifo_errors++;
/* RXALL: deliver runt/CRC-error frames anyway when requested. */
6013 if ((status & (RxRUNT | RxCRC)) &&
6014 !(status & (RxRWT | RxFOVF)) &&
6015 (dev->features & NETIF_F_RXALL))
6018 struct sk_buff *skb;
6023 addr = le64_to_cpu(desc->addr);
/* Strip the 4-byte FCS unless the user asked to keep it. */
6024 if (likely(!(dev->features & NETIF_F_RXFCS)))
6025 pkt_size = (status & 0x00003fff) - 4;
6027 pkt_size = status & 0x00003fff;
6030 * The driver does not support incoming fragmented
6031 * frames. They are seen as a symptom of over-mtu
6034 if (unlikely(rtl8169_fragmented_frame(status))) {
6035 dev->stats.rx_dropped++;
6036 dev->stats.rx_length_errors++;
6037 goto release_descriptor;
6040 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6041 tp, pkt_size, addr);
6043 dev->stats.rx_dropped++;
6044 goto release_descriptor;
6047 rtl8169_rx_csum(skb, status);
6048 skb_put(skb, pkt_size);
6049 skb->protocol = eth_type_trans(skb, dev);
6051 rtl8169_rx_vlan_tag(desc, skb);
6053 napi_gro_receive(&tp->napi, skb);
6055 u64_stats_update_begin(&tp->rx_stats.syncp);
6056 tp->rx_stats.packets++;
6057 tp->rx_stats.bytes += pkt_size;
6058 u64_stats_update_end(&tp->rx_stats.syncp);
/* Return the descriptor to the NIC with a fresh buffer size. */
6063 rtl8169_mark_to_asic(desc, rx_buf_sz);
6066 count = cur_rx - tp->cur_rx;
6067 tp->cur_rx = cur_rx;
6069 tp->dirty_rx += count;
/* Hard IRQ handler: read the event status, filter against the events we
 * care about, then mask the chip's interrupts and defer all work to NAPI.
 * 0xffff means the device is gone (e.g. surprise removal) and is ignored.
 */
6074 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6076 struct net_device *dev = dev_instance;
6077 struct rtl8169_private *tp = netdev_priv(dev);
6081 status = rtl_get_events(tp);
6082 if (status && status != 0xffff) {
6083 status &= RTL_EVENT_NAPI | tp->event_slow;
/* Interrupts stay disabled until NAPI poll completes. */
6087 rtl_irq_disable(tp);
6088 napi_schedule(&tp->napi);
6091 return IRQ_RETVAL(handled);
6095 * Workqueue context.
/* Handle the slow (rare) interrupt events from process context: Rx FIFO
 * overflow quirks, PCI system errors and link changes. Re-enables chip
 * interrupts when done.
 */
6097 static void rtl_slow_event_work(struct rtl8169_private *tp)
6099 struct net_device *dev = tp->dev;
6102 status = rtl_get_events(tp) & tp->event_slow;
6103 rtl_ack_events(tp, status);
6105 if (unlikely(status & RxFIFOOver)) {
6106 switch (tp->mac_version) {
6107 /* Work around for rx fifo overflow */
6108 case RTL_GIGA_MAC_VER_11:
6109 netif_stop_queue(dev);
6110 /* XXX - Hack alert. See rtl_task(). */
6111 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6117 if (unlikely(status & SYSErr))
6118 rtl8169_pcierr_interrupt(dev);
6120 if (status & LinkChg)
6121 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
/* Slow events handled — unmask the full interrupt set again. */
6123 rtl_irq_enable_all(tp);
/* Workqueue entry point: dispatch each pending task flag (slow events,
 * reset, PHY) to its handler, in table order, under the work mutex.
 * Bails out if the interface is down or tasks are disabled.
 */
6126 static void rtl_task(struct work_struct *work)
6128 static const struct {
6130 void (*action)(struct rtl8169_private *);
6132 /* XXX - keep rtl_slow_event_work() as first element. */
6133 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6134 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6135 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6137 struct rtl8169_private *tp =
6138 container_of(work, struct rtl8169_private, wk.work);
6139 struct net_device *dev = tp->dev;
6144 if (!netif_running(dev) ||
6145 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
/* Atomically consume each pending flag before acting on it. */
6148 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
6151 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6153 rtl_work[i].action(tp);
6157 rtl_unlock_work(tp);
/* NAPI poll: ack and process Rx/Tx events within @budget; slow events
 * are left unacked and deferred to the workqueue (their interrupt bits
 * stay masked until the work runs). Completes NAPI and re-enables the
 * remaining interrupts when under budget.
 */
6160 static int rtl8169_poll(struct napi_struct *napi, int budget)
6162 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6163 struct net_device *dev = tp->dev;
6164 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6168 status = rtl_get_events(tp);
/* Ack only the fast events; slow ones are handled by rtl_task(). */
6169 rtl_ack_events(tp, status & ~tp->event_slow);
6171 if (status & RTL_EVENT_NAPI_RX)
6172 work_done = rtl_rx(dev, tp, (u32) budget);
6174 if (status & RTL_EVENT_NAPI_TX)
6177 if (status & tp->event_slow) {
/* Keep slow-event interrupts masked until the work has run. */
6178 enable_mask &= ~tp->event_slow;
6180 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6183 if (work_done < budget) {
6184 napi_complete(napi);
6186 rtl_irq_enable(tp, enable_mask);
/* Fold the hardware RxMissed counter (24-bit, clear-on-write) into the
 * software statistics. Only the original 8169 chips (<= VER_06) expose
 * this register.
 */
6193 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6195 struct rtl8169_private *tp = netdev_priv(dev);
6197 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6200 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6201 RTL_W32(RxMissed, 0);
/* Bring the interface down: stop the timer, NAPI and the Tx queue, reset
 * the hardware, drain the rings and power the PLL down. Ordering matters:
 * interrupts must be unable to fire before the rings are torn down.
 */
6204 static void rtl8169_down(struct net_device *dev)
6206 struct rtl8169_private *tp = netdev_priv(dev);
6207 void __iomem *ioaddr = tp->mmio_addr;
6209 del_timer_sync(&tp->timer);
6211 napi_disable(&tp->napi);
6212 netif_stop_queue(dev);
6214 rtl8169_hw_reset(tp);
6216 * At this point device interrupts can not be enabled in any function,
6217 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6218 * and napi is disabled (rtl8169_poll).
/* Harvest the last missed-packet count before the registers go away. */
6220 rtl8169_rx_missed(dev, ioaddr);
6222 /* Give a racing hard_start_xmit a few cycles to complete. */
6223 synchronize_sched();
6225 rtl8169_tx_clear(tp);
6227 rtl8169_rx_clear(tp);
6229 rtl_pll_power_down(tp);
/* .ndo_stop handler: snapshot hardware counters, disable the task
 * machinery, free the IRQ and release the DMA descriptor rings.
 * Runtime-PM reference is held across the teardown.
 */
6232 static int rtl8169_close(struct net_device *dev)
6234 struct rtl8169_private *tp = netdev_priv(dev);
6235 struct pci_dev *pdev = tp->pci_dev;
6237 pm_runtime_get_sync(&pdev->dev);
6239 /* Update counters before going down */
6240 rtl8169_update_counters(dev);
6243 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6246 rtl_unlock_work(tp);
6248 free_irq(pdev->irq, dev);
/* Release both coherent descriptor rings allocated in rtl_open(). */
6250 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6252 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6254 tp->TxDescArray = NULL;
6255 tp->RxDescArray = NULL;
6257 pm_runtime_put_sync(&pdev->dev);
6262 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook (netconsole/kgdboe): invoke the interrupt handler
 * directly when normal interrupt delivery cannot be relied upon.
 */
6263 static void rtl8169_netpoll(struct net_device *dev)
6265 struct rtl8169_private *tp = netdev_priv(dev);
6267 rtl8169_interrupt(tp->pci_dev->irq, dev);
/* .ndo_open handler: allocate the Tx/Rx descriptor rings, initialise the
 * rings and work item, load firmware, request the IRQ, bring up PHY/PLL
 * and start the queue. Errors unwind in reverse order (the goto labels
 * are elided in this extract).
 */
6271 static int rtl_open(struct net_device *dev)
6273 struct rtl8169_private *tp = netdev_priv(dev);
6274 void __iomem *ioaddr = tp->mmio_addr;
6275 struct pci_dev *pdev = tp->pci_dev;
6276 int retval = -ENOMEM;
6278 pm_runtime_get_sync(&pdev->dev);
6281 * Rx and Tx descriptors needs 256 bytes alignment.
6282 * dma_alloc_coherent provides more.
6284 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
6285 &tp->TxPhyAddr, GFP_KERNEL);
6286 if (!tp->TxDescArray)
6287 goto err_pm_runtime_put;
6289 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
6290 &tp->RxPhyAddr, GFP_KERNEL);
6291 if (!tp->RxDescArray)
6294 retval = rtl8169_init_ring(dev);
6298 INIT_WORK(&tp->wk.work, rtl_task);
/* Optional chip firmware; a missing file is not fatal. */
6302 rtl_request_firmware(tp);
6304 retval = request_irq(pdev->irq, rtl8169_interrupt,
6305 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
6308 goto err_release_fw_2;
6312 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6314 napi_enable(&tp->napi);
6316 rtl8169_init_phy(dev, tp);
6318 __rtl8169_set_features(dev, dev->features);
6320 rtl_pll_power_up(tp);
6324 netif_start_queue(dev);
6326 rtl_unlock_work(tp);
6328 tp->saved_wolopts = 0;
6329 pm_runtime_put_noidle(&pdev->dev);
6331 rtl8169_check_link_status(dev, tp, ioaddr);
/* --- error unwind path --- */
6336 rtl_release_firmware(tp);
6337 rtl8169_rx_clear(tp);
6339 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6341 tp->RxDescArray = NULL;
6343 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6345 tp->TxDescArray = NULL;
6347 pm_runtime_put_noidle(&pdev->dev);
/* .ndo_get_stats64 handler: fill @stats from the per-device 64-bit
 * packet/byte counters (read under their seqcount to get a consistent
 * snapshot) plus the error counters kept in dev->stats.
 */
6351 static struct rtnl_link_stats64 *
6352 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6354 struct rtl8169_private *tp = netdev_priv(dev);
6355 void __iomem *ioaddr = tp->mmio_addr;
/* Pick up any pending hardware missed-frame count first. */
6358 if (netif_running(dev))
6359 rtl8169_rx_missed(dev, ioaddr);
/* Retry loop: re-read if a writer updated the stats meanwhile. */
6362 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6363 stats->rx_packets = tp->rx_stats.packets;
6364 stats->rx_bytes = tp->rx_stats.bytes;
6365 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6369 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6370 stats->tx_packets = tp->tx_stats.packets;
6371 stats->tx_bytes = tp->tx_stats.bytes;
6372 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6374 stats->rx_dropped = dev->stats.rx_dropped;
6375 stats->tx_dropped = dev->stats.tx_dropped;
6376 stats->rx_length_errors = dev->stats.rx_length_errors;
6377 stats->rx_errors = dev->stats.rx_errors;
6378 stats->rx_crc_errors = dev->stats.rx_crc_errors;
6379 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6380 stats->rx_missed_errors = dev->stats.rx_missed_errors;
/* Common suspend path (system sleep, runtime suspend and shutdown):
 * detach the netdev, quiesce NAPI and the task machinery, power the PLL
 * down. No-op when the interface is not up.
 */
6385 static void rtl8169_net_suspend(struct net_device *dev)
6387 struct rtl8169_private *tp = netdev_priv(dev);
6389 if (!netif_running(dev))
6392 netif_device_detach(dev);
6393 netif_stop_queue(dev);
6396 napi_disable(&tp->napi);
/* Prevent rtl_task() from running any further actions. */
6397 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6398 rtl_unlock_work(tp);
6400 rtl_pll_power_down(tp);
/* dev_pm_ops .suspend/.freeze/.poweroff callback: delegate to the common
 * network suspend path.
 */
6405 static int rtl8169_suspend(struct device *device)
6407 struct pci_dev *pdev = to_pci_dev(device);
6408 struct net_device *dev = pci_get_drvdata(pdev);
6410 rtl8169_net_suspend(dev);
/* Common resume tail (system and runtime resume): reattach the netdev,
 * power the PLL up, re-enable NAPI/tasks and schedule a chip reset to
 * restore full operation.
 */
6415 static void __rtl8169_resume(struct net_device *dev)
6417 struct rtl8169_private *tp = netdev_priv(dev);
6419 netif_device_attach(dev);
6421 rtl_pll_power_up(tp);
6424 napi_enable(&tp->napi);
6425 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6426 rtl_unlock_work(tp);
/* The reset work re-initialises rings and hardware state. */
6428 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* dev_pm_ops .resume/.thaw/.restore callback: re-init the PHY, then
 * resume the interface if it was running when we suspended.
 */
6431 static int rtl8169_resume(struct device *device)
6433 struct pci_dev *pdev = to_pci_dev(device);
6434 struct net_device *dev = pci_get_drvdata(pdev);
6435 struct rtl8169_private *tp = netdev_priv(dev);
6437 rtl8169_init_phy(dev, tp);
6439 if (netif_running(dev))
6440 __rtl8169_resume(dev);
/* Runtime-PM suspend: only meaningful while the interface is open
 * (TxDescArray allocated). Saves the current WoL configuration, arms
 * wake-on-anything so link changes can wake us, then suspends.
 */
6445 static int rtl8169_runtime_suspend(struct device *device)
6447 struct pci_dev *pdev = to_pci_dev(device);
6448 struct net_device *dev = pci_get_drvdata(pdev);
6449 struct rtl8169_private *tp = netdev_priv(dev);
6451 if (!tp->TxDescArray)
6455 tp->saved_wolopts = __rtl8169_get_wol(tp);
6456 __rtl8169_set_wol(tp, WAKE_ANY);
6457 rtl_unlock_work(tp);
6459 rtl8169_net_suspend(dev);
/* Runtime-PM resume: restore the WoL options saved at runtime suspend,
 * re-init the PHY and bring the interface back up. No-op when the
 * interface is closed.
 */
6464 static int rtl8169_runtime_resume(struct device *device)
6466 struct pci_dev *pdev = to_pci_dev(device);
6467 struct net_device *dev = pci_get_drvdata(pdev);
6468 struct rtl8169_private *tp = netdev_priv(dev);
6470 if (!tp->TxDescArray)
6474 __rtl8169_set_wol(tp, tp->saved_wolopts);
6475 tp->saved_wolopts = 0;
6476 rtl_unlock_work(tp);
6478 rtl8169_init_phy(dev, tp);
6480 __rtl8169_resume(dev);
6485 static int rtl8169_runtime_idle(struct device *device)
6487 struct pci_dev *pdev = to_pci_dev(device);
6488 struct net_device *dev = pci_get_drvdata(pdev);
6489 struct rtl8169_private *tp = netdev_priv(dev);
6491 return tp->TxDescArray ? -EBUSY : 0;
/* Power-management operations: system sleep transitions share the same
 * suspend/resume pair; runtime PM has dedicated callbacks. Compiled out
 * entirely when CONFIG_PM is disabled.
 */
6494 static const struct dev_pm_ops rtl8169_pm_ops = {
6495 .suspend = rtl8169_suspend,
6496 .resume = rtl8169_resume,
6497 .freeze = rtl8169_suspend,
6498 .thaw = rtl8169_resume,
6499 .poweroff = rtl8169_suspend,
6500 .restore = rtl8169_resume,
6501 .runtime_suspend = rtl8169_runtime_suspend,
6502 .runtime_resume = rtl8169_runtime_resume,
6503 .runtime_idle = rtl8169_runtime_idle,
6506 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6508 #else /* !CONFIG_PM */
6510 #define RTL8169_PM_OPS NULL
6512 #endif /* !CONFIG_PM */
/* 8168b-family shutdown quirk: these chips cannot wake on LAN with the
 * receiver disabled, so stop bus mastering but leave Rx enabled.
 */
6514 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6516 void __iomem *ioaddr = tp->mmio_addr;
6518 /* WoL fails with 8168b when the receiver is disabled. */
6519 switch (tp->mac_version) {
6520 case RTL_GIGA_MAC_VER_11:
6521 case RTL_GIGA_MAC_VER_12:
6522 case RTL_GIGA_MAC_VER_17:
6523 pci_clear_master(tp->pci_dev);
6525 RTL_W8(ChipCmd, CmdRxEnb);
/* PCI .shutdown callback: quiesce the device, restore the permanent MAC
 * address (so BIOS/bootloader see the factory MAC), and — on power-off —
 * arm Wake-on-LAN and drop to D3hot.
 */
6534 static void rtl_shutdown(struct pci_dev *pdev)
6536 struct net_device *dev = pci_get_drvdata(pdev);
6537 struct rtl8169_private *tp = netdev_priv(dev);
6538 struct device *d = &pdev->dev;
6540 pm_runtime_get_sync(d);
6542 rtl8169_net_suspend(dev);
6544 /* Restore original MAC address */
6545 rtl_rar_set(tp, dev->perm_addr);
6547 rtl8169_hw_reset(tp);
6549 if (system_state == SYSTEM_POWER_OFF) {
6550 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6551 rtl_wol_suspend_quirk(tp);
6552 rtl_wol_shutdown_quirk(tp);
6555 pci_wake_from_d3(pdev, true);
6556 pci_set_power_state(pdev, PCI_D3hot);
6559 pm_runtime_put_noidle(d);
/* PCI .remove callback: stop the embedded 8168 DASH/management firmware
 * where present, tear down the work item, NAPI and netdev registration,
 * release firmware and board resources.
 */
6562 static void rtl_remove_one(struct pci_dev *pdev)
6564 struct net_device *dev = pci_get_drvdata(pdev);
6565 struct rtl8169_private *tp = netdev_priv(dev);
/* These chip revisions run management firmware that must be stopped. */
6567 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6568 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6569 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6570 rtl8168_driver_stop(tp);
6573 cancel_work_sync(&tp->wk.work);
6575 netif_napi_del(&tp->napi);
6577 unregister_netdev(dev);
6579 rtl_release_firmware(tp);
/* Balance the pm_runtime_put_noidle() done at probe time. */
6581 if (pci_dev_run_wake(pdev))
6582 pm_runtime_get_noresume(&pdev->dev);
6584 /* restore original MAC address */
6585 rtl_rar_set(tp, dev->perm_addr);
6587 rtl_disable_msi(pdev, tp);
6588 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6589 pci_set_drvdata(pdev, NULL);
/* Netdevice operations table wiring the driver's handlers into the
 * network core. rtl8169_netpoll is only built with netpoll support.
 */
6592 static const struct net_device_ops rtl_netdev_ops = {
6593 .ndo_open = rtl_open,
6594 .ndo_stop = rtl8169_close,
6595 .ndo_get_stats64 = rtl8169_get_stats64,
6596 .ndo_start_xmit = rtl8169_start_xmit,
6597 .ndo_tx_timeout = rtl8169_tx_timeout,
6598 .ndo_validate_addr = eth_validate_addr,
6599 .ndo_change_mtu = rtl8169_change_mtu,
6600 .ndo_fix_features = rtl8169_fix_features,
6601 .ndo_set_features = rtl8169_set_features,
6602 .ndo_set_mac_address = rtl_set_mac_address,
6603 .ndo_do_ioctl = rtl8169_ioctl,
6604 .ndo_set_rx_mode = rtl_set_rx_mode,
6605 #ifdef CONFIG_NET_POLL_CONTROLLER
6606 .ndo_poll_controller = rtl8169_netpoll,
/* Per-family configuration table (8169 / 8168 / 8101), indexed by the
 * PCI id table's driver_data: hardware start routine, slow-event mask,
 * feature flags and the fallback MAC version for unidentified chips.
 */
6611 static const struct rtl_cfg_info {
6612 void (*hw_start)(struct net_device *);
6613 unsigned int region;
6618 } rtl_cfg_infos [] = {
6620 .hw_start = rtl_hw_start_8169,
6623 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6624 .features = RTL_FEATURE_GMII,
6625 .default_ver = RTL_GIGA_MAC_VER_01,
6628 .hw_start = rtl_hw_start_8168,
6631 .event_slow = SYSErr | LinkChg | RxOverflow,
6632 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6633 .default_ver = RTL_GIGA_MAC_VER_11,
6636 .hw_start = rtl_hw_start_8101,
6639 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6641 .features = RTL_FEATURE_MSI,
6642 .default_ver = RTL_GIGA_MAC_VER_13,
6646 /* Cfg9346_Unlock assumed. */
/* Try to enable MSI for chips whose config advertises it; falls back to
 * legacy INTx when pci_enable_msi() fails. Returns the RTL_FEATURE_MSI
 * flag on success, to be merged into tp->features by the caller.
 */
6647 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6648 const struct rtl_cfg_info *cfg)
6650 void __iomem *ioaddr = tp->mmio_addr;
6654 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6655 if (cfg->features & RTL_FEATURE_MSI) {
6656 if (pci_enable_msi(tp->pci_dev)) {
6657 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6660 msi = RTL_FEATURE_MSI;
/* Only the old 8169 chips need the Config2 MSI bit written back. */
6663 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6664 RTL_W8(Config2, cfg2);
/* Polling condition: true once the MCU reports its link list ready. */
6668 DECLARE_RTL_COND(rtl_link_list_ready_cond)
6670 void __iomem *ioaddr = tp->mmio_addr;
6672 return RTL_R8(MCU) & LINK_LIST_RDY;
/* Polling condition: true once both Rx and Tx FIFOs report empty. */
6675 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6677 void __iomem *ioaddr = tp->mmio_addr;
6679 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
/* One-time hardware initialisation for the 8168G family: gate RXDV, wait
 * for the datapath to drain, take the MCU out of OOB mode and toggle an
 * undocumented OCP register (0xe8de) around link-list-ready waits.
 * NOTE(review): the bit manipulations on the 0xe8de read-back are elided
 * in this extract — confirm against the full source.
 */
6682 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6684 void __iomem *ioaddr = tp->mmio_addr;
6687 tp->ocp_base = OCP_STD_PHY_BASE;
6689 RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6691 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6694 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6697 RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb))
6699 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6701 data = r8168_mac_ocp_read(tp, 0xe8de);
6703 r8168_mac_ocp_write(tp, 0xe8de, data);
6705 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6708 data = r8168_mac_ocp_read(tp, 0xe8de);
6710 r8168_mac_ocp_write(tp, 0xe8de, data);
6712 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
/* Dispatch chip-family-specific one-time initialisation; currently only
 * the 8168G family (VER_40/41) needs any.
 */
6716 static void rtl_hw_initialize(struct rtl8169_private *tp)
6718 switch (tp->mac_version) {
6719 case RTL_GIGA_MAC_VER_40:
6720 case RTL_GIGA_MAC_VER_41:
6721 rtl_hw_init_8168g(tp);
/* PCI .probe callback: allocate and register the netdev, enable and map
 * the PCI device, identify the chip revision, configure DMA masks,
 * MSI, WoL capabilities, PHY access methods, feature flags and finally
 * register with the network core. Errors unwind via the numbered labels
 * (elided in this extract).
 */
6730 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6732 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6733 const unsigned int region = cfg->region;
6734 struct rtl8169_private *tp;
6735 struct mii_if_info *mii;
6736 struct net_device *dev;
6737 void __iomem *ioaddr;
6741 if (netif_msg_drv(&debug)) {
6742 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6743 MODULENAME, RTL8169_VERSION);
6746 dev = alloc_etherdev(sizeof (*tp));
6752 SET_NETDEV_DEV(dev, &pdev->dev);
6753 dev->netdev_ops = &rtl_netdev_ops;
6754 tp = netdev_priv(dev);
6757 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* Wire up the generic MII library helpers. */
6761 mii->mdio_read = rtl_mdio_read;
6762 mii->mdio_write = rtl_mdio_write;
6763 mii->phy_id_mask = 0x1f;
6764 mii->reg_num_mask = 0x1f;
6765 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6767 /* disable ASPM completely as that cause random device stop working
6768 * problems as well as full system hangs for some PCIe devices users */
6769 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6770 PCIE_LINK_STATE_CLKPM);
6772 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6773 rc = pci_enable_device(pdev);
6775 netif_err(tp, probe, dev, "enable failure\n");
6776 goto err_out_free_dev_1;
/* MWI failure is non-fatal — log and continue. */
6779 if (pci_set_mwi(pdev) < 0)
6780 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6782 /* make sure PCI base addr 1 is MMIO */
6783 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6784 netif_err(tp, probe, dev,
6785 "region #%d not an MMIO resource, aborting\n",
6791 /* check for weird/broken PCI region reporting */
6792 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6793 netif_err(tp, probe, dev,
6794 "Invalid PCI region size(s), aborting\n");
6799 rc = pci_request_regions(pdev, MODULENAME);
6801 netif_err(tp, probe, dev, "could not request regions\n");
6805 tp->cp_cmd = RxChkSum;
/* Prefer 64-bit DMA (PCI DAC) when available and requested, otherwise
 * fall back to a 32-bit mask. */
6807 if ((sizeof(dma_addr_t) > 4) &&
6808 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6809 tp->cp_cmd |= PCIDAC;
6810 dev->features |= NETIF_F_HIGHDMA;
6812 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6814 netif_err(tp, probe, dev, "DMA configuration failed\n");
6815 goto err_out_free_res_3;
6819 /* ioremap MMIO region */
6820 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6822 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6824 goto err_out_free_res_3;
6826 tp->mmio_addr = ioaddr;
6828 if (!pci_is_pcie(pdev))
6829 netif_info(tp, probe, dev, "not PCI Express\n");
6831 /* Identify chip attached to board */
6832 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6836 rtl_irq_disable(tp);
6838 rtl_hw_initialize(tp);
/* Clear any stale interrupt events before enabling bus mastering. */
6842 rtl_ack_events(tp, 0xffff);
6844 pci_set_master(pdev);
6847 * Pretend we are using VLANs; This bypasses a nasty bug where
6848 * Interrupts stop flowing on high load on 8110SCd controllers.
6850 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6851 tp->cp_cmd |= RxVlan;
/* Install per-chip-family operation tables. */
6853 rtl_init_mdio_ops(tp);
6854 rtl_init_pll_power_ops(tp);
6855 rtl_init_jumbo_ops(tp);
6856 rtl_init_csi_ops(tp);
6858 rtl8169_print_mac_version(tp);
6860 chipset = tp->mac_version;
6861 tp->txd_version = rtl_chip_infos[chipset].txd_version;
/* Probe WoL capability bits with the EEPROM registers unlocked. */
6863 RTL_W8(Cfg9346, Cfg9346_Unlock);
6864 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6865 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6866 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6867 tp->features |= RTL_FEATURE_WOL;
6868 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6869 tp->features |= RTL_FEATURE_WOL;
6870 tp->features |= rtl_try_msi(tp, cfg);
6871 RTL_W8(Cfg9346, Cfg9346_Lock);
/* TBI (fiber) vs MII (copper) attachments use different accessors. */
6873 if (rtl_tbi_enabled(tp)) {
6874 tp->set_speed = rtl8169_set_speed_tbi;
6875 tp->get_settings = rtl8169_gset_tbi;
6876 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6877 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6878 tp->link_ok = rtl8169_tbi_link_ok;
6879 tp->do_ioctl = rtl_tbi_ioctl;
6881 tp->set_speed = rtl8169_set_speed_xmii;
6882 tp->get_settings = rtl8169_gset_xmii;
6883 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6884 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6885 tp->link_ok = rtl8169_xmii_link_ok;
6886 tp->do_ioctl = rtl_xmii_ioctl;
6889 mutex_init(&tp->wk.mutex);
6891 /* Get MAC address */
6892 for (i = 0; i < ETH_ALEN; i++)
6893 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6894 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6896 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6897 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6899 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6901 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6902 * properly for all devices */
6903 dev->features |= NETIF_F_RXCSUM |
6904 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6906 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6907 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6908 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6911 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6912 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6913 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6915 dev->hw_features |= NETIF_F_RXALL;
6916 dev->hw_features |= NETIF_F_RXFCS;
6918 tp->hw_start = cfg->hw_start;
6919 tp->event_slow = cfg->event_slow;
/* VER_01 has no Rx overflow status bits to mask out. */
6921 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6922 ~(RxBOVF | RxFOVF) : ~0;
6924 init_timer(&tp->timer);
6925 tp->timer.data = (unsigned long) dev;
6926 tp->timer.function = rtl8169_phy_timer;
6928 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6930 rc = register_netdev(dev);
6934 pci_set_drvdata(pdev, dev);
6936 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6937 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6938 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6939 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6940 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6941 "tx checksumming: %s]\n",
6942 rtl_chip_infos[chipset].jumbo_max,
6943 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
/* Start the DASH/management firmware on revisions that carry it. */
6946 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6947 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6948 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6949 rtl8168_driver_start(tp);
6952 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6954 if (pci_dev_run_wake(pdev))
6955 pm_runtime_put_noidle(&pdev->dev);
6957 netif_carrier_off(dev);
/* --- error unwind path --- */
6963 netif_napi_del(&tp->napi);
6964 rtl_disable_msi(pdev, tp);
6967 pci_release_regions(pdev);
6969 pci_clear_mwi(pdev);
6970 pci_disable_device(pdev);
/* PCI driver definition; module_pci_driver() generates the module
 * init/exit boilerplate that registers and unregisters it.
 */
6976 static struct pci_driver rtl8169_pci_driver = {
6978 .id_table = rtl8169_pci_tbl,
6979 .probe = rtl_init_one,
6980 .remove = rtl_remove_one,
6981 .shutdown = rtl_shutdown,
6982 .driver.pm = RTL8169_PM_OPS,
6985 module_pci_driver(rtl8169_pci_driver);