/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}
u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
		    u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}
static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
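
/* adjust_link callback registered with the PHY layer: rebuild the per-MAC
 * control register (speed, duplex and resolved pause settings) whenever the
 * PHY state changes, and mirror the link state onto the carrier.
 */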
static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	switch (mac->phy_dev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->phy_dev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (mac->phy_dev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (mac->phy_dev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (mac->phy_dev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (mac->phy_dev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (mac->phy_dev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	const __be32 *_addr = NULL;
	struct phy_device *phydev;
	int phy_mode, addr;

	_addr = of_get_property(phy_node, "reg", NULL);

	if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
		pr_err("%s: invalid phy address\n", phy_node->name);
		return -EINVAL;
	}
	addr = be32_to_cpu(*_addr);
	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	mac->phy_dev = phydev;

	return 0;
}
static int mtk_phy_connect(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	struct device_node *np;
	u32 val, ge_mode;

	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = 2;
		break;
	default:
		dev_err(eth->dev, "invalid phy_mode\n");
		return -EINVAL;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	mtk_phy_connect_node(eth, mac, np);
	mac->phy_dev->autoneg = AUTONEG_ENABLE;
	mac->phy_dev->speed = 0;
	mac->phy_dev->duplex = 0;
	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
				   SUPPORTED_Asym_Pause;
	mac->phy_dev->advertising = mac->phy_dev->supported |
				    ADVERTISED_Autoneg;
	phy_start_aneg(mac->phy_dev);

	return 0;
}
static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int err;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		err = 0;
		goto err_put_node;
	}

	eth->mii_bus = mdiobus_alloc();
	if (!eth->mii_bus) {
		err = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	err = of_mdiobus_register(eth->mii_bus, mii_np);
	if (err)
		goto err_free_bus;

	return 0;

err_free_bus:
	mdiobus_free(eth->mii_bus);

err_put_node:
	of_node_put(mii_np);
	eth->mii_bus = NULL;
	return err;
}
static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
	of_node_put(eth->mii_bus->dev.of_node);
	mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
	/* flush write */
	mtk_r32(eth, MTK_QDMA_INT_MASK);
}

static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
	/* flush write */
	mtk_r32(eth, MTK_QDMA_INT_MASK);
}
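
/* program the MAC address into the GDMA address registers; the hardware
 * splits it across a high half (2 bytes) and a low half (4 bytes).
 */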
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;
	unsigned long flags;

	if (ret)
		return ret;

	spin_lock_irqsave(&mac->hw->page_lock, flags);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_irqrestore(&mac->hw->page_lock, flags);

	return 0;
}
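
/* fold the hardware MIB counters of one MAC into the software counters; the
 * byte counters are 64 bit wide in hardware, split over two 32 bit registers.
 */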
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}
static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}
static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;

	return storage;
}
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}
static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}
static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
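
/* map an skb onto the TX descriptor ring: the linear head uses the first
 * descriptor, each page fragment is chopped into MTK_TX_DMA_BUF_LEN sized
 * pieces, and on a mapping error every descriptor claimed so far is unwound.
 */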
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, 0);

			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(&dev->dev, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}
static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}
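
/* ndo_start_xmit handler; both netdevs share one TX ring, so ring access is
 * protected by page_lock and the queues are stopped when the ring runs low.
 */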
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	unsigned long flags;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock_irqsave(&eth->page_lock, flags);

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock_irqrestore(&eth->page_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
		mtk_stop_queue(eth);
		if (unlikely(atomic_read(&ring->free_count) >
			     ring->thresh))
			mtk_wake_queue(eth);
	}
	spin_unlock_irqrestore(&eth->page_lock, flags);

	return NETDEV_TX_OK;

drop:
	spin_unlock_irqrestore(&eth->page_lock, flags);
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
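
/* RX NAPI worker: refill the ring with freshly mapped fragments, hand the
 * received buffers to the stack via build_skb() and ack the RX interrupt
 * once the budget has not been exhausted.
 */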
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth, u32 rx_intr)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int idx = ring->calc_idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		netdev = eth->netdev[mac];

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
			skb_free_frag(new_data);
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(&netdev->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
		done++;
	}

	if (done < budget)
		mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);

	return done;
}
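
/* reclaim completed TX descriptors between the CPU and DMA pointers, credit
 * the completed bytes/packets per MAC and wake the queues once enough
 * descriptors are free again.
 */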
static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	int total = 0, done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	static int condition;
	int i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
		      TX_DMA_FPORT_MASK;
		mac--;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		skb = tx_buf->skb;
		if (!skb) {
			condition = 1;
			break;
		}

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth->dev, tx_buf);

		ring->last_free->txd2 = next_cpu;
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	/* read hw index again make sure no new tx packet */
	if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
		*tx_again = true;
	else
		mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);

	if (!total)
		return 0;

	if (atomic_read(&ring->free_count) > ring->thresh)
		mtk_wake_queue(eth);

	return total;
}
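
/* combined NAPI poll handler; one NAPI context on a dummy netdev services the
 * shared TX and RX rings and re-enables the interrupts once all work is done.
 */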
static int mtk_poll(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, status2, mask, tx_intr, rx_intr, status_intr;
	int tx_done, rx_done;
	bool tx_again = false;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	status2 = mtk_r32(eth, MTK_INT_STATUS2);
	tx_intr = MTK_TX_DONE_INT;
	rx_intr = MTK_RX_DONE_INT;
	status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
	tx_done = 0;
	rx_done = 0;

	if (status & tx_intr)
		tx_done = mtk_poll_tx(eth, budget, &tx_again);

	if (status & rx_intr)
		rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);

	if (unlikely(status2 & status_intr)) {
		mtk_stats_update(eth);
		mtk_w32(eth, status_intr, MTK_INT_STATUS2);
	}

	if (unlikely(netif_msg_intr(eth))) {
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		netdev_info(eth->netdev[0],
			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
			    tx_done, rx_done, status, mask);
	}

	if (tx_again || rx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & (tx_intr | rx_intr))
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, tx_intr | rx_intr);

	return rx_done;
}
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sz,
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		goto no_tx_mem;

	memset(ring->dma, 0, MTK_DMA_SIZE * sz);
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
	ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
			   MAX_SKB_FRAGS);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);

	return 0;

no_tx_mem:
	return -ENOMEM;
}
static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth->dev, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}
static int mtk_rx_alloc(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int i;

	ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sizeof(*ring->dma),
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->calc_idx = MTK_DMA_SIZE - 1;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
	mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
	mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
	mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	return 0;
}
static void mtk_rx_clean(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring;
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < MTK_DMA_SIZE; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}
static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;

	if (mtk_dma_busy_wait(eth))
		return -EFAULT;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth);
	if (err)
		return err;

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}
static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth);
	kfree(eth->scratch_head);
}
static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 status;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (unlikely(!status))
		return IRQ_NONE;

	if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
		if (likely(napi_schedule_prep(&eth->rx_napi)))
			__napi_schedule(&eth->rx_napi);
	} else {
		mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
	}
	mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;

	mtk_irq_disable(eth, int_mask);
	mtk_handle_irq(dev->irq, dev);
	mtk_irq_enable(eth, int_mask);
}
#endif
static int mtk_start_dma(struct mtk_eth *eth)
{
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
		MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
		MTK_RX_BT_32DWORDS,
		MTK_QDMA_GLO_CFG);

	return 0;
}
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	}
	atomic_inc(&eth->dma_refcnt);

	phy_start(mac->phy_dev);
	netif_start_queue(dev);

	return 0;
}
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	unsigned long flags;
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_irqsave(&eth->page_lock, flags);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_irqrestore(&eth->page_lock, flags);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}
static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(mac->phy_dev);

	/* only shutdown DMA if this is the last user */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	napi_disable(&eth->rx_napi);

	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}
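
/* one-time hardware bring-up: reset the frame engine, set up the GMAC pad
 * control, request the IRQ, register the MDIO bus and program the GDMA
 * forwarding and checksum defaults.
 */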
static int __init mtk_hw_init(struct mtk_eth *eth)
{
	int err, i;

	/* reset the frame engine */
	reset_control_assert(eth->rstc);
	usleep_range(10, 20);
	reset_control_deassert(eth->rstc);
	usleep_range(10, 20);

	/* Set GE2 driving and slew rate */
	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

	/* set GE2 TDSEL */
	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

	/* set GE2 TUNE */
	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

	/* GE1, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));

	/* GE2, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));

	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
			       dev_name(eth->dev), eth);
	if (err)
		return err;

	err = mtk_mdio_init(eth);
	if (err)
		return err;

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, 0, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to QDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;
}
static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address  */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_RANDOM;
	}

	return mtk_phy_connect(mac);
}
static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(mac->phy_dev);
	mtk_mdio_cleanup(eth);
	mtk_irq_disable(eth, ~0);
	free_irq(dev->irq, dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
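
/* TX timeout recovery worker: bring every registered netdev down and back up
 * again so that the shared DMA rings are re-initialised from scratch.
 */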
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}
	rtnl_unlock();
}
static int mtk_cleanup(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;

		unregister_netdev(eth->netdev[i]);
		free_netdev(eth->netdev[i]);
	}
	cancel_work_sync(&eth->pending_work);

	return 0;
}
static int mtk_get_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = phy_read_status(mac->phy_dev);
	if (err)
		return -ENODEV;

	return phy_ethtool_gset(mac->phy_dev, cmd);
}

static int mtk_set_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (cmd->phy_address != mac->phy_dev->mdio.addr) {
		mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
					       cmd->phy_address);
		if (!mac->phy_dev)
			return -ENODEV;
	}

	return phy_ethtool_sset(mac->phy_dev, cmd);
}
static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}
static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return genphy_restart_aneg(mac->phy_dev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = genphy_update_link(mac->phy_dev);
	if (err)
		return ethtool_op_get_link(dev);

	return mac->phy_dev->link;
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	do {
		data_src = (u64 *)hwstats;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
static const struct ethtool_ops mtk_ethtool_ops = {
	.get_settings		= mtk_get_settings,
	.set_settings		= mtk_set_settings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
};
static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;
	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	err = register_netdev(eth->netdev[id]);
	if (err) {
		dev_err(eth->dev, "error bringing up device\n");
		goto free_netdev;
	}
	eth->netdev[id]->irq = eth->irq;
	netif_info(eth, probe, eth->netdev[id],
		   "mediatek frame engine at 0x%08lx, irq %d\n",
		   eth->netdev[id]->base_addr, eth->netdev[id]->irq);

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	eth->netdev[id] = NULL;
	return err;
}
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
	if (IS_ERR(eth->rstc)) {
		dev_err(&pdev->dev, "no eth reset found\n");
		return PTR_ERR(eth->rstc);
	}

	eth->irq = platform_get_irq(pdev, 0);
	if (eth->irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found\n");
		return -ENXIO;
	}

	eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
	eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
	eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
	eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
	if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
	    IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
		return -ENODEV;

	clk_prepare_enable(eth->clk_ethif);
	clk_prepare_enable(eth->clk_esw);
	clk_prepare_enable(eth->clk_gp1);
	clk_prepare_enable(eth->clk_gp2);

	eth->dev = &pdev->dev;
	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_free_dev;
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_free_dev:
	mtk_cleanup(eth);
	return err;
}
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);

	clk_disable_unprepare(eth->clk_ethif);
	clk_disable_unprepare(eth->clk_esw);
	clk_disable_unprepare(eth->clk_gp1);
	clk_disable_unprepare(eth->clk_gp2);

	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt7623-eth" },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.owner = THIS_MODULE,
		.of_match_table = of_mtk_match,
	},
};
1867 MODULE_LICENSE("GPL");
1868 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1869 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");