/*
 * Faraday FTGMAC100 Gigabit Ethernet
 *
 * (C) Copyright 2009-2011 Faraday Technology
 * Po-Yu Chuang <ratbert@faraday-tech.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 #include <linux/dma-mapping.h>
25 #include <linux/etherdevice.h>
26 #include <linux/ethtool.h>
27 #include <linux/interrupt.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
32 #include <linux/phy.h>
33 #include <linux/platform_device.h>
34 #include <linux/property.h>
38 #include "ftgmac100.h"
/* Driver identification strings reported via ethtool get_drvinfo */
40 #define DRV_NAME "ftgmac100"
41 #define DRV_VERSION "0.7"
/* Ring sizes are powers of 2 so ring-pointer wrap can be done with a mask */
43 #define RX_QUEUE_ENTRIES 256 /* must be power of 2 */
44 #define TX_QUEUE_ENTRIES 512 /* must be power of 2 */
/* Largest frame handled; RX_BUF_SIZE is programmed into the RBSR register */
46 #define MAX_PKT_SIZE 1536
47 #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */
49 /* Min number of tx ring entries before stopping queue */
50 #define TX_THRESHOLD (MAX_SKB_FRAGS + 1)
/* Contiguous block holding both DMA descriptor rings; allocated coherently
 * in one shot and handed to the HW via the RXR/NPTXR base registers.
 */
52 struct ftgmac100_descs {
53 struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
54 struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
/* --- per-adapter private state (struct ftgmac100) --- */
/* Descriptor rings and their DMA handle */
62 struct ftgmac100_descs *descs;
63 dma_addr_t descs_dma_addr;
/* Per-entry RX skbs; rx_pointer is the next descriptor to look at.
 * rxdes0_edorr_mask marks the end-of-ring bit (varies per chip revision).
 */
66 struct sk_buff *rx_skbs[RX_QUEUE_ENTRIES];
67 unsigned int rx_pointer;
68 u32 rxdes0_edorr_mask;
/* Per-entry TX skbs; tx_pointer is the producer index, tx_clean_pointer
 * the consumer (completion) index. txdes0_edotr_mask is the TX
 * end-of-ring bit.
 */
71 struct sk_buff *tx_skbs[TX_QUEUE_ENTRIES];
72 unsigned int tx_clean_pointer;
73 unsigned int tx_pointer;
74 u32 txdes0_edotr_mask;
76 /* Scratch page to use when rx skb alloc fails */
78 dma_addr_t rx_scratch_dma;
80 /* Component structures */
81 struct net_device *netdev;
83 struct ncsi_dev *ndev;
84 struct napi_struct napi;
85 struct work_struct reset_task;
86 struct mii_bus *mii_bus;
/* Set from IRQ context to ask NAPI to restart the MAC after an error */
94 bool need_mac_restart;
/* Issue a software reset of the MAC and poll (up to 50 iterations) for the
 * SW_RST bit to self-clear. @maccr is written back so speed/duplex mode
 * bits survive the reset. Returns 0 on success, error if the bit never
 * clears.
 */
98 static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
100 struct net_device *netdev = priv->netdev;
103 /* NOTE: reset clears all registers */
104 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
105 iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
106 priv->base + FTGMAC100_OFFSET_MACCR);
107 for (i = 0; i < 50; i++) {
/* Hardware clears SW_RST when the reset has completed */
110 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
111 if (!(maccr & FTGMAC100_MACCR_SW_RST))
117 netdev_err(netdev, "Hardware reset failed\n");
/* Translate priv->cur_speed into the MACCR mode bits, reset the ring
 * pointers, then perform the documented double reset of the MAC.
 * Speed 0 is used internally to mean "no link".
 */
121 static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
125 switch (priv->cur_speed) {
127 case 0: /* no link */
131 maccr |= FTGMAC100_MACCR_FAST_MODE;
135 maccr |= FTGMAC100_MACCR_GIGA_MODE;
138 netdev_err(priv->netdev, "Unknown speed %d !\n",
143 /* (Re)initialize the queue pointers */
144 priv->rx_pointer = 0;
145 priv->tx_clean_pointer = 0;
146 priv->tx_pointer = 0;
148 /* The doc says reset twice with 10us interval */
149 if (ftgmac100_reset_mac(priv, maccr))
151 usleep_range(10, 1000);
152 return ftgmac100_reset_mac(priv, maccr);
155 static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
157 unsigned int maddr = mac[0] << 8 | mac[1];
158 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
160 iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
161 iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
/* Choose the initial MAC address, in order of preference:
 * 1. address from the device tree / firmware (device_get_mac_address),
 * 2. whatever address is already programmed in the chip (e.g. by u-boot),
 * 3. a freshly generated random address.
 */
164 static void ftgmac100_initial_mac(struct ftgmac100 *priv)
171 addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
173 ether_addr_copy(priv->netdev->dev_addr, mac);
174 dev_info(priv->dev, "Read MAC address %pM from device tree\n",
/* Fall back: read the address registers back from the hardware */
179 m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
180 l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
182 mac[0] = (m >> 8) & 0xff;
184 mac[2] = (l >> 24) & 0xff;
185 mac[3] = (l >> 16) & 0xff;
186 mac[4] = (l >> 8) & 0xff;
189 if (is_valid_ether_addr(mac)) {
190 ether_addr_copy(priv->netdev->dev_addr, mac);
191 dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
193 eth_hw_addr_random(priv->netdev);
194 dev_info(priv->dev, "Generated random MAC address %pM\n",
195 priv->netdev->dev_addr);
/* ndo_set_mac_address: validate the new address via the core helper, commit
 * it to the netdev, then push it into the hardware registers.
 */
199 static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
203 ret = eth_prepare_mac_addr_change(dev, p);
207 eth_commit_mac_addr_change(dev, p);
208 ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
/* One-time (per up/reset) hardware setup: point the chip at the RX/TX
 * descriptor rings, set the RX buffer size and descriptor autopoll, and
 * program the MAC address. Does not enable DMA/MAC (see start_hw).
 */
213 static void ftgmac100_init_hw(struct ftgmac100 *priv)
217 /* Setup RX ring buffer base */
218 iowrite32(priv->descs_dma_addr +
219 offsetof(struct ftgmac100_descs, rxdes),
220 priv->base + FTGMAC100_OFFSET_RXR_BADR);
222 /* Setup TX ring buffer base */
223 iowrite32(priv->descs_dma_addr +
224 offsetof(struct ftgmac100_descs, txdes),
225 priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
227 /* Configure RX buffer size */
228 iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
229 priv->base + FTGMAC100_OFFSET_RBSR);
231 /* Set RX descriptor autopoll */
232 iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
233 priv->base + FTGMAC100_OFFSET_APTC);
235 /* Write MAC address */
236 ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);
/* Enable the MAC: preserve the speed mode bits already in MACCR, then turn
 * on the TX/RX DMA engines and MACs plus CRC append, runt/broadcast RX and
 * full duplex when negotiated.
 */
239 static void ftgmac100_start_hw(struct ftgmac100 *priv)
241 u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
243 /* Keep the original GMAC and FAST bits */
244 maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
246 /* Add all the main enable bits */
247 maccr |= FTGMAC100_MACCR_TXDMA_EN |
248 FTGMAC100_MACCR_RXDMA_EN |
249 FTGMAC100_MACCR_TXMAC_EN |
250 FTGMAC100_MACCR_RXMAC_EN |
251 FTGMAC100_MACCR_CRC_APD |
252 FTGMAC100_MACCR_PHY_LINK_LEVEL |
253 FTGMAC100_MACCR_RX_RUNT |
254 FTGMAC100_MACCR_RX_BROADPKT;
256 /* Add other bits as needed */
257 if (priv->cur_duplex == DUPLEX_FULL)
258 maccr |= FTGMAC100_MACCR_FULLDUP;
/* Single register write makes the whole configuration take effect */
261 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
/* Stop the MAC by clearing every enable bit in MACCR */
264 static void ftgmac100_stop_hw(struct ftgmac100 *priv)
266 iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
/* Allocate and DMA-map a fresh RX skb for ring slot @entry and arm the
 * descriptor. On skb allocation or mapping failure the descriptor is
 * pointed at the shared scratch buffer so the HW can keep running; the
 * packet landing there will be dropped later (rx_skbs[entry] left NULL).
 */
269 static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
270 struct ftgmac100_rxdes *rxdes, gfp_t gfp)
272 struct net_device *netdev = priv->netdev;
277 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
278 if (unlikely(!skb)) {
280 netdev_warn(netdev, "failed to allocate rx skb\n");
282 map = priv->rx_scratch_dma;
284 map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
286 if (unlikely(dma_mapping_error(priv->dev, map))) {
288 netdev_err(netdev, "failed to map rx page\n");
289 dev_kfree_skb_any(skb);
290 map = priv->rx_scratch_dma;
297 priv->rx_skbs[entry] = skb;
299 /* Store DMA address into RX desc */
300 rxdes->rxdes3 = cpu_to_le32(map);
302 /* Ensure the above is ordered vs clearing the OWN bit */
305 /* Clean status (which resets own bit) */
306 if (entry == (RX_QUEUE_ENTRIES - 1))
307 rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
/* Advance an RX ring index with power-of-2 wrap-around */
314 static int ftgmac100_next_rx_pointer(int pointer)
316 return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
/* Account RX descriptor error bits into the standard netdev statistics */
319 static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
321 struct net_device *netdev = priv->netdev;
323 if (status & FTGMAC100_RXDES0_RX_ERR)
324 netdev->stats.rx_errors++;
326 if (status & FTGMAC100_RXDES0_CRC_ERR)
327 netdev->stats.rx_crc_errors++;
/* Frame-too-long, runt and odd-nibble all count as length errors */
329 if (status & (FTGMAC100_RXDES0_FTL |
330 FTGMAC100_RXDES0_RUNT |
331 FTGMAC100_RXDES0_RX_ODD_NB))
332 netdev->stats.rx_length_errors++;
/* Pull one completed packet off the RX ring and feed it to the stack.
 * Returns true when a descriptor was consumed (so the NAPI loop should
 * keep going), false when the ring is empty. *processed is bumped for
 * each packet delivered. Error/fragmented packets are dropped and the
 * descriptor recycled.
 */
335 static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
337 struct net_device *netdev = priv->netdev;
338 struct ftgmac100_rxdes *rxdes;
340 unsigned int pointer, size;
341 u32 status, csum_vlan;
344 /* Grab next RX descriptor */
345 pointer = priv->rx_pointer;
346 rxdes = &priv->descs->rxdes[pointer];
348 /* Grab descriptor status */
349 status = le32_to_cpu(rxdes->rxdes0);
351 /* Do we have a packet ? */
352 if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
355 /* Order subsequent reads with the test for the ready bit */
358 /* We don't cope with fragmented RX packets */
359 if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
360 !(status & FTGMAC100_RXDES0_LRS)))
363 /* Grab received size and csum vlan field in the descriptor */
364 size = status & FTGMAC100_RXDES0_VDBC;
365 csum_vlan = le32_to_cpu(rxdes->rxdes1);
367 /* Any error (other than csum offload) flagged ? */
368 if (unlikely(status & RXDES0_ANY_ERROR)) {
369 /* Correct for incorrect flagging of runt packets
370 * with vlan tags... Just accept a runt packet that
371 * has been flagged as vlan and whose size is at
374 if ((status & FTGMAC100_RXDES0_RUNT) &&
375 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
377 status &= ~FTGMAC100_RXDES0_RUNT;
379 /* Any error still in there ? */
380 if (status & RXDES0_ANY_ERROR) {
381 ftgmac100_rx_packet_error(priv, status);
386 /* If the packet had no skb (failed to allocate earlier)
387 * then try to allocate one and skip
389 skb = priv->rx_skbs[pointer];
/* NOTE(review): "!unlikely(skb)" is functionally equivalent to the
 * conventional unlikely(!skb) (unlikely() passes its value through),
 * but should be normalized to the idiomatic form.
 */
390 if (!unlikely(skb)) {
391 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
395 if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
396 netdev->stats.multicast++;
398 /* If the HW found checksum errors, bounce it to software.
400 * If we didn't, we need to see if the packet was recognized
401 * by HW as one of the supported checksummed protocols before
402 * we accept the HW test results.
404 if (netdev->features & NETIF_F_RXCSUM) {
405 u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
406 FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
407 FTGMAC100_RXDES1_IP_CHKSUM_ERR;
408 if ((csum_vlan & err_bits) ||
409 !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
410 skb->ip_summed = CHECKSUM_NONE;
412 skb->ip_summed = CHECKSUM_UNNECESSARY;
415 /* Transfer received size to skb */
418 /* Tear down DMA mapping, do necessary cache management */
419 map = le32_to_cpu(rxdes->rxdes3);
421 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
422 /* When we don't have an iommu, we can save cycles by not
423 * invalidating the cache for the part of the packet that
426 dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
428 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
432 /* Resplenish rx ring */
433 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
434 priv->rx_pointer = ftgmac100_next_rx_pointer(pointer);
436 skb->protocol = eth_type_trans(skb, netdev);
438 netdev->stats.rx_packets++;
439 netdev->stats.rx_bytes += size;
441 /* push packet to protocol stack */
442 if (skb->ip_summed == CHECKSUM_NONE)
443 netif_receive_skb(skb);
445 napi_gro_receive(&priv->napi, skb);
/* Drop path: recycle the descriptor and count the loss */
451 /* Clean rxdes0 (which resets own bit) */
452 rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
453 priv->rx_pointer = ftgmac100_next_rx_pointer(pointer);
454 netdev->stats.rx_dropped++;
/* Base value for a TX descriptor's control/status word: only the last ring
 * entry carries the end-of-ring bit, all others start from 0.
 */
458 static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
461 if (index == (TX_QUEUE_ENTRIES - 1))
462 return priv->txdes0_edotr_mask;
/* Advance a TX ring index with power-of-2 wrap-around */
467 static int ftgmac100_next_tx_pointer(int pointer)
469 return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
472 static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
474 /* Returns the number of available slots in the TX queue
476 * This always leaves one free slot so we don't have to
477 * worry about empty vs. full, and this simplifies the
478 * test for ftgmac100_tx_buf_cleanable() below
/* The "- 1" reserves one slot; mask handles the unsigned wrap */
480 return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
481 (TX_QUEUE_ENTRIES - 1);
/* True while there are in-flight TX descriptors awaiting completion */
484 static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
486 return priv->tx_pointer != priv->tx_clean_pointer;
/* Undo the DMA mapping of one TX descriptor. First segments (FTS) were
 * mapped with dma_map_single (head), others with skb_frag_dma_map (page).
 * The skb itself is released only on the last segment (LTS); the ring slot
 * is always cleared.
 */
489 static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
490 unsigned int pointer,
492 struct ftgmac100_txdes *txdes,
495 dma_addr_t map = le32_to_cpu(txdes->txdes3);
498 if (ctl_stat & FTGMAC100_TXDES0_FTS) {
499 len = skb_headlen(skb);
500 dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
502 len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
503 dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
506 /* Free SKB on last segment */
507 if (ctl_stat & FTGMAC100_TXDES0_LTS)
509 priv->tx_skbs[pointer] = NULL;
/* Reap one completed TX descriptor at tx_clean_pointer. Returns false if
 * the HW still owns it (TXDMA_OWN set), otherwise accounts stats, frees
 * the mapping/skb, resets the descriptor (keeping only the end-of-ring
 * bit) and advances the clean pointer.
 */
512 static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
514 struct net_device *netdev = priv->netdev;
515 struct ftgmac100_txdes *txdes;
517 unsigned int pointer;
520 pointer = priv->tx_clean_pointer;
521 txdes = &priv->descs->txdes[pointer];
523 ctl_stat = le32_to_cpu(txdes->txdes0);
524 if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
527 skb = priv->tx_skbs[pointer];
528 netdev->stats.tx_packets++;
529 netdev->stats.tx_bytes += skb->len;
530 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
531 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
533 priv->tx_clean_pointer = ftgmac100_next_tx_pointer(pointer);
/* Reap all completed TX packets, then wake the queue if it was stopped and
 * enough ring slots freed up. The re-check under the TX queue lock makes
 * the wake race-free against a concurrent stop in hard_start_xmit.
 */
538 static void ftgmac100_tx_complete(struct ftgmac100 *priv)
540 struct net_device *netdev = priv->netdev;
542 /* Process all completed packets */
543 while (ftgmac100_tx_buf_cleanable(priv) &&
544 ftgmac100_tx_complete_packet(priv))
547 /* Restart queue if needed */
549 if (unlikely(netif_queue_stopped(netdev) &&
550 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
551 struct netdev_queue *txq;
553 txq = netdev_get_tx_queue(netdev, 0);
554 __netif_tx_lock(txq, smp_processor_id());
555 if (netif_queue_stopped(netdev) &&
556 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
557 netif_wake_queue(netdev);
558 __netif_tx_unlock(txq);
/* Set the TX descriptor checksum-offload bits for IPv4 TCP/UDP packets.
 * For anything the HW can't offload, fall back to software checksumming
 * via skb_checksum_help(); returns false only if that fails (caller should
 * drop the packet).
 */
562 static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
564 if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
565 u8 ip_proto = ip_hdr(skb)->protocol;
567 *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
570 *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
573 *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
579 return skb_checksum_help(skb) == 0;
/* ndo_start_xmit: map the skb head and all fragments onto consecutive TX
 * descriptors. The first descriptor's OWN bit is written last (after a
 * barrier) so the HW never sees a half-built chain. On mapping failure the
 * already-mapped segments are unwound and the packet dropped. Stops the
 * queue when fewer than TX_THRESHOLD slots remain.
 */
582 static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
583 struct net_device *netdev)
585 struct ftgmac100 *priv = netdev_priv(netdev);
586 struct ftgmac100_txdes *txdes, *first;
587 unsigned int pointer, nfrags, len, i, j;
588 u32 f_ctl_stat, ctl_stat, csum_vlan;
591 /* The HW doesn't pad small frames */
592 if (eth_skb_pad(skb)) {
593 netdev->stats.tx_dropped++;
597 /* Reject oversize packets */
598 if (unlikely(skb->len > MAX_PKT_SIZE)) {
600 netdev_dbg(netdev, "tx packet too big\n");
604 /* Do we have a limit on #fragments ? I yet have to get a reply
605 * from Aspeed. If there's one I haven't hit it.
607 nfrags = skb_shinfo(skb)->nr_frags;
610 len = skb_headlen(skb);
612 /* Map the packet head */
613 map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
614 if (dma_mapping_error(priv->dev, map)) {
616 netdev_err(netdev, "map tx packet head failed\n");
620 /* Grab the next free tx descriptor */
621 pointer = priv->tx_pointer;
622 txdes = first = &priv->descs->txdes[pointer];
624 /* Setup it up with the packet head. Don't write the head to the
627 priv->tx_skbs[pointer] = skb;
628 f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
629 f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
630 f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
631 f_ctl_stat |= FTGMAC100_TXDES0_FTS;
/* Single-segment packet: first descriptor is also the last */
633 f_ctl_stat |= FTGMAC100_TXDES0_LTS;
634 txdes->txdes3 = cpu_to_le32(map);
636 /* Setup HW checksumming */
638 if (skb->ip_summed == CHECKSUM_PARTIAL &&
639 !ftgmac100_prep_tx_csum(skb, &csum_vlan))
641 txdes->txdes1 = cpu_to_le32(csum_vlan);
643 /* Next descriptor */
644 pointer = ftgmac100_next_tx_pointer(pointer);
646 /* Add the fragments */
647 for (i = 0; i < nfrags; i++) {
648 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
653 map = skb_frag_dma_map(priv->dev, frag, 0, len,
655 if (dma_mapping_error(priv->dev, map))
658 /* Setup descriptor */
659 priv->tx_skbs[pointer] = skb;
660 txdes = &priv->descs->txdes[pointer];
661 ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
662 ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
663 ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
664 if (i == (nfrags - 1))
665 ctl_stat |= FTGMAC100_TXDES0_LTS;
666 txdes->txdes0 = cpu_to_le32(ctl_stat);
668 txdes->txdes3 = cpu_to_le32(map);
671 pointer = ftgmac100_next_tx_pointer(pointer);
674 /* Order the previous packet and descriptor udpates
675 * before setting the OWN bit on the first descriptor.
678 first->txdes0 = cpu_to_le32(f_ctl_stat);
680 /* Update next TX pointer */
681 priv->tx_pointer = pointer;
683 /* If there isn't enough room for all the fragments of a new packet
684 * in the TX ring, stop the queue. The sequence below is race free
685 * vs. a concurrent restart in ftgmac100_poll()
687 if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
688 netif_stop_queue(netdev);
689 /* Order the queue stop with the test below */
691 if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
692 netif_wake_queue(netdev);
695 /* Poke transmitter to read the updated TX descriptors */
696 iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
/* Error unwind: free the head mapping, then any mapped fragments */
702 netdev_err(netdev, "map tx fragment failed\n");
705 pointer = priv->tx_pointer;
706 ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
707 first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
709 /* Then all fragments */
710 for (j = 0; j < i; j++) {
711 pointer = ftgmac100_next_tx_pointer(pointer);
712 txdes = &priv->descs->txdes[pointer];
713 ctl_stat = le32_to_cpu(txdes->txdes0);
714 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
715 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
718 /* This cannot be reached if we successfully mapped the
719 * last fragment, so we know ftgmac100_free_tx_packet()
720 * hasn't freed the skb yet.
723 /* Drop the packet */
724 dev_kfree_skb_any(skb);
725 netdev->stats.tx_dropped++;
/* Release every RX and TX buffer still attached to the rings: unmap the
 * DMA, free the skbs, clear the slot pointers. Called with the HW stopped.
 */
730 static void ftgmac100_free_buffers(struct ftgmac100 *priv)
734 /* Free all RX buffers */
735 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
736 struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
737 struct sk_buff *skb = priv->rx_skbs[i];
738 dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
743 priv->rx_skbs[i] = NULL;
744 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
745 dev_kfree_skb_any(skb);
748 /* Free all TX buffers */
749 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
750 struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
751 struct sk_buff *skb = priv->tx_skbs[i];
/* free_tx_packet handles both head and fragment mappings */
755 ftgmac100_free_tx_packet(priv, i, skb, txdes,
756 le32_to_cpu(txdes->txdes0));
/* Free the coherent descriptor block and the scratch RX buffer. Safe to
 * call after a partially-failed ftgmac100_alloc_rings (NULL checks).
 */
760 static void ftgmac100_free_rings(struct ftgmac100 *priv)
762 /* Free descriptors */
764 dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
765 priv->descs, priv->descs_dma_addr);
767 /* Free scratch packet buffer */
768 if (priv->rx_scratch)
769 dma_free_coherent(priv->dev, RX_BUF_SIZE,
770 priv->rx_scratch, priv->rx_scratch_dma);
/* Allocate the coherent descriptor block and the scratch RX buffer used
 * when skb allocation fails.
 * NOTE(review): dma_zalloc_coherent() was removed in later kernels in
 * favour of dma_alloc_coherent() (which now zeroes) — verify against the
 * target kernel version.
 */
773 static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
775 /* Allocate descriptors */
776 priv->descs = dma_zalloc_coherent(priv->dev,
777 sizeof(struct ftgmac100_descs),
778 &priv->descs_dma_addr, GFP_KERNEL);
782 /* Allocate scratch packet buffer */
783 priv->rx_scratch = dma_alloc_coherent(priv->dev,
785 &priv->rx_scratch_dma,
787 if (!priv->rx_scratch)
/* Reset both rings to a clean state: RX descriptors all point at the
 * scratch buffer (real skbs are attached later by alloc_rx_buffers), and
 * the last entry of each ring gets its end-of-ring bit.
 */
793 static void ftgmac100_init_rings(struct ftgmac100 *priv)
795 struct ftgmac100_rxdes *rxdes;
796 struct ftgmac100_txdes *txdes;
799 /* Initialize RX ring */
800 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
801 rxdes = &priv->descs->rxdes[i];
803 rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
805 /* Mark the end of the ring */
806 rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
808 /* Initialize TX ring */
809 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
810 txdes = &priv->descs->txdes[i];
813 txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
/* Populate every RX ring slot with a mapped skb (GFP_KERNEL context);
 * fails hard on the first allocation error.
 */
816 static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
820 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
821 struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
823 if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
/* phylib link-change callback. Caches the new speed/duplex (speed 0 means
 * no link) and, when anything changed, masks interrupts and schedules the
 * reset task to reconfigure the MAC outside interrupt context.
 */
829 static void ftgmac100_adjust_link(struct net_device *netdev)
831 struct ftgmac100 *priv = netdev_priv(netdev);
832 struct phy_device *phydev = netdev->phydev;
835 /* We store "no link" as speed 0 */
839 new_speed = phydev->speed;
/* Nothing changed, nothing to do */
841 if (phydev->speed == priv->cur_speed &&
842 phydev->duplex == priv->cur_duplex)
845 /* Print status if we have a link or we had one and just lost it,
846 * don't print otherwise.
848 if (new_speed || priv->cur_speed)
849 phy_print_status(phydev);
851 priv->cur_speed = new_speed;
852 priv->cur_duplex = phydev->duplex;
854 /* Link is down, do nothing else */
858 /* Disable all interrupts */
859 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
861 /* Reset the adapter asynchronously */
862 schedule_work(&priv->reset_task);
/* Find the first PHY on our MDIO bus and attach it with adjust_link as the
 * state-change handler (GMII interface mode).
 */
865 static int ftgmac100_mii_probe(struct ftgmac100 *priv)
867 struct net_device *netdev = priv->netdev;
868 struct phy_device *phydev;
870 phydev = phy_find_first(priv->mii_bus);
872 netdev_info(netdev, "%s: no PHY found\n", netdev->name);
876 phydev = phy_connect(netdev, phydev_name(phydev),
877 &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII);
879 if (IS_ERR(phydev)) {
880 netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
881 return PTR_ERR(phydev);
/* MDIO bus read: kick off a MIIRD transaction in PHYCR and poll (up to 10
 * iterations) for the HW to clear the MIIRD bit, then fetch the result
 * from PHYDATA. Returns the register value or an error on timeout.
 */
887 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
889 struct net_device *netdev = bus->priv;
890 struct ftgmac100 *priv = netdev_priv(netdev);
894 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
896 /* preserve MDC cycle threshold */
897 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
899 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
900 FTGMAC100_PHYCR_REGAD(regnum) |
901 FTGMAC100_PHYCR_MIIRD;
903 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
905 for (i = 0; i < 10; i++) {
906 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
908 if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
911 data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
912 return FTGMAC100_PHYDATA_MIIRDATA(data);
918 netdev_err(netdev, "mdio read timed out\n");
/* MDIO bus write: stage the value in PHYDATA, trigger a MIIWR transaction
 * in PHYCR and poll (up to 10 iterations) for completion. Errors out on
 * timeout.
 */
922 static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
923 int regnum, u16 value)
925 struct net_device *netdev = bus->priv;
926 struct ftgmac100 *priv = netdev_priv(netdev);
931 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
933 /* preserve MDC cycle threshold */
934 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
936 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
937 FTGMAC100_PHYCR_REGAD(regnum) |
938 FTGMAC100_PHYCR_MIIWR;
940 data = FTGMAC100_PHYDATA_MIIWDATA(value);
/* Data register must be written before starting the transaction */
942 iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
943 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
945 for (i = 0; i < 10; i++) {
946 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
948 if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
954 netdev_err(netdev, "mdio write timed out\n");
/* ethtool get_drvinfo: report driver name, version and bus identifier */
958 static void ftgmac100_get_drvinfo(struct net_device *netdev,
959 struct ethtool_drvinfo *info)
961 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
962 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
963 strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
/* ethtool operations; link settings are delegated to phylib helpers */
966 static const struct ethtool_ops ftgmac100_ethtool_ops = {
967 .get_drvinfo = ftgmac100_get_drvinfo,
968 .get_link = ethtool_op_get_link,
969 .get_link_ksettings = phy_ethtool_get_link_ksettings,
970 .set_link_ksettings = phy_ethtool_set_link_ksettings,
/* Hard IRQ handler: acknowledge all pending interrupt bits, account the
 * abnormal ones (overrun, FIFO losses), reset the chip on AHB bus errors,
 * then mask normal interrupts and hand processing to NAPI.
 */
973 static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
975 struct net_device *netdev = dev_id;
976 struct ftgmac100 *priv = netdev_priv(netdev);
977 unsigned int status, new_mask = FTGMAC100_INT_BAD;
979 /* Fetch and clear interrupt bits, process abnormal ones */
980 status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
981 iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
982 if (unlikely(status & FTGMAC100_INT_BAD)) {
984 /* RX buffer unavailable */
985 if (status & FTGMAC100_INT_NO_RXBUF)
986 netdev->stats.rx_over_errors++;
988 /* received packet lost due to RX FIFO full */
989 if (status & FTGMAC100_INT_RPKT_LOST)
990 netdev->stats.rx_fifo_errors++;
992 /* sent packet lost due to excessive TX collision */
993 if (status & FTGMAC100_INT_XPKT_LOST)
994 netdev->stats.tx_fifo_errors++;
996 /* AHB error -> Reset the chip */
997 if (status & FTGMAC100_INT_AHB_ERR) {
1000 "AHB bus error ! Resetting chip.\n");
1001 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1002 schedule_work(&priv->reset_task);
1006 /* We may need to restart the MAC after such errors, delay
1007 * this until after we have freed some Rx buffers though
1009 priv->need_mac_restart = true;
1011 /* Disable those errors until we restart */
1012 new_mask &= ~status;
1015 /* Only enable "bad" interrupts while NAPI is on */
1016 iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);
1018 /* Schedule NAPI bh */
1019 napi_schedule_irqoff(&priv->napi);
/* Non-destructive peek: does the current RX descriptor hold a packet ? */
1024 static bool ftgmac100_check_rx(struct ftgmac100 *priv)
1026 struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[priv->rx_pointer];
1028 /* Do we have a packet ? */
1029 return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
/* NAPI poll: reap TX completions, receive up to @budget packets, restart
 * the MAC if the IRQ handler requested it, and — when under budget — clear
 * latched RX/TX interrupt bits, re-check for work to avoid a race, then
 * complete NAPI and re-enable all interrupts.
 */
1032 static int ftgmac100_poll(struct napi_struct *napi, int budget)
1034 struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
1038 /* Handle TX completions */
1039 if (ftgmac100_tx_buf_cleanable(priv))
1040 ftgmac100_tx_complete(priv);
1042 /* Handle RX packets */
1044 more = ftgmac100_rx_packet(priv, &work_done);
1045 } while (more && work_done < budget);
1048 /* The interrupt is telling us to kick the MAC back to life
1049 * after an RX overflow
1051 if (unlikely(priv->need_mac_restart)) {
1052 ftgmac100_start_hw(priv);
1054 /* Re-enable "bad" interrupts */
1055 iowrite32(FTGMAC100_INT_BAD,
1056 priv->base + FTGMAC100_OFFSET_IER);
1059 /* As long as we are waiting for transmit packets to be
1060 * completed we keep NAPI going
1062 if (ftgmac100_tx_buf_cleanable(priv))
1065 if (work_done < budget) {
1066 /* We are about to re-enable all interrupts. However
1067 * the HW has been latching RX/TX packet interrupts while
1068 * they were masked. So we clear them first, then we need
1069 * to re-check if there's something to process
1071 iowrite32(FTGMAC100_INT_RXTX,
1072 priv->base + FTGMAC100_OFFSET_ISR);
1073 if (ftgmac100_check_rx(priv) ||
1074 ftgmac100_tx_buf_cleanable(priv))
1077 /* deschedule NAPI */
1078 napi_complete(napi);
1080 /* enable all interrupts */
1081 iowrite32(FTGMAC100_INT_ALL,
1082 priv->base + FTGMAC100_OFFSET_IER);
/* Common bring-up path shared by open() and the reset task: rebuild the
 * rings, refill RX buffers (failures tolerated when @ignore_alloc_err, as
 * during reset the scratch buffer keeps the HW alive), restart the HW,
 * re-enable NAPI/queue and unmask all interrupts.
 */
1088 static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
1092 /* Re-init descriptors (adjust queue sizes) */
1093 ftgmac100_init_rings(priv);
1095 /* Realloc rx descriptors */
1096 err = ftgmac100_alloc_rx_buffers(priv);
1097 if (err && !ignore_alloc_err)
1100 /* Reinit and restart HW */
1101 ftgmac100_init_hw(priv);
1102 ftgmac100_start_hw(priv);
1104 /* Re-enable the device */
1105 napi_enable(&priv->napi);
1106 netif_start_queue(priv->netdev);
1108 /* Enable all interrupts */
1109 iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);
/* Workqueue handler performing a full adapter reset outside IRQ context:
 * takes the PHY and MDIO locks, quiesces the stack and NAPI, resets the
 * MAC, frees all buffers and re-runs init_all. Bails out early if the
 * interface was brought down in the meantime.
 */
1114 static void ftgmac100_reset_task(struct work_struct *work)
1116 struct ftgmac100 *priv = container_of(work, struct ftgmac100,
1118 struct net_device *netdev = priv->netdev;
1121 netdev_dbg(netdev, "Resetting NIC...\n");
1123 /* Lock the world */
1126 mutex_lock(&netdev->phydev->lock);
1128 mutex_lock(&priv->mii_bus->mdio_lock);
1131 /* Check if the interface is still up */
1132 if (!netif_running(netdev))
1135 /* Stop the network stack */
1136 netif_trans_update(netdev);
1137 napi_disable(&priv->napi);
1138 netif_tx_disable(netdev);
1140 /* Stop and reset the MAC */
1141 ftgmac100_stop_hw(priv);
1142 err = ftgmac100_reset_and_config_mac(priv);
1144 /* Not much we can do ... it might come back... */
1145 netdev_err(netdev, "attempting to continue...\n");
1148 /* Free all rx and tx buffers */
1149 ftgmac100_free_buffers(priv);
1151 /* Setup everything again and restart chip */
1152 ftgmac100_init_all(priv, true);
1154 netdev_dbg(netdev, "Reset done !\n");
/* Unlock in reverse order of acquisition */
1157 mutex_unlock(&priv->mii_bus->mdio_lock);
1159 mutex_unlock(&netdev->phydev->lock);
/* ndo_open: allocate rings, pick initial speed/duplex (forced 100/full for
 * NC-SI, otherwise left at 0 until the PHY reports), reset the MAC,
 * register NAPI, grab the IRQ, start the HW, then start either the PHY
 * state machine or the NC-SI device. Error paths unwind in reverse order.
 */
1163 static int ftgmac100_open(struct net_device *netdev)
1165 struct ftgmac100 *priv = netdev_priv(netdev);
1168 /* Allocate ring buffers */
1169 err = ftgmac100_alloc_rings(priv);
1171 netdev_err(netdev, "Failed to allocate descriptors\n");
1175 /* When using NC-SI we force the speed to 100Mbit/s full duplex,
1177 * Otherwise we leave it set to 0 (no link), the link
1178 * message from the PHY layer will handle setting it up to
1179 * something else if needed.
1181 if (priv->use_ncsi) {
1182 priv->cur_duplex = DUPLEX_FULL;
1183 priv->cur_speed = SPEED_100;
1185 priv->cur_duplex = 0;
1186 priv->cur_speed = 0;
1189 /* Reset the hardware */
1190 err = ftgmac100_reset_and_config_mac(priv);
1194 /* Initialize NAPI */
1195 netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
1197 /* Grab our interrupt */
1198 err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
1200 netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
1204 /* Start things up */
1205 err = ftgmac100_init_all(priv, false);
1207 netdev_err(netdev, "Failed to allocate packet buffers\n");
1211 if (netdev->phydev) {
1212 /* If we have a PHY, start polling */
1213 phy_start(netdev->phydev);
1214 } else if (priv->use_ncsi) {
1215 /* If using NC-SI, set our carrier on and start the stack */
1216 netif_carrier_on(netdev);
1218 /* Start the NCSI device */
1219 err = ncsi_start_dev(priv->ndev);
/* Error unwind path, reverse order of setup above */
1227 napi_disable(&priv->napi);
1228 netif_stop_queue(netdev);
1230 ftgmac100_free_buffers(priv);
1231 free_irq(netdev->irq, netdev);
1233 netif_napi_del(&priv->napi);
1235 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1236 ftgmac100_free_rings(priv);
/* ndo_stop: mask interrupts, quiesce queue/NAPI, stop the PHY or NC-SI
 * device, stop the HW and free everything. See the note below on why the
 * reset task is not cancelled synchronously.
 */
1240 static int ftgmac100_stop(struct net_device *netdev)
1242 struct ftgmac100 *priv = netdev_priv(netdev);
1244 /* Note about the reset task: We are called with the rtnl lock
1245 * held, so we are synchronized against the core of the reset
1246 * task. We must not try to synchronously cancel it otherwise
1247 * we can deadlock. But since it will test for netif_running()
1248 * which has already been cleared by the net core, we don't
1249 * anything special to do.
1252 /* disable all interrupts */
1253 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1255 netif_stop_queue(netdev);
1256 napi_disable(&priv->napi);
1257 netif_napi_del(&priv->napi);
1259 phy_stop(netdev->phydev);
1260 else if (priv->use_ncsi)
1261 ncsi_stop_dev(priv->ndev);
1263 ftgmac100_stop_hw(priv);
1264 free_irq(netdev->irq, netdev);
1265 ftgmac100_free_buffers(priv);
1266 ftgmac100_free_rings(priv);
/* ndo_do_ioctl: forward MII ioctls to the attached PHY, if any */
1272 static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1274 if (!netdev->phydev)
1277 return phy_mii_ioctl(netdev->phydev, ifr, cmd);
/* ndo_tx_timeout: mask interrupts and schedule a full adapter reset from
 * process context (the reset task).
 */
1280 static void ftgmac100_tx_timeout(struct net_device *netdev)
1282 struct ftgmac100 *priv = netdev_priv(netdev);
1284 /* Disable all interrupts */
1285 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1287 /* Do the reset outside of interrupt context */
1288 schedule_work(&priv->reset_task);
/* net_device operations table */
1291 static const struct net_device_ops ftgmac100_netdev_ops = {
1292 .ndo_open = ftgmac100_open,
1293 .ndo_stop = ftgmac100_stop,
1294 .ndo_start_xmit = ftgmac100_hard_start_xmit,
1295 .ndo_set_mac_address = ftgmac100_set_mac_addr,
1296 .ndo_validate_addr = eth_validate_addr,
1297 .ndo_do_ioctl = ftgmac100_do_ioctl,
1298 .ndo_tx_timeout = ftgmac100_tx_timeout,
/* Allocate and register the MDIO bus (selecting the old MDIO interface on
 * Aspeed parts), then probe/attach the PHY. goto-unwind on failure.
 */
1301 static int ftgmac100_setup_mdio(struct net_device *netdev)
1303 struct ftgmac100 *priv = netdev_priv(netdev);
1304 struct platform_device *pdev = to_platform_device(priv->dev);
1308 /* initialize mdio bus */
1309 priv->mii_bus = mdiobus_alloc();
1313 if (priv->is_aspeed) {
1314 /* This driver supports the old MDIO interface */
1315 reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
1316 reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
1317 iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
1320 priv->mii_bus->name = "ftgmac100_mdio";
1321 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
1322 pdev->name, pdev->id);
1323 priv->mii_bus->priv = priv->netdev;
1324 priv->mii_bus->read = ftgmac100_mdiobus_read;
1325 priv->mii_bus->write = ftgmac100_mdiobus_write;
/* All PHYs are polled; no dedicated PHY interrupt lines */
1327 for (i = 0; i < PHY_MAX_ADDR; i++)
1328 priv->mii_bus->irq[i] = PHY_POLL;
1330 err = mdiobus_register(priv->mii_bus);
1332 dev_err(priv->dev, "Cannot register MDIO bus!\n");
1333 goto err_register_mdiobus;
1336 err = ftgmac100_mii_probe(priv);
1338 dev_err(priv->dev, "MII Probe failed!\n");
/* Error unwind */
1345 mdiobus_unregister(priv->mii_bus);
1346 err_register_mdiobus:
1347 mdiobus_free(priv->mii_bus);
1351 static void ftgmac100_destroy_mdio(struct net_device *netdev)
1353 struct ftgmac100 *priv = netdev_priv(netdev);
1355 if (!netdev->phydev)
1358 phy_disconnect(netdev->phydev);
1359 mdiobus_unregister(priv->mii_bus);
1360 mdiobus_free(priv->mii_bus);
1363 static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
1365 if (unlikely(nd->state != ncsi_dev_state_functional))
1368 netdev_info(nd->dev, "NCSI interface %s\n",
1369 nd->link_up ? "up" : "down");
1372 static int ftgmac100_probe(struct platform_device *pdev)
1374 struct resource *res;
1376 struct net_device *netdev;
1377 struct ftgmac100 *priv;
1378 struct device_node *np;
1384 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1388 irq = platform_get_irq(pdev, 0);
1392 /* setup net_device */
1393 netdev = alloc_etherdev(sizeof(*priv));
1396 goto err_alloc_etherdev;
1399 SET_NETDEV_DEV(netdev, &pdev->dev);
1401 netdev->ethtool_ops = &ftgmac100_ethtool_ops;
1402 netdev->netdev_ops = &ftgmac100_netdev_ops;
1403 netdev->watchdog_timeo = 5 * HZ;
1405 platform_set_drvdata(pdev, netdev);
1407 /* setup private data */
1408 priv = netdev_priv(netdev);
1409 priv->netdev = netdev;
1410 priv->dev = &pdev->dev;
1411 INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
1414 priv->res = request_mem_region(res->start, resource_size(res),
1415 dev_name(&pdev->dev));
1417 dev_err(&pdev->dev, "Could not reserve memory region\n");
1422 priv->base = ioremap(res->start, resource_size(res));
1424 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1431 /* MAC address from chip or random one */
1432 ftgmac100_initial_mac(priv);
1434 np = pdev->dev.of_node;
1435 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
1436 of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
1437 priv->rxdes0_edorr_mask = BIT(30);
1438 priv->txdes0_edotr_mask = BIT(30);
1439 priv->is_aspeed = true;
1441 priv->rxdes0_edorr_mask = BIT(15);
1442 priv->txdes0_edotr_mask = BIT(15);
1445 if (np && of_get_property(np, "use-ncsi", NULL)) {
1446 if (!IS_ENABLED(CONFIG_NET_NCSI)) {
1447 dev_err(&pdev->dev, "NCSI stack not enabled\n");
1451 dev_info(&pdev->dev, "Using NCSI interface\n");
1452 priv->use_ncsi = true;
1453 priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
1457 priv->use_ncsi = false;
1458 err = ftgmac100_setup_mdio(netdev);
1460 goto err_setup_mdio;
1463 /* Base feature set */
1464 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
1465 NETIF_F_GRO | NETIF_F_SG;
1467 /* AST2400 doesn't have working HW checksum generation */
1468 if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
1469 netdev->hw_features &= ~NETIF_F_HW_CSUM;
1470 if (np && of_get_property(np, "no-hw-checksum", NULL))
1471 netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
1472 netdev->features |= netdev->hw_features;
1474 /* register network device */
1475 err = register_netdev(netdev);
1477 dev_err(&pdev->dev, "Failed to register netdev\n");
1478 goto err_register_netdev;
1481 netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
1486 err_register_netdev:
1487 ftgmac100_destroy_mdio(netdev);
1489 iounmap(priv->base);
1491 release_resource(priv->res);
1493 netif_napi_del(&priv->napi);
1494 free_netdev(netdev);
1499 static int ftgmac100_remove(struct platform_device *pdev)
1501 struct net_device *netdev;
1502 struct ftgmac100 *priv;
1504 netdev = platform_get_drvdata(pdev);
1505 priv = netdev_priv(netdev);
1507 unregister_netdev(netdev);
1509 /* There's a small chance the reset task will have been re-queued,
1510 * during stop, make sure it's gone before we free the structure.
1512 cancel_work_sync(&priv->reset_task);
1514 ftgmac100_destroy_mdio(netdev);
1516 iounmap(priv->base);
1517 release_resource(priv->res);
1519 netif_napi_del(&priv->napi);
1520 free_netdev(netdev);
1524 static const struct of_device_id ftgmac100_of_match[] = {
1525 { .compatible = "faraday,ftgmac100" },
1528 MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
1530 static struct platform_driver ftgmac100_driver = {
1531 .probe = ftgmac100_probe,
1532 .remove = ftgmac100_remove,
1535 .of_match_table = ftgmac100_of_match,
1538 module_platform_driver(ftgmac100_driver);
/* Module metadata */
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
MODULE_LICENSE("GPL");