2 * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
3 * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
4 * (C) Copyright 2008 Armadeus Systems nc
5 * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
6 * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
8 * SPDX-License-Identifier: GPL-2.0+
17 #include <asm/arch/sys_proto.h>
18 #include <asm/arch/clock.h>
19 #include <asm/arch/imx-regs.h>
21 #include <asm/errno.h>
22 #include <linux/compiler.h>
26 DECLARE_GLOBAL_DATA_PTR;
29 * Timeout the transfer after 5 mS. This is usually a bit more, since
30 * the code in the tightloops this timeout is used in adds some overhead.
32 #define FEC_XFER_TIMEOUT 5000
35 * The standard 32-byte DMA alignment does not work on mx6solox, which requires
36 * 64-byte alignment in the DMA RX FEC buffer.
37 * Introduce the FEC_DMA_RX_MINALIGN which can cover mx6solox needs and also
38 * satisfies the alignment on other SoCs (32-bytes)
40 #define FEC_DMA_RX_MINALIGN 64
43 #error "CONFIG_MII has to be defined!"
46 #ifndef CONFIG_FEC_XCV_TYPE
47 #define CONFIG_FEC_XCV_TYPE MII100
51 * The i.MX28 operates with packets in big endian. We need to swap them before
52 * sending and after receiving.
54 #ifdef CONFIG_SOC_MX28
55 #define CONFIG_FEC_MXC_SWAP_PACKET
58 #define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
60 /* Check various alignment issues at compile time */
61 #if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
62 #error "ARCH_DMA_MINALIGN must be multiple of 16!"
65 #if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
66 (PKTALIGN % ARCH_DMA_MINALIGN != 0))
67 #error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
72 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
/*
 * Byte-swap a frame buffer in place, one 32-bit word at a time; the
 * i.MX28 FEC exchanges frames big-endian (see CONFIG_FEC_MXC_SWAP_PACKET
 * above). Length is rounded up, so up to 3 bytes past 'length' inside
 * the final word are swapped as well — buffers are sized for this.
 */
73 static void swap_packet(uint32_t *packet, int length)
77 for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
78 packet[i] = __swab32(packet[i]);
83 * MII-interface related functions
/*
 * Read one 16-bit PHY register over the FEC MII management interface:
 * clear the MII event, program mii_data with a Clause-22 read frame,
 * busy-wait (1 ms budget) for the MII interrupt, then fetch the value.
 * NOTE(review): every "ð" in this block is a mis-encoded "&eth"
 * HTML entity (i.e. "&eth->ievent") — restore before compiling.
 */
85 static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyAddr,
88 uint32_t reg; /* convenient holder for the PHY register */
89 uint32_t phy; /* convenient holder for the PHY */
94 * reading from any PHY's register is done by properly
95 * programming the FEC's MII data register.
97 writel(FEC_IEVENT_MII, ð->ievent);
98 reg = regAddr << FEC_MII_DATA_RA_SHIFT;
99 phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
101 writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
102 phy | reg, ð->mii_data);
105 * wait for the related interrupt
107 start = get_timer(0);
108 while (!(readl(ð->ievent) & FEC_IEVENT_MII)) {
109 if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
/* Re-check once on timer expiry so an event that landed just as the
 * deadline passed is not reported as a failure — presumably followed
 * by a break in the elided lines; TODO confirm against full source. */
110 if (readl(ð->ievent) & FEC_IEVENT_MII)
112 printf("Read MDIO failed...\n");
118 * clear mii interrupt bit
120 writel(FEC_IEVENT_MII, ð->ievent);
123 * it's now safe to read the PHY's register
125 val = (unsigned short)readl(ð->mii_data);
126 debug("%s: phy: %02x reg:%02x val:%#06x\n", __func__, phyAddr,
/*
 * Program the MDC clock divider from the current FEC module clock so
 * the management clock stays within spec (divisor = fecclk / 5 MHz,
 * rounded up). Called at probe and again in fec_init().
 * NOTE(review): "ð" below is a mis-encoded "&eth" — restore it.
 */
131 static void fec_mii_setspeed(struct ethernet_regs *eth)
134 * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
135 * and do not drop the Preamble.
137 register u32 speed = DIV_ROUND_UP(imx_get_fecclk(), 5000000);
138 #ifdef FEC_QUIRK_ENET_MAC
142 writel(speed, ð->mii_speed);
143 debug("%s: mii_speed %08x\n", __func__, readl(ð->mii_speed));
/*
 * Write one 16-bit value to a PHY register over the FEC MII management
 * interface: issue a Clause-22 write frame via mii_data, busy-wait
 * (1 ms budget) for the MII event, then clear it.
 * NOTE(review): every "ð" in this block is a mis-encoded "&eth"
 * HTML entity — restore before compiling.
 */
146 static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyAddr,
147 uint8_t regAddr, uint16_t data)
149 uint32_t reg; /* convenient holder for the PHY register */
150 uint32_t phy; /* convenient holder for the PHY */
153 reg = regAddr << FEC_MII_DATA_RA_SHIFT;
154 phy = phyAddr << FEC_MII_DATA_PA_SHIFT;
156 writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
157 FEC_MII_DATA_TA | phy | reg | data, ð->mii_data);
160 * wait for the MII interrupt
162 start = get_timer(0);
163 while (!(readl(ð->ievent) & FEC_IEVENT_MII)) {
164 if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
/* Late re-check mirrors fec_mdio_read(); see note there. */
165 if (readl(ð->ievent) & FEC_IEVENT_MII)
167 printf("Write MDIO failed...\n");
173 * clear MII interrupt bit
175 writel(FEC_IEVENT_MII, ð->ievent);
176 debug("%s: phy: %02x reg:%02x val:%#06x\n", __func__, phyAddr,
/*
 * mii_dev read adapter: forwards to fec_mdio_read() using the
 * ethernet_regs pointer stashed in bus->priv. The Clause-45
 * dev_addr argument is ignored (Clause-22 only).
 */
182 static int fec_phy_read(struct mii_dev *bus, int phyAddr, int dev_addr,
185 return fec_mdio_read(bus->priv, phyAddr, regAddr);
/*
 * mii_dev write adapter: forwards to fec_mdio_write() using
 * bus->priv; dev_addr is ignored (Clause-22 only).
 */
188 static int fec_phy_write(struct mii_dev *bus, int phyAddr, int dev_addr,
189 int regAddr, u16 data)
191 return fec_mdio_write(bus->priv, phyAddr, regAddr, data);
194 #ifndef CONFIG_PHYLIB
/*
 * Legacy (non-PHYLIB) path: reset the PHY, advertise all 10/100
 * abilities and restart autonegotiation, then give the board a hook
 * via fec->mii_postcall for PHY fixups. Compiled out entirely when
 * CONFIG_FEC_MXC_NO_ANEG is set.
 */
195 static int miiphy_restart_aneg(struct eth_device *dev)
198 #if !defined(CONFIG_FEC_MXC_NO_ANEG)
199 struct fec_priv *fec = (struct fec_priv *)dev->priv;
200 struct ethernet_regs *eth = fec->bus->priv;
203 * Wake up from sleep if necessary
204 * Reset PHY, then delay 300ns
206 #ifdef CONFIG_SOC_MX27
/* i.MX27 only: program the PHY delay counter before reset. */
207 fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
209 fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
213 * Set the auto-negotiation advertisement register bits
215 fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
216 LPA_100FULL | LPA_100HALF | LPA_10FULL |
217 LPA_10HALF | PHY_ANLPAR_PSB_802_3);
218 fec_mdio_write(eth, fec->phy_id, MII_BMCR,
219 BMCR_ANENABLE | BMCR_ANRESTART);
/* Board-specific PHY fixup hook, if one was registered. */
221 if (fec->mii_postcall)
222 ret = fec->mii_postcall(fec->phy_id);
/*
 * Legacy (non-PHYLIB) path: poll BMSR until link is up
 * (BMSR_LSTATUS) or a 5 second timeout elapses. A negative read
 * from the MDIO bus is reported as an autonegotiation failure.
 */
228 static int miiphy_wait_aneg(struct eth_device *dev)
232 struct fec_priv *fec = (struct fec_priv *)dev->priv;
233 struct ethernet_regs *eth = fec->bus->priv;
236 * Wait for AN completion
238 start = get_timer(0);
240 if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
241 printf("%s: Autonegotiation timeout\n", dev->name);
245 status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
247 printf("%s: Autonegotiation failed. status: %d\n",
251 } while (!(status & BMSR_LSTATUS));
257 static inline void fec_rx_task_enable(struct fec_priv *fec)
259 writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->r_des_active);
/*
 * Disable the SmartDMA receive task. Body elided in this listing —
 * presumably empty (the FEC has no explicit RX-stop register);
 * TODO confirm against full source.
 */
262 static inline void fec_rx_task_disable(struct fec_priv *fec)
/*
 * Enable the SmartDMA transmit task: setting TDAR in the
 * transmit-descriptor-active register starts the FEC polling the
 * TX buffer descriptor ring.
 */
266 static inline void fec_tx_task_enable(struct fec_priv *fec)
268 writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
/*
 * Disable the SmartDMA transmit task. Body elided in this listing —
 * presumably empty, mirroring fec_rx_task_disable(); TODO confirm.
 */
271 static inline void fec_tx_task_disable(struct fec_priv *fec)
276 * Initialize receive task's buffer descriptors
277 * @param[in] fec all we know about the device yet
278 * @param[in] count receive buffer count to be allocated
279 * @param[in] dsize desired size of each receive buffer
280 * @return 0 on success
282 * Init all RX descriptors to default values.
284 static void fec_rbd_init(struct fec_priv *fec, int count, int dsize)
286 size_t rbd_size, pkt_size;
291 * Reload the RX descriptors with default values and wipe
294 pkt_size = roundup(dsize, ARCH_DMA_MINALIGN);
295 for (i = 0; i < count; i++) {
/* Wipe each previously allocated RX buffer and flush it so the
 * DMA engine never sees stale cached data. */
296 data = (void *)fec->rbd_base[i].data_pointer;
297 memset(data, 0, dsize);
298 flush_dcache_range((unsigned long)data,
299 (unsigned long)data + pkt_size);
301 fec->rbd_base[i].status = FEC_RBD_EMPTY;
302 fec->rbd_base[i].data_length = 0;
305 /* Mark the last RBD to close the ring. */
306 fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
/* Push the whole descriptor ring to RAM for the DMA engine. */
309 rbd_size = roundup(sizeof(struct fec_bd) * count, ARCH_DMA_MINALIGN);
310 flush_dcache_range((unsigned long)fec->rbd_base,
311 (unsigned long)fec->rbd_base + rbd_size);
315 * Initialize transmit task's buffer descriptors
316 * @param[in] fec all we know about the device yet
318 * Transmit buffers are created externally. We only have to init the BDs here.\n
319 * Note: There is a race condition in the hardware. When only one BD is in
320 * use it must be marked with the WRAP bit to use it for every transmit.
321 * This bit in combination with the READY bit results in double transmit
322 * of each data buffer. It seems the state machine checks READY earlier than
323 * resetting it after the first transfer.
324 * Using two BDs solves this issue.
326 static void fec_tbd_init(struct fec_priv *fec)
328 unsigned long addr = (unsigned long)fec->tbd_base;
329 unsigned size = roundup(2 * sizeof(struct fec_bd),
/* BD0 cleared, BD1 carries only WRAP so the ring closes after two
 * descriptors; flush so the DMA engine sees the initialized ring. */
332 memset(fec->tbd_base, 0, size);
333 fec->tbd_base[0].status = 0;
334 fec->tbd_base[1].status = FEC_TBD_WRAP;
336 flush_dcache_range(addr, addr + size);
340 * Mark the given read buffer descriptor as free
341 * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
342 * @param[in] pRbd buffer descriptor to mark free again
344 static void fec_rbd_clean(int last, struct fec_bd *pRbd)
346 unsigned short flags = FEC_RBD_EMPTY;
/* Keep the ring closed: the final descriptor also carries WRAP. */
348 flags |= FEC_RBD_WRAP;
349 writew(flags, &pRbd->status);
350 writew(0, &pRbd->data_length);
353 static int fec_get_hwaddr(struct eth_device *dev, int dev_id,
356 imx_get_mac_from_fuse(dev_id, mac);
357 return !is_valid_ethaddr(mac);
360 static int fec_set_hwaddr(struct eth_device *dev)
362 uchar *mac = dev->enetaddr;
363 struct fec_priv *fec = dev->priv;
365 writel(0, &fec->eth->iaddr1);
366 writel(0, &fec->eth->iaddr2);
367 writel(0, &fec->eth->gaddr1);
368 writel(0, &fec->eth->gaddr2);
371 * Set physical address
373 writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
375 writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);
381 * Do initial configuration of the FEC registers
383 static void fec_reg_setup(struct fec_priv *fec)
388 * Set interrupt mask register
/* Mask all interrupts — the driver polls IEVENT instead. */
390 writel(0x00000000, &fec->eth->imask);
393 * Clear FEC-Lite interrupt event register(IEVENT)
395 writel(0xffffffff, &fec->eth->ievent);
399 * Set FEC-Lite receive control register(R_CNTRL):
402 /* Start with frame length = 1518, common for all modes. */
403 rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
/* All xMII variants need MII mode + flow control; RGMII/RMII add
 * their interface-select bits on top. */
404 if (fec->xcv_type != SEVENWIRE) /* xMII modes */
405 rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
406 if (fec->xcv_type == RGMII)
407 rcntrl |= FEC_RCNTRL_RGMII;
408 else if (fec->xcv_type == RMII)
409 rcntrl |= FEC_RCNTRL_RMII;
411 writel(rcntrl, &fec->eth->r_cntrl);
415 * Start the FEC engine
416 * @param[in] dev Our device to handle
418 static int fec_open(struct eth_device *edev)
420 struct fec_priv *fec = edev->priv;
425 debug("fec_open: fec_open(dev)\n");
426 /* full-duplex, heartbeat disabled */
427 writel(1 << 2, &fec->eth->x_cntrl);
430 /* Invalidate all descriptors */
431 for (i = 0; i < FEC_RBD_NUM - 1; i++)
432 fec_rbd_clean(0, &fec->rbd_base[i]);
/* The final RBD gets last=1 so it carries the WRAP bit. */
433 fec_rbd_clean(1, &fec->rbd_base[i]);
435 /* Flush the descriptors into RAM */
436 size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
438 addr = (uint32_t)fec->rbd_base;
439 flush_dcache_range(addr, addr + size);
441 #ifdef FEC_QUIRK_ENET_MAC
442 /* Enable ENET HW endian SWAP */
443 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
445 /* Enable ENET store and forward mode */
446 writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
450 * Enable FEC-Lite controller
452 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
454 #if defined(CONFIG_SOC_MX25) || defined(CONFIG_SOC_MX53) || defined(CONFIG_SOC_MX6SL)
457 * setup the MII gasket for RMII mode
460 /* disable the gasket */
461 writew(0, &fec->eth->miigsk_enr);
463 /* wait for the gasket to be disabled */
464 while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
467 /* configure gasket for RMII, 50 MHz, no loopback, and no echo */
468 writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);
470 /* re-enable the gasket */
471 writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);
473 /* wait until MII gasket is ready */
475 while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
476 if (--max_loops <= 0) {
477 printf("WAIT for MII Gasket ready timed out\n");
485 /* Start up the PHY */
486 int ret = phy_startup(fec->phydev);
489 printf("Could not initialize PHY %s\n",
490 fec->phydev->dev->name);
/* PHYLIB path: take the negotiated speed from the phy_device.
 * Legacy path below asks the PHY via miiphy helpers instead. */
493 speed = fec->phydev->speed;
496 miiphy_wait_aneg(edev);
497 speed = miiphy_speed(edev->name, fec->phy_id);
498 miiphy_duplex(edev->name, fec->phy_id);
501 #ifdef FEC_QUIRK_ENET_MAC
/* ENET MAC: gigabit selected via ECR speed bit; 10 Mbit via the
 * RMII_10T bit in RCR. 100 Mbit is the default (neither set). */
503 u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
504 u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
506 if (speed == _1000BASET)
507 ecr |= FEC_ECNTRL_SPEED;
508 else if (speed != _100BASET)
509 rcr |= FEC_RCNTRL_RMII_10T;
510 writel(ecr, &fec->eth->ecntrl);
511 writel(rcr, &fec->eth->r_cntrl);
513 #elif defined(CONFIG_SOC_MX28)
515 u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
517 if (speed == _10BASET)
518 rcr |= FEC_RCNTRL_RMII_10T;
519 writel(rcr, &fec->eth->r_cntrl);
522 debug("%s:Speed=%i\n", __func__, speed);
525 * Enable SmartDMA receive task
527 fec_rx_task_enable(fec);
/*
 * (Re)initialize the controller: program the MAC address, set up the
 * TX/RX descriptor rings, MDC speed, pause/watermark registers and
 * ring base addresses, then (legacy path) restart autonegotiation.
 */
533 static int fec_init(struct eth_device *dev, bd_t* bd)
535 struct fec_priv *fec = dev->priv;
537 /* Initialize MAC address */
541 * Setup transmit descriptors, there are two in total.
545 /* Setup receive descriptors. */
546 fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE);
550 if (fec->xcv_type != SEVENWIRE)
551 fec_mii_setspeed(fec->bus->priv);
554 * Set Opcode/Pause Duration Register
556 writel(0x00010020, &fec->eth->op_pause); /* FIXME 0xffff0020; */
/* TX FIFO watermark select. */
557 writel(0x2, &fec->eth->x_wmrk);
559 * Set multicast address filter
561 writel(0x00000000, &fec->eth->gaddr1);
562 writel(0x00000000, &fec->eth->gaddr2);
564 /* Do not access reserved register for i.MX6UL */
565 #if !(defined(CONFIG_SOC_MX6UL) || defined(CONFIG_SOC_MX6ULL))
566 /* FIFO receive start register */
567 writel(0x520, &fec->eth->r_fstart);
569 /* size and address of each buffer */
570 writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
571 writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
572 writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);
574 #ifndef CONFIG_PHYLIB
575 if (fec->xcv_type != SEVENWIRE)
576 miiphy_restart_aneg(dev);
583 * Halt the FEC engine
584 * @param[in] dev Our device to handle
586 static void fec_halt(struct eth_device *dev)
588 struct fec_priv *fec = dev->priv;
592 * issue graceful stop command to the FEC transmitter if necessary
594 writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
597 debug("eth_halt: wait for stop regs\n");
599 * wait for graceful stop to register
/* Bounded poll for the graceful-stop-complete event (counter
 * initialization is elided in this listing). */
601 while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
605 * Disable SmartDMA tasks
607 fec_tx_task_disable(fec);
608 fec_rx_task_disable(fec);
611 * Disable the Ethernet Controller
612 * Note: this will also reset the BD index counter!
614 writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
618 debug("eth_halt: done\n");
623 * @param[in] dev Our ethernet device to handle
624 * @param[in] packet Pointer to the data to be transmitted
625 * @param[in] length Data count in bytes
626 * @return 0 on success
628 static int fec_send(struct eth_device *dev, void *packet, int length)
633 int timeout = FEC_XFER_TIMEOUT;
637 * This routine transmits one frame. This routine only accepts
638 * 6-byte Ethernet addresses.
640 struct fec_priv *fec = dev->priv;
643 * Check for valid length of data.
645 if ((length > 1500) || (length <= 0)) {
646 printf("Payload (%d) too large\n", length);
651 * Setup the transmit buffer. We are always using the first buffer for
652 * transmission, the second will be empty and only used to stop the DMA
653 * engine. We also flush the packet to RAM here to avoid cache trouble.
655 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
656 swap_packet(packet, length);
/* Flush the (cache-line-rounded) packet so DMA reads fresh data. */
659 addr = (uint32_t)packet;
660 end = roundup(addr + length, ARCH_DMA_MINALIGN);
661 addr &= ~(ARCH_DMA_MINALIGN - 1);
662 flush_dcache_range(addr, end);
664 writew(length, &fec->tbd_base[fec->tbd_index].data_length);
665 writel((unsigned long)packet,
666 &fec->tbd_base[fec->tbd_index].data_pointer);
669 * update BD's status now
671 * - is always the last in a chain (means no chain)
672 * - should transmit the CRC
673 * - might be the last BD in the list, so the address counter should
674 * wrap (-> keep the WRAP flag)
676 status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
677 status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
678 writew(status, &fec->tbd_base[fec->tbd_index].status);
681 * Flush data cache. This code flushes both TX descriptors to RAM.
682 * After this code, the descriptors will be safely in RAM and we
685 size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
686 addr = (uint32_t)fec->tbd_base;
687 flush_dcache_range(addr, addr + size);
690 * Below we read the DMA descriptor's last four bytes back from the
691 * DRAM. This is important in order to make sure that all WRITE
692 * operations on the bus that were triggered by previous cache FLUSH
695 * Otherwise, on MX28, it is possible to observe a corruption of the
696 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
697 * for the bus structure of MX28. The scenario is as follows:
699 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
700 * to DRAM due to flush_dcache_range()
701 * 2) ARM core writes the FEC registers via AHB_ARB2
702 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
704 * Note that 2) does sometimes finish before 1) due to reordering of
705 * WRITE accesses on the AHB bus, therefore triggering 3) before the
706 * DMA descriptor is fully written into DRAM. This results in occasional
707 * corruption of the DMA descriptor.
709 readl(addr + size - 4);
712 * Enable SmartDMA transmit task
714 fec_tx_task_enable(fec);
717 * Wait until frame is sent. On each turn of the wait cycle, we must
718 * invalidate data cache to see what's really in RAM. Also, we need
/* First wait: TDAR clears when the DMA has consumed the ring. */
722 if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
733 * The TDAR bit is cleared when the descriptors are all out from TX
734 * but on mx6solox we noticed that the READY bit is still not cleared
736 * These are two distinct signals, and in IC simulation, we found that
737 * TDAR always gets cleared prior than the READY bit of last BD becomes
739 * In mx6solox, we use a later version of FEC IP. It looks like that
740 * this intrinsic behaviour of TDAR bit has changed in this newer FEC
743 * Fix this by polling the READY bit of BD after the TDAR polling,
744 * which covers the mx6solox case and does not harm the other SoCs.
746 timeout = FEC_XFER_TIMEOUT;
748 invalidate_dcache_range(addr, addr + size);
749 if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
759 debug("fec_send: status 0x%x index %d ret %i\n",
760 readw(&fec->tbd_base[fec->tbd_index].status),
761 fec->tbd_index, ret);
762 /* for next transmission use the other buffer */
772 * Pull one frame from the card
773 * @param[in] dev Our ethernet device to handle
774 * @return Length of packet read
776 static int fec_recv(struct eth_device *dev)
778 struct fec_priv *fec = dev->priv;
779 struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
780 unsigned long ievent;
781 int frame_length, len = 0;
783 uint32_t addr, size, end;
785 ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);
788 * Check if any critical events have happened
790 ievent = readl(&fec->eth->ievent);
/* Acknowledge (write-1-to-clear) everything we just read. */
792 writel(ievent, &fec->eth->ievent);
795 debug("fec_recv: ievent 0x%lx\n", ievent);
796 if (ievent & FEC_IEVENT_BABR) {
/* Babbling receiver: reset the whole controller. */
798 fec_init(dev, fec->bd);
799 printf("some error: 0x%08lx\n", ievent);
802 if (ievent & FEC_IEVENT_HBERR) {
803 /* Heartbeat error */
804 writel(0x00000001 | readl(&fec->eth->x_cntrl),
807 if (ievent & FEC_IEVENT_GRA) {
808 /* Graceful stop complete */
809 if (readl(&fec->eth->x_cntrl) & 0x00000001) {
811 writel(~0x00000001 & readl(&fec->eth->x_cntrl),
813 fec_init(dev, fec->bd);
818 * Read the buffer status. Before the status can be read, the data cache
819 * must be invalidated, because the data in RAM might have been changed
820 * by DMA. The descriptors are properly aligned to cachelines so there's
821 * no need to worry they'd overlap.
823 * WARNING: By invalidating the descriptor here, we also invalidate
824 * the descriptors surrounding this one. Therefore we can NOT change the
825 * contents of this descriptor nor the surrounding ones. The problem is
826 * that in order to mark the descriptor as processed, we need to change
827 * the descriptor. The solution is to mark the whole cache line when all
828 * descriptors in the cache line are processed.
830 addr = (uint32_t)rbd;
831 addr &= ~(ARCH_DMA_MINALIGN - 1);
832 size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
833 invalidate_dcache_range(addr, addr + size);
835 bd_status = readw(&rbd->status);
836 if (!(bd_status & FEC_RBD_EMPTY)) {
837 debug("fec_recv: status 0x%04x len %u\n", bd_status,
838 readw(&rbd->data_length) - 4);
/* Accept only complete (LAST), error-free frames longer than a bare
 * Ethernet header; the -4 strips the trailing CRC. */
839 if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
840 ((readw(&rbd->data_length) - 4) > 14)) {
842 * Get buffer address and size
844 addr = readl(&rbd->data_pointer);
845 frame_length = readw(&rbd->data_length) - 4;
848 * Invalidate data cache over the buffer
850 end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
851 addr &= ~(ARCH_DMA_MINALIGN - 1);
852 invalidate_dcache_range(addr, end);
855 * Fill the buffer and pass it to upper layers
857 #ifdef CONFIG_FEC_MXC_SWAP_PACKET
858 swap_packet((uint32_t *)addr, frame_length);
860 memcpy(buff, (char *)addr, frame_length);
861 net_process_received_packet(buff, frame_length);
864 if (bd_status & FEC_RBD_ERR)
865 printf("error frame: 0x%08x 0x%08x\n",
870 * Free the current buffer, restart the engine and move forward
871 * to the next buffer. Here we check if the whole cacheline of
872 * descriptors was already processed and if so, we mark it free
/* Only when every descriptor sharing this cache line is consumed can
 * the line be rewritten and flushed as a whole (see WARNING above). */
875 size = RXDESC_PER_CACHELINE - 1;
876 if ((fec->rbd_index & size) == size) {
877 i = fec->rbd_index - size;
878 addr = (uint32_t)&fec->rbd_base[i];
879 for (; i <= fec->rbd_index ; i++) {
880 fec_rbd_clean(i == (FEC_RBD_NUM - 1),
883 flush_dcache_range(addr,
884 addr + ARCH_DMA_MINALIGN);
887 fec_rx_task_enable(fec);
888 fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
889 debug("fec_recv: stop\n");
/*
 * Compose the device name into 'dest': "FEC" for the default
 * interface (dev_id == -1), "FEC<n>" otherwise.
 */
895 static void fec_set_dev_name(char *dest, int dev_id)
897 sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
900 static int fec_alloc_descs(struct fec_priv *fec)
902 size_t tbd_size, rbd_size, pkt_size;
906 /* Allocate TX descriptors. */
907 tbd_size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
908 fec->tbd_base = memalign(ARCH_DMA_MINALIGN, tbd_size);
912 /* Allocate RX descriptors. */
913 rbd_size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
914 fec->rbd_base = memalign(ARCH_DMA_MINALIGN, rbd_size);
918 memset(fec->rbd_base, 0, rbd_size);
920 /* Allocate RX buffers. */
922 /* Maximum RX buffer size. */
923 pkt_size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN);
924 for (i = 0; i < FEC_RBD_NUM; i++) {
925 data = memalign(FEC_DMA_RX_MINALIGN, pkt_size);
927 printf("%s: error allocating rxbuf %d\n", __func__, i);
931 memset(data, 0, pkt_size);
933 fec->rbd_base[i].data_pointer = (uint32_t)data;
934 fec->rbd_base[i].status = FEC_RBD_EMPTY;
935 fec->rbd_base[i].data_length = 0;
936 /* Flush the buffer to memory. */
937 flush_dcache_range((uint32_t)data, (uint32_t)data + pkt_size);
940 /* Mark the last RBD to close the ring. */
941 fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
942 flush_dcache_range((unsigned long)fec->rbd_base, rbd_size);
951 free((void *)fec->rbd_base[i].data_pointer);
/*
 * Release everything fec_alloc_descs() obtained: every RX data
 * buffer, then (in the elided lines) the RX and TX descriptor rings.
 */
959 static void fec_free_descs(struct fec_priv *fec)
963 for (i = 0; i < FEC_RBD_NUM; i++)
964 free((void *)fec->rbd_base[i].data_pointer);
/*
 * Create and register one FEC interface: allocate eth_device and
 * fec_priv, allocate descriptor rings, hard-reset the controller
 * (5 s timeout), wire up the eth_device ops, attach the PHY (PHYLIB
 * or legacy phy_id), and seed the MAC address from fuses when the
 * environment does not already provide one. Two signatures follow —
 * the elided #ifdef CONFIG_PHYLIB / #else selects between them.
 */
970 int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
971 struct mii_dev *bus, struct phy_device *phydev)
973 static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
974 struct mii_dev *bus, int phy_id)
977 struct eth_device *edev;
978 struct fec_priv *fec;
979 unsigned char ethaddr[6];
983 /* create and fill edev struct */
984 edev = calloc(sizeof(struct eth_device), 1);
986 puts("fec_mxc: not enough malloc memory for eth_device\n");
991 fec = calloc(sizeof(struct fec_priv), 1);
993 puts("fec_mxc: not enough malloc memory for fec_priv\n");
998 ret = fec_alloc_descs(fec);
1003 edev->init = fec_init;
1004 edev->send = fec_send;
1005 edev->recv = fec_recv;
1006 edev->halt = fec_halt;
1007 edev->write_hwaddr = fec_set_hwaddr;
1009 fec->eth = (struct ethernet_regs *)base_addr;
1012 fec->xcv_type = CONFIG_FEC_XCV_TYPE;
/* Whole-controller reset; poll until the hardware clears the bit. */
1015 writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
1016 start = get_timer(0);
1017 while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
1018 if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
1019 printf("FEC MXC: Timeout reseting chip\n");
1026 fec_set_dev_name(edev->name, dev_id);
1027 fec->dev_id = (dev_id == -1) ? 0 : dev_id;
1029 fec_mii_setspeed(bus->priv);
1030 #ifdef CONFIG_PHYLIB
1031 fec->phydev = phydev;
1032 phy_connect_dev(phydev, edev);
1036 fec->phy_id = phy_id;
/* Only install a fused MAC when the environment has none. */
1040 if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) {
1042 debug("got MAC address from fuse: %pM\n", ethaddr);
1044 debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr);
1045 memcpy(edev->enetaddr, ethaddr, 6);
1046 if (!getenv("ethaddr"))
1047 eth_setenv_enetaddr("ethaddr", ethaddr);
1051 fec_free_descs(fec);
/*
 * Allocate and register an MDIO bus backed by the FEC at base_addr:
 * hooks fec_phy_read/fec_phy_write, names it after dev_id, registers
 * it, then programs the MDC divider. Returns the bus (or NULL on
 * failure in the elided error paths).
 */
1060 struct mii_dev *fec_get_miibus(uint32_t base_addr, int dev_id)
1062 struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
1063 struct mii_dev *bus;
1068 printf("mdio_alloc failed\n");
1071 bus->read = fec_phy_read;
1072 bus->write = fec_phy_write;
1074 fec_set_dev_name(bus->name, dev_id);
1076 ret = mdio_register(bus);
1078 printf("mdio_register failed\n");
1082 fec_mii_setspeed(eth);
/*
 * Board-level entry point: pick the MDIO-capable register block
 * (ENET0 on i.MX28, FEC_MDIO_BASE_ADDR if defined, else the FEC
 * itself), create the MDIO bus, find/attach the PHY (PHYLIB scans a
 * mask when phy_id < 0) and probe the interface at 'addr'.
 */
1086 int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
1089 struct mii_dev *bus = NULL;
1090 #ifdef CONFIG_PHYLIB
1091 struct phy_device *phydev = NULL;
1095 #if defined(CONFIG_SOC_MX28)
1097 * The i.MX28 has two ethernet interfaces, but they are not equal.
1098 * Only the first one can access the MDIO bus.
1100 base_mii = MXS_ENET0_BASE;
1101 #elif defined(FEC_MDIO_BASE_ADDR)
1102 base_mii = FEC_MDIO_BASE_ADDR;
1106 debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
1107 bus = fec_get_miibus(base_mii, dev_id);
1110 #ifdef CONFIG_PHYLIB
/* Each found PHY address is removed from the scan mask so a second
 * controller does not re-claim the same PHY. */
1111 static u8 phy_mask = 0xff;
1112 phydev = phy_find_by_mask(bus, phy_id < 0 ? phy_mask : (1 << phy_id),
1113 PHY_INTERFACE_MODE_RGMII);
1118 phy_mask &= ~(1 << phydev->addr);
1119 ret = fec_probe(bd, dev_id, addr, bus, phydev);
1121 ret = fec_probe(bd, dev_id, addr, bus, phy_id);
1124 #ifdef CONFIG_PHYLIB
1132 #ifdef CONFIG_FEC_MXC_PHYADDR
/*
 * Single-interface convenience wrapper: probe the default FEC
 * (dev_id -1) with the board-configured PHY address.
 */
1133 int fecmxc_initialize(bd_t *bd)
1135 return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
1140 #ifndef CONFIG_PHYLIB
1141 int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
1143 struct fec_priv *fec = dev->priv;
1144 fec->mii_postcall = cb;