2 * CPSW Ethernet Switch Driver
4 * See file CREDITS for list of people who contributed to this
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 #include <asm/errno.h>
31 #include <asm/arch/cpu.h>
/*
 * Generic helpers / limits.
 *
 * BITMASK(n) yields the low n bits set.  Build it from an unsigned
 * constant: (1 << 31) on a 32-bit signed int shifts into the sign bit,
 * which is undefined behaviour in C; 1u makes the shift well-defined
 * for any n up to 31.
 */
#define BITMASK(bits)		((1u << (bits)) - 1)
#define PHY_REG_MASK		0x1f	/* MDIO register number is 5 bits */
#define PHY_ID_MASK		0x1f	/* MDIO PHY address is 5 bits */
#define NUM_DESCS		(PKTBUFSRX * 2)	/* descriptor pool size */

/* max frame: 1500 payload + 14 ethernet header + 4 FCS + 4 VLAN tag */
#define PKT_MAX			(1500 + 14 + 4 + 4)
/* CPDMA register offsets, relative to priv->dma_regs */
41 #define CPDMA_TXCONTROL 0x004
42 #define CPDMA_RXCONTROL 0x014
43 #define CPDMA_SOFTRESET 0x01c
44 #define CPDMA_RXFREE 0x0e0
/*
 * The head-descriptor-pointer (HDP) and completion-pointer (CP) register
 * blocks moved between controller revisions; the _VER1/_VER2 variants are
 * selected at run time from priv->data->version (see cpsw_init).
 */
45 #define CPDMA_TXHDP_VER1 0x100
46 #define CPDMA_TXHDP_VER2 0x200
47 #define CPDMA_RXHDP_VER1 0x120
48 #define CPDMA_RXHDP_VER2 0x220
49 #define CPDMA_TXCP_VER1 0x140
50 #define CPDMA_TXCP_VER2 0x240
51 #define CPDMA_RXCP_VER1 0x160
52 #define CPDMA_RXCP_VER2 0x260
54 /* Descriptor mode bits */
55 #define CPDMA_DESC_SOP BIT(31)		/* start of packet */
56 #define CPDMA_DESC_EOP BIT(30)		/* end of packet */
57 #define CPDMA_DESC_OWNER BIT(29)	/* descriptor is owned by hardware */
58 #define CPDMA_DESC_EOQ BIT(28)		/* hardware reached end of queue */
/* fall-back cache line size when the board config does not provide one */
60 #ifndef CONFIG_SYS_CACHELINE_SIZE
61 #define CONFIG_SYS_CACHELINE_SIZE 64
64 struct cpsw_mdio_regs {
/*
 * MDIO control register bits.  Use unsigned constants: (1 << 31) on a
 * 32-bit signed int is undefined behaviour (shift into the sign bit).
 */
#define CONTROL_IDLE		(1u << 31)	/* state machine is idle */
#define CONTROL_ENABLE		(1u << 30)	/* enable the MDIO engine */

/* MDIO useraccess register bits */
#define USERACCESS_GO		(1u << 31)	/* access in progress */
#define USERACCESS_WRITE	(1u << 30)
#define USERACCESS_ACK		(1u << 29)	/* PHY acknowledged the access */
#define USERACCESS_READ		0
#define USERACCESS_DATA		0xffff		/* read/write data field */
/* per-slave port register block (located via data->slave_reg_ofs) */
100 struct cpsw_slave_regs {
/* host (CPU) port register block (located via data->host_port_reg_ofs) */
116 struct cpsw_host_regs {
122 u32 cpdma_tx_pri_map;
123 u32 cpdma_rx_chan_map;
/* per-slave "sliver" (MAC) register block (located via data->sliver_reg_ofs) */
126 struct cpsw_sliver_regs {
/* each ALE table entry is 68 bits wide, handled as 3 32-bit words */
139 #define ALE_ENTRY_BITS 68
140 #define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
/* ALE register offsets, relative to priv->ale_regs */
143 #define ALE_CONTROL 0x08
144 #define ALE_UNKNOWNVLAN 0x18
145 #define ALE_TABLE_CONTROL 0x20
146 #define ALE_TABLE 0x34
147 #define ALE_PORTCTL 0x40
/* set in ALE_TABLE_CONTROL to commit the staged entry words to the table */
149 #define ALE_TABLE_WRITE BIT(31)
/* ALE entry types (entry_type field) */
151 #define ALE_TYPE_FREE 0
152 #define ALE_TYPE_ADDR 1
153 #define ALE_TYPE_VLAN 2
154 #define ALE_TYPE_VLAN_ADDR 3
/* unicast entry sub-types (NOTE: "PERSISTANT" is a historical misspelling
 * of "persistent", kept for consistency with existing users) */
156 #define ALE_UCAST_PERSISTANT 0
157 #define ALE_UCAST_UNTOUCHED 1
158 #define ALE_UCAST_OUI 2
159 #define ALE_UCAST_TOUCHED 3
/* multicast entry forwarding states (mcast_state field) */
161 #define ALE_MCAST_FWD 0
162 #define ALE_MCAST_BLOCK_LEARN_FWD 1
163 #define ALE_MCAST_FWD_LEARN 2
164 #define ALE_MCAST_FWD_2 3
/* per-port forwarding state, written into the ALE_PORTCTL registers */
166 enum cpsw_ale_port_state {
167 ALE_PORT_STATE_DISABLE = 0x00,
168 ALE_PORT_STATE_BLOCK = 0x01,
169 ALE_PORT_STATE_LEARN = 0x02,
170 ALE_PORT_STATE_FORWARD = 0x03,
173 /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
175 #define ALE_BLOCKED 2
/* per-slave driver state (struct cpsw_slave; opening line not visible in
 * this excerpt) */
178 struct cpsw_slave_regs *regs;
179 struct cpsw_sliver_regs *sliver;
182 struct cpsw_slave_data *data;
186 /* hardware fields */
/* DMA descriptors are cache-line aligned so each one can be flushed or
 * invalidated without disturbing neighbouring data */
191 } __attribute__((aligned(CONFIG_SYS_CACHELINE_SIZE)));
/* software wrapper around the in-memory hardware descriptor */
194 volatile void *sw_buffer;	/* CPU-side buffer pointer, not read by hw */
195 struct cpsw_desc *next;
196 struct cpdma_desc *dma_desc;	/* descriptor the hardware actually reads */
/* per-direction channel state */
200 struct cpsw_desc *head, *tail;
201 void *hdp, *cp, *rxfree;	/* HDP / completion / rxfree register addrs */
/*
 * Descriptor accessors - two variants exist: one uses __raw_writel/readl
 * directly, the other goes through desc->dma_desc with a debug trace.
 * The #if/#else selecting between them is not visible in this excerpt.
 */
205 #define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
206 #define desc_read(desc, fld) __raw_readl(&(desc)->fld)
207 #define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))
209 #define desc_write(desc, fld, val) ((desc)->dma_desc->fld = (u32)(val))
210 #define desc_read(desc, fld) __desc_read(&(desc)->dma_desc->fld, #fld, __func__, __LINE__)
/* traced read of a single descriptor field (body partially elided) */
211 static inline u32 __desc_read(u32 *fld, const char *name,
212 const char *fn, int ln)
216 debug("%s@%d: %s@%p=%08x\n", fn, ln, name, fld, val);
219 #define desc_read_ptr(desc, fld) ((void *)desc_read(desc->dma_desc, fld))
/* channel register accessors; chan->hdp/cp/rxfree hold MMIO addresses */
222 #define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
223 #define chan_read(chan, fld) __raw_readl((chan)->fld)
224 #define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))
/* top-level driver state (struct cpsw_priv; opening line not visible) */
227 struct eth_device *dev;
228 struct cpsw_platform_data *data;
231 struct cpsw_regs *regs;
233 struct cpsw_host_regs *host_port_regs;
236 struct cpsw_desc descs[NUM_DESCS];
237 struct cpsw_desc *desc_free;	/* head of the free-descriptor list */
238 struct cpdma_chan rx_chan, tx_chan;
240 struct cpsw_slave *slaves;
/* invoke func(slave, args...) for every configured slave port */
243 #define for_each_slave(priv, func, arg...) \
246 for (idx = 0; idx < (priv)->data->slaves; idx++) \
247 (func)((priv)->slaves + idx, ##arg); \
/*
 * Extract a bit-field from a 68-bit ALE entry.  @start is the bit offset
 * within the entry, @bits the field width.  Word order in ale_entry[] is
 * reversed relative to the bit numbering, hence the "flip" below.  The
 * lines deriving the word index from @start are elided in this excerpt.
 */
250 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
256 idx = 2 - idx; /* flip */
257 return (ale_entry[idx] >> start) & BITMASK(bits);
/* Store @value into the given bit-field of an ALE entry (inverse of the
 * getter above); clears the field first, then ORs the new value in. */
260 static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
265 value &= BITMASK(bits);
268 idx = 2 - idx; /* flip */
269 ale_entry[idx] &= ~(BITMASK(bits) << start);
270 ale_entry[idx] |= (value << start);
/* Generate typed get/set accessor pairs for one named ALE entry field */
273 #define DEFINE_ALE_FIELD(name, start, bits) \
274 static inline int cpsw_ale_get_##name(u32 *ale_entry) \
276 return cpsw_ale_get_field(ale_entry, start, bits); \
278 static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
280 cpsw_ale_set_field(ale_entry, start, bits, value); \
/* Field layouts; note ucast and mcast entries overlay the same bit
 * positions (e.g. port_mask vs port_num both start at bit 66). */
283 DEFINE_ALE_FIELD(entry_type, 60, 2)
284 DEFINE_ALE_FIELD(mcast_state, 62, 2)
285 DEFINE_ALE_FIELD(port_mask, 66, 3)
286 DEFINE_ALE_FIELD(ucast_type, 62, 2)
287 DEFINE_ALE_FIELD(port_num, 66, 2)
288 DEFINE_ALE_FIELD(blocked, 65, 1)
289 DEFINE_ALE_FIELD(secure, 64, 1)
290 DEFINE_ALE_FIELD(mcast, 40, 1)
292 /* The MAC address field in the ALE entry cannot be macroized as above */
293 static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
/* byte 0 of the MAC lives at the highest bit offset (40), byte 5 lowest */
297 for (i = 0; i < 6; i++)
298 addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
301 static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
305 for (i = 0; i < 6; i++)
306 cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
/* Read ALE table entry @idx into ale_entry[] via the indirection regs:
 * write the index to TABLE_CONTROL, then read the three word registers. */
309 static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
313 __raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
315 for (i = 0; i < ALE_ENTRY_WORDS; i++)
316 ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
/* Write ale_entry[] into table slot @idx; ALE_TABLE_WRITE commits it */
321 static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
325 for (i = 0; i < ALE_ENTRY_WORDS; i++)
326 __raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
328 __raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
/* Scan the ALE table for an address entry matching @addr; returns its
 * index (the not-found return path is elided in this excerpt). */
333 static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8* addr)
335 u32 ale_entry[ALE_ENTRY_WORDS];
338 for (idx = 0; idx < priv->data->ale_entries; idx++) {
341 cpsw_ale_read(priv, idx, ale_entry);
342 type = cpsw_ale_get_entry_type(ale_entry);
/* only address-bearing entry types can match */
343 if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
345 cpsw_ale_get_addr(ale_entry, entry_addr);
346 if (memcmp(entry_addr, addr, 6) == 0)
/* Return the index of the first free ALE table slot */
352 static int cpsw_ale_match_free(struct cpsw_priv *priv)
354 u32 ale_entry[ALE_ENTRY_WORDS];
357 for (idx = 0; idx < priv->data->ale_entries; idx++) {
358 cpsw_ale_read(priv, idx, ale_entry);
359 type = cpsw_ale_get_entry_type(ale_entry);
360 if (type == ALE_TYPE_FREE)
/* Find a unicast entry that is ageable (neither persistent nor OUI) so
 * it can be recycled when the table has no free slot */
366 static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
368 u32 ale_entry[ALE_ENTRY_WORDS];
371 for (idx = 0; idx < priv->data->ale_entries; idx++) {
372 cpsw_ale_read(priv, idx, ale_entry);
373 type = cpsw_ale_get_entry_type(ale_entry);
374 if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
376 if (cpsw_ale_get_mcast(ale_entry))
378 type = cpsw_ale_get_ucast_type(ale_entry);
379 if (type != ALE_UCAST_PERSISTANT &&
380 type != ALE_UCAST_OUI)
/*
 * Add (or update) a persistent unicast entry for @addr on @port.
 * @flags: ALE_SECURE / ALE_BLOCKED bits.  Placement order: reuse an
 * existing entry for the address, else a free slot, else recycle an
 * ageable entry.
 */
386 static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
389 u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
392 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
393 cpsw_ale_set_addr(ale_entry, addr);
394 cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
395 cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
396 cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
397 cpsw_ale_set_port_num(ale_entry, port);
399 idx = cpsw_ale_match_addr(priv, addr);
401 idx = cpsw_ale_match_free(priv);
403 idx = cpsw_ale_find_ageable(priv);
407 cpsw_ale_write(priv, idx, ale_entry);
/*
 * Add @port_mask to the multicast entry for @addr.  An existing entry is
 * read back first so new ports are merged with those already present.
 */
411 static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
413 u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
416 idx = cpsw_ale_match_addr(priv, addr);
418 cpsw_ale_read(priv, idx, ale_entry);
420 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
421 cpsw_ale_set_addr(ale_entry, addr);
422 cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
424 mask = cpsw_ale_get_port_mask(ale_entry);
426 cpsw_ale_set_port_mask(ale_entry, port_mask);
429 idx = cpsw_ale_match_free(priv);
431 idx = cpsw_ale_find_ageable(priv);
435 cpsw_ale_write(priv, idx, ale_entry);
/* Set or clear a single bit of the ALE control register (read-modify-write) */
439 static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
441 u32 tmp, mask = BIT(bit);
443 tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
445 tmp |= val ? mask : 0;
446 __raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
/* named helpers for the ALE control bits this driver uses */
449 #define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
450 #define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
451 #define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)
/* Program the forwarding state (ALE_PORT_STATE_*) of one switch port */
453 static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
456 int offset = ALE_PORTCTL + 4 * port;
459 tmp = __raw_readl(priv->ale_regs + offset);
462 __raw_writel(tmp, priv->ale_regs + offset);
/* MDIO register block; assigned once in cpsw_mdio_init() */
465 static struct cpsw_mdio_regs *mdio_regs;
467 /* wait until hardware is ready for another user access */
/* returns the last value read from the access register so callers can
 * inspect the ACK and DATA fields */
468 static inline u32 wait_for_user_access(void)
473 while ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO) {
475 if (--timeout <= 0) {
476 printf("TIMEOUT waiting for USERACCESS_GO\n");
484 /* wait until hardware state machine is idle */
485 static inline void wait_for_idle(void)
489 while ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0) {
490 if (--timeout <= 0) {
491 printf("TIMEOUT waiting for state machine idle\n");
/*
 * miiphy read callback: read register @phy_reg of PHY @phy_id.
 * Returns 0 on success, -EIO if the PHY did not ACK (then *data is set
 * to all-ones via the -1 assignment below).
 */
498 static int cpsw_mdio_read(const char *devname, unsigned char phy_id,
499 unsigned char phy_reg, unsigned short *data)
/* reject register/PHY numbers wider than the 5-bit hardware fields */
503 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
506 wait_for_user_access();
507 reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
509 __raw_writel(reg, &mdio_regs->user[0].access);
510 reg = wait_for_user_access();
512 *data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
513 return (reg & USERACCESS_ACK) ? 0 : -EIO;
/* miiphy write callback: write @data to register @phy_reg of PHY @phy_id */
516 static int cpsw_mdio_write(const char *devname, unsigned char phy_id,
517 unsigned char phy_reg, unsigned short data)
521 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
524 wait_for_user_access();
525 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
526 (phy_id << 16) | (data & USERACCESS_DATA));
527 __raw_writel(reg, &mdio_regs->user[0].access);
/* wait for the write to complete before returning */
528 wait_for_user_access();
/* Enable the MDIO engine and register the read/write ops with miiphy */
533 static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
535 mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
537 /* set enable and clock divider */
538 __raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
541 * wait for scan logic to settle:
542 * the scan time consists of (a) a large fixed component, and (b) a
543 * small component that varies with the mii bus frequency. These
544 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
545 * silicon. Since the effect of (b) was found to be largely
546 * negligible, we keep things simple here.
550 miiphy_register(name, cpsw_mdio_read, cpsw_mdio_write);
/* Write 1 to a soft-reset register and spin until the bit self-clears */
553 static inline void soft_reset(void *reg)
557 debug("%s\n", __func__);
558 __raw_writel(1, reg);
559 while (__raw_readl(reg) & 1) {
562 debug("%s: reset finished after %u loops\n", __func__, loops);
/*
 * Pack a 6-byte MAC address into the two 32-bit SA register values.
 * Each byte is promoted through unsigned arithmetic before shifting:
 * without the cast, (mac)[3] is a u8 promoted to *signed* int, and
 * shifting a byte >= 0x80 left by 24 overflows into the sign bit,
 * which is undefined behaviour.
 */
#define mac_hi(mac)	(((unsigned)(mac)[0] << 0) | ((unsigned)(mac)[1] << 8) | \
			 ((unsigned)(mac)[2] << 16) | ((unsigned)(mac)[3] << 24))
#define mac_lo(mac)	(((unsigned)(mac)[4] << 0) | ((unsigned)(mac)[5] << 8))
/* Program the slave port's MAC address from the eth device's enetaddr */
569 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
570 struct cpsw_priv *priv)
572 __raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
573 __raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
/*
 * Poll the PHY and reprogram the sliver mac_control to match the
 * negotiated speed/duplex.  *link presumably accumulates overall link
 * state for the caller (cpsw_update_link) - the lines setting it are
 * elided in this excerpt.
 */
576 static void cpsw_slave_update_link(struct cpsw_slave *slave,
577 struct cpsw_priv *priv, int *link)
579 char *name = priv->dev->name;
580 int phy_id = slave->data->phy_id;
585 debug("%s@%d\n", __func__, __LINE__);
/* NOTE(review): the "(R)" glyph below is a mangled "&" + "reg" (an
 * HTML-entity encoding artifact) - restore "&reg" when fixing up this
 * source */
586 if (miiphy_read(name, phy_id, MII_BMSR, ®)) {
587 printf("Failed to read PHY reg\n");
588 return; /* could not read, assume no link */
591 if (reg & BMSR_LSTATUS) { /* link up */
592 speed = miiphy_speed(name, phy_id);
593 duplex = miiphy_duplex(name, phy_id);
596 mac_control = priv->data->mac_control;
598 mac_control |= BIT(18); /* In Band mode */
599 else if (speed == 100)
600 mac_control |= BIT(15);
601 else if (speed == 1000) {
602 if (priv->data->gigabit_en)
603 mac_control |= BIT(7);
605 /* Disable gigabit as it's non-functional */
606 mac_control &= ~BIT(7);
612 mac_control |= BIT(0); /* FULLDUPLEXEN */
614 debug("%s: mac_control: %08x -> %08x\n", __func__,
615 slave->mac_control, mac_control);
/* nothing to do if the value already programmed matches */
617 if (mac_control == slave->mac_control)
621 printf("link up on port %d, speed %d, %s duplex\n",
622 slave->slave_num, speed,
623 (duplex == FULL) ? "full" : "half");
625 printf("link down on port %d\n", slave->slave_num);
628 debug("%s@%d\n", __func__, __LINE__);
629 __raw_writel(mac_control, &slave->sliver->mac_control);
630 debug("%s@%d\n", __func__, __LINE__);
631 slave->mac_control = mac_control;	/* cache the programmed value */
632 debug("%s: done\n", __func__);
/* Refresh link state on every slave; returns the overall link status
 * (the return statement is elided in this excerpt) */
635 static int cpsw_update_link(struct cpsw_priv *priv)
638 for_each_slave(priv, cpsw_slave_update_link, priv, &link);
/* Map a slave index to its switch port number; when the host occupies
 * port 0, slave n is port n+1 */
642 static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
644 if (priv->host_port == 0)
645 return slave_num + 1;
/* One-time setup of a slave port: reset the sliver, program priority
 * maps, max RX length and MAC address, enable ALE forwarding, and run
 * the board's PHY init hook. */
650 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
654 debug("%s\n", __func__);
656 soft_reset(&slave->sliver->soft_reset);
658 /* setup priority mapping */
659 __raw_writel(0x76543210, &slave->sliver->rx_pri_map);
660 __raw_writel(0x33221100, &slave->regs->tx_pri_map);
662 /* setup max packet size, and mac address */
663 __raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
664 cpsw_set_slave_mac(slave, priv);
666 slave->mac_control = 0; /* no link yet */
668 /* enable forwarding */
669 slave_port = cpsw_get_slave_port(priv, slave->slave_num);
670 cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);
/* allow broadcast frames on this port */
672 cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);
674 priv->data->phy_init(priv->dev->name, slave->data->phy_id);
/*
 * Cache-maintenance wrappers around descriptor access.  The __-prefixed
 * variants below trace the address range via debug(); the plain variants
 * further down do not.  The #if/#else selecting between the two sets is
 * not visible in this excerpt.
 */
678 #define cpdma_desc_get(d) __cpdma_desc_get(d, __func__, __LINE__)
679 #define cpdma_desc_put(d) __cpdma_desc_put(d, __func__, __LINE__)
/* invalidate the descriptor's cache lines before the CPU reads fields
 * the hardware may have written */
681 static void __cpdma_desc_get(struct cpsw_desc *desc,
682 const char *fn, int ln)
684 debug("%s@%d: Invalidating DCACHE range: %p..%p\n", fn, ln,
685 desc->dma_desc, &desc->dma_desc[1]);
686 invalidate_dcache_range((u32)desc->dma_desc, (u32)(&desc->dma_desc[1]));
/* flush the descriptor's cache lines so the hardware sees CPU writes */
689 static void __cpdma_desc_put(struct cpsw_desc *desc,
690 const char *fn, int ln)
692 debug("%s@%d: Flushing DCACHE range: %p..%p\n", fn, ln,
693 desc->dma_desc, &desc->dma_desc[1]);
694 flush_dcache_range((u32)desc->dma_desc, (u32)(&desc->dma_desc[1]));
697 static void cpdma_desc_get(struct cpsw_desc *desc)
699 invalidate_dcache_range((u32)desc->dma_desc, (u32)(&desc->dma_desc[1]));
702 static void cpdma_desc_put(struct cpsw_desc *desc)
704 flush_dcache_range((u32)desc->dma_desc, (u32)(&desc->dma_desc[1]));
/* Pop a descriptor off the free list (the empty-list check is elided in
 * this excerpt) */
708 static struct cpsw_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
710 struct cpsw_desc *desc = priv->desc_free;
713 cpdma_desc_get(desc);
714 priv->desc_free = desc->next;
/*
 * Push a descriptor back onto the free list, re-linking hw_next so the
 * free chain stays consistent.
 * NOTE(review): priv->desc_free is dereferenced here without a visible
 * NULL check - verify the free list can never be empty at this point.
 */
719 static void cpdma_desc_free(struct cpsw_priv *priv, struct cpsw_desc *desc)
722 desc_write(desc, hw_next, priv->desc_free->dma_desc);
723 cpdma_desc_put(desc);
724 desc->next = priv->desc_free;
725 priv->desc_free = desc;
/*
 * Queue a buffer on a DMA channel: flush the buffer, fill a descriptor
 * (OWNER|SOP|EOP, single-fragment packets only), then either start the
 * channel (first packet) or chain onto the current tail - kicking the
 * channel again if the hardware already hit end-of-queue.  For the RX
 * channel a free-buffer credit is also written.
 */
728 static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
729 volatile void *buffer, int len)
731 struct cpsw_desc *desc, *prev;
735 printf("ERROR: %s() NULL buffer\n", __func__);
739 debug("%s@%d: buffer %p..%p\n", __func__, __LINE__,
740 buffer, buffer + len);
/* make the buffer contents visible to the DMA engine */
742 flush_dcache_range((u32)buffer, (u32)buffer + len);
744 desc = cpdma_desc_alloc(priv);
748 debug("%s@%d: %cX desc %p DMA %p\n", __func__, __LINE__,
749 chan == &priv->rx_chan ? 'R' : 'T', desc, desc->dma_desc);
753 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
756 desc_write(desc, hw_next, 0);
757 desc_write(desc, hw_buffer, buffer);
758 desc_write(desc, hw_len, len);
759 desc_write(desc, hw_mode, mode | len);
760 //desc_write(desc, sw_buffer, buffer);
/* remembered CPU-side only; handed back by cpdma_process() */
761 desc->sw_buffer = buffer;
762 // desc_write(desc, sw_len, len);
765 /* simple case - first packet enqueued */
768 chan_write(chan, hdp, desc->dma_desc);
771 cpdma_desc_put(desc);
773 /* not the first packet - enqueue at the tail */
777 cpdma_desc_get(prev);
778 desc_write(prev, hw_next, desc->dma_desc);
779 cpdma_desc_put(prev);
783 /* next check if EOQ has been triggered already */
784 if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
785 chan_write(chan, hdp, desc->dma_desc);
/* hand the hardware one more RX buffer credit */
789 chan_write(chan, rxfree, 1);
790 debug("%s@%d\n", __func__, __LINE__);
/*
 * Reap one completed descriptor from the head of a channel.  On success
 * the buffer and length are returned through the out-parameters, the
 * descriptor is acknowledged via the completion register and recycled.
 * Returns negative while the head descriptor is still owned by hardware
 * (the return statements are elided in this excerpt).
 */
794 static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
795 volatile void **buffer, int *len)
797 struct cpsw_desc *desc = chan->head;
803 cpdma_desc_get(desc);
805 debug("%s@%d desc=%p chan=%p\n", __func__, __LINE__, desc->dma_desc, chan);
806 status = desc_read(desc, hw_mode);
/* low 11 bits of the mode word carry the packet length */
809 *len = status & 0x7ff;
810 debug("%s@%d: status=%08x len=%u\n", __func__, __LINE__,
814 *buffer = desc->sw_buffer;
815 debug("%s@%d: buffer=%p\n", __func__, __LINE__, desc->sw_buffer);
/* hardware still owns it - not complete yet */
817 if (status & CPDMA_DESC_OWNER)
820 chan->head = desc->next;
821 chan_write(chan, cp, desc->dma_desc);
823 cpdma_desc_free(priv, desc);
/*
 * eth_device init hook: power up the switch, reset and program the
 * controller, ALE and slave ports, build the descriptor free list,
 * set up the version-specific DMA channel registers, and pre-post all
 * RX buffers.
 */
827 static int cpsw_init(struct eth_device *dev, bd_t *bis)
829 struct cpsw_priv *priv = dev->priv;
832 debug("%s\n", __func__);
/* board-supplied hook: enable clocks/power for the subsystem */
834 priv->data->control(1);
836 /* soft reset the controller and initialize priv */
837 soft_reset(&priv->regs->soft_reset);
839 /* initialize and reset the address lookup engine */
840 cpsw_ale_enable(priv, 1);
841 cpsw_ale_clear(priv, 1);
842 cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
844 /* setup host port priority mapping */
845 __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
846 __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
848 /* disable priority elevation and enable statistics on all ports */
849 __raw_writel(0, &priv->regs->ptype);
851 /* enable statistics collection only on the host port */
852 __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
854 cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
/* let the host port receive its own unicast and broadcast frames */
856 cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
858 cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);
860 for_each_slave(priv, cpsw_slave_init, priv);
862 cpsw_update_link(priv);
864 /* init descriptor pool */
/* chain all descriptors into a singly-linked free list, mirroring the
 * links into the hardware hw_next fields, then flush each one */
865 for (i = 0; i < NUM_DESCS; i++) {
866 struct cpsw_desc *next_desc = (i < (NUM_DESCS - 1)) ?
867 &priv->descs[i + 1] : NULL;
869 priv->descs[i].next = next_desc;
870 desc_write(&priv->descs[i], hw_next,
871 next_desc ? next_desc->dma_desc : 0);
872 cpdma_desc_put(&priv->descs[i]);
874 priv->desc_free = &priv->descs[0];
876 /* initialize channels */
/* pick the HDP/CP register layout matching the controller revision */
877 if (priv->data->version == CPSW_CTRL_VERSION_2) {
878 memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
879 priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
880 priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
881 priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
883 memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
884 priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
885 priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
887 memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
888 priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
889 priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
890 priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
892 memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
893 priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
894 priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
897 /* clear dma state */
898 soft_reset(priv->dma_regs + CPDMA_SOFTRESET);
/* zero every channel's HDP/CP/RXFREE registers before enabling DMA */
900 if (priv->data->version == CPSW_CTRL_VERSION_2) {
901 for (i = 0; i < priv->data->channels; i++) {
902 __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4 * i);
903 __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 * i);
904 __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4 * i);
905 __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4 * i);
906 __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4 * i);
909 for (i = 0; i < priv->data->channels; i++) {
910 __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4 * i);
911 __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 * i);
912 __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4 * i);
913 __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4 * i);
914 __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4 * i);
/* enable TX and RX DMA */
918 __raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
919 __raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
921 /* submit rx descs */
922 for (i = 0; i < PKTBUFSRX; i++) {
923 ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
926 printf("error %d submitting rx desc\n", ret);
/* eth_device halt hook: power the switch subsystem back down */
934 static void cpsw_halt(struct eth_device *dev)
936 struct cpsw_priv *priv = dev->priv;
938 debug("%s\n", __func__);
939 priv->data->control(0);
/*
 * eth_device send hook: refresh link state, reap any completed TX
 * descriptors, then queue this packet on the TX channel.
 */
942 static int cpsw_send(struct eth_device *dev, volatile void *packet, int length)
944 struct cpsw_priv *priv = dev->priv;
/* poison initial values - make it obvious in debug output if
 * cpdma_process() returned without filling these in */
945 volatile void *buffer = (volatile void *)0xeeeeeeee;
946 int len = 0x77777777;
948 debug("%s@%d sending packet %p..%p\n", __func__, __LINE__,
949 packet, packet + length - 1);
950 if (!cpsw_update_link(priv))
953 debug("%s@%d\n", __func__, __LINE__);
954 /* first reap completed packets */
955 while (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0) {
956 debug("%s@%d: buffer=%p len=%d\n", __func__, __LINE__,
959 debug("%s@%d\n", __func__, __LINE__);
961 return cpdma_submit(priv, &priv->tx_chan, packet, length);
/*
 * eth_device recv hook: drain completed RX descriptors, invalidate each
 * buffer's cache lines before the CPU reads it, hand the frame to the
 * network stack, then re-post the buffer to the RX channel.
 */
964 static int cpsw_recv(struct eth_device *dev)
966 struct cpsw_priv *priv = dev->priv;
967 volatile void *buffer;
970 debug("%s@%d\n", __func__, __LINE__);
971 while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
972 debug("invalidating %p..%p\n", buffer,
973 buffer + ALIGN(len, CONFIG_SYS_CACHELINE_SIZE));
974 invalidate_dcache_range((u32)buffer,
975 (u32)buffer + ALIGN(len, CONFIG_SYS_CACHELINE_SIZE));
976 NetReceive(buffer, len);
977 cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
979 debug("%s@%d: done\n", __func__, __LINE__);
/* Resolve one slave's register block pointers from the platform data */
983 static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
984 struct cpsw_priv *priv)
986 void *regs = priv->regs;
987 struct cpsw_slave_data *data = priv->data->slave_data + slave_num;
989 debug("%s@%d: slave[%d] %p\n", __func__, __LINE__,
991 slave->slave_num = slave_num;
993 slave->regs = regs + data->slave_reg_ofs;
994 slave->sliver = regs + data->sliver_reg_ofs;
/*
 * Public entry point: allocate driver state, resolve register blocks
 * from @data, allocate cache-aligned DMA descriptors, and register the
 * eth_device and MDIO bus with the network stack.
 */
997 int cpsw_register(struct cpsw_platform_data *data)
999 struct cpsw_priv *priv;
1000 void *regs = (void *)data->cpsw_base;
1001 struct eth_device *dev;
1004 debug("%s@%d\n", __func__, __LINE__);
/* NOTE(review): calloc's conventional argument order is (count, size);
 * these calls swap it - the total allocation is still correct */
1006 dev = calloc(sizeof(*dev), 1);
1010 priv = calloc(sizeof(*priv), 1);
1019 priv->slaves = calloc(sizeof(struct cpsw_slave), data->slaves);
1020 if (!priv->slaves) {
1026 for (i = 0; i < NUM_DESCS; i++) {
/* NOTE(review): each dma_desc points at a single struct cpdma_desc, yet
 * sizeof(struct cpsw_desc) * NUM_DESCS bytes are allocated for every
 * iteration - this looks heavily over-sized; the intended size is
 * presumably the cache-line-aligned size of one struct cpdma_desc.
 * Verify before changing. */
1027 priv->descs[i].dma_desc = memalign(CONFIG_SYS_CACHELINE_SIZE,
1028 sizeof(struct cpsw_desc) * NUM_DESCS);
1029 if (!priv->descs[i].dma_desc) {
/* NOTE(review): this frees the pointer that just failed to allocate
 * (a no-op), while descriptors allocated in earlier iterations appear
 * to leak - confirm against the elided error-handling path */
1031 free(priv->descs[i].dma_desc);
1038 debug("DMA desc[%d] allocated @ %p desc_size %u\n",
1039 i, priv->descs[i].dma_desc,
1040 sizeof(*priv->descs[i].dma_desc));
1043 priv->host_port = data->host_port_num;
/* register sub-blocks are all offsets from the single cpsw_base */
1045 priv->host_port_regs = regs + data->host_port_reg_ofs;
1046 priv->dma_regs = regs + data->cpdma_reg_ofs;
1047 priv->ale_regs = regs + data->ale_reg_ofs;
1049 for_each_slave(priv, cpsw_slave_setup, idx, priv);
1050 debug("%s@%d\n", __func__, __LINE__);
1052 strcpy(dev->name, "cpsw");
1054 dev->init = cpsw_init;
1055 dev->halt = cpsw_halt;
1056 dev->send = cpsw_send;
1057 dev->recv = cpsw_recv;
1060 debug("%s@%d\n", __func__, __LINE__);
1063 debug("%s@%d\n", __func__, __LINE__);
1064 cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
1066 debug("%s@%d: done\n", __func__, __LINE__);