/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
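/* Worked example of the layout above (illustrative only; the real values
 * come from hw_params): with TOTAL_DESC = 256 descriptors and 3 words per
 * descriptor on a 40-bit capable GENET, DMA_DESC_SIZE is 12 bytes, so the
 * descriptor area occupies 256 * 12 = 0xC00 bytes and the TDMA/RDMA control
 * registers start at tdma_offset/rdma_offset + 0xC00. With a 2-word
 * descriptor layout the register window starts at +0x800 instead.
 */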
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Accesses to the GISB bus can take a couple hundred nanoseconds
	 * and are done for each packet, so skip this expensive read unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
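/* The pattern above: on GENET V1 the TBUF control registers live inside the
 * RBUF register block (hence the *_V1 offsets accessed through
 * bcmgenet_rbuf_readl()/bcmgenet_rbuf_writel()), while V2 and later expose a
 * dedicated TBUF block at hw_params->tbuf_offset.
 */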
/* RX/TX DMA register accessors */

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};
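/* Example of the aliasing above: a Tx ring's consumer index and an Rx ring's
 * producer index occupy the same per-ring register slot, so
 * RDMA_PROD_INDEX == TDMA_CONS_INDEX and a single offset table indexed by
 * this enum serves both directions.
 */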
/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
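/* Sketch of how the offset is applied by bcmgenet_update_mib_counters()
 * below: each hardware counter is read at UMAC_MIB_START + j, where j
 * advances by the counter's stat_sizeof, and the TX and RUNT groups
 * additionally add BCMGENET_STAT_OFFSET (0xC) to step over the gap noted
 * above.
 */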
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}

static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
};
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_phy_power_set(priv->dev, true);
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
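	/* The hardware consumer index is a free-running 16-bit counter
	 * (DMA_C_INDEX_MASK is 0xFFFF), so once it wraps past our cached
	 * ring->c_index the else branch above computes the distance modulo
	 * 0x10000.
	 */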
	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();
	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);
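		/* Worked example (illustrative): for a TCP/IPv4 frame with a
		 * 14-byte Ethernet header and a 20-byte IP header,
		 * skb_checksum_start_offset() is 64 + 34 after the status
		 * block push, so offset is 34; with TCP's csum_offset of 16
		 * the word packs checksum start 34 and write position 50.
		 */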
		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;
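	/* e.g. a packet with skb->queue_mapping == 2 lands in
	 * priv->tx_rings[1], whose completion interrupt is bit 1 of the
	 * INTRL2_1 block serviced by bcmgenet_isr1() below.
	 */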
	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			    priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it to the kernel */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index,
					  RDMA_CONS_INDEX);
	}

	return rxpktprocessed;
}
/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete(napi);
		ring->int_enable(ring);
	}

	return work_done;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout >= 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplugged event for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg;
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	int i;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Enable Rx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_RXDMA_DONE;

	/* Enable Tx default queue 16 interrupts */
	int0_enable |= UMAC_IRQ_TXDMA_DONE;

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	/* Enable Rx priority queue interrupts */
	for (i = 0; i < priv->hw_params->rx_queues; ++i)
		int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));

	/* Enable Tx priority queue interrupts */
	for (i = 0; i < priv->hw_params->tx_queues; ++i)
		int1_enable |= (1 << i);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine.*/
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
}

static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
}

static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
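/* In register terms (a sketch of how the loop below applies this layout):
 * priority queue i is programmed with DMA_START_ADDR = i * 32 * words_per_bd
 * and DMA_END_ADDR one word short of (i + 1) * 32 * words_per_bd, while its
 * per-ring register block sits at GENET_TDMA_REG_OFF + DMA_RING_SIZE * i.
 */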
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Initialize Tx NAPI */
	bcmgenet_init_tx_napi(priv);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
}

static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_enable(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
}

static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
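/* Traffic that matches no HFB filter is delivered to default queue 16 and
 * signaled through the INTRL2_0 UMAC_IRQ_RXDMA_DONE interrupt, while the
 * HFB-directed queues interrupt through their per-queue INTRL2_1 bits (see
 * bcmgenet_isr0()/bcmgenet_isr1() below).
 */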
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Initialize Rx NAPI */
	bcmgenet_init_rx_napi(priv);

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;
	u32 dma_ctrl;
	int i;

	/* Disable TDMA to stop adding more frames to TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	return ret;
}
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	bcmgenet_fini_rx_napi(priv);
	bcmgenet_fini_tx_napi(priv);

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control registers */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
2282 /* Interrupt bottom half */
2283 static void bcmgenet_irq_task(struct work_struct *work)
2284 {
2285 struct bcmgenet_priv *priv = container_of(
2286 work, struct bcmgenet_priv, bcmgenet_irq_work);
2288 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2290 if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
2291 priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
2292 netif_dbg(priv, wol, priv->dev,
2293 "magic packet detected, waking up\n");
2294 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
2295 }
2297 /* Link UP/DOWN event */
2298 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2299 (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2300 phy_mac_interrupt(priv->phydev,
2301 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2302 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
2303 }
2304 }
2306 /* bcmgenet_isr1: handle Rx and Tx priority queues */
2307 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2308 {
2309 struct bcmgenet_priv *priv = dev_id;
2310 struct bcmgenet_rx_ring *rx_ring;
2311 struct bcmgenet_tx_ring *tx_ring;
2312 unsigned int index;
2314 /* Save irq status for bottom-half processing. */
2315 priv->irq1_stat =
2316 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2317 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2319 /* clear interrupts */
2320 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2322 netif_dbg(priv, intr, priv->dev,
2323 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2325 /* Check Rx priority queue interrupts */
2326 for (index = 0; index < priv->hw_params->rx_queues; index++) {
2327 if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
2328 continue;
2330 rx_ring = &priv->rx_rings[index];
2332 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2333 rx_ring->int_disable(rx_ring);
2334 __napi_schedule(&rx_ring->napi);
2335 }
2336 }
2338 /* Check Tx priority queue interrupts */
2339 for (index = 0; index < priv->hw_params->tx_queues; index++) {
2340 if (!(priv->irq1_stat & BIT(index)))
2341 continue;
2343 tx_ring = &priv->tx_rings[index];
2345 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2346 tx_ring->int_disable(tx_ring);
2347 __napi_schedule(&tx_ring->napi);
2348 }
2349 }
2351 return IRQ_HANDLED;
2352 }
2354 /* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
2355 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2356 {
2357 struct bcmgenet_priv *priv = dev_id;
2358 struct bcmgenet_rx_ring *rx_ring;
2359 struct bcmgenet_tx_ring *tx_ring;
2361 /* Save irq status for bottom-half processing. */
2362 priv->irq0_stat =
2363 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
2364 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2366 /* clear interrupts */
2367 bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
2369 netif_dbg(priv, intr, priv->dev,
2370 "IRQ=0x%x\n", priv->irq0_stat);
2372 if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
2373 rx_ring = &priv->rx_rings[DESC_INDEX];
2375 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2376 rx_ring->int_disable(rx_ring);
2377 __napi_schedule(&rx_ring->napi);
2378 }
2379 }
2381 if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
2382 tx_ring = &priv->tx_rings[DESC_INDEX];
2384 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2385 tx_ring->int_disable(tx_ring);
2386 __napi_schedule(&tx_ring->napi);
2387 }
2388 }
2390 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2391 UMAC_IRQ_PHY_DET_F |
2392 UMAC_IRQ_LINK_EVENT |
2393 UMAC_IRQ_HFB_SM |
2394 UMAC_IRQ_HFB_MM |
2395 UMAC_IRQ_MPD_R)) {
2396 /* all other interrupts of interest are handled in the bottom half */
2397 schedule_work(&priv->bcmgenet_irq_work);
2398 }
2400 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2401 priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
2402 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2403 wake_up(&priv->wq);
2404 }
2406 return IRQ_HANDLED;
2407 }
2409 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
2410 {
2411 struct bcmgenet_priv *priv = dev_id;
2413 pm_wakeup_event(&priv->pdev->dev, 0);
2415 return IRQ_HANDLED;
2416 }
2418 #ifdef CONFIG_NET_POLL_CONTROLLER
2419 static void bcmgenet_poll_controller(struct net_device *dev)
2420 {
2421 struct bcmgenet_priv *priv = netdev_priv(dev);
2423 /* Invoke the main RX/TX interrupt handler */
2424 disable_irq(priv->irq0);
2425 bcmgenet_isr0(priv->irq0, priv);
2426 enable_irq(priv->irq0);
2428 /* And the interrupt handler for RX/TX priority queues */
2429 disable_irq(priv->irq1);
2430 bcmgenet_isr1(priv->irq1, priv);
2431 enable_irq(priv->irq1);
2432 }
2433 #endif
2435 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
2436 {
2437 u32 reg;
2439 reg = bcmgenet_rbuf_ctrl_get(priv);
2440 reg |= BIT(1);
2441 bcmgenet_rbuf_ctrl_set(priv, reg);
2442 udelay(10);
2444 reg &= ~BIT(1);
2445 bcmgenet_rbuf_ctrl_set(priv, reg);
2446 udelay(10);
2447 }
2449 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
2450 unsigned char *addr)
2451 {
2452 bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
2453 (addr[2] << 8) | addr[3], UMAC_MAC0);
2454 bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
2455 }
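/* Worked example (illustrative): for dev_addr 00:10:18:ab:cd:ef the two
 * writes above program UMAC_MAC0 = 0x001018ab and UMAC_MAC1 = 0x0000cdef.
 */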
2457 /* Returns a reusable dma control register value */
2458 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
2459 {
2460 u32 reg;
2461 u32 dma_ctrl;
2463 /* disable DMA */
2464 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2465 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2466 reg &= ~dma_ctrl;
2467 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2469 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2470 reg &= ~dma_ctrl;
2471 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2473 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
2474 udelay(10);
2475 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
2477 return dma_ctrl;
2478 }
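/* The value returned by bcmgenet_dma_disable() is handed back to
 * bcmgenet_enable_dma() once the rings are reinitialized, restoring DMA_EN
 * and the ring 16 buffer-enable bit in a single read-modify-write.
 */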
2480 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
2481 {
2482 u32 reg;
2484 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2485 reg |= dma_ctrl;
2486 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2488 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2489 reg |= dma_ctrl;
2490 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2491 }
2493 static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
2494 u32 f_index)
2495 {
2496 u32 offset;
2497 u32 reg;
2499 offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2500 reg = bcmgenet_hfb_reg_readl(priv, offset);
2501 return !!(reg & (1 << (f_index % 32)));
2502 }
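/* The filter enable bits live in two 32-bit words at HFB_FLT_ENABLE_V3PLUS;
 * the (f_index < 32) arithmetic used here and in the helper below selects
 * the second word for filters 0-31, leaving filters 32-47 in the first.
 */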
2504 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
2505 {
2506 u32 offset;
2507 u32 reg;
2509 offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2510 reg = bcmgenet_hfb_reg_readl(priv, offset);
2511 reg |= (1 << (f_index % 32));
2512 bcmgenet_hfb_reg_writel(priv, reg, offset);
2513 }
2515 static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
2516 u32 f_index, u32 rx_queue)
2517 {
2518 u32 offset;
2519 u32 reg;
2521 offset = f_index / 8;
2522 reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
2523 reg &= ~(0xF << (4 * (f_index % 8)));
2524 reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
2525 bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
2526 }
2528 static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
2529 u32 f_index, u32 f_length)
2530 {
2531 u32 offset;
2532 u32 reg;
2534 offset = HFB_FLT_LEN_V3PLUS +
2535 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
2536 sizeof(u32);
2537 reg = bcmgenet_hfb_reg_readl(priv, offset);
2538 reg &= ~(0xFF << (8 * (f_index % 4)));
2539 reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
2540 bcmgenet_hfb_reg_writel(priv, reg, offset);
2541 }
2543 static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
2544 {
2545 u32 f_index;
2547 for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
2548 if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
2549 return f_index;
2551 return -ENOMEM;
2552 }
2554 /* bcmgenet_hfb_add_filter
2556 * Add new filter to Hardware Filter Block to match and direct Rx traffic to
2557 * desired Rx queue.
2558 *
2559 * f_data is an array of unsigned 32-bit integers where each 32-bit integer
2560 * provides filter data for 2 bytes (4 nibbles) of Rx frame:
2562 * bits 31:20 - unused
2563 * bit 19 - nibble 0 match enable
2564 * bit 18 - nibble 1 match enable
2565 * bit 17 - nibble 2 match enable
2566 * bit 16 - nibble 3 match enable
2567 * bits 15:12 - nibble 0 data
2568 * bits 11:8 - nibble 1 data
2569 * bits 7:4 - nibble 2 data
2570 * bits 3:0 - nibble 3 data
2571 *
2572 * Example:
2573 * In order to match:
2574 * - Ethernet frame type = 0x0800 (IP)
2575 * - IP version field = 4
2576 * - IP protocol field = 0x11 (UDP)
2578 * The following filter is needed:
2579 * u32 hfb_filter_ipv4_udp[] = {
2580 * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2581 * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
2582 * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
2583 * };
2584 *
2585 * To add the filter to HFB and direct the traffic to Rx queue 0, call:
2586 * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
2587 * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
2588 */
2589 int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
2590 u32 f_length, u32 rx_queue)
2591 {
2592 int f_index;
2593 u32 i;
2595 f_index = bcmgenet_hfb_find_unused_filter(priv);
2596 if (f_index < 0)
2597 return -ENOMEM;
2599 if (f_length > priv->hw_params->hfb_filter_size)
2600 return -EINVAL;
2602 for (i = 0; i < f_length; i++)
2603 bcmgenet_hfb_writel(priv, f_data[i],
2604 (f_index * priv->hw_params->hfb_filter_size + i) *
2605 sizeof(u32));
2607 bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
2608 bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
2609 bcmgenet_hfb_enable_filter(priv, f_index);
2610 bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
2612 return 0;
2613 }
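/* Illustrative sketch (editor's addition, not part of the driver): steering
 * ARP frames (EtherType 0x0806) to Rx queue 1 with the format documented
 * above. Each u32 covers two bytes of the frame, so word 6 covers frame
 * bytes 12-13 (the EtherType), and match-enable bits 19:16 = 0xF enable all
 * four nibbles of that word.
 */
#if 0
static int bcmgenet_hfb_add_arp_filter(struct bcmgenet_priv *priv)
{
	u32 hfb_filter_arp[] = {
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x000F0806,
	};

	return bcmgenet_hfb_add_filter(priv, hfb_filter_arp,
				       ARRAY_SIZE(hfb_filter_arp), 1);
}
#endif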
2615 /* bcmgenet_hfb_clear
2617 * Clear Hardware Filter Block and disable all filtering.
2618 */
2619 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
2620 {
2621 u32 i;
2623 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
2624 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
2625 bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
2627 for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
2628 bcmgenet_rdma_writel(priv, 0x0, i);
2630 for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
2631 bcmgenet_hfb_reg_writel(priv, 0x0,
2632 HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
2634 for (i = 0; i < priv->hw_params->hfb_filter_cnt *
2635 priv->hw_params->hfb_filter_size; i++)
2636 bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
2637 }
2639 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
2640 {
2641 if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
2642 return;
2644 bcmgenet_hfb_clear(priv);
2645 }
2647 static void bcmgenet_netif_start(struct net_device *dev)
2648 {
2649 struct bcmgenet_priv *priv = netdev_priv(dev);
2651 /* Start the network engine */
2652 bcmgenet_enable_rx_napi(priv);
2653 bcmgenet_enable_tx_napi(priv);
2655 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
2657 netif_tx_start_all_queues(dev);
2659 /* Monitor link interrupts now */
2660 bcmgenet_link_intr_enable(priv);
2662 phy_start(priv->phydev);
2663 }
2665 static int bcmgenet_open(struct net_device *dev)
2666 {
2667 struct bcmgenet_priv *priv = netdev_priv(dev);
2668 unsigned long dma_ctrl;
2669 u32 reg;
2670 int ret;
2672 netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
2674 /* Turn on the clock */
2675 clk_prepare_enable(priv->clk);
2677 /* If this is an internal GPHY, power it back on now, before UniMAC is
2678 * brought out of reset as absolutely no UniMAC activity is allowed
2679 */
2680 if (priv->internal_phy)
2681 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2683 /* take MAC out of reset */
2684 bcmgenet_umac_reset(priv);
2686 ret = init_umac(priv);
2687 if (ret)
2688 goto err_clk_disable;
2690 /* disable ethernet MAC while updating its registers */
2691 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
2693 /* Make sure we reflect the value of CRC_CMD_FWD */
2694 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2695 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2697 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2699 if (priv->internal_phy) {
2700 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2701 reg |= EXT_ENERGY_DET_MASK;
2702 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2703 }
2705 /* Disable RX/TX DMA and flush TX queues */
2706 dma_ctrl = bcmgenet_dma_disable(priv);
2708 /* Reinitialize TDMA and RDMA and SW housekeeping */
2709 ret = bcmgenet_init_dma(priv);
2710 if (ret) {
2711 netdev_err(dev, "failed to initialize DMA\n");
2712 goto err_clk_disable;
2713 }
2715 /* Always enable ring 16 - descriptor ring */
2716 bcmgenet_enable_dma(priv, dma_ctrl);
2718 /* HFB init */
2719 bcmgenet_hfb_init(priv);
2721 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2722 dev->name, priv);
2723 if (ret < 0) {
2724 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2725 goto err_fini_dma;
2726 }
2728 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2729 dev->name, priv);
2730 if (ret < 0) {
2731 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2732 goto err_irq0;
2733 }
2735 ret = bcmgenet_mii_probe(dev);
2736 if (ret) {
2737 netdev_err(dev, "failed to connect to PHY\n");
2738 goto err_irq1;
2739 }
2741 bcmgenet_netif_start(dev);
2743 return 0;
2745 err_irq1:
2746 free_irq(priv->irq1, priv);
2747 err_irq0:
2748 free_irq(priv->irq0, priv);
2749 err_fini_dma:
2750 bcmgenet_fini_dma(priv);
2751 err_clk_disable:
2752 clk_disable_unprepare(priv->clk);
2753 return ret;
2754 }
2756 static void bcmgenet_netif_stop(struct net_device *dev)
2757 {
2758 struct bcmgenet_priv *priv = netdev_priv(dev);
2760 netif_tx_stop_all_queues(dev);
2761 phy_stop(priv->phydev);
2762 bcmgenet_intr_disable(priv);
2763 bcmgenet_disable_rx_napi(priv);
2764 bcmgenet_disable_tx_napi(priv);
2766 /* Wait for pending work items to complete. Since interrupts are
2767 * disabled no new work will be scheduled.
2768 */
2769 cancel_work_sync(&priv->bcmgenet_irq_work);
2771 priv->old_link = -1;
2772 priv->old_speed = -1;
2773 priv->old_duplex = -1;
2774 priv->old_pause = -1;
2775 }
2777 static int bcmgenet_close(struct net_device *dev)
2778 {
2779 struct bcmgenet_priv *priv = netdev_priv(dev);
2780 int ret;
2782 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
2784 bcmgenet_netif_stop(dev);
2786 /* Really kill the PHY state machine and disconnect from it */
2787 phy_disconnect(priv->phydev);
2789 /* Disable MAC receive */
2790 umac_enable_set(priv, CMD_RX_EN, false);
2792 ret = bcmgenet_dma_teardown(priv);
2793 if (ret)
2794 return ret;
2796 /* Disable MAC transmit. TX DMA must be disabled before this */
2797 umac_enable_set(priv, CMD_TX_EN, false);
2799 /* tx reclaim */
2800 bcmgenet_tx_reclaim_all(dev);
2801 bcmgenet_fini_dma(priv);
2803 free_irq(priv->irq0, priv);
2804 free_irq(priv->irq1, priv);
2806 if (priv->internal_phy)
2807 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2809 clk_disable_unprepare(priv->clk);
2811 return ret;
2812 }
2814 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
2815 {
2816 struct bcmgenet_priv *priv = ring->priv;
2817 u32 p_index, c_index, intsts, intmsk;
2818 struct netdev_queue *txq;
2819 unsigned int free_bds;
2820 unsigned long flags;
2821 bool txq_stopped;
2823 if (!netif_msg_tx_err(priv))
2824 return;
2826 txq = netdev_get_tx_queue(priv->dev, ring->queue);
2828 spin_lock_irqsave(&ring->lock, flags);
2829 if (ring->index == DESC_INDEX) {
2830 intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2831 intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
2832 } else {
2833 intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2834 intmsk = 1 << ring->index;
2835 }
2836 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
2837 p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
2838 txq_stopped = netif_tx_queue_stopped(txq);
2839 free_bds = ring->free_bds;
2840 spin_unlock_irqrestore(&ring->lock, flags);
2842 netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
2843 "TX queue status: %s, interrupts: %s\n"
2844 "(sw)free_bds: %d (sw)size: %d\n"
2845 "(sw)p_index: %d (hw)p_index: %d\n"
2846 "(sw)c_index: %d (hw)c_index: %d\n"
2847 "(sw)clean_p: %d (sw)write_p: %d\n"
2848 "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
2849 ring->index, ring->queue,
2850 txq_stopped ? "stopped" : "active",
2851 intsts & intmsk ? "enabled" : "disabled",
2852 free_bds, ring->size,
2853 ring->prod_index, p_index & DMA_P_INDEX_MASK,
2854 ring->c_index, c_index & DMA_C_INDEX_MASK,
2855 ring->clean_ptr, ring->write_ptr,
2856 ring->cb_ptr, ring->end_ptr);
2857 }
2859 static void bcmgenet_timeout(struct net_device *dev)
2860 {
2861 struct bcmgenet_priv *priv = netdev_priv(dev);
2862 u32 int0_enable = 0;
2863 u32 int1_enable = 0;
2864 unsigned int q;
2866 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
2868 for (q = 0; q < priv->hw_params->tx_queues; q++)
2869 bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
2870 bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
2872 bcmgenet_tx_reclaim_all(dev);
2874 for (q = 0; q < priv->hw_params->tx_queues; q++)
2875 int1_enable |= (1 << q);
2877 int0_enable = UMAC_IRQ_TXDMA_DONE;
2879 /* Re-enable TX interrupts if disabled */
2880 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2881 bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
2883 dev->trans_start = jiffies;
2885 dev->stats.tx_errors++;
2887 netif_tx_wake_all_queues(dev);
2888 }
2890 #define MAX_MC_COUNT 16
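/* Each MDF entry consumes two UMAC_MDF_ADDR words (the helper below advances
 * *i by two) and one enable bit in UMAC_MDF_CTRL, allocated from bit
 * MAX_MC_COUNT downwards via (MAX_MC_COUNT - *mc).
 */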
2892 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
2893 unsigned char *addr,
2894 int *i,
2895 int *mc)
2896 {
2897 u32 reg;
2899 bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
2900 UMAC_MDF_ADDR + (*i * 4));
2901 bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
2902 addr[4] << 8 | addr[5],
2903 UMAC_MDF_ADDR + ((*i + 1) * 4));
2904 reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
2905 reg |= (1 << (MAX_MC_COUNT - *mc));
2906 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
2907 (*i) += 2;
2908 (*mc)++;
2909 }
2911 static void bcmgenet_set_rx_mode(struct net_device *dev)
2912 {
2913 struct bcmgenet_priv *priv = netdev_priv(dev);
2914 struct netdev_hw_addr *ha;
2915 int i;
2916 int mc;
2918 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
2920 /* Promiscuous mode */
2921 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2922 if (dev->flags & IFF_PROMISC) {
2923 reg |= CMD_PROMISC;
2924 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2925 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
2926 return;
2927 } else {
2928 reg &= ~CMD_PROMISC;
2929 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2930 }
2932 /* UniMac doesn't support ALLMULTI */
2933 if (dev->flags & IFF_ALLMULTI) {
2934 netdev_warn(dev, "ALLMULTI is not supported\n");
2935 return;
2936 }
2938 /* update MDF filter */
2939 i = 0;
2940 mc = 0;
2941 /* Broadcast */
2942 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
2943 /* my own address.*/
2944 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
2945 /* Unicast list */
2946 if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
2947 return;
2949 if (!netdev_uc_empty(dev))
2950 netdev_for_each_uc_addr(ha, dev)
2951 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2952 /* Multicast */
2953 if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
2954 return;
2956 netdev_for_each_mc_addr(ha, dev)
2957 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2958 }
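/* Editor's note: with the broadcast and device addresses always installed
 * first, at most MAX_MC_COUNT - 2 unicast/multicast entries remain.
 */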
2960 /* Set the hardware MAC address. */
2961 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
2962 {
2963 struct sockaddr *addr = p;
2965 /* Setting the MAC address at the hardware level is not possible
2966 * without disabling the UniMAC RX/TX enable bits.
2967 */
2968 if (netif_running(dev))
2969 return -EBUSY;
2971 ether_addr_copy(dev->dev_addr, addr->sa_data);
2973 return 0;
2974 }
2976 static const struct net_device_ops bcmgenet_netdev_ops = {
2977 .ndo_open = bcmgenet_open,
2978 .ndo_stop = bcmgenet_close,
2979 .ndo_start_xmit = bcmgenet_xmit,
2980 .ndo_tx_timeout = bcmgenet_timeout,
2981 .ndo_set_rx_mode = bcmgenet_set_rx_mode,
2982 .ndo_set_mac_address = bcmgenet_set_mac_addr,
2983 .ndo_do_ioctl = bcmgenet_ioctl,
2984 .ndo_set_features = bcmgenet_set_features,
2985 #ifdef CONFIG_NET_POLL_CONTROLLER
2986 .ndo_poll_controller = bcmgenet_poll_controller,
2987 #endif
2988 };
2990 /* Array of GENET hardware parameters/characteristics */
2991 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
2992 [GENET_V1] = {
2993 .tx_queues = 0,
2994 .tx_bds_per_q = 0,
2995 .rx_queues = 0,
2996 .rx_bds_per_q = 0,
2997 .bp_in_en_shift = 16,
2998 .bp_in_mask = 0xffff,
2999 .hfb_filter_cnt = 16,
3000 .qtag_mask = 0x1F,
3001 .hfb_offset = 0x1000,
3002 .rdma_offset = 0x2000,
3003 .tdma_offset = 0x3000,
3004 .words_per_bd = 2,
3005 },
3006 [GENET_V2] = {
3007 .tx_queues = 4,
3008 .tx_bds_per_q = 32,
3009 .rx_queues = 0,
3010 .rx_bds_per_q = 0,
3011 .bp_in_en_shift = 16,
3012 .bp_in_mask = 0xffff,
3013 .hfb_filter_cnt = 16,
3014 .qtag_mask = 0x1F,
3015 .tbuf_offset = 0x0600,
3016 .hfb_offset = 0x1000,
3017 .hfb_reg_offset = 0x2000,
3018 .rdma_offset = 0x3000,
3019 .tdma_offset = 0x4000,
3020 .words_per_bd = 2,
3021 .flags = GENET_HAS_EXT,
3022 },
3023 [GENET_V3] = {
3024 .tx_queues = 4,
3025 .tx_bds_per_q = 32,
3026 .rx_queues = 0,
3027 .rx_bds_per_q = 0,
3028 .bp_in_en_shift = 17,
3029 .bp_in_mask = 0x1ffff,
3030 .hfb_filter_cnt = 48,
3031 .hfb_filter_size = 128,
3032 .qtag_mask = 0x3F,
3033 .tbuf_offset = 0x0600,
3034 .hfb_offset = 0x8000,
3035 .hfb_reg_offset = 0xfc00,
3036 .rdma_offset = 0x10000,
3037 .tdma_offset = 0x11000,
3038 .words_per_bd = 2,
3039 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3040 GENET_HAS_MOCA_LINK_DET,
3041 },
3042 [GENET_V4] = {
3043 .tx_queues = 4,
3044 .tx_bds_per_q = 32,
3045 .rx_queues = 0,
3046 .rx_bds_per_q = 0,
3047 .bp_in_en_shift = 17,
3048 .bp_in_mask = 0x1ffff,
3049 .hfb_filter_cnt = 48,
3050 .hfb_filter_size = 128,
3051 .qtag_mask = 0x3F,
3052 .tbuf_offset = 0x0600,
3053 .hfb_offset = 0x8000,
3054 .hfb_reg_offset = 0xfc00,
3055 .rdma_offset = 0x2000,
3056 .tdma_offset = 0x4000,
3057 .words_per_bd = 3,
3058 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3059 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3060 },
3061 };
3063 /* Infer hardware parameters from the detected GENET version */
3064 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3064 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3065 {
3066 struct bcmgenet_hw_params *params;
3067 u32 reg;
3068 u8 major;
3069 u16 gphy_rev;
3071 if (GENET_IS_V4(priv)) {
3072 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3073 genet_dma_ring_regs = genet_dma_ring_regs_v4;
3074 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3075 priv->version = GENET_V4;
3076 } else if (GENET_IS_V3(priv)) {
3077 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3078 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3079 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3080 priv->version = GENET_V3;
3081 } else if (GENET_IS_V2(priv)) {
3082 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3083 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3084 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3085 priv->version = GENET_V2;
3086 } else if (GENET_IS_V1(priv)) {
3087 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3088 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3089 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3090 priv->version = GENET_V1;
3091 }
3093 /* enum genet_version starts at 1 */
3094 priv->hw_params = &bcmgenet_hw_params[priv->version];
3095 params = priv->hw_params;
3097 /* Read GENET HW version */
3098 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3099 major = (reg >> 24 & 0x0f);
3100 if (major == 5)
3101 major = 4;
3102 else if (major == 0)
3103 major = 1;
3104 if (major != priv->version) {
3105 dev_err(&priv->pdev->dev,
3106 "GENET version mismatch, got: %d, configured for: %d\n",
3107 major, priv->version);
3108 }
3110 /* Print the GENET core version */
3111 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3112 major, (reg >> 16) & 0x0f, reg & 0xffff);
3114 /* Store the integrated PHY revision for the MDIO probing function
3115 * to pass this information to the PHY driver. The PHY driver expects
3116 * to find the PHY major revision in bits 15:8 while the GENET register
3117 * stores that information in bits 7:0, account for that.
3119 * On newer chips, starting with PHY revision G0, a new scheme is
3120 * deployed similar to the Starfighter 2 switch with GPHY major
3121 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3122 * is reserved as well as special value 0x01ff, we have a small
3123 * heuristic to check for the new GPHY revision and re-arrange things
3124 * so the GPHY driver is happy.
3125 */
3126 gphy_rev = reg & 0xffff;
3128 /* This is the good old scheme, just GPHY major, no minor nor patch */
3129 if ((gphy_rev & 0xf0) != 0)
3130 priv->gphy_rev = gphy_rev << 8;
3132 /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3133 else if ((gphy_rev & 0xff00) != 0)
3134 priv->gphy_rev = gphy_rev;
3136 /* This is reserved so should require special treatment */
3137 else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3138 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3139 return;
3140 }
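/* For instance, a register reading of 0x0040 (old scheme) is stored as
 * priv->gphy_rev = 0x4000, while 0x1002 (new scheme: major G0, patch 2)
 * is stored unchanged.
 */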
3142 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3143 if (!(params->flags & GENET_HAS_40BITS))
3144 pr_warn("GENET does not support 40-bits PA\n");
3145 #endif
3147 pr_debug("Configuration for version: %d\n"
3148 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3149 "BP << en: %2d, BP msk: 0x%05x\n"
3150 "HFB count: %2d, QTAQ msk: 0x%05x\n"
3151 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3152 "RDMA: 0x%05x, TDMA: 0x%05x\n"
3155 params->tx_queues, params->tx_bds_per_q,
3156 params->rx_queues, params->rx_bds_per_q,
3157 params->bp_in_en_shift, params->bp_in_mask,
3158 params->hfb_filter_cnt, params->qtag_mask,
3159 params->tbuf_offset, params->hfb_offset,
3160 params->hfb_reg_offset,
3161 params->rdma_offset, params->tdma_offset,
3162 params->words_per_bd);
3163 }
3165 static const struct of_device_id bcmgenet_match[] = {
3166 { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
3167 { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
3168 { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
3169 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3170 { },
3171 };
3172 MODULE_DEVICE_TABLE(of, bcmgenet_match);
3174 static int bcmgenet_probe(struct platform_device *pdev)
3175 {
3176 struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
3177 struct device_node *dn = pdev->dev.of_node;
3178 const struct of_device_id *of_id = NULL;
3179 struct bcmgenet_priv *priv;
3180 struct net_device *dev;
3181 const void *macaddr;
3182 struct resource *r;
3183 int err = -EIO;
3185 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3186 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3187 GENET_MAX_MQ_CNT + 1);
3188 if (!dev) {
3189 dev_err(&pdev->dev, "can't allocate net device\n");
3190 return -ENOMEM;
3191 }
3193 if (dn) {
3194 of_id = of_match_node(bcmgenet_match, dn);
3195 if (!of_id)
3196 return -EINVAL;
3197 }
3199 priv = netdev_priv(dev);
3200 priv->irq0 = platform_get_irq(pdev, 0);
3201 priv->irq1 = platform_get_irq(pdev, 1);
3202 priv->wol_irq = platform_get_irq(pdev, 2);
3203 if (!priv->irq0 || !priv->irq1) {
3204 dev_err(&pdev->dev, "can't find IRQs\n");
3205 err = -EINVAL;
3206 goto err;
3207 }
3209 if (dn) {
3210 macaddr = of_get_mac_address(dn);
3211 if (!macaddr) {
3212 dev_err(&pdev->dev, "can't find MAC address\n");
3213 err = -EINVAL;
3214 goto err;
3215 }
3216 } else {
3217 macaddr = pd->mac_address;
3218 }
3220 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3221 priv->base = devm_ioremap_resource(&pdev->dev, r);
3222 if (IS_ERR(priv->base)) {
3223 err = PTR_ERR(priv->base);
3224 goto err;
3225 }
3227 SET_NETDEV_DEV(dev, &pdev->dev);
3228 dev_set_drvdata(&pdev->dev, dev);
3229 ether_addr_copy(dev->dev_addr, macaddr);
3230 dev->watchdog_timeo = 2 * HZ;
3231 dev->ethtool_ops = &bcmgenet_ethtool_ops;
3232 dev->netdev_ops = &bcmgenet_netdev_ops;
3234 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
3236 /* Set hardware features */
3237 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
3238 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
3240 /* Request the WOL interrupt and advertise suspend if available */
3241 priv->wol_irq_disabled = true;
3242 err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
3243 dev->name, priv);
3244 if (!err)
3245 device_set_wakeup_capable(&pdev->dev, 1);
3247 /* Set the needed headroom to account for any possible
3248 * features enabling/disabling at runtime
3249 */
3250 dev->needed_headroom += 64;
3252 netdev_boot_setup_check(dev);
3254 priv->dev = dev;
3255 priv->pdev = pdev;
3256 if (of_id)
3257 priv->version = (enum bcmgenet_version)of_id->data;
3258 else
3259 priv->version = pd->genet_version;
3261 priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
3262 if (IS_ERR(priv->clk)) {
3263 dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
3264 priv->clk = NULL;
3265 }
3267 clk_prepare_enable(priv->clk);
3269 bcmgenet_set_hw_params(priv);
3271 /* Mii wait queue */
3272 init_waitqueue_head(&priv->wq);
3273 /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
3274 priv->rx_buf_len = RX_BUF_LENGTH;
3275 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
3277 priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
3278 if (IS_ERR(priv->clk_wol)) {
3279 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
3280 priv->clk_wol = NULL;
3281 }
3283 priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
3284 if (IS_ERR(priv->clk_eee)) {
3285 dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
3286 priv->clk_eee = NULL;
3287 }
3289 err = reset_umac(priv);
3290 if (err)
3291 goto err_clk_disable;
3293 err = bcmgenet_mii_init(dev);
3294 if (err)
3295 goto err_clk_disable;
3297 /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
3298 * just the ring 16 descriptor based TX
3299 */
3300 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
3301 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
3303 /* libphy will determine the link state */
3304 netif_carrier_off(dev);
3306 /* Turn off the main clock, WOL clock is handled separately */
3307 clk_disable_unprepare(priv->clk);
3309 err = register_netdev(dev);
3310 if (err)
3311 goto err;
3313 return err;
3315 err_clk_disable:
3316 clk_disable_unprepare(priv->clk);
3317 err:
3318 free_netdev(dev);
3319 return err;
3320 }
3322 static int bcmgenet_remove(struct platform_device *pdev)
3323 {
3324 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
3326 dev_set_drvdata(&pdev->dev, NULL);
3327 unregister_netdev(priv->dev);
3328 bcmgenet_mii_exit(priv->dev);
3329 free_netdev(priv->dev);
3331 return 0;
3332 }
3334 #ifdef CONFIG_PM_SLEEP
3335 static int bcmgenet_suspend(struct device *d)
3336 {
3337 struct net_device *dev = dev_get_drvdata(d);
3338 struct bcmgenet_priv *priv = netdev_priv(dev);
3339 int ret;
3341 if (!netif_running(dev))
3342 return 0;
3344 bcmgenet_netif_stop(dev);
3346 phy_suspend(priv->phydev);
3348 netif_device_detach(dev);
3350 /* Disable MAC receive */
3351 umac_enable_set(priv, CMD_RX_EN, false);
3353 ret = bcmgenet_dma_teardown(priv);
3354 if (ret)
3355 return ret;
3357 /* Disable MAC transmit. TX DMA must be disabled before this */
3358 umac_enable_set(priv, CMD_TX_EN, false);
3360 /* tx reclaim */
3361 bcmgenet_tx_reclaim_all(dev);
3362 bcmgenet_fini_dma(priv);
3364 /* Prepare the device for Wake-on-LAN and switch to the slow clock */
3365 if (device_may_wakeup(d) && priv->wolopts) {
3366 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
3367 clk_prepare_enable(priv->clk_wol);
3368 } else if (priv->internal_phy) {
3369 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3370 }
3372 /* Turn off the clocks */
3373 clk_disable_unprepare(priv->clk);
3375 return ret;
3376 }
3378 static int bcmgenet_resume(struct device *d)
3379 {
3380 struct net_device *dev = dev_get_drvdata(d);
3381 struct bcmgenet_priv *priv = netdev_priv(dev);
3382 unsigned long dma_ctrl;
3383 u32 reg;
3384 int ret;
3386 if (!netif_running(dev))
3387 return 0;
3389 /* Turn on the clock */
3390 ret = clk_prepare_enable(priv->clk);
3391 if (ret)
3392 return ret;
3394 /* If this is an internal GPHY, power it back on now, before UniMAC is
3395 * brought out of reset as absolutely no UniMAC activity is allowed
3396 */
3397 if (priv->internal_phy)
3398 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3400 bcmgenet_umac_reset(priv);
3402 ret = init_umac(priv);
3403 if (ret)
3404 goto out_clk_disable;
3406 /* From WOL-enabled suspend, switch to regular clock */
3407 if (priv->wolopts)
3408 clk_disable_unprepare(priv->clk_wol);
3410 phy_init_hw(priv->phydev);
3411 /* Speed settings must be restored */
3412 bcmgenet_mii_config(priv->dev);
3414 /* disable ethernet MAC while updating its registers */
3415 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
3417 bcmgenet_set_hw_addr(priv, dev->dev_addr);
3419 if (priv->internal_phy) {
3420 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
3421 reg |= EXT_ENERGY_DET_MASK;
3422 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
3423 }
3425 if (priv->wolopts)
3426 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
3428 /* Disable RX/TX DMA and flush TX queues */
3429 dma_ctrl = bcmgenet_dma_disable(priv);
3431 /* Reinitialize TDMA and RDMA and SW housekeeping */
3432 ret = bcmgenet_init_dma(priv);
3433 if (ret) {
3434 netdev_err(dev, "failed to initialize DMA\n");
3435 goto out_clk_disable;
3436 }
3438 /* Always enable ring 16 - descriptor ring */
3439 bcmgenet_enable_dma(priv, dma_ctrl);
3441 netif_device_attach(dev);
3443 phy_resume(priv->phydev);
3445 if (priv->eee.eee_enabled)
3446 bcmgenet_eee_enable_set(dev, true);
3448 bcmgenet_netif_start(dev);
3450 return 0;
3452 out_clk_disable:
3453 clk_disable_unprepare(priv->clk);
3454 return ret;
3455 }
3456 #endif /* CONFIG_PM_SLEEP */
3458 static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
3460 static struct platform_driver bcmgenet_driver = {
3461 .probe = bcmgenet_probe,
3462 .remove = bcmgenet_remove,
3463 .driver = {
3464 .name = "bcmgenet",
3465 .of_match_table = bcmgenet_match,
3466 .pm = &bcmgenet_pm_ops,
3467 },
3468 };
3469 module_platform_driver(bcmgenet_driver);
3471 MODULE_AUTHOR("Broadcom Corporation");
3472 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
3473 MODULE_ALIAS("platform:bcmgenet");
3474 MODULE_LICENSE("GPL");