1 diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
2 index 2204c57..25e3425 100644
3 --- a/drivers/net/ethernet/freescale/Kconfig
4 +++ b/drivers/net/ethernet/freescale/Kconfig
5 @@ -7,10 +7,11 @@ config NET_VENDOR_FREESCALE
7 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
8 M523x || M527x || M5272 || M528x || M520x || M532x || \
9 - ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
11 + ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
13 - If you have a network (Ethernet) card belonging to this class, say Y.
14 + If you have a network (Ethernet) card belonging to this class, say Y
15 + and read the Ethernet-HOWTO, available from
16 + <http://www.tldp.org/docs.html#howto>.
18 Note that the answer to this question doesn't directly affect the
19 kernel: saying N will just cause the configurator to skip all
20 @@ -22,8 +23,8 @@ if NET_VENDOR_FREESCALE
22 tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
23 depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
26 + ARCH_MXC || SOC_IMX28)
27 + default ARCH_MXC || SOC_IMX28 if ARM
31 @@ -54,7 +55,6 @@ config FEC_MPC52xx_MDIO
32 If compiled as module, it will be called fec_mpc52xx_phy.
34 source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
35 -source "drivers/net/ethernet/freescale/fman/Kconfig"
38 tristate "Freescale PQ MDIO"
39 @@ -85,12 +85,12 @@ config UGETH_TX_ON_DEMAND
42 tristate "Gianfar Ethernet"
48 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
49 - and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
51 + and MPC86xx family of chips, and the FEC on the 8540.
53 endif # NET_VENDOR_FREESCALE
54 diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
55 index 7f022dd..71debd1 100644
56 --- a/drivers/net/ethernet/freescale/Makefile
57 +++ b/drivers/net/ethernet/freescale/Makefile
61 obj-$(CONFIG_FEC) += fec.o
62 -fec-objs :=fec_main.o fec_fixup.o fec_ptp.o
63 -CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
64 -CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
66 +fec-objs :=fec_main.o fec_ptp.o
67 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
68 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
69 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
70 @@ -20,5 +17,3 @@ gianfar_driver-objs := gianfar.o \
72 obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
73 ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
75 -obj-$(CONFIG_FSL_FMAN) += fman/
76 diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
77 index 1d7b3cc..ecdc711 100644
78 --- a/drivers/net/ethernet/freescale/fec.h
79 +++ b/drivers/net/ethernet/freescale/fec.h
81 #include <linux/timecounter.h>
83 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
84 - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
85 - defined(CONFIG_ARM64)
86 + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
87 + defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
89 * Just figures, Motorola would have to change the offsets for
90 * registers in the same peripheral device on different models
94 * Define the buffer descriptor structure.
96 - * Evidently, ARM SoCs have the FEC block generated in a
97 - * little endian mode so adjust endianness accordingly.
99 -#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
100 -#define fec32_to_cpu le32_to_cpu
101 -#define fec16_to_cpu le16_to_cpu
102 -#define cpu_to_fec32 cpu_to_le32
103 -#define cpu_to_fec16 cpu_to_le16
104 -#define __fec32 __le32
105 -#define __fec16 __le16
107 +#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
109 - __fec16 cbd_datlen; /* Data length */
110 - __fec16 cbd_sc; /* Control and status info */
111 - __fec32 cbd_bufaddr; /* Buffer address */
112 + unsigned short cbd_datlen; /* Data length */
113 + unsigned short cbd_sc; /* Control and status info */
114 + unsigned long cbd_bufaddr; /* Buffer address */
117 -#define fec32_to_cpu be32_to_cpu
118 -#define fec16_to_cpu be16_to_cpu
119 -#define cpu_to_fec32 cpu_to_be32
120 -#define cpu_to_fec16 cpu_to_be16
121 -#define __fec32 __be32
122 -#define __fec16 __be16
125 - __fec16 cbd_sc; /* Control and status info */
126 - __fec16 cbd_datlen; /* Data length */
127 - __fec32 cbd_bufaddr; /* Buffer address */
128 + unsigned short cbd_sc; /* Control and status info */
129 + unsigned short cbd_datlen; /* Data length */
130 + unsigned long cbd_bufaddr; /* Buffer address */
141 + unsigned long cbd_esc;
142 + unsigned long cbd_prot;
143 + unsigned long cbd_bdu;
145 + unsigned short res0[4];
149 @@ -294,7 +277,7 @@ struct bufdesc_ex {
152 /* This device has up to three irqs on some platforms */
153 -#define FEC_IRQ_NUM 4
154 +#define FEC_IRQ_NUM 3
156 /* Maximum number of queues supported
157 * ENET with AVB IP can support up to 3 independent tx queues and rx queues.
158 @@ -312,6 +295,12 @@ struct bufdesc_ex {
159 #define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
161 FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
162 +#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
164 + FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
165 +#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
167 + FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
169 #define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
171 @@ -379,7 +368,6 @@ struct bufdesc_ex {
172 #define FEC_ENET_TS_TIMER ((uint)0x00008000)
174 #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
175 -#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
176 #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
178 #define FEC_ENET_ETHEREN ((uint)0x00000002)
179 @@ -448,32 +436,12 @@ struct bufdesc_ex {
180 #define FEC_QUIRK_SINGLE_MDIO (1 << 11)
181 /* Controller supports RACC register */
182 #define FEC_QUIRK_HAS_RACC (1 << 12)
183 -/* Controller supports interrupt coalesc */
184 -#define FEC_QUIRK_HAS_COALESCE (1 << 13)
185 -/* Interrupt doesn't wake CPU from deep idle */
186 -#define FEC_QUIRK_ERR006687 (1 << 14)
188 * i.MX6Q/DL ENET cannot wake up system in wait mode because ENET tx & rx
189 * interrupt signal don't connect to GPC. So use pm qos to avoid cpu enter
192 -#define FEC_QUIRK_BUG_WAITMODE (1 << 15)
194 -/* PHY fixup flag define */
195 -#define FEC_QUIRK_AR8031_FIXUP (1 << 0)
197 -struct bufdesc_prop {
199 - /* Address of Rx and Tx buffers */
200 - struct bufdesc *base;
201 - struct bufdesc *last;
202 - struct bufdesc *cur;
203 - void __iomem *reg_desc_active;
205 - unsigned short ring_size;
206 - unsigned char dsize;
207 - unsigned char dsize_log2;
209 +#define FEC_QUIRK_BUG_WAITMODE (1 << 13)
211 struct fec_enet_stop_mode {
213 @@ -482,21 +450,32 @@ struct fec_enet_stop_mode {
216 struct fec_enet_priv_tx_q {
217 - struct bufdesc_prop bd;
219 unsigned char *tx_bounce[TX_RING_SIZE];
220 struct sk_buff *tx_skbuff[TX_RING_SIZE];
223 + struct bufdesc *tx_bd_base;
226 unsigned short tx_stop_threshold;
227 unsigned short tx_wake_threshold;
229 + struct bufdesc *cur_tx;
230 struct bufdesc *dirty_tx;
232 dma_addr_t tso_hdrs_dma;
235 struct fec_enet_priv_rx_q {
236 - struct bufdesc_prop bd;
238 struct sk_buff *rx_skbuff[RX_RING_SIZE];
241 + struct bufdesc *rx_bd_base;
244 + struct bufdesc *cur_rx;
247 /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
248 @@ -536,20 +515,22 @@ struct fec_enet_private {
249 unsigned long work_ts;
250 unsigned long work_mdio;
252 + unsigned short bufdesc_size;
254 struct platform_device *pdev;
258 /* Phylib and MDIO interface */
259 struct mii_bus *mii_bus;
260 + struct phy_device *phy_dev;
263 - bool active_in_suspend;
264 + bool miibus_up_failed;
266 phy_interface_t phy_interface;
267 struct device_node *phy_node;
272 struct completion mdio_done;
273 @@ -559,7 +540,8 @@ struct fec_enet_private {
278 + int phy_reset_gpio;
279 + int phy_reset_duration;
281 struct napi_struct napi;
283 @@ -602,19 +584,14 @@ struct fec_enet_private {
285 unsigned int next_counter;
287 - u64 ethtool_stats[0];
289 struct fec_enet_stop_mode gpr;
292 void fec_ptp_init(struct platform_device *pdev);
293 -void fec_ptp_stop(struct platform_device *pdev);
294 void fec_ptp_start_cyclecounter(struct net_device *ndev);
295 int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
296 int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
297 uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
298 -void fec_enet_register_fixup(struct net_device *ndev);
299 -int of_fec_enet_parse_fixup(struct device_node *np);
301 /****************************************************************************/
303 diff --git a/drivers/net/ethernet/freescale/fec_fixup.c b/drivers/net/ethernet/freescale/fec_fixup.c
304 deleted file mode 100644
305 index 5a8497c..0000000
306 --- a/drivers/net/ethernet/freescale/fec_fixup.c
310 - * Copyright 2017 NXP
312 - * This program is free software; you can redistribute it and/or
313 - * modify it under the terms of the GNU General Public License
314 - * as published by the Free Software Foundation; either version 2
315 - * of the License, or (at your option) any later version.
317 - * This program is distributed in the hope that it will be useful,
318 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
319 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
320 - * GNU General Public License for more details.
323 -#include <linux/netdevice.h>
324 -#include <linux/phy.h>
327 -#define PHY_ID_AR8031 0x004dd074
329 -static int ar8031_phy_fixup(struct phy_device *dev)
333 - /* Set RGMII IO voltage to 1.8V */
334 - phy_write(dev, 0x1d, 0x1f);
335 - phy_write(dev, 0x1e, 0x8);
337 - /* Disable phy AR8031 SmartEEE function */
338 - phy_write(dev, 0xd, 0x3);
339 - phy_write(dev, 0xe, 0x805d);
340 - phy_write(dev, 0xd, 0x4003);
341 - val = phy_read(dev, 0xe);
342 - val &= ~(0x1 << 8);
343 - phy_write(dev, 0xe, val);
345 - /* Introduce tx clock delay */
346 - phy_write(dev, 0x1d, 0x5);
347 - phy_write(dev, 0x1e, 0x100);
352 -void fec_enet_register_fixup(struct net_device *ndev)
354 - struct fec_enet_private *fep = netdev_priv(ndev);
355 - static int registered = 0;
358 - if (!IS_BUILTIN(CONFIG_PHYLIB))
361 - if (fep->fixups & FEC_QUIRK_AR8031_FIXUP) {
362 - static int ar8031_registered = 0;
364 - if (ar8031_registered)
366 - err = phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
369 - netdev_info(ndev, "Cannot register PHY board fixup\n");
374 -int of_fec_enet_parse_fixup(struct device_node *np)
378 - if (of_get_property(np, "fsl,ar8031-phy-fixup", NULL))
379 - fixups |= FEC_QUIRK_AR8031_FIXUP;
383 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
384 index 41a31f2..15c06df 100644
385 --- a/drivers/net/ethernet/freescale/fec_main.c
386 +++ b/drivers/net/ethernet/freescale/fec_main.c
388 * Copyright (c) 2004-2006 Macq Electronique SA.
390 * Copyright (C) 2010-2014 Freescale Semiconductor, Inc.
392 - * Copyright 2017 NXP
395 #include <linux/module.h>
397 #include <linux/io.h>
398 #include <linux/irq.h>
399 #include <linux/clk.h>
400 -#include <linux/clk/clk-conf.h>
401 #include <linux/platform_device.h>
402 -#include <linux/mdio.h>
403 #include <linux/phy.h>
404 #include <linux/fec.h>
405 #include <linux/of.h>
407 #include <linux/regmap.h>
409 #include <asm/cacheflush.h>
410 -#include <soc/imx/cpuidle.h>
414 static void set_multicast_list(struct net_device *ndev);
415 static void fec_enet_itr_coal_init(struct net_device *ndev);
416 +static void fec_reset_phy(struct platform_device *pdev);
418 #define DRIVER_NAME "fec"
420 @@ -87,7 +83,6 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {1, 1, 1, 1, 2, 2, 2, 2};
421 #define FEC_ENET_RAEM_V 0x8
422 #define FEC_ENET_RAFL_V 0x8
423 #define FEC_ENET_OPD_V 0xFFF0
424 -#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
426 static struct platform_device_id fec_devtype[] = {
428 @@ -96,10 +91,10 @@ static struct platform_device_id fec_devtype[] = {
432 - .driver_data = FEC_QUIRK_USE_GASKET,
433 + .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
437 + .driver_data = FEC_QUIRK_HAS_RACC,
440 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
441 @@ -119,20 +114,12 @@ static struct platform_device_id fec_devtype[] = {
442 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
443 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
444 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
445 - FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
446 + FEC_QUIRK_HAS_RACC,
448 .name = "imx6ul-fec",
449 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
450 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
451 - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
452 - FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
454 - .name = "imx8qm-fec",
455 - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
456 - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
457 - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
458 - FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
459 - FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
460 + FEC_QUIRK_HAS_VLAN,
464 @@ -147,7 +134,6 @@ enum imx_fec_type {
471 static const struct of_device_id fec_dt_ids[] = {
472 @@ -158,7 +144,6 @@ static const struct of_device_id fec_dt_ids[] = {
473 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
474 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
475 { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
476 - { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
479 MODULE_DEVICE_TABLE(of, fec_dt_ids);
480 @@ -196,7 +181,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
481 /* FEC receive acceleration */
482 #define FEC_RACC_IPDIS (1 << 1)
483 #define FEC_RACC_PRODIS (1 << 2)
484 -#define FEC_RACC_SHIFT16 BIT(7)
485 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
488 @@ -205,8 +189,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
489 * account when setting it.
491 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
492 - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
493 - defined(CONFIG_ARM64)
494 + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
495 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
497 #define OPT_FRAME_SIZE 0
498 @@ -244,38 +227,86 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
500 #define IS_TSO_HEADER(txq, addr) \
501 ((addr >= txq->tso_hdrs_dma) && \
502 - (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
503 + (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
507 -static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
508 - struct bufdesc_prop *bd)
510 - return (bdp >= bd->last) ? bd->base
511 - : (struct bufdesc *)(((void *)bdp) + bd->dsize);
514 +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
515 + struct fec_enet_private *fep,
518 + struct bufdesc *new_bd = bdp + 1;
519 + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
520 + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
521 + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
522 + struct bufdesc_ex *ex_base;
523 + struct bufdesc *base;
526 + if (bdp >= txq->tx_bd_base) {
527 + base = txq->tx_bd_base;
528 + ring_size = txq->tx_ring_size;
529 + ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
531 + base = rxq->rx_bd_base;
532 + ring_size = rxq->rx_ring_size;
533 + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
536 -static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
537 - struct bufdesc_prop *bd)
539 - return (bdp <= bd->base) ? bd->last
540 - : (struct bufdesc *)(((void *)bdp) - bd->dsize);
541 + if (fep->bufdesc_ex)
542 + return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
543 + ex_base : ex_new_bd);
545 + return (new_bd >= (base + ring_size)) ?
550 +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
551 + struct fec_enet_private *fep,
554 + struct bufdesc *new_bd = bdp - 1;
555 + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
556 + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
557 + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
558 + struct bufdesc_ex *ex_base;
559 + struct bufdesc *base;
562 + if (bdp >= txq->tx_bd_base) {
563 + base = txq->tx_bd_base;
564 + ring_size = txq->tx_ring_size;
565 + ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
567 + base = rxq->rx_bd_base;
568 + ring_size = rxq->rx_ring_size;
569 + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
572 + if (fep->bufdesc_ex)
573 + return (struct bufdesc *)((ex_new_bd < ex_base) ?
574 + (ex_new_bd + ring_size) : ex_new_bd);
576 + return (new_bd < base) ? (new_bd + ring_size) : new_bd;
579 -static int fec_enet_get_bd_index(struct bufdesc *bdp,
580 - struct bufdesc_prop *bd)
581 +static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
582 + struct fec_enet_private *fep)
584 - return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
585 + return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
588 -static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
589 +static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
590 + struct fec_enet_priv_tx_q *txq)
594 - entries = (((const char *)txq->dirty_tx -
595 - (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
596 + entries = ((const char *)txq->dirty_tx -
597 + (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
599 - return entries >= 0 ? entries : entries + txq->bd.ring_size;
600 + return entries >= 0 ? entries : entries + txq->tx_ring_size;
603 static void swap_buffer(void *bufaddr, int len)
604 @@ -308,20 +339,18 @@ static void fec_dump(struct net_device *ndev)
605 pr_info("Nr SC addr len SKB\n");
607 txq = fep->tx_queue[0];
608 - bdp = txq->bd.base;
609 + bdp = txq->tx_bd_base;
612 - pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
613 + pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
615 - bdp == txq->bd.cur ? 'S' : ' ',
616 + bdp == txq->cur_tx ? 'S' : ' ',
617 bdp == txq->dirty_tx ? 'H' : ' ',
618 - fec16_to_cpu(bdp->cbd_sc),
619 - fec32_to_cpu(bdp->cbd_bufaddr),
620 - fec16_to_cpu(bdp->cbd_datlen),
621 + bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
622 txq->tx_skbuff[index]);
623 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
624 + bdp = fec_enet_get_nextdesc(bdp, fep, 0);
626 - } while (bdp != txq->bd.base);
627 + } while (bdp != txq->tx_bd_base);
630 static inline bool is_ipv4_pkt(struct sk_buff *skb)
631 @@ -352,9 +381,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
632 struct net_device *ndev)
634 struct fec_enet_private *fep = netdev_priv(ndev);
635 - struct bufdesc *bdp = txq->bd.cur;
636 + struct bufdesc *bdp = txq->cur_tx;
637 struct bufdesc_ex *ebdp;
638 int nr_frags = skb_shinfo(skb)->nr_frags;
639 + unsigned short queue = skb_get_queue_mapping(skb);
641 unsigned short status;
642 unsigned int estatus = 0;
643 @@ -366,10 +396,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
645 for (frag = 0; frag < nr_frags; frag++) {
646 this_frag = &skb_shinfo(skb)->frags[frag];
647 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
648 + bdp = fec_enet_get_nextdesc(bdp, fep, queue);
649 ebdp = (struct bufdesc_ex *)bdp;
651 - status = fec16_to_cpu(bdp->cbd_sc);
652 + status = bdp->cbd_sc;
653 status &= ~BD_ENET_TX_STATS;
654 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
655 frag_len = skb_shinfo(skb)->frags[frag].size;
656 @@ -387,16 +417,16 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
658 if (fep->bufdesc_ex) {
659 if (fep->quirks & FEC_QUIRK_HAS_AVB)
660 - estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
661 + estatus |= FEC_TX_BD_FTYPE(queue);
662 if (skb->ip_summed == CHECKSUM_PARTIAL)
663 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
665 - ebdp->cbd_esc = cpu_to_fec32(estatus);
666 + ebdp->cbd_esc = estatus;
669 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
671 - index = fec_enet_get_bd_index(bdp, &txq->bd);
672 + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
673 if (((unsigned long) bufaddr) & fep->tx_align ||
674 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
675 memcpy(txq->tx_bounce[index], bufaddr, frag_len);
676 @@ -409,27 +439,24 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
677 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
679 if (dma_mapping_error(&fep->pdev->dev, addr)) {
680 + dev_kfree_skb_any(skb);
682 netdev_err(ndev, "Tx DMA memory map failed\n");
683 goto dma_mapping_error;
686 - bdp->cbd_bufaddr = cpu_to_fec32(addr);
687 - bdp->cbd_datlen = cpu_to_fec16(frag_len);
688 - /* Make sure the updates to rest of the descriptor are
689 - * performed before transferring ownership.
692 - bdp->cbd_sc = cpu_to_fec16(status);
693 + bdp->cbd_bufaddr = addr;
694 + bdp->cbd_datlen = frag_len;
695 + bdp->cbd_sc = status;
702 for (i = 0; i < frag; i++) {
703 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
704 - dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
705 - fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
706 + bdp = fec_enet_get_nextdesc(bdp, fep, queue);
707 + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
708 + bdp->cbd_datlen, DMA_TO_DEVICE);
710 return ERR_PTR(-ENOMEM);
712 @@ -444,11 +471,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
714 unsigned short status;
715 unsigned short buflen;
716 + unsigned short queue;
717 unsigned int estatus = 0;
721 - entries_free = fec_enet_get_free_txdesc_num(txq);
722 + entries_free = fec_enet_get_free_txdesc_num(fep, txq);
723 if (entries_free < MAX_SKB_FRAGS + 1) {
724 dev_kfree_skb_any(skb);
726 @@ -463,16 +491,17 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
729 /* Fill in a Tx ring entry */
733 - status = fec16_to_cpu(bdp->cbd_sc);
734 + status = bdp->cbd_sc;
735 status &= ~BD_ENET_TX_STATS;
737 /* Set buffer length and buffer pointer */
739 buflen = skb_headlen(skb);
741 - index = fec_enet_get_bd_index(bdp, &txq->bd);
742 + queue = skb_get_queue_mapping(skb);
743 + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
744 if (((unsigned long) bufaddr) & fep->tx_align ||
745 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
746 memcpy(txq->tx_bounce[index], skb->data, buflen);
747 @@ -493,12 +522,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
750 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
751 - if (IS_ERR(last_bdp)) {
752 - dma_unmap_single(&fep->pdev->dev, addr,
753 - buflen, DMA_TO_DEVICE);
754 - dev_kfree_skb_any(skb);
755 + if (IS_ERR(last_bdp))
759 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
760 if (fep->bufdesc_ex) {
761 @@ -508,8 +533,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
762 estatus |= BD_ENET_TX_TS;
765 - bdp->cbd_bufaddr = cpu_to_fec32(addr);
766 - bdp->cbd_datlen = cpu_to_fec16(buflen);
768 if (fep->bufdesc_ex) {
770 @@ -520,43 +543,41 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
771 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
773 if (fep->quirks & FEC_QUIRK_HAS_AVB)
774 - estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
775 + estatus |= FEC_TX_BD_FTYPE(queue);
777 if (skb->ip_summed == CHECKSUM_PARTIAL)
778 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
781 - ebdp->cbd_esc = cpu_to_fec32(estatus);
782 + ebdp->cbd_esc = estatus;
785 - index = fec_enet_get_bd_index(last_bdp, &txq->bd);
786 + index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
787 /* Save skb pointer */
788 txq->tx_skbuff[index] = skb;
790 - /* Make sure the updates to rest of the descriptor are performed before
791 - * transferring ownership.
794 + bdp->cbd_datlen = buflen;
795 + bdp->cbd_bufaddr = addr;
797 /* Send it on its way. Tell FEC it's ready, interrupt when done,
798 * it's the last BD of the frame, and to put the CRC on the end.
800 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
801 - bdp->cbd_sc = cpu_to_fec16(status);
802 + bdp->cbd_sc = status;
804 /* If this was the last BD in the ring, start at the beginning again. */
805 - bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
806 + bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
808 skb_tx_timestamp(skb);
810 /* Make sure the update to bdp and tx_skbuff are performed before
818 /* Trigger transmission start */
819 - writel(0, txq->bd.reg_desc_active);
820 + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
824 @@ -569,11 +590,12 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
826 struct fec_enet_private *fep = netdev_priv(ndev);
827 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
828 + unsigned short queue = skb_get_queue_mapping(skb);
829 unsigned short status;
830 unsigned int estatus = 0;
833 - status = fec16_to_cpu(bdp->cbd_sc);
834 + status = bdp->cbd_sc;
835 status &= ~BD_ENET_TX_STATS;
837 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
838 @@ -595,16 +617,16 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
839 return NETDEV_TX_BUSY;
842 - bdp->cbd_datlen = cpu_to_fec16(size);
843 - bdp->cbd_bufaddr = cpu_to_fec32(addr);
844 + bdp->cbd_datlen = size;
845 + bdp->cbd_bufaddr = addr;
847 if (fep->bufdesc_ex) {
848 if (fep->quirks & FEC_QUIRK_HAS_AVB)
849 - estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
850 + estatus |= FEC_TX_BD_FTYPE(queue);
851 if (skb->ip_summed == CHECKSUM_PARTIAL)
852 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
854 - ebdp->cbd_esc = cpu_to_fec32(estatus);
855 + ebdp->cbd_esc = estatus;
858 /* Handle the last BD specially */
859 @@ -613,10 +635,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
861 status |= BD_ENET_TX_INTR;
863 - ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
864 + ebdp->cbd_esc |= BD_ENET_TX_INT;
867 - bdp->cbd_sc = cpu_to_fec16(status);
868 + bdp->cbd_sc = status;
872 @@ -629,12 +651,13 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
873 struct fec_enet_private *fep = netdev_priv(ndev);
874 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
875 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
876 + unsigned short queue = skb_get_queue_mapping(skb);
878 unsigned long dmabuf;
879 unsigned short status;
880 unsigned int estatus = 0;
882 - status = fec16_to_cpu(bdp->cbd_sc);
883 + status = bdp->cbd_sc;
884 status &= ~BD_ENET_TX_STATS;
885 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
887 @@ -658,19 +681,19 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
891 - bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
892 - bdp->cbd_datlen = cpu_to_fec16(hdr_len);
893 + bdp->cbd_bufaddr = dmabuf;
894 + bdp->cbd_datlen = hdr_len;
896 if (fep->bufdesc_ex) {
897 if (fep->quirks & FEC_QUIRK_HAS_AVB)
898 - estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
899 + estatus |= FEC_TX_BD_FTYPE(queue);
900 if (skb->ip_summed == CHECKSUM_PARTIAL)
901 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
903 - ebdp->cbd_esc = cpu_to_fec32(estatus);
904 + ebdp->cbd_esc = estatus;
907 - bdp->cbd_sc = cpu_to_fec16(status);
908 + bdp->cbd_sc = status;
912 @@ -682,12 +705,13 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
913 struct fec_enet_private *fep = netdev_priv(ndev);
914 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
915 int total_len, data_left;
916 - struct bufdesc *bdp = txq->bd.cur;
917 + struct bufdesc *bdp = txq->cur_tx;
918 + unsigned short queue = skb_get_queue_mapping(skb);
920 unsigned int index = 0;
923 - if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
924 + if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
925 dev_kfree_skb_any(skb);
927 netdev_err(ndev, "NOT enough BD for TSO!\n");
928 @@ -707,7 +731,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
929 while (total_len > 0) {
932 - index = fec_enet_get_bd_index(bdp, &txq->bd);
933 + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
934 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
935 total_len -= data_left;
937 @@ -722,8 +746,9 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
940 size = min_t(int, tso.size, data_left);
941 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
942 - index = fec_enet_get_bd_index(bdp, &txq->bd);
943 + bdp = fec_enet_get_nextdesc(bdp, fep, queue);
944 + index = fec_enet_get_bd_index(txq->tx_bd_base,
946 ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
949 @@ -736,22 +761,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
950 tso_build_data(skb, &tso, size);
953 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
954 + bdp = fec_enet_get_nextdesc(bdp, fep, queue);
957 /* Save skb pointer */
958 txq->tx_skbuff[index] = skb;
960 skb_tx_timestamp(skb);
964 /* Trigger transmission start */
965 if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
966 - !readl(txq->bd.reg_desc_active) ||
967 - !readl(txq->bd.reg_desc_active) ||
968 - !readl(txq->bd.reg_desc_active) ||
969 - !readl(txq->bd.reg_desc_active))
970 - writel(0, txq->bd.reg_desc_active);
971 + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
972 + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
973 + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
974 + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
975 + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
979 @@ -781,7 +806,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
983 - entries_free = fec_enet_get_free_txdesc_num(txq);
984 + entries_free = fec_enet_get_free_txdesc_num(fep, txq);
985 if (entries_free <= txq->tx_stop_threshold)
986 netif_tx_stop_queue(nq);
988 @@ -802,45 +827,45 @@ static void fec_enet_bd_init(struct net_device *dev)
989 for (q = 0; q < fep->num_rx_queues; q++) {
990 /* Initialize the receive buffer descriptors. */
991 rxq = fep->rx_queue[q];
992 - bdp = rxq->bd.base;
993 + bdp = rxq->rx_bd_base;
995 - for (i = 0; i < rxq->bd.ring_size; i++) {
996 + for (i = 0; i < rxq->rx_ring_size; i++) {
998 /* Initialize the BD for every fragment in the page. */
999 if (bdp->cbd_bufaddr)
1000 - bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
1001 + bdp->cbd_sc = BD_ENET_RX_EMPTY;
1003 - bdp->cbd_sc = cpu_to_fec16(0);
1004 - bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1006 + bdp = fec_enet_get_nextdesc(bdp, fep, q);
1009 /* Set the last buffer to wrap */
1010 - bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
1011 - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1012 + bdp = fec_enet_get_prevdesc(bdp, fep, q);
1013 + bdp->cbd_sc |= BD_SC_WRAP;
1015 - rxq->bd.cur = rxq->bd.base;
1016 + rxq->cur_rx = rxq->rx_bd_base;
1019 for (q = 0; q < fep->num_tx_queues; q++) {
1020 /* ...and the same for transmit */
1021 txq = fep->tx_queue[q];
1022 - bdp = txq->bd.base;
1023 - txq->bd.cur = bdp;
1024 + bdp = txq->tx_bd_base;
1025 + txq->cur_tx = bdp;
1027 - for (i = 0; i < txq->bd.ring_size; i++) {
1028 + for (i = 0; i < txq->tx_ring_size; i++) {
1029 /* Initialize the BD for every fragment in the page. */
1030 - bdp->cbd_sc = cpu_to_fec16(0);
1032 if (txq->tx_skbuff[i]) {
1033 dev_kfree_skb_any(txq->tx_skbuff[i]);
1034 txq->tx_skbuff[i] = NULL;
1036 - bdp->cbd_bufaddr = cpu_to_fec32(0);
1037 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1038 + bdp->cbd_bufaddr = 0;
1039 + bdp = fec_enet_get_nextdesc(bdp, fep, q);
1042 /* Set the last buffer to wrap */
1043 - bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
1044 - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1045 + bdp = fec_enet_get_prevdesc(bdp, fep, q);
1046 + bdp->cbd_sc |= BD_SC_WRAP;
1047 txq->dirty_tx = bdp;
1050 @@ -851,7 +876,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
1053 for (i = 0; i < fep->num_rx_queues; i++)
1054 - writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1055 + writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
1058 static void fec_enet_enable_ring(struct net_device *ndev)
1059 @@ -863,7 +888,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
1061 for (i = 0; i < fep->num_rx_queues; i++) {
1062 rxq = fep->rx_queue[i];
1063 - writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1064 + writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
1065 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
1068 @@ -874,7 +899,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
1070 for (i = 0; i < fep->num_tx_queues; i++) {
1071 txq = fep->tx_queue[i];
1072 - writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1073 + writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
1077 @@ -892,7 +917,7 @@ static void fec_enet_reset_skb(struct net_device *ndev)
1078 for (i = 0; i < fep->num_tx_queues; i++) {
1079 txq = fep->tx_queue[i];
1081 - for (j = 0; j < txq->bd.ring_size; j++) {
1082 + for (j = 0; j < txq->tx_ring_size; j++) {
1083 if (txq->tx_skbuff[j]) {
1084 dev_kfree_skb_any(txq->tx_skbuff[j]);
1085 txq->tx_skbuff[j] = NULL;
1086 @@ -930,11 +955,11 @@ fec_restart(struct net_device *ndev)
1087 * enet-mac reset will reset mac address registers too,
1088 * so need to reconfigure it.
1090 - memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1091 - writel((__force u32)cpu_to_be32(temp_mac[0]),
1092 - fep->hwp + FEC_ADDR_LOW);
1093 - writel((__force u32)cpu_to_be32(temp_mac[1]),
1094 - fep->hwp + FEC_ADDR_HIGH);
1095 + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1096 + memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1097 + writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
1098 + writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
1101 /* Clear any outstanding interrupt. */
1102 writel(0xffffffff, fep->hwp + FEC_IEVENT);
1103 @@ -961,16 +986,13 @@ fec_restart(struct net_device *ndev)
1105 #if !defined(CONFIG_M5272)
1106 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1107 + /* set RX checksum */
1108 val = readl(fep->hwp + FEC_RACC);
1109 - /* align IP header */
1110 - val |= FEC_RACC_SHIFT16;
1111 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1112 - /* set RX checksum */
1113 val |= FEC_RACC_OPTIONS;
1115 val &= ~FEC_RACC_OPTIONS;
1116 writel(val, fep->hwp + FEC_RACC);
1117 - writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1119 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1121 @@ -995,10 +1017,10 @@ fec_restart(struct net_device *ndev)
1124 /* 1G, 100M or 10M */
1125 - if (ndev->phydev) {
1126 - if (ndev->phydev->speed == SPEED_1000)
1127 + if (fep->phy_dev) {
1128 + if (fep->phy_dev->speed == SPEED_1000)
1130 - else if (ndev->phydev->speed == SPEED_100)
1131 + else if (fep->phy_dev->speed == SPEED_100)
1135 @@ -1019,7 +1041,7 @@ fec_restart(struct net_device *ndev)
1137 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1138 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1139 - if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1140 + if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
1141 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1142 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1144 @@ -1033,7 +1055,7 @@ fec_restart(struct net_device *ndev)
1145 /* enable pause frame*/
1146 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1147 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1148 - ndev->phydev && ndev->phydev->pause)) {
1149 + fep->phy_dev && fep->phy_dev->pause)) {
1150 rcntl |= FEC_ENET_FCE;
1152 /* set FIFO threshold parameter to reduce overrun */
1153 @@ -1213,12 +1235,13 @@ static void
1154 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1156 struct fec_enet_private *fep;
1157 - struct bufdesc *bdp;
1158 + struct bufdesc *bdp, *bdp_t;
1159 unsigned short status;
1160 struct sk_buff *skb;
1161 struct fec_enet_priv_tx_q *txq;
1162 struct netdev_queue *nq;
1167 fep = netdev_priv(ndev);
1168 @@ -1231,27 +1254,37 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1169 bdp = txq->dirty_tx;
1171 /* get next bdp of dirty_tx */
1172 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1173 + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1175 - while (bdp != READ_ONCE(txq->bd.cur)) {
1176 - /* Order the load of bd.cur and cbd_sc */
1177 + while (bdp != READ_ONCE(txq->cur_tx)) {
1178 + /* Order the load of cur_tx and cbd_sc */
1180 - status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1181 + status = READ_ONCE(bdp->cbd_sc);
1182 if (status & BD_ENET_TX_READY)
1185 - index = fec_enet_get_bd_index(bdp, &txq->bd);
1189 + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1190 skb = txq->tx_skbuff[index];
1192 + bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1193 + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1194 + skb = txq->tx_skbuff[index];
1197 + if ((status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1200 + for (i = 0; i < bdnum; i++) {
1201 + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1202 + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1203 + bdp->cbd_datlen, DMA_TO_DEVICE);
1204 + bdp->cbd_bufaddr = 0;
1205 + if (i < bdnum - 1)
1206 + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1208 txq->tx_skbuff[index] = NULL;
1209 - if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1210 - dma_unmap_single(&fep->pdev->dev,
1211 - fec32_to_cpu(bdp->cbd_bufaddr),
1212 - fec16_to_cpu(bdp->cbd_datlen),
1214 - bdp->cbd_bufaddr = cpu_to_fec32(0);
1218 /* Check for errors. */
1219 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1220 @@ -1278,7 +1311,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1221 struct skb_shared_hwtstamps shhwtstamps;
1222 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1224 - fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1225 + fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
1226 skb_tstamp_tx(skb, &shhwtstamps);
1229 @@ -1290,7 +1323,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1231 /* Free the sk buffer associated with this last transmit */
1232 dev_kfree_skb_any(skb);
1235 /* Make sure the update to bdp and tx_skbuff are performed
1238 @@ -1298,21 +1331,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1239 txq->dirty_tx = bdp;
1241 /* Update pointer to next buffer descriptor to be transmitted */
1242 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1243 + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1245 /* Since we have freed up a buffer, the ring is no longer full
1247 if (netif_queue_stopped(ndev)) {
1248 - entries_free = fec_enet_get_free_txdesc_num(txq);
1249 + entries_free = fec_enet_get_free_txdesc_num(fep, txq);
1250 if (entries_free >= txq->tx_wake_threshold)
1251 netif_tx_wake_queue(nq);
1255 /* ERR006538: Keep the transmitter going */
1256 - if (bdp != txq->bd.cur &&
1257 - readl(txq->bd.reg_desc_active) == 0)
1258 - writel(0, txq->bd.reg_desc_active);
1259 + if (bdp != txq->cur_tx &&
1260 + readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
1261 + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
1265 @@ -1338,8 +1371,10 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
1267 skb_reserve(skb, fep->rx_align + 1 - off);
1269 - bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
1270 - if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1271 + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
1272 + FEC_ENET_RX_FRSIZE - fep->rx_align,
1274 + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
1275 if (net_ratelimit())
1276 netdev_err(ndev, "Rx DMA memory map failed\n");
1278 @@ -1361,8 +1396,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1282 - dma_sync_single_for_cpu(&fep->pdev->dev,
1283 - fec32_to_cpu(bdp->cbd_bufaddr),
1284 + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
1285 FEC_ENET_RX_FRSIZE - fep->rx_align,
1288 @@ -1374,7 +1408,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1292 -/* During a receive, the bd_rx.cur points to the current incoming buffer.
1293 +/* During a receive, the cur_rx points to the current incoming buffer.
1294 * When we update through the ring, if the next incoming buffer has
1295 * not been given to the system, we just set the empty indicator,
1296 * effectively tossing the packet.
1297 @@ -1407,9 +1441,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1298 /* First, grab all of the stats for the incoming packet.
1299 * These get messed up if we get called due to a busy condition.
1301 - bdp = rxq->bd.cur;
1302 + bdp = rxq->cur_rx;
1304 - while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1305 + while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
1307 if (pkt_received >= budget)
1309 @@ -1445,10 +1479,10 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1311 /* Process the incoming frame. */
1312 ndev->stats.rx_packets++;
1313 - pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1314 + pkt_len = bdp->cbd_datlen;
1315 ndev->stats.rx_bytes += pkt_len;
1317 - index = fec_enet_get_bd_index(bdp, &rxq->bd);
1318 + index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
1319 skb = rxq->rx_skbuff[index];
1321 /* The packet length includes FCS, but we don't want to
1322 @@ -1463,8 +1497,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1323 ndev->stats.rx_dropped++;
1324 goto rx_processing_done;
1326 - dma_unmap_single(&fep->pdev->dev,
1327 - fec32_to_cpu(bdp->cbd_bufaddr),
1328 + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1329 FEC_ENET_RX_FRSIZE - fep->rx_align,
1332 @@ -1472,15 +1505,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1333 prefetch(skb->data - NET_IP_ALIGN);
1334 skb_put(skb, pkt_len - 4);
1337 if (!is_copybreak && need_swap)
1338 swap_buffer(data, pkt_len);
1340 -#if !defined(CONFIG_M5272)
1341 - if (fep->quirks & FEC_QUIRK_HAS_RACC)
1342 - data = skb_pull_inline(skb, 2);
1345 /* Extract the enhanced buffer descriptor */
1347 if (fep->bufdesc_ex)
1348 @@ -1489,8 +1516,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1349 /* If this is a VLAN packet remove the VLAN Tag */
1350 vlan_packet_rcvd = false;
1351 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1352 - fep->bufdesc_ex &&
1353 - (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1354 + fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
1355 /* Push and remove the vlan tag */
1356 struct vlan_hdr *vlan_header =
1357 (struct vlan_hdr *) (data + ETH_HLEN);
1358 @@ -1506,12 +1532,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1360 /* Get receive timestamp from the skb */
1361 if (fep->hwts_rx_en && fep->bufdesc_ex)
1362 - fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1363 + fec_enet_hwtstamp(fep, ebdp->ts,
1364 skb_hwtstamps(skb));
1366 if (fep->bufdesc_ex &&
1367 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1368 - if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1369 + if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
1370 /* don't check it */
1371 skb->ip_summed = CHECKSUM_UNNECESSARY;
1373 @@ -1528,8 +1554,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1374 napi_gro_receive(&fep->napi, skb);
1377 - dma_sync_single_for_device(&fep->pdev->dev,
1378 - fec32_to_cpu(bdp->cbd_bufaddr),
1379 + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
1380 FEC_ENET_RX_FRSIZE - fep->rx_align,
1383 @@ -1543,30 +1568,26 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1385 /* Mark the buffer empty */
1386 status |= BD_ENET_RX_EMPTY;
1387 + bdp->cbd_sc = status;
1389 if (fep->bufdesc_ex) {
1390 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1392 - ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1393 + ebdp->cbd_esc = BD_ENET_RX_INT;
1397 - /* Make sure the updates to rest of the descriptor are
1398 - * performed before transferring ownership.
1401 - bdp->cbd_sc = cpu_to_fec16(status);
1403 /* Update BD pointer to next entry */
1404 - bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1405 + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1407 /* Doing this here will keep the FEC running while we process
1408 * incoming frames. On a heavily loaded network, we should be
1409 * able to keep up at the expense of system resources.
1411 - writel(0, rxq->bd.reg_desc_active);
1412 + writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
1414 - rxq->bd.cur = bdp;
1415 + rxq->cur_rx = bdp;
1416 return pkt_received;
1419 @@ -1578,15 +1599,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
1420 struct fec_enet_private *fep = netdev_priv(ndev);
1422 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
1425 - ret = fec_enet_rx_queue(ndev,
1426 + clear_bit(queue_id, &fep->work_rx);
1427 + pkt_received += fec_enet_rx_queue(ndev,
1428 budget - pkt_received, queue_id);
1430 - if (ret < budget - pkt_received)
1431 - clear_bit(queue_id, &fep->work_rx);
1433 - pkt_received += ret;
1435 return pkt_received;
1437 @@ -1631,7 +1646,7 @@ fec_enet_interrupt(int irq, void *dev_id)
1439 if (napi_schedule_prep(&fep->napi)) {
1440 /* Disable the NAPI interrupts */
1441 - writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
1442 + writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1443 __napi_schedule(&fep->napi);
1446 @@ -1742,7 +1757,7 @@ static void fec_get_mac(struct net_device *ndev)
1447 static void fec_enet_adjust_link(struct net_device *ndev)
1449 struct fec_enet_private *fep = netdev_priv(ndev);
1450 - struct phy_device *phy_dev = ndev->phydev;
1451 + struct phy_device *phy_dev = fep->phy_dev;
1452 int status_change = 0;
1454 /* Prevent a state halted on mii error */
1455 @@ -1802,16 +1817,10 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1456 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1458 struct fec_enet_private *fep = bus->priv;
1459 - struct device *dev = &fep->pdev->dev;
1460 unsigned long time_left;
1463 - ret = pm_runtime_get_sync(dev);
1467 fep->mii_timeout = 0;
1468 - reinit_completion(&fep->mdio_done);
1469 + init_completion(&fep->mdio_done);
1471 /* start a read op */
1472 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
1473 @@ -1824,35 +1833,21 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1474 if (time_left == 0) {
1475 fep->mii_timeout = 1;
1476 netdev_err(fep->netdev, "MDIO read timeout\n");
1479 + return -ETIMEDOUT;
1482 - ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1485 - pm_runtime_mark_last_busy(dev);
1486 - pm_runtime_put_autosuspend(dev);
1489 + /* return value */
1490 + return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1493 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1496 struct fec_enet_private *fep = bus->priv;
1497 - struct device *dev = &fep->pdev->dev;
1498 unsigned long time_left;
1501 - ret = pm_runtime_get_sync(dev);
1507 fep->mii_timeout = 0;
1508 - reinit_completion(&fep->mdio_done);
1509 + init_completion(&fep->mdio_done);
1511 /* start a write op */
1512 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
1513 @@ -1866,13 +1861,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1514 if (time_left == 0) {
1515 fep->mii_timeout = 1;
1516 netdev_err(fep->netdev, "MDIO write timeout\n");
1518 + return -ETIMEDOUT;
1521 - pm_runtime_mark_last_busy(dev);
1522 - pm_runtime_put_autosuspend(dev);
1528 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1529 @@ -1881,10 +1873,18 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1533 + ret = clk_prepare_enable(fep->clk_ahb);
1536 + ret = clk_prepare_enable(fep->clk_ipg);
1538 + goto failed_clk_ipg;
1539 if (fep->clk_enet_out) {
1540 ret = clk_prepare_enable(fep->clk_enet_out);
1543 + goto failed_clk_enet_out;
1545 + fec_reset_phy(fep->pdev);
1548 mutex_lock(&fep->ptp_clk_mutex);
1549 @@ -1903,6 +1903,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1550 goto failed_clk_ref;
1553 + clk_disable_unprepare(fep->clk_ahb);
1554 + clk_disable_unprepare(fep->clk_ipg);
1555 if (fep->clk_enet_out)
1556 clk_disable_unprepare(fep->clk_enet_out);
1558 @@ -1923,27 +1925,23 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1560 if (fep->clk_enet_out)
1561 clk_disable_unprepare(fep->clk_enet_out);
1562 +failed_clk_enet_out:
1563 + clk_disable_unprepare(fep->clk_ipg);
1565 + clk_disable_unprepare(fep->clk_ahb);
1570 -static int fec_restore_mii_bus(struct net_device *ndev)
1571 +static void fec_restore_mii_bus(struct net_device *ndev)
1573 struct fec_enet_private *fep = netdev_priv(ndev);
1576 - ret = pm_runtime_get_sync(&fep->pdev->dev);
1580 + fec_enet_clk_enable(ndev, true);
1581 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1582 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1583 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1584 writel(FEC_ENET_ETHEREN, fep->hwp + FEC_ECNTRL);
1586 - pm_runtime_mark_last_busy(&fep->pdev->dev);
1587 - pm_runtime_put_autosuspend(&fep->pdev->dev);
1591 static int fec_enet_mii_probe(struct net_device *ndev)
1592 @@ -1955,6 +1953,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1594 int dev_id = fep->dev_id;
1596 + fep->phy_dev = NULL;
1598 if (fep->phy_node) {
1599 phy_dev = of_phy_connect(ndev, fep->phy_node,
1600 &fec_enet_adjust_link, 0,
1601 @@ -1964,7 +1964,11 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1603 /* check for attached phy */
1604 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
1605 - if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
1606 + if ((fep->mii_bus->phy_mask & (1 << phy_id)))
1608 + if (fep->mii_bus->mdio_map[phy_id] == NULL)
1610 + if (fep->mii_bus->mdio_map[phy_id]->addr == 0)
1614 @@ -2002,10 +2006,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1616 phy_dev->advertising = phy_dev->supported;
1618 + fep->phy_dev = phy_dev;
1620 fep->full_duplex = 0;
1622 - phy_attached_info(phy_dev);
1623 + netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1624 + fep->phy_dev->drv->name, NULL,
1625 + fep->phy_dev->irq);
1629 @@ -2017,7 +2024,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1630 struct net_device *ndev = platform_get_drvdata(pdev);
1631 struct fec_enet_private *fep = netdev_priv(ndev);
1632 struct device_node *node;
1634 + int err = -ENXIO, i;
1635 u32 mii_speed, holdtime;
1638 @@ -2036,7 +2043,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1639 * mdio interface in board design, and need to be configured by
1642 - if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1643 + if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
1644 /* fec1 uses fec0 mii_bus */
1645 if (mii_cnt && fec0_mii_bus) {
1646 fep->mii_bus = fec0_mii_bus;
1647 @@ -2100,29 +2107,38 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1648 fep->mii_bus->priv = fep;
1649 fep->mii_bus->parent = &pdev->dev;
1651 +/* fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1652 + if (!fep->mii_bus->irq) {
1654 + goto err_out_free_mdiobus;
1657 + for (i = 0; i < PHY_MAX_ADDR; i++)
1658 + fep->mii_bus->irq[i] = PHY_POLL;
1660 node = of_get_child_by_name(pdev->dev.of_node, "mdio");
1662 err = of_mdiobus_register(fep->mii_bus, node);
1664 - } else if (fep->phy_node && !fep->fixed_link) {
1665 - err = -EPROBE_DEFER;
1667 err = mdiobus_register(fep->mii_bus);
1671 - goto err_out_free_mdiobus;
1672 + goto err_out_free_mdio_irq;
1676 /* save fec0 mii_bus */
1677 - if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) {
1678 + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1679 fec0_mii_bus = fep->mii_bus;
1680 fec_mii_bus_share = &fep->mii_bus_share;
1685 +err_out_free_mdio_irq:
1686 + kfree(fep->mii_bus->irq);
1687 err_out_free_mdiobus:
1688 mdiobus_free(fep->mii_bus);
1690 @@ -2133,10 +2149,35 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
1692 if (--mii_cnt == 0) {
1693 mdiobus_unregister(fep->mii_bus);
1694 + kfree(fep->mii_bus->irq);
1695 mdiobus_free(fep->mii_bus);
1699 +static int fec_enet_get_settings(struct net_device *ndev,
1700 + struct ethtool_cmd *cmd)
1702 + struct fec_enet_private *fep = netdev_priv(ndev);
1703 + struct phy_device *phydev = fep->phy_dev;
1708 + return phy_ethtool_gset(phydev, cmd);
1711 +static int fec_enet_set_settings(struct net_device *ndev,
1712 + struct ethtool_cmd *cmd)
1714 + struct fec_enet_private *fep = netdev_priv(ndev);
1715 + struct phy_device *phydev = fep->phy_dev;
1720 + return phy_ethtool_sset(phydev, cmd);
1723 static void fec_enet_get_drvinfo(struct net_device *ndev,
1724 struct ethtool_drvinfo *info)
1726 @@ -2163,8 +2204,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
1728 /* List of registers that can be safety be read to dump them with ethtool */
1729 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
1730 - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
1731 - defined(CONFIG_ARM64)
1732 + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
1733 + defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
1734 static u32 fec_enet_register_offset[] = {
1735 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
1736 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
1737 @@ -2270,7 +2311,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1739 struct fec_enet_private *fep = netdev_priv(ndev);
1741 - if (!ndev->phydev)
1742 + if (!fep->phy_dev)
1745 if (pause->tx_pause != pause->rx_pause) {
1746 @@ -2286,17 +2327,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1747 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
1749 if (pause->rx_pause || pause->autoneg) {
1750 - ndev->phydev->supported |= ADVERTISED_Pause;
1751 - ndev->phydev->advertising |= ADVERTISED_Pause;
1752 + fep->phy_dev->supported |= ADVERTISED_Pause;
1753 + fep->phy_dev->advertising |= ADVERTISED_Pause;
1755 - ndev->phydev->supported &= ~ADVERTISED_Pause;
1756 - ndev->phydev->advertising &= ~ADVERTISED_Pause;
1757 + fep->phy_dev->supported &= ~ADVERTISED_Pause;
1758 + fep->phy_dev->advertising &= ~ADVERTISED_Pause;
1761 if (pause->autoneg) {
1762 if (netif_running(ndev))
1764 - phy_start_aneg(ndev->phydev);
1765 + phy_start_aneg(fep->phy_dev);
1767 if (netif_running(ndev)) {
1768 napi_disable(&fep->napi);
1769 @@ -2376,26 +2417,14 @@ static const struct fec_stat {
1770 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
1773 -#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
1775 -static void fec_enet_update_ethtool_stats(struct net_device *dev)
1776 +static void fec_enet_get_ethtool_stats(struct net_device *dev,
1777 + struct ethtool_stats *stats, u64 *data)
1779 struct fec_enet_private *fep = netdev_priv(dev);
1782 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1783 - fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
1786 -static void fec_enet_get_ethtool_stats(struct net_device *dev,
1787 - struct ethtool_stats *stats, u64 *data)
1789 - struct fec_enet_private *fep = netdev_priv(dev);
1791 - if (netif_running(dev))
1792 - fec_enet_update_ethtool_stats(dev);
1794 - memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
1795 + data[i] = readl(fep->hwp + fec_stats[i].offset);
1798 static void fec_enet_get_strings(struct net_device *netdev,
1799 @@ -2420,17 +2449,12 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
1804 -#else /* !defined(CONFIG_M5272) */
1805 -#define FEC_STATS_SIZE 0
1806 -static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
1809 #endif /* !defined(CONFIG_M5272) */
1811 static int fec_enet_nway_reset(struct net_device *dev)
1813 - struct phy_device *phydev = dev->phydev;
1814 + struct fec_enet_private *fep = netdev_priv(dev);
1815 + struct phy_device *phydev = fep->phy_dev;
1819 @@ -2455,6 +2479,9 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
1820 struct fec_enet_private *fep = netdev_priv(ndev);
1823 + if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1826 /* Must be greater than zero to avoid unpredictable behavior */
1827 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
1828 !fep->tx_time_itr || !fep->tx_pkts_itr)
1829 @@ -2477,12 +2504,10 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
1831 writel(tx_itr, fep->hwp + FEC_TXIC0);
1832 writel(rx_itr, fep->hwp + FEC_RXIC0);
1833 - if (fep->quirks & FEC_QUIRK_HAS_AVB) {
1834 - writel(tx_itr, fep->hwp + FEC_TXIC1);
1835 - writel(rx_itr, fep->hwp + FEC_RXIC1);
1836 - writel(tx_itr, fep->hwp + FEC_TXIC2);
1837 - writel(rx_itr, fep->hwp + FEC_RXIC2);
1839 + writel(tx_itr, fep->hwp + FEC_TXIC1);
1840 + writel(rx_itr, fep->hwp + FEC_RXIC1);
1841 + writel(tx_itr, fep->hwp + FEC_TXIC2);
1842 + writel(rx_itr, fep->hwp + FEC_RXIC2);
1846 @@ -2490,7 +2515,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
1848 struct fec_enet_private *fep = netdev_priv(ndev);
1850 - if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
1851 + if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1854 ec->rx_coalesce_usecs = fep->rx_time_itr;
1855 @@ -2508,28 +2533,28 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
1856 struct fec_enet_private *fep = netdev_priv(ndev);
1859 - if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
1860 + if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1863 if (ec->rx_max_coalesced_frames > 255) {
1864 - pr_err("Rx coalesced frames exceed hardware limitation\n");
1865 + pr_err("Rx coalesced frames exceed hardware limiation");
1869 if (ec->tx_max_coalesced_frames > 255) {
1870 - pr_err("Tx coalesced frame exceed hardware limitation\n");
1871 + pr_err("Tx coalesced frame exceed hardware limiation");
1875 cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
1876 if (cycle > 0xFFFF) {
1877 - pr_err("Rx coalesced usec exceed hardware limitation\n");
1878 + pr_err("Rx coalesed usec exceeed hardware limiation");
1882 cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
1883 if (cycle > 0xFFFF) {
1884 - pr_err("Rx coalesced usec exceed hardware limitation\n");
1885 + pr_err("Rx coalesed usec exceeed hardware limiation");
1889 @@ -2629,6 +2654,8 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1892 static const struct ethtool_ops fec_enet_ethtool_ops = {
1893 + .get_settings = fec_enet_get_settings,
1894 + .set_settings = fec_enet_set_settings,
1895 .get_drvinfo = fec_enet_get_drvinfo,
1896 .get_regs_len = fec_enet_get_regs_len,
1897 .get_regs = fec_enet_get_regs,
1898 @@ -2648,14 +2675,12 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
1899 .set_tunable = fec_enet_set_tunable,
1900 .get_wol = fec_enet_get_wol,
1901 .set_wol = fec_enet_set_wol,
1902 - .get_link_ksettings = phy_ethtool_get_link_ksettings,
1903 - .set_link_ksettings = phy_ethtool_set_link_ksettings,
1906 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1908 struct fec_enet_private *fep = netdev_priv(ndev);
1909 - struct phy_device *phydev = ndev->phydev;
1910 + struct phy_device *phydev = fep->phy_dev;
1912 if (!netif_running(ndev))
1914 @@ -2685,25 +2710,25 @@ static void fec_enet_free_buffers(struct net_device *ndev)
1916 for (q = 0; q < fep->num_rx_queues; q++) {
1917 rxq = fep->rx_queue[q];
1918 - bdp = rxq->bd.base;
1919 - for (i = 0; i < rxq->bd.ring_size; i++) {
1920 + bdp = rxq->rx_bd_base;
1921 + for (i = 0; i < rxq->rx_ring_size; i++) {
1922 skb = rxq->rx_skbuff[i];
1923 rxq->rx_skbuff[i] = NULL;
1925 dma_unmap_single(&fep->pdev->dev,
1926 - fec32_to_cpu(bdp->cbd_bufaddr),
1928 FEC_ENET_RX_FRSIZE - fep->rx_align,
1932 - bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1933 + bdp = fec_enet_get_nextdesc(bdp, fep, q);
1937 for (q = 0; q < fep->num_tx_queues; q++) {
1938 txq = fep->tx_queue[q];
1939 - bdp = txq->bd.base;
1940 - for (i = 0; i < txq->bd.ring_size; i++) {
1941 + bdp = txq->tx_bd_base;
1942 + for (i = 0; i < txq->tx_ring_size; i++) {
1943 kfree(txq->tx_bounce[i]);
1944 txq->tx_bounce[i] = NULL;
1945 skb = txq->tx_skbuff[i];
1946 @@ -2722,8 +2747,8 @@ static void fec_enet_free_queue(struct net_device *ndev)
1947 for (i = 0; i < fep->num_tx_queues; i++)
1948 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
1949 txq = fep->tx_queue[i];
1950 - dma_free_coherent(&fep->pdev->dev,
1951 - txq->bd.ring_size * TSO_HEADER_SIZE,
1952 + dma_free_coherent(NULL,
1953 + txq->tx_ring_size * TSO_HEADER_SIZE,
1957 @@ -2749,15 +2774,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
1960 fep->tx_queue[i] = txq;
1961 - txq->bd.ring_size = TX_RING_SIZE;
1962 - fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
1963 + txq->tx_ring_size = TX_RING_SIZE;
1964 + fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
1966 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
1967 txq->tx_wake_threshold =
1968 - (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
1969 + (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
1971 - txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
1972 - txq->bd.ring_size * TSO_HEADER_SIZE,
1973 + txq->tso_hdrs = dma_alloc_coherent(NULL,
1974 + txq->tx_ring_size * TSO_HEADER_SIZE,
1977 if (!txq->tso_hdrs) {
1978 @@ -2774,8 +2799,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
1982 - fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
1983 - fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
1984 + fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
1985 + fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
1989 @@ -2794,8 +2819,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
1990 struct fec_enet_priv_rx_q *rxq;
1992 rxq = fep->rx_queue[queue];
1993 - bdp = rxq->bd.base;
1994 - for (i = 0; i < rxq->bd.ring_size; i++) {
1995 + bdp = rxq->rx_bd_base;
1996 + for (i = 0; i < rxq->rx_ring_size; i++) {
1997 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
2000 @@ -2806,19 +2831,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
2003 rxq->rx_skbuff[i] = skb;
2004 - bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
2005 + bdp->cbd_sc = BD_ENET_RX_EMPTY;
2007 if (fep->bufdesc_ex) {
2008 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2009 - ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
2010 + ebdp->cbd_esc = BD_ENET_RX_INT;
2013 - bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2014 + bdp = fec_enet_get_nextdesc(bdp, fep, queue);
2017 /* Set the last buffer to wrap. */
2018 - bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
2019 - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2020 + bdp = fec_enet_get_prevdesc(bdp, fep, queue);
2021 + bdp->cbd_sc |= BD_SC_WRAP;
2025 @@ -2835,26 +2860,26 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
2026 struct fec_enet_priv_tx_q *txq;
2028 txq = fep->tx_queue[queue];
2029 - bdp = txq->bd.base;
2030 - for (i = 0; i < txq->bd.ring_size; i++) {
2031 + bdp = txq->tx_bd_base;
2032 + for (i = 0; i < txq->tx_ring_size; i++) {
2033 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
2034 if (!txq->tx_bounce[i])
2037 - bdp->cbd_sc = cpu_to_fec16(0);
2038 - bdp->cbd_bufaddr = cpu_to_fec32(0);
2040 + bdp->cbd_bufaddr = 0;
2042 if (fep->bufdesc_ex) {
2043 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2044 - ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
2045 + ebdp->cbd_esc = BD_ENET_TX_INT;
2048 - bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
2049 + bdp = fec_enet_get_nextdesc(bdp, fep, queue);
2052 /* Set the last buffer to wrap. */
2053 - bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
2054 - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2055 + bdp = fec_enet_get_prevdesc(bdp, fep, queue);
2056 + bdp->cbd_sc |= BD_SC_WRAP;
2060 @@ -2903,14 +2928,10 @@ fec_enet_open(struct net_device *ndev)
2061 platform_get_device_id(fep->pdev);
2064 - ret = pm_runtime_get_sync(&fep->pdev->dev);
2068 pinctrl_pm_select_default_state(&fep->pdev->dev);
2069 ret = fec_enet_clk_enable(ndev, true);
2074 /* I should reset the ring buffers here, but I don't yet know
2075 * a simple way to do that.
2076 @@ -2928,13 +2949,11 @@ fec_enet_open(struct net_device *ndev)
2078 goto err_enet_mii_probe;
2080 - if (fep->quirks & FEC_QUIRK_ERR006687)
2081 - imx6q_cpuidle_fec_irqs_used();
2083 napi_enable(&fep->napi);
2084 - phy_start(ndev->phydev);
2085 + phy_start(fep->phy_dev);
2086 netif_tx_start_all_queues(ndev);
2088 + pm_runtime_get_sync(ndev->dev.parent);
2089 if ((id_entry->driver_data & FEC_QUIRK_BUG_WAITMODE) &&
2090 !fec_enet_irq_workaround(fep))
2091 pm_qos_add_request(&fep->pm_qos_req,
2092 @@ -2947,16 +2966,14 @@ fec_enet_open(struct net_device *ndev)
2094 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
2095 FEC_WOL_FLAG_ENABLE);
2096 + fep->miibus_up_failed = false;
2101 fec_enet_free_buffers(ndev);
2103 - fec_enet_clk_enable(ndev, false);
2105 - pm_runtime_mark_last_busy(&fep->pdev->dev);
2106 - pm_runtime_put_autosuspend(&fep->pdev->dev);
2107 + fep->miibus_up_failed = true;
2108 if (!fep->mii_bus_share)
2109 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2111 @@ -2967,7 +2984,7 @@ fec_enet_close(struct net_device *ndev)
2113 struct fec_enet_private *fep = netdev_priv(ndev);
2115 - phy_stop(ndev->phydev);
2116 + phy_stop(fep->phy_dev);
2118 if (netif_device_present(ndev)) {
2119 napi_disable(&fep->napi);
2120 @@ -2975,21 +2992,13 @@ fec_enet_close(struct net_device *ndev)
2124 - phy_disconnect(ndev->phydev);
2125 - ndev->phydev = NULL;
2127 - if (fep->quirks & FEC_QUIRK_ERR006687)
2128 - imx6q_cpuidle_fec_irqs_unused();
2130 - fec_enet_update_ethtool_stats(ndev);
2131 + phy_disconnect(fep->phy_dev);
2132 + fep->phy_dev = NULL;
2134 fec_enet_clk_enable(ndev, false);
2135 pm_qos_remove_request(&fep->pm_qos_req);
2136 - if (!fep->mii_bus_share)
2137 - pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2138 - pm_runtime_mark_last_busy(&fep->pdev->dev);
2139 - pm_runtime_put_autosuspend(&fep->pdev->dev);
2141 + pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2142 + pm_runtime_put_sync_suspend(ndev->dev.parent);
2143 fec_enet_free_buffers(ndev);
2146 @@ -3005,7 +3014,7 @@ fec_enet_close(struct net_device *ndev)
2147 * this kind of feature?).
2150 -#define FEC_HASH_BITS 6 /* #bits in hash */
2151 +#define HASH_BITS 6 /* #bits in hash */
2152 #define CRC32_POLY 0xEDB88320
2154 static void set_multicast_list(struct net_device *ndev)
2155 @@ -3014,7 +3023,6 @@ static void set_multicast_list(struct net_device *ndev)
2156 struct netdev_hw_addr *ha;
2157 unsigned int i, bit, data, crc, tmp;
2159 - unsigned int hash_high, hash_low;
2161 if (ndev->flags & IFF_PROMISC) {
2162 tmp = readl(fep->hwp + FEC_R_CNTRL);
2163 @@ -3037,10 +3045,10 @@ static void set_multicast_list(struct net_device *ndev)
2167 - /* Add the addresses in hash register
2168 + /* Clear filter and add the addresses in hash register
2172 + writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2173 + writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2175 netdev_for_each_mc_addr(ha, ndev) {
2176 /* calculate crc32 value of mac address */
2177 @@ -3054,20 +3062,21 @@ static void set_multicast_list(struct net_device *ndev)
2181 - /* only upper 6 bits (FEC_HASH_BITS) are used
2182 + /* only upper 6 bits (HASH_BITS) are used
2183 * which point to specific bit in he hash registers
2185 - hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2186 + hash = (crc >> (32 - HASH_BITS)) & 0x3f;
2189 - hash_high |= 1 << (hash - 32);
2190 + tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2191 + tmp |= 1 << (hash - 32);
2192 + writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2194 - hash_low |= 1 << hash;
2195 + tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2197 + writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2201 - writel_relaxed(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2202 - writel_relaxed(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2205 /* Set a MAC change in hardware. */
2206 @@ -3122,6 +3131,7 @@ static void fec_poll_controller(struct net_device *dev)
2210 +#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
2211 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
2212 netdev_features_t features)
2214 @@ -3145,7 +3155,7 @@ static int fec_set_features(struct net_device *netdev,
2215 struct fec_enet_private *fep = netdev_priv(netdev);
2216 netdev_features_t changed = features ^ netdev->features;
2218 - if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
2219 + if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
2220 napi_disable(&fep->napi);
2221 netif_tx_lock_bh(netdev);
2223 @@ -3209,14 +3219,6 @@ static const struct net_device_ops fec_netdev_ops = {
2224 .ndo_set_features = fec_set_features,
2227 -static const unsigned short offset_des_active_rxq[] = {
2228 - FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
2231 -static const unsigned short offset_des_active_txq[] = {
2232 - FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
2236 * XXX: We need to clean up on failure exits here.
2238 @@ -3224,16 +3226,14 @@ static const unsigned short offset_des_active_txq[] = {
2239 static int fec_enet_init(struct net_device *ndev)
2241 struct fec_enet_private *fep = netdev_priv(ndev);
2242 + struct fec_enet_priv_tx_q *txq;
2243 + struct fec_enet_priv_rx_q *rxq;
2244 struct bufdesc *cbd_base;
2248 - unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
2249 - sizeof(struct bufdesc);
2250 - unsigned dsize_log2 = __fls(dsize);
2252 - WARN_ON(dsize != (1 << dsize_log2));
2253 -#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
2254 +#if defined(CONFIG_ARM)
2255 fep->rx_align = 0xf;
2256 fep->tx_align = 0xf;
2258 @@ -3243,11 +3243,16 @@ static int fec_enet_init(struct net_device *ndev)
2260 fec_enet_alloc_queue(ndev);
2262 - bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
2263 + if (fep->bufdesc_ex)
2264 + fep->bufdesc_size = sizeof(struct bufdesc_ex);
2266 + fep->bufdesc_size = sizeof(struct bufdesc);
2267 + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
2268 + fep->bufdesc_size;
2270 /* Allocate memory for buffer descriptors. */
2271 - cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
2273 + cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
2278 @@ -3261,35 +3266,33 @@ static int fec_enet_init(struct net_device *ndev)
2280 /* Set receive and transmit descriptor base. */
2281 for (i = 0; i < fep->num_rx_queues; i++) {
2282 - struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
2283 - unsigned size = dsize * rxq->bd.ring_size;
2286 - rxq->bd.base = cbd_base;
2287 - rxq->bd.cur = cbd_base;
2288 - rxq->bd.dma = bd_dma;
2289 - rxq->bd.dsize = dsize;
2290 - rxq->bd.dsize_log2 = dsize_log2;
2291 - rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
2293 - cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
2294 - rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
2295 + rxq = fep->rx_queue[i];
2297 + rxq->rx_bd_base = (struct bufdesc *)cbd_base;
2298 + rxq->bd_dma = bd_dma;
2299 + if (fep->bufdesc_ex) {
2300 + bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
2301 + cbd_base = (struct bufdesc *)
2302 + (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
2304 + bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
2305 + cbd_base += rxq->rx_ring_size;
2309 for (i = 0; i < fep->num_tx_queues; i++) {
2310 - struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
2311 - unsigned size = dsize * txq->bd.ring_size;
2314 - txq->bd.base = cbd_base;
2315 - txq->bd.cur = cbd_base;
2316 - txq->bd.dma = bd_dma;
2317 - txq->bd.dsize = dsize;
2318 - txq->bd.dsize_log2 = dsize_log2;
2319 - txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
2321 - cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
2322 - txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
2323 + txq = fep->tx_queue[i];
2325 + txq->tx_bd_base = (struct bufdesc *)cbd_base;
2326 + txq->bd_dma = bd_dma;
2327 + if (fep->bufdesc_ex) {
2328 + bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
2329 + cbd_base = (struct bufdesc *)
2330 + (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
2332 + bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
2333 + cbd_base += txq->tx_ring_size;
2338 @@ -3323,60 +3326,62 @@ static int fec_enet_init(struct net_device *ndev)
2342 - fec_enet_update_ethtool_stats(ndev);
2348 -static int fec_reset_phy(struct platform_device *pdev)
2349 +static void fec_reset_phy(struct platform_device *pdev)
2351 + struct net_device *ndev = platform_get_drvdata(pdev);
2352 + struct fec_enet_private *fep = netdev_priv(ndev);
2354 + if (!gpio_is_valid(fep->phy_reset_gpio))
2357 + gpio_set_value_cansleep(fep->phy_reset_gpio, 0);
2358 + msleep(fep->phy_reset_duration);
2359 + gpio_set_value_cansleep(fep->phy_reset_gpio, 1);
2362 +static int fec_get_reset_gpio(struct platform_device *pdev)
2365 - bool active_high = false;
2367 struct device_node *np = pdev->dev.of_node;
2372 - err = of_property_read_u32(np, "phy-reset-duration", &msec);
2373 - /* A sane reset duration should not be longer than 1s */
2374 - if (!err && msec > 1000)
2376 + struct net_device *ndev = platform_get_drvdata(pdev);
2377 + struct fec_enet_private *fep = netdev_priv(ndev);
2379 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
2380 - if (phy_reset == -EPROBE_DEFER)
2381 + if (!gpio_is_valid(phy_reset))
2383 - else if (!gpio_is_valid(phy_reset))
2386 - active_high = of_property_read_bool(np, "phy-reset-active-high");
2388 err = devm_gpio_request_one(&pdev->dev, phy_reset,
2389 - active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
2391 + GPIOF_OUT_INIT_LOW, "phy-reset");
2393 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
2400 - usleep_range(msec * 1000, msec * 1000 + 1000);
2402 - gpio_set_value_cansleep(phy_reset, !active_high);
2406 + of_property_read_u32(np, "phy-reset-duration", &msec);
2407 + /* A sane reset duration should not be longer than 1s */
2410 + fep->phy_reset_duration = msec;
2414 #else /* CONFIG_OF */
2415 -static int fec_reset_phy(struct platform_device *pdev)
2416 +static void fec_reset_phy(struct platform_device *pdev)
2419 * In case of platform probe, the reset has been done
2425 +static inline int fec_get_reset_gpio(struct platform_device *pdev)
2429 #endif /* CONFIG_OF */
2431 @@ -3384,6 +3389,7 @@ static void
2432 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
2434 struct device_node *np = pdev->dev.of_node;
2437 *num_tx = *num_rx = 1;
2439 @@ -3391,9 +3397,13 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
2442 /* parse the num of tx and rx queues */
2443 - of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
2444 + err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
2448 - of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
2449 + err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
2453 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
2454 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
2455 @@ -3460,13 +3470,11 @@ fec_probe(struct platform_device *pdev)
2459 - of_dma_configure(&pdev->dev, np);
2461 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
2463 /* Init network device */
2464 - ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
2465 - FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
2466 + ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
2467 + num_tx_qs, num_rx_qs);
2471 @@ -3505,13 +3513,14 @@ fec_probe(struct platform_device *pdev)
2473 platform_set_drvdata(pdev, ndev);
2475 - if ((of_machine_is_compatible("fsl,imx6q") ||
2476 - of_machine_is_compatible("fsl,imx6dl")) &&
2477 - !of_property_read_bool(np, "fsl,err006687-workaround-present"))
2478 - fep->quirks |= FEC_QUIRK_ERR006687;
2480 fec_enet_of_parse_stop_mode(pdev);
2482 + ret = fec_get_reset_gpio(pdev);
2483 + if (ret == -EPROBE_DEFER)
2485 + fep->phy_reset_gpio = ret;
2488 if (of_get_property(np, "fsl,magic-packet", NULL))
2489 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
2491 @@ -3524,7 +3533,6 @@ fec_probe(struct platform_device *pdev)
2494 phy_node = of_node_get(np);
2495 - fep->fixed_link = true;
2497 fep->phy_node = phy_node;
2499 @@ -3539,10 +3547,6 @@ fec_probe(struct platform_device *pdev)
2500 fep->phy_interface = ret;
2503 -#if !defined(CONFIG_ARM64)
2504 - request_bus_freq(BUS_FREQ_HIGH);
2507 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2508 if (IS_ERR(fep->clk_ipg)) {
2509 ret = PTR_ERR(fep->clk_ipg);
2510 @@ -3577,39 +3581,24 @@ fec_probe(struct platform_device *pdev)
2511 fep->bufdesc_ex = false;
2514 + pm_runtime_enable(&pdev->dev);
2515 ret = fec_enet_clk_enable(ndev, true);
2519 - ret = clk_prepare_enable(fep->clk_ipg);
2521 - goto failed_clk_ipg;
2522 - ret = clk_prepare_enable(fep->clk_ahb);
2524 - goto failed_clk_ahb;
2526 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2527 if (!IS_ERR(fep->reg_phy)) {
2528 ret = regulator_enable(fep->reg_phy);
2531 "Failed to enable phy regulator: %d\n", ret);
2532 - clk_disable_unprepare(fep->clk_ipg);
2533 goto failed_regulator;
2536 fep->reg_phy = NULL;
2539 - pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
2540 - pm_runtime_use_autosuspend(&pdev->dev);
2541 - pm_runtime_get_noresume(&pdev->dev);
2542 - pm_runtime_set_active(&pdev->dev);
2543 - pm_runtime_enable(&pdev->dev);
2545 - ret = fec_reset_phy(pdev);
2547 - goto failed_reset;
2548 + fec_reset_phy(pdev);
2550 if (fep->bufdesc_ex)
2552 @@ -3641,15 +3630,9 @@ fec_probe(struct platform_device *pdev)
2553 fep->wake_irq = fep->irq[0];
2555 init_completion(&fep->mdio_done);
2557 - /* board only enable one mii bus in default */
2558 - if (!of_get_property(np, "fsl,mii-exclusive", NULL))
2559 - fep->quirks |= FEC_QUIRK_SINGLE_MDIO;
2560 ret = fec_enet_mii_init(pdev);
2564 goto failed_mii_init;
2567 /* Carrier starts down, phylib will bring it up */
2568 netif_carrier_off(ndev);
2569 @@ -3660,11 +3643,6 @@ fec_probe(struct platform_device *pdev)
2571 goto failed_register;
2573 - if (!fep->fixed_link) {
2574 - fep->fixups = of_fec_enet_parse_fixup(np);
2575 - fec_enet_register_fixup(ndev);
2578 device_init_wakeup(&ndev->dev, fep->wol_flag &
2579 FEC_WOL_HAS_MAGIC_PACKET);
2581 @@ -3673,10 +3651,6 @@ fec_probe(struct platform_device *pdev)
2583 fep->rx_copybreak = COPYBREAK_DEFAULT;
2584 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
2586 - pm_runtime_mark_last_busy(&pdev->dev);
2587 - pm_runtime_put_autosuspend(&pdev->dev);
2592 @@ -3684,22 +3658,14 @@ fec_probe(struct platform_device *pdev)
2596 - fec_ptp_stop(pdev);
2598 regulator_disable(fep->reg_phy);
2600 - pm_runtime_put(&pdev->dev);
2601 - pm_runtime_disable(&pdev->dev);
2604 - clk_disable_unprepare(fep->clk_ipg);
2606 fec_enet_clk_enable(ndev, false);
2608 - if (of_phy_is_fixed_link(np))
2609 - of_phy_deregister_fixed_link(np);
2611 of_node_put(phy_node);
2616 @@ -3711,16 +3677,15 @@ fec_drv_remove(struct platform_device *pdev)
2618 struct net_device *ndev = platform_get_drvdata(pdev);
2619 struct fec_enet_private *fep = netdev_priv(ndev);
2620 - struct device_node *np = pdev->dev.of_node;
2622 + cancel_delayed_work_sync(&fep->time_keep);
2623 cancel_work_sync(&fep->tx_timeout_work);
2624 - fec_ptp_stop(pdev);
2625 unregister_netdev(ndev);
2626 fec_enet_mii_remove(fep);
2628 regulator_disable(fep->reg_phy);
2629 - if (of_phy_is_fixed_link(np))
2630 - of_phy_deregister_fixed_link(np);
2631 + if (fep->ptp_clock)
2632 + ptp_clock_unregister(fep->ptp_clock);
2633 of_node_put(fep->phy_node);
2636 @@ -3731,13 +3696,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
2638 struct net_device *ndev = dev_get_drvdata(dev);
2639 struct fec_enet_private *fep = netdev_priv(ndev);
2643 if (netif_running(ndev)) {
2644 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
2645 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
2646 - phy_stop(ndev->phydev);
2647 + phy_stop(fep->phy_dev);
2648 napi_disable(&fep->napi);
2649 netif_tx_lock_bh(ndev);
2650 netif_device_detach(ndev);
2651 @@ -3751,12 +3715,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
2652 enable_irq_wake(fep->wake_irq);
2654 fec_enet_clk_enable(ndev, false);
2655 - fep->active_in_suspend = !pm_runtime_status_suspended(dev);
2656 - if (fep->active_in_suspend)
2657 - ret = pm_runtime_force_suspend(dev);
2660 - } else if (fep->mii_bus_share && !ndev->phydev) {
2661 + } else if (fep->mii_bus_share && fep->miibus_up_failed && !fep->phy_dev) {
2662 + fec_enet_clk_enable(ndev, false);
2663 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2666 @@ -3777,7 +3737,7 @@ static int __maybe_unused fec_resume(struct device *dev)
2668 struct net_device *ndev = dev_get_drvdata(dev);
2669 struct fec_enet_private *fep = netdev_priv(ndev);
2674 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
2675 @@ -3788,8 +3748,6 @@ static int __maybe_unused fec_resume(struct device *dev)
2678 if (netif_running(ndev)) {
2679 - if (fep->active_in_suspend)
2680 - pm_runtime_force_resume(dev);
2681 ret = fec_enet_clk_enable(ndev, true);
2684 @@ -3812,15 +3770,16 @@ static int __maybe_unused fec_resume(struct device *dev)
2685 netif_device_attach(ndev);
2686 netif_tx_unlock_bh(ndev);
2687 napi_enable(&fep->napi);
2688 - phy_start(ndev->phydev);
2689 - } else if (fep->mii_bus_share && !ndev->phydev) {
2690 + phy_start(fep->phy_dev);
2691 + } else if (fep->mii_bus_share && !fep->phy_dev) {
2692 pinctrl_pm_select_default_state(&fep->pdev->dev);
2693 + fep->miibus_up_failed = true;
2694 /* And then recovery mii bus */
2695 - ret = fec_restore_mii_bus(ndev);
2696 + fec_restore_mii_bus(ndev);
2705 @@ -3828,46 +3787,21 @@ static int __maybe_unused fec_resume(struct device *dev)
2709 -static int __maybe_unused fec_runtime_suspend(struct device *dev)
2710 +static int fec_runtime_suspend(struct device *dev)
2712 - struct net_device *ndev = dev_get_drvdata(dev);
2713 - struct fec_enet_private *fep = netdev_priv(ndev);
2715 - clk_disable_unprepare(fep->clk_ahb);
2716 - clk_disable_unprepare(fep->clk_ipg);
2717 -#if !defined(CONFIG_ARM64)
2718 release_bus_freq(BUS_FREQ_HIGH);
2724 -static int __maybe_unused fec_runtime_resume(struct device *dev)
2725 +static int fec_runtime_resume(struct device *dev)
2727 - struct net_device *ndev = dev_get_drvdata(dev);
2728 - struct fec_enet_private *fep = netdev_priv(ndev);
2731 -#if !defined(CONFIG_ARM64)
2732 request_bus_freq(BUS_FREQ_HIGH);
2734 - ret = clk_prepare_enable(fep->clk_ahb);
2737 - ret = clk_prepare_enable(fep->clk_ipg);
2739 - goto failed_clk_ipg;
2744 - clk_disable_unprepare(fep->clk_ahb);
2748 static const struct dev_pm_ops fec_pm_ops = {
2749 - SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
2750 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
2751 + SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
2754 static struct platform_driver fec_driver = {
2755 diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
2756 index 446ae9d..afe7f39 100644
2757 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
2758 +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
2759 @@ -66,6 +66,7 @@ struct mpc52xx_fec_priv {
2760 /* MDIO link details */
2761 unsigned int mdio_speed;
2762 struct device_node *phy_node;
2763 + struct phy_device *phydev;
2764 enum phy_state link;
2765 int seven_wire_mode;
2767 @@ -164,7 +165,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
2768 static void mpc52xx_fec_adjust_link(struct net_device *dev)
2770 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2771 - struct phy_device *phydev = dev->phydev;
2772 + struct phy_device *phydev = priv->phydev;
2775 if (phydev->link != PHY_DOWN) {
2776 @@ -214,17 +215,16 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
2777 static int mpc52xx_fec_open(struct net_device *dev)
2779 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2780 - struct phy_device *phydev = NULL;
2783 if (priv->phy_node) {
2784 - phydev = of_phy_connect(priv->ndev, priv->phy_node,
2785 - mpc52xx_fec_adjust_link, 0, 0);
2787 + priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
2788 + mpc52xx_fec_adjust_link, 0, 0);
2789 + if (!priv->phydev) {
2790 dev_err(&dev->dev, "of_phy_connect failed\n");
2793 - phy_start(phydev);
2794 + phy_start(priv->phydev);
2797 if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
2798 @@ -268,9 +268,10 @@ static int mpc52xx_fec_open(struct net_device *dev)
2800 free_irq(dev->irq, dev);
2804 - phy_disconnect(phydev);
2805 + if (priv->phydev) {
2806 + phy_stop(priv->phydev);
2807 + phy_disconnect(priv->phydev);
2808 + priv->phydev = NULL;
2812 @@ -279,7 +280,6 @@ static int mpc52xx_fec_open(struct net_device *dev)
2813 static int mpc52xx_fec_close(struct net_device *dev)
2815 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2816 - struct phy_device *phydev = dev->phydev;
2818 netif_stop_queue(dev);
2820 @@ -291,10 +291,11 @@ static int mpc52xx_fec_close(struct net_device *dev)
2821 free_irq(priv->r_irq, dev);
2822 free_irq(priv->t_irq, dev);
2825 + if (priv->phydev) {
2826 /* power down phy */
2828 - phy_disconnect(phydev);
2829 + phy_stop(priv->phydev);
2830 + phy_disconnect(priv->phydev);
2831 + priv->phydev = NULL;
2835 @@ -762,6 +763,26 @@ static void mpc52xx_fec_reset(struct net_device *dev)
2837 /* ethtool interface */
2839 +static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2841 + struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2843 + if (!priv->phydev)
2846 + return phy_ethtool_gset(priv->phydev, cmd);
2849 +static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2851 + struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2853 + if (!priv->phydev)
2856 + return phy_ethtool_sset(priv->phydev, cmd);
2859 static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
2861 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2862 @@ -775,23 +796,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
2865 static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
2866 + .get_settings = mpc52xx_fec_get_settings,
2867 + .set_settings = mpc52xx_fec_set_settings,
2868 .get_link = ethtool_op_get_link,
2869 .get_msglevel = mpc52xx_fec_get_msglevel,
2870 .set_msglevel = mpc52xx_fec_set_msglevel,
2871 .get_ts_info = ethtool_op_get_ts_info,
2872 - .get_link_ksettings = phy_ethtool_get_link_ksettings,
2873 - .set_link_ksettings = phy_ethtool_set_link_ksettings,
2877 static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2879 - struct phy_device *phydev = dev->phydev;
2880 + struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2883 + if (!priv->phydev)
2886 - return phy_mii_ioctl(phydev, rq, cmd);
2887 + return phy_mii_ioctl(priv->phydev, rq, cmd);
2890 static const struct net_device_ops mpc52xx_fec_netdev_ops = {
2891 @@ -1063,23 +1084,27 @@ static struct platform_driver mpc52xx_fec_driver = {
2893 /* ======================================================================== */
2895 -static struct platform_driver * const drivers[] = {
2896 -#ifdef CONFIG_FEC_MPC52xx_MDIO
2897 - &mpc52xx_fec_mdio_driver,
2899 - &mpc52xx_fec_driver,
2903 mpc52xx_fec_init(void)
2905 - return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2906 +#ifdef CONFIG_FEC_MPC52xx_MDIO
2908 + ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
2910 + pr_err("failed to register mdio driver\n");
2914 + return platform_driver_register(&mpc52xx_fec_driver);
2918 mpc52xx_fec_exit(void)
2920 - platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2921 + platform_driver_unregister(&mpc52xx_fec_driver);
2922 +#ifdef CONFIG_FEC_MPC52xx_MDIO
2923 + platform_driver_unregister(&mpc52xx_fec_mdio_driver);
2928 diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2929 index b5497e3..1e647be 100644
2930 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2931 +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2934 struct mpc52xx_fec_mdio_priv {
2935 struct mpc52xx_fec __iomem *regs;
2936 + int mdio_irqs[PHY_MAX_ADDR];
2939 static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
2940 @@ -82,6 +83,9 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
2941 bus->read = mpc52xx_fec_mdio_read;
2942 bus->write = mpc52xx_fec_mdio_write;
2945 + bus->irq = priv->mdio_irqs;
2947 /* setup registers */
2948 err = of_address_to_resource(np, 0, &res);
2950 diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
2951 index f9e7446..7a8386a 100644
2952 --- a/drivers/net/ethernet/freescale/fec_ptp.c
2953 +++ b/drivers/net/ethernet/freescale/fec_ptp.c
2954 @@ -112,8 +112,9 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
2955 unsigned long flags;
2958 - struct timespec64 ts;
2959 + struct timespec ts;
2964 if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
2965 @@ -162,7 +163,8 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
2966 tempval = readl(fep->hwp + FEC_ATIME);
2967 /* Convert the ptp local counter to 1588 timestamp */
2968 ns = timecounter_cyc2time(&fep->tc, tempval);
2969 - ts = ns_to_timespec64(ns);
2970 + ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
2971 + ts.tv_nsec = remainder;
2973 /* The tempval is less than 3 seconds, and so val is less than
2974 * 4 seconds. No overflow for 32bit calculation.
2975 @@ -596,16 +598,6 @@ void fec_ptp_init(struct platform_device *pdev)
2976 schedule_delayed_work(&fep->time_keep, HZ);
2979 -void fec_ptp_stop(struct platform_device *pdev)
2981 - struct net_device *ndev = platform_get_drvdata(pdev);
2982 - struct fec_enet_private *fep = netdev_priv(ndev);
2984 - cancel_delayed_work_sync(&fep->time_keep);
2985 - if (fep->ptp_clock)
2986 - ptp_clock_unregister(fep->ptp_clock);
2990 * fec_ptp_check_pps_event
2991 * @fep: the fec_enet_private structure handle
2992 diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
2993 deleted file mode 100644
2994 index 79b7c84..0000000
2995 --- a/drivers/net/ethernet/freescale/fman/Kconfig
2999 - tristate "FMan support"
3000 - depends on FSL_SOC || COMPILE_TEST
3001 - select GENERIC_ALLOCATOR
3005 - Freescale Data-Path Acceleration Architecture Frame Manager
3007 diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
3008 deleted file mode 100644
3009 index 6049177..0000000
3010 --- a/drivers/net/ethernet/freescale/fman/Makefile
3013 -subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
3015 -obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
3016 -obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
3017 -obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
3019 -fsl_fman-objs := fman_muram.o fman.o fman_sp.o
3020 -fsl_fman_port-objs := fman_port.o
3021 -fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
3022 diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
3023 deleted file mode 100644
3024 index dafd9e1..0000000
3025 --- a/drivers/net/ethernet/freescale/fman/fman.c
3029 - * Copyright 2008-2015 Freescale Semiconductor Inc.
3031 - * Redistribution and use in source and binary forms, with or without
3032 - * modification, are permitted provided that the following conditions are met:
3033 - * * Redistributions of source code must retain the above copyright
3034 - * notice, this list of conditions and the following disclaimer.
3035 - * * Redistributions in binary form must reproduce the above copyright
3036 - * notice, this list of conditions and the following disclaimer in the
3037 - * documentation and/or other materials provided with the distribution.
3038 - * * Neither the name of Freescale Semiconductor nor the
3039 - * names of its contributors may be used to endorse or promote products
3040 - * derived from this software without specific prior written permission.
3043 - * ALTERNATIVELY, this software may be distributed under the terms of the
3044 - * GNU General Public License ("GPL") as published by the Free Software
3045 - * Foundation, either version 2 of that License or (at your option) any
3048 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3049 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3050 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3051 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3052 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3053 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3054 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3055 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3056 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3057 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3060 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3063 -#include "fman_muram.h"
3065 -#include <linux/fsl/guts.h>
3066 -#include <linux/slab.h>
3067 -#include <linux/delay.h>
3068 -#include <linux/module.h>
3069 -#include <linux/of_platform.h>
3070 -#include <linux/clk.h>
3071 -#include <linux/of_address.h>
3072 -#include <linux/of_irq.h>
3073 -#include <linux/interrupt.h>
3074 -#include <linux/libfdt_env.h>
3076 -/* General defines */
3077 -#define FMAN_LIODN_TBL 64 /* size of LIODN table */
3078 -#define MAX_NUM_OF_MACS 10
3079 -#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
3080 -#define BASE_RX_PORTID 0x08
3081 -#define BASE_TX_PORTID 0x28
3083 -/* Modules registers offsets */
3084 -#define BMI_OFFSET 0x00080000
3085 -#define QMI_OFFSET 0x00080400
3086 -#define DMA_OFFSET 0x000C2000
3087 -#define FPM_OFFSET 0x000C3000
3088 -#define IMEM_OFFSET 0x000C4000
3089 -#define CGP_OFFSET 0x000DB000
3091 -/* Exceptions bit map */
3092 -#define EX_DMA_BUS_ERROR 0x80000000
3093 -#define EX_DMA_READ_ECC 0x40000000
3094 -#define EX_DMA_SYSTEM_WRITE_ECC 0x20000000
3095 -#define EX_DMA_FM_WRITE_ECC 0x10000000
3096 -#define EX_FPM_STALL_ON_TASKS 0x08000000
3097 -#define EX_FPM_SINGLE_ECC 0x04000000
3098 -#define EX_FPM_DOUBLE_ECC 0x02000000
3099 -#define EX_QMI_SINGLE_ECC 0x01000000
3100 -#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
3101 -#define EX_QMI_DOUBLE_ECC 0x00400000
3102 -#define EX_BMI_LIST_RAM_ECC 0x00200000
3103 -#define EX_BMI_STORAGE_PROFILE_ECC 0x00100000
3104 -#define EX_BMI_STATISTICS_RAM_ECC 0x00080000
3105 -#define EX_IRAM_ECC 0x00040000
3106 -#define EX_MURAM_ECC 0x00020000
3107 -#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
3108 -#define EX_DMA_SINGLE_PORT_ECC 0x00008000
3112 -#define DMA_MODE_BER 0x00200000
3113 -#define DMA_MODE_ECC 0x00000020
3114 -#define DMA_MODE_SECURE_PROT 0x00000800
3115 -#define DMA_MODE_AXI_DBG_MASK 0x0F000000
3117 -#define DMA_TRANSFER_PORTID_MASK 0xFF000000
3118 -#define DMA_TRANSFER_TNUM_MASK 0x00FF0000
3119 -#define DMA_TRANSFER_LIODN_MASK 0x00000FFF
3121 -#define DMA_STATUS_BUS_ERR 0x08000000
3122 -#define DMA_STATUS_READ_ECC 0x04000000
3123 -#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000
3124 -#define DMA_STATUS_FM_WRITE_ECC 0x01000000
3125 -#define DMA_STATUS_FM_SPDAT_ECC 0x00080000
3127 -#define DMA_MODE_CACHE_OR_SHIFT 30
3128 -#define DMA_MODE_AXI_DBG_SHIFT 24
3129 -#define DMA_MODE_CEN_SHIFT 13
3130 -#define DMA_MODE_CEN_MASK 0x00000007
3131 -#define DMA_MODE_DBG_SHIFT 7
3132 -#define DMA_MODE_AID_MODE_SHIFT 4
3134 -#define DMA_THRESH_COMMQ_SHIFT 24
3135 -#define DMA_THRESH_READ_INT_BUF_SHIFT 16
3136 -#define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f
3137 -#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f
3139 -#define DMA_TRANSFER_PORTID_SHIFT 24
3140 -#define DMA_TRANSFER_TNUM_SHIFT 16
3142 -#define DMA_CAM_SIZEOF_ENTRY 0x40
3143 -#define DMA_CAM_UNITS 8
3145 -#define DMA_LIODN_SHIFT 16
3146 -#define DMA_LIODN_BASE_MASK 0x00000FFF
3149 -#define FPM_EV_MASK_DOUBLE_ECC 0x80000000
3150 -#define FPM_EV_MASK_STALL 0x40000000
3151 -#define FPM_EV_MASK_SINGLE_ECC 0x20000000
3152 -#define FPM_EV_MASK_RELEASE_FM 0x00010000
3153 -#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000
3154 -#define FPM_EV_MASK_STALL_EN 0x00004000
3155 -#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000
3156 -#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008
3157 -#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004
3159 -#define FPM_RAM_MURAM_ECC 0x00008000
3160 -#define FPM_RAM_IRAM_ECC 0x00004000
3161 -#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
3162 -#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
3163 -#define FPM_RAM_IRAM_ECC_EN 0x40000000
3164 -#define FPM_RAM_RAMS_ECC_EN 0x80000000
3165 -#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000
3167 -#define FPM_REV1_MAJOR_MASK 0x0000FF00
3168 -#define FPM_REV1_MINOR_MASK 0x000000FF
3170 -#define FPM_DISP_LIMIT_SHIFT 24
3172 -#define FPM_PRT_FM_CTL1 0x00000001
3173 -#define FPM_PRT_FM_CTL2 0x00000002
3174 -#define FPM_PORT_FM_CTL_PORTID_SHIFT 24
3175 -#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16
3177 -#define FPM_THR1_PRS_SHIFT 24
3178 -#define FPM_THR1_KG_SHIFT 16
3179 -#define FPM_THR1_PLCR_SHIFT 8
3180 -#define FPM_THR1_BMI_SHIFT 0
3182 -#define FPM_THR2_QMI_ENQ_SHIFT 24
3183 -#define FPM_THR2_QMI_DEQ_SHIFT 0
3184 -#define FPM_THR2_FM_CTL1_SHIFT 16
3185 -#define FPM_THR2_FM_CTL2_SHIFT 8
3187 -#define FPM_EV_MASK_CAT_ERR_SHIFT 1
3188 -#define FPM_EV_MASK_DMA_ERR_SHIFT 0
3190 -#define FPM_REV1_MAJOR_SHIFT 8
3192 -#define FPM_RSTC_FM_RESET 0x80000000
3193 -#define FPM_RSTC_MAC0_RESET 0x40000000
3194 -#define FPM_RSTC_MAC1_RESET 0x20000000
3195 -#define FPM_RSTC_MAC2_RESET 0x10000000
3196 -#define FPM_RSTC_MAC3_RESET 0x08000000
3197 -#define FPM_RSTC_MAC8_RESET 0x04000000
3198 -#define FPM_RSTC_MAC4_RESET 0x02000000
3199 -#define FPM_RSTC_MAC5_RESET 0x01000000
3200 -#define FPM_RSTC_MAC6_RESET 0x00800000
3201 -#define FPM_RSTC_MAC7_RESET 0x00400000
3202 -#define FPM_RSTC_MAC9_RESET 0x00200000
3204 -#define FPM_TS_INT_SHIFT 16
3205 -#define FPM_TS_CTL_EN 0x80000000
3208 -#define BMI_INIT_START 0x80000000
3209 -#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
3210 -#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
3211 -#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
3212 -#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
3213 -#define BMI_NUM_OF_TASKS_MASK 0x3F000000
3214 -#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000
3215 -#define BMI_NUM_OF_DMAS_MASK 0x00000F00
3216 -#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F
3217 -#define BMI_FIFO_SIZE_MASK 0x000003FF
3218 -#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000
3219 -#define BMI_CFG2_DMAS_MASK 0x0000003F
3220 -#define BMI_CFG2_TASKS_MASK 0x0000003F
3222 -#define BMI_CFG2_TASKS_SHIFT 16
3223 -#define BMI_CFG2_DMAS_SHIFT 0
3224 -#define BMI_CFG1_FIFO_SIZE_SHIFT 16
3225 -#define BMI_NUM_OF_TASKS_SHIFT 24
3226 -#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16
3227 -#define BMI_NUM_OF_DMAS_SHIFT 8
3228 -#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0
3230 -#define BMI_FIFO_ALIGN 0x100
3232 -#define BMI_EXTRA_FIFO_SIZE_SHIFT 16
3235 -#define QMI_CFG_ENQ_EN 0x80000000
3236 -#define QMI_CFG_DEQ_EN 0x40000000
3237 -#define QMI_CFG_EN_COUNTERS 0x10000000
3238 -#define QMI_CFG_DEQ_MASK 0x0000003F
3239 -#define QMI_CFG_ENQ_MASK 0x00003F00
3240 -#define QMI_CFG_ENQ_SHIFT 8
3242 -#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
3243 -#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
3244 -#define QMI_INTR_EN_SINGLE_ECC 0x80000000
3246 -#define QMI_GS_HALT_NOT_BUSY 0x00000002
3249 -#define IRAM_IADD_AIE 0x80000000
3250 -#define IRAM_READY 0x80000000
3252 -/* Default values */
3253 -#define DEFAULT_CATASTROPHIC_ERR 0
3254 -#define DEFAULT_DMA_ERR 0
3255 -#define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM
3256 -#define DEFAULT_DMA_COMM_Q_LOW 0x2A
3257 -#define DEFAULT_DMA_COMM_Q_HIGH 0x3F
3258 -#define DEFAULT_CACHE_OVERRIDE 0
3259 -#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64
3260 -#define DEFAULT_DMA_DBG_CNT_MODE 0
3261 -#define DEFAULT_DMA_SOS_EMERGENCY 0
3262 -#define DEFAULT_DMA_WATCHDOG 0
3263 -#define DEFAULT_DISP_LIMIT 0
3264 -#define DEFAULT_PRS_DISP_TH 16
3265 -#define DEFAULT_PLCR_DISP_TH 16
3266 -#define DEFAULT_KG_DISP_TH 16
3267 -#define DEFAULT_BMI_DISP_TH 16
3268 -#define DEFAULT_QMI_ENQ_DISP_TH 16
3269 -#define DEFAULT_QMI_DEQ_DISP_TH 16
3270 -#define DEFAULT_FM_CTL1_DISP_TH 16
3271 -#define DEFAULT_FM_CTL2_DISP_TH 16
3273 -#define DFLT_AXI_DBG_NUM_OF_BEATS 1
3275 -#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \
3276 - ((dma_thresh_max_buf + 1) / 2)
3277 -#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \
3278 - ((dma_thresh_max_buf + 1) * 3 / 4)
3279 -#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \
3280 - ((dma_thresh_max_buf + 1) / 2)
3281 -#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\
3282 - ((dma_thresh_max_buf + 1) * 3 / 4)
3284 -#define DMA_COMM_Q_LOW_FMAN_V3 0x2A
3285 -#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \
3286 - ((dma_thresh_max_commq + 1) / 2)
3287 -#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \
3288 - ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \
3289 - DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))
3291 -#define DMA_COMM_Q_HIGH_FMAN_V3 0x3f
3292 -#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \
3293 - ((dma_thresh_max_commq + 1) * 3 / 4)
3294 -#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \
3295 - ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \
3296 - DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))
3298 -#define TOTAL_NUM_OF_TASKS_FMAN_V3L 59
3299 -#define TOTAL_NUM_OF_TASKS_FMAN_V3H 124
3300 -#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \
3301 - ((major == 6) ? ((minor == 1 || minor == 4) ? \
3302 - TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \
3303 - bmi_max_num_of_tasks)
3305 -#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64
3306 -#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32
3307 -#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \
3308 - (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \
3309 - DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)
3311 -#define FM_TIMESTAMP_1_USEC_BIT 8
3313 -/* Defines used for enabling/disabling FMan interrupts */
3314 -#define ERR_INTR_EN_DMA 0x00010000
3315 -#define ERR_INTR_EN_FPM 0x80000000
3316 -#define ERR_INTR_EN_BMI 0x00800000
3317 -#define ERR_INTR_EN_QMI 0x00400000
3318 -#define ERR_INTR_EN_MURAM 0x00040000
3319 -#define ERR_INTR_EN_MAC0 0x00004000
3320 -#define ERR_INTR_EN_MAC1 0x00002000
3321 -#define ERR_INTR_EN_MAC2 0x00001000
3322 -#define ERR_INTR_EN_MAC3 0x00000800
3323 -#define ERR_INTR_EN_MAC4 0x00000400
3324 -#define ERR_INTR_EN_MAC5 0x00000200
3325 -#define ERR_INTR_EN_MAC6 0x00000100
3326 -#define ERR_INTR_EN_MAC7 0x00000080
3327 -#define ERR_INTR_EN_MAC8 0x00008000
3328 -#define ERR_INTR_EN_MAC9 0x00000040
3330 -#define INTR_EN_QMI 0x40000000
3331 -#define INTR_EN_MAC0 0x00080000
3332 -#define INTR_EN_MAC1 0x00040000
3333 -#define INTR_EN_MAC2 0x00020000
3334 -#define INTR_EN_MAC3 0x00010000
3335 -#define INTR_EN_MAC4 0x00000040
3336 -#define INTR_EN_MAC5 0x00000020
3337 -#define INTR_EN_MAC6 0x00000008
3338 -#define INTR_EN_MAC7 0x00000002
3339 -#define INTR_EN_MAC8 0x00200000
3340 -#define INTR_EN_MAC9 0x00100000
3341 -#define INTR_EN_REV0 0x00008000
3342 -#define INTR_EN_REV1 0x00004000
3343 -#define INTR_EN_REV2 0x00002000
3344 -#define INTR_EN_REV3 0x00001000
3345 -#define INTR_EN_TMR 0x01000000
3347 -enum fman_dma_aid_mode {
3348 - FMAN_DMA_AID_OUT_PORT_ID = 0, /* 4 LSB of PORT_ID */
3349 - FMAN_DMA_AID_OUT_TNUM /* 4 LSB of TNUM */
3352 -struct fman_iram_regs {
3353 - u32 iadd; /* FM IRAM instruction address register */
3354 - u32 idata; /* FM IRAM instruction data register */
3355 - u32 itcfg; /* FM IRAM timing config register */
3356 - u32 iready; /* FM IRAM ready register */
3359 -struct fman_fpm_regs {
3360 - u32 fmfp_tnc; /* FPM TNUM Control 0x00 */
3361 - u32 fmfp_prc; /* FPM Port_ID FmCtl Association 0x04 */
3362 - u32 fmfp_brkc; /* FPM Breakpoint Control 0x08 */
3363 - u32 fmfp_mxd; /* FPM Flush Control 0x0c */
3364 - u32 fmfp_dist1; /* FPM Dispatch Thresholds1 0x10 */
3365 - u32 fmfp_dist2; /* FPM Dispatch Thresholds2 0x14 */
3366 - u32 fm_epi; /* FM Error Pending Interrupts 0x18 */
3367 - u32 fm_rie; /* FM Error Interrupt Enable 0x1c */
3368 - u32 fmfp_fcev[4]; /* FPM FMan-Controller Event 1-4 0x20-0x2f */
3369 - u32 res0030[4]; /* res 0x30 - 0x3f */
3370 - u32 fmfp_cee[4]; /* PM FMan-Controller Event 1-4 0x40-0x4f */
3371 - u32 res0050[4]; /* res 0x50-0x5f */
3372 - u32 fmfp_tsc1; /* FPM TimeStamp Control1 0x60 */
3373 - u32 fmfp_tsc2; /* FPM TimeStamp Control2 0x64 */
3374 - u32 fmfp_tsp; /* FPM Time Stamp 0x68 */
3375 - u32 fmfp_tsf; /* FPM Time Stamp Fraction 0x6c */
3376 - u32 fm_rcr; /* FM Rams Control 0x70 */
3377 - u32 fmfp_extc; /* FPM External Requests Control 0x74 */
3378 - u32 fmfp_ext1; /* FPM External Requests Config1 0x78 */
3379 - u32 fmfp_ext2; /* FPM External Requests Config2 0x7c */
3380 - u32 fmfp_drd[16]; /* FPM Data_Ram Data 0-15 0x80 - 0xbf */
3381 - u32 fmfp_dra; /* FPM Data Ram Access 0xc0 */
3382 - u32 fm_ip_rev_1; /* FM IP Block Revision 1 0xc4 */
3383 - u32 fm_ip_rev_2; /* FM IP Block Revision 2 0xc8 */
3384 - u32 fm_rstc; /* FM Reset Command 0xcc */
3385 - u32 fm_cld; /* FM Classifier Debug 0xd0 */
3386 - u32 fm_npi; /* FM Normal Pending Interrupts 0xd4 */
3387 - u32 fmfp_exte; /* FPM External Requests Enable 0xd8 */
3388 - u32 fmfp_ee; /* FPM Event&Mask 0xdc */
3389 - u32 fmfp_cev[4]; /* FPM CPU Event 1-4 0xe0-0xef */
3390 - u32 res00f0[4]; /* res 0xf0-0xff */
3391 - u32 fmfp_ps[50]; /* FPM Port Status 0x100-0x1c7 */
3392 - u32 res01c8[14]; /* res 0x1c8-0x1ff */
3393 - u32 fmfp_clfabc; /* FPM CLFABC 0x200 */
3394 - u32 fmfp_clfcc; /* FPM CLFCC 0x204 */
3395 - u32 fmfp_clfaval; /* FPM CLFAVAL 0x208 */
3396 - u32 fmfp_clfbval; /* FPM CLFBVAL 0x20c */
3397 - u32 fmfp_clfcval; /* FPM CLFCVAL 0x210 */
3398 - u32 fmfp_clfamsk; /* FPM CLFAMSK 0x214 */
3399 - u32 fmfp_clfbmsk; /* FPM CLFBMSK 0x218 */
3400 - u32 fmfp_clfcmsk; /* FPM CLFCMSK 0x21c */
3401 - u32 fmfp_clfamc; /* FPM CLFAMC 0x220 */
3402 - u32 fmfp_clfbmc; /* FPM CLFBMC 0x224 */
3403 - u32 fmfp_clfcmc; /* FPM CLFCMC 0x228 */
3404 - u32 fmfp_decceh; /* FPM DECCEH 0x22c */
3405 - u32 res0230[116]; /* res 0x230 - 0x3ff */
3406 - u32 fmfp_ts[128]; /* 0x400: FPM Task Status 0x400 - 0x5ff */
3407 - u32 res0600[0x400 - 384];
3410 -struct fman_bmi_regs {
3411 - u32 fmbm_init; /* BMI Initialization 0x00 */
3412 - u32 fmbm_cfg1; /* BMI Configuration 1 0x04 */
3413 - u32 fmbm_cfg2; /* BMI Configuration 2 0x08 */
3414 - u32 res000c[5]; /* 0x0c - 0x1f */
3415 - u32 fmbm_ievr; /* Interrupt Event Register 0x20 */
3416 - u32 fmbm_ier; /* Interrupt Enable Register 0x24 */
3417 - u32 fmbm_ifr; /* Interrupt Force Register 0x28 */
3418 - u32 res002c[5]; /* 0x2c - 0x3f */
3419 - u32 fmbm_arb[8]; /* BMI Arbitration 0x40 - 0x5f */
3420 - u32 res0060[12]; /* 0x60 - 0x8f */
3421 - u32 fmbm_dtc[3]; /* Debug Trap Counter 0x90 - 0x9b */
3422 - u32 res009c; /* 0x9c */
3423 - u32 fmbm_dcv[3][4]; /* Debug Compare val 0xa0-0xcf */
3424 - u32 fmbm_dcm[3][4]; /* Debug Compare Mask 0xd0-0xff */
3425 - u32 fmbm_gde; /* BMI Global Debug Enable 0x100 */
3426 - u32 fmbm_pp[63]; /* BMI Port Parameters 0x104 - 0x1ff */
3427 - u32 res0200; /* 0x200 */
3428 - u32 fmbm_pfs[63]; /* BMI Port FIFO Size 0x204 - 0x2ff */
3429 - u32 res0300; /* 0x300 */
3430 - u32 fmbm_spliodn[63]; /* Port Partition ID 0x304 - 0x3ff */
3433 -struct fman_qmi_regs {
3434 - u32 fmqm_gc; /* General Configuration Register 0x00 */
3435 - u32 res0004; /* 0x04 */
3436 - u32 fmqm_eie; /* Error Interrupt Event Register 0x08 */
3437 - u32 fmqm_eien; /* Error Interrupt Enable Register 0x0c */
3438 - u32 fmqm_eif; /* Error Interrupt Force Register 0x10 */
3439 - u32 fmqm_ie; /* Interrupt Event Register 0x14 */
3440 - u32 fmqm_ien; /* Interrupt Enable Register 0x18 */
3441 - u32 fmqm_if; /* Interrupt Force Register 0x1c */
3442 - u32 fmqm_gs; /* Global Status Register 0x20 */
3443 - u32 fmqm_ts; /* Task Status Register 0x24 */
3444 - u32 fmqm_etfc; /* Enqueue Total Frame Counter 0x28 */
3445 - u32 fmqm_dtfc; /* Dequeue Total Frame Counter 0x2c */
3446 - u32 fmqm_dc0; /* Dequeue Counter 0 0x30 */
3447 - u32 fmqm_dc1; /* Dequeue Counter 1 0x34 */
3448 - u32 fmqm_dc2; /* Dequeue Counter 2 0x38 */
3449 - u32 fmqm_dc3; /* Dequeue Counter 3 0x3c */
3450 - u32 fmqm_dfdc; /* Dequeue FQID from Default Counter 0x40 */
3451 - u32 fmqm_dfcc; /* Dequeue FQID from Context Counter 0x44 */
3452 - u32 fmqm_dffc; /* Dequeue FQID from FD Counter 0x48 */
3453 - u32 fmqm_dcc; /* Dequeue Confirm Counter 0x4c */
3454 - u32 res0050[7]; /* 0x50 - 0x6b */
3455 - u32 fmqm_tapc; /* Tnum Aging Period Control 0x6c */
3456 - u32 fmqm_dmcvc; /* Dequeue MAC Command Valid Counter 0x70 */
3457 - u32 fmqm_difdcc; /* Dequeue Invalid FD Command Counter 0x74 */
3458 - u32 fmqm_da1v; /* Dequeue A1 Valid Counter 0x78 */
3459 - u32 res007c; /* 0x7c */
3460 - u32 fmqm_dtc; /* 0x80 Debug Trap Counter 0x80 */
3461 - u32 fmqm_efddd; /* 0x84 Enqueue Frame desc Dynamic dbg 0x84 */
3462 - u32 res0088[2]; /* 0x88 - 0x8f */
3464 - u32 fmqm_dtcfg1; /* 0x90 dbg trap cfg 1 Register 0x00 */
3465 - u32 fmqm_dtval1; /* Debug Trap Value 1 Register 0x04 */
3466 - u32 fmqm_dtm1; /* Debug Trap Mask 1 Register 0x08 */
3467 - u32 fmqm_dtc1; /* Debug Trap Counter 1 Register 0x0c */
3468 - u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */
3469 - u32 fmqm_dtval2; /* Debug Trap Value 2 Register 0x14 */
3470 - u32 fmqm_dtm2; /* Debug Trap Mask 2 Register 0x18 */
3471 - u32 res001c; /* 0x1c */
3472 - } dbg_traps[3]; /* 0x90 - 0xef */
3473 - u8 res00f0[0x400 - 0xf0]; /* 0xf0 - 0x3ff */
3476 -struct fman_dma_regs {
3477 - u32 fmdmsr; /* FM DMA status register 0x00 */
3478 - u32 fmdmmr; /* FM DMA mode register 0x04 */
3479 - u32 fmdmtr; /* FM DMA bus threshold register 0x08 */
3480 - u32 fmdmhy; /* FM DMA bus hysteresis register 0x0c */
3481 - u32 fmdmsetr; /* FM DMA SOS emergency Threshold Register 0x10 */
3482 - u32 fmdmtah; /* FM DMA transfer bus address high reg 0x14 */
3483 - u32 fmdmtal; /* FM DMA transfer bus address low reg 0x18 */
3484 - u32 fmdmtcid; /* FM DMA transfer bus communication ID reg 0x1c */
3485 - u32 fmdmra; /* FM DMA bus internal ram address register 0x20 */
3486 - u32 fmdmrd; /* FM DMA bus internal ram data register 0x24 */
3487 - u32 fmdmwcr; /* FM DMA CAM watchdog counter value 0x28 */
3488 - u32 fmdmebcr; /* FM DMA CAM base in MURAM register 0x2c */
3489 - u32 fmdmccqdr; /* FM DMA CAM and CMD Queue Debug reg 0x30 */
3490 - u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */
3491 - u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */
3492 - u32 fmdmcqvr3; /* FM DMA CMD Queue Value register #3 0x3c */
3493 - u32 fmdmcqvr4; /* FM DMA CMD Queue Value register #4 0x40 */
3494 - u32 fmdmcqvr5; /* FM DMA CMD Queue Value register #5 0x44 */
3495 - u32 fmdmsefrc; /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
3496 - u32 fmdmsqfrc; /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
3497 - u32 fmdmssrc; /* FM DMA Semaphore SYNC Reject Counter 0x50 */
3498 - u32 fmdmdcr; /* FM DMA Debug Counter 0x54 */
3499 - u32 fmdmemsr; /* FM DMA Emergency Smoother Register 0x58 */
3500 - u32 res005c; /* 0x5c */
3501 - u32 fmdmplr[FMAN_LIODN_TBL / 2]; /* DMA LIODN regs 0x60-0xdf */
3502 - u32 res00e0[0x400 - 56];
3505 -/* Structure that holds current FMan state.
3506 - * Used for saving run time information.
3508 -struct fman_state_struct {
3511 - struct fman_rev_info rev_info;
3512 - bool enabled_time_stamp;
3513 - u8 count1_micro_bit;
3514 - u8 total_num_of_tasks;
3515 - u8 accumulated_num_of_tasks;
3516 - u32 accumulated_fifo_size;
3517 - u8 accumulated_num_of_open_dmas;
3518 - u8 accumulated_num_of_deq_tnums;
3520 - u32 extra_fifo_pool_size;
3521 - u8 extra_tasks_pool_size;
3522 - u8 extra_open_dmas_pool_size;
3523 - u16 port_mfl[MAX_NUM_OF_MACS];
3524 - u16 mac_mfl[MAX_NUM_OF_MACS];
3526 - /* SOC specific */
3529 - u32 dma_thresh_max_commq;
3530 - u32 dma_thresh_max_buf;
3531 - u32 max_num_of_open_dmas;
3533 - u32 qmi_max_num_of_tnums;
3534 - u32 qmi_def_tnums_thresh;
3536 - u32 bmi_max_num_of_tasks;
3537 - u32 bmi_max_fifo_size;
3539 - u32 fm_port_num_of_cg;
3540 - u32 num_of_rx_ports;
3541 - u32 total_fifo_size;
3543 - u32 qman_channel_base;
3544 - u32 num_of_qman_channels;
3546 - struct resource *res;
3549 -/* Structure that holds FMan initial configuration */
3551 - u8 disp_limit_tsh;
3556 - u8 qmi_enq_disp_tsh;
3557 - u8 qmi_deq_disp_tsh;
3558 - u8 fm_ctl1_disp_tsh;
3559 - u8 fm_ctl2_disp_tsh;
3560 - int dma_cache_override;
3561 - enum fman_dma_aid_mode dma_aid_mode;
3562 - u32 dma_axi_dbg_num_of_beats;
3563 - u32 dma_cam_num_of_entries;
3565 - u8 dma_comm_qtsh_asrt_emer;
3566 - u32 dma_write_buf_tsh_asrt_emer;
3567 - u32 dma_read_buf_tsh_asrt_emer;
3568 - u8 dma_comm_qtsh_clr_emer;
3569 - u32 dma_write_buf_tsh_clr_emer;
3570 - u32 dma_read_buf_tsh_clr_emer;
3571 - u32 dma_sos_emergency;
3572 - int dma_dbg_cnt_mode;
3573 - int catastrophic_err;
3577 - u32 cam_base_addr;
3578 - u32 fifo_base_addr;
3579 - u32 total_fifo_size;
3580 - u32 total_num_of_tasks;
3581 - u32 qmi_def_tnums_thresh;
3584 -/* Structure that holds information received from device tree */
3585 -struct fman_dts_params {
3586 - void __iomem *base_addr; /* FMan virtual address */
3587 - struct resource *res; /* FMan memory resource */
3588 - u8 id; /* FMan ID */
3590 - int err_irq; /* FMan Error IRQ */
3592 - u16 clk_freq; /* FMan clock freq (In Mhz) */
3594 - u32 qman_channel_base; /* QMan channels base */
3595 - u32 num_of_qman_channels; /* Number of QMan channels */
3597 - struct resource muram_res; /* MURAM resource */
3600 -/** fman_exceptions_cb
3601 - * fman - Pointer to FMan
3602 - * exception - The exception.
3604 - * Exceptions user callback routine, will be called upon an exception
3605 - * passing the exception identification.
3607 - * Return: irq status
3609 -typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
3610 - enum fman_exceptions exception);
3612 -/** fman_bus_error_cb
3613 - * fman - Pointer to FMan
3614 - * port_id - Port id
3615 - * addr - Address that caused the error
3616 - * tnum - Owner of error
3617 - * liodn - Logical IO device number
3619 - * Bus error user callback routine, will be called upon bus error,
3620 - * passing parameters describing the errors and the owner.
3622 - * Return: IRQ status
3624 -typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
3625 - u64 addr, u8 tnum, u16 liodn);
3628 - struct device *dev;
3629 - void __iomem *base_addr;
3630 - struct fman_intr_src intr_mng[FMAN_EV_CNT];
3632 - struct fman_fpm_regs __iomem *fpm_regs;
3633 - struct fman_bmi_regs __iomem *bmi_regs;
3634 - struct fman_qmi_regs __iomem *qmi_regs;
3635 - struct fman_dma_regs __iomem *dma_regs;
3636 - fman_exceptions_cb *exception_cb;
3637 - fman_bus_error_cb *bus_error_cb;
3638 - /* Spinlock for FMan use */
3639 - spinlock_t spinlock;
3640 - struct fman_state_struct *state;
3642 - struct fman_cfg *cfg;
3643 - struct muram_info *muram;
3644 - /* cam section in muram */
3645 - unsigned long cam_offset;
3647 - /* Fifo in MURAM */
3648 - unsigned long fifo_offset;
3651 - u32 liodn_base[64];
3652 - u32 liodn_offset[64];
3654 - struct fman_dts_params dts_params;
3657 -static irqreturn_t fman_exceptions(struct fman *fman,
3658 - enum fman_exceptions exception)
3660 - dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
3661 - __func__, fman->state->fm_id, exception);
3663 - return IRQ_HANDLED;
3666 -static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
3667 - u64 __maybe_unused addr,
3668 - u8 __maybe_unused tnum,
3669 - u16 __maybe_unused liodn)
3671 - dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
3672 - __func__, fman->state->fm_id, port_id);
3674 - return IRQ_HANDLED;
3677 -static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
3679 - if (fman->intr_mng[id].isr_cb) {
3680 - fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
3682 - return IRQ_HANDLED;
3688 -static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
3690 - u8 sw_port_id = 0;
3692 - if (hw_port_id >= BASE_TX_PORTID)
3693 - sw_port_id = hw_port_id - BASE_TX_PORTID;
3694 - else if (hw_port_id >= BASE_RX_PORTID)
3695 - sw_port_id = hw_port_id - BASE_RX_PORTID;
3699 - return sw_port_id;
3702 -static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
3707 - tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
3709 - tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
3711 - /* order restoration */
3713 - tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
3715 - tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
3717 - iowrite32be(tmp, &fpm_rg->fmfp_prc);
3720 -static void set_port_liodn(struct fman *fman, u8 port_id,
3721 - u32 liodn_base, u32 liodn_ofst)
3725 - /* set LIODN base for this port */
3726 - tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
3727 - if (port_id % 2) {
3728 - tmp &= ~DMA_LIODN_BASE_MASK;
3729 - tmp |= liodn_base;
3731 - tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
3732 - tmp |= liodn_base << DMA_LIODN_SHIFT;
3734 - iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
3735 - iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
3738 -static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
3742 - tmp = ioread32be(&fpm_rg->fm_rcr);
3743 - if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
3744 - iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
3746 - iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
3747 - FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
3750 -static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
3754 - tmp = ioread32be(&fpm_rg->fm_rcr);
3755 - if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
3756 - iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
3758 - iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
3762 -static void fman_defconfig(struct fman_cfg *cfg)
3764 - memset(cfg, 0, sizeof(struct fman_cfg));
3766 - cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
3767 - cfg->dma_err = DEFAULT_DMA_ERR;
3768 - cfg->dma_aid_mode = DEFAULT_AID_MODE;
3769 - cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
3770 - cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
3771 - cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
3772 - cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
3773 - cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
3774 - cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
3775 - cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
3776 - cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
3777 - cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
3778 - cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
3779 - cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
3780 - cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
3781 - cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
3782 - cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
3783 - cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
3784 - cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
3787 -static int dma_init(struct fman *fman)
3789 - struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
3790 - struct fman_cfg *cfg = fman->cfg;
3793 - /* Init DMA Registers */
3795 - /* clear status reg events */
3796 - tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
3797 - DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
3798 - iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);
3800 - /* configure mode register */
3802 - tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
3803 - if (cfg->exceptions & EX_DMA_BUS_ERROR)
3804 - tmp_reg |= DMA_MODE_BER;
3805 - if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
3806 - (cfg->exceptions & EX_DMA_READ_ECC) |
3807 - (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
3808 - tmp_reg |= DMA_MODE_ECC;
3809 - if (cfg->dma_axi_dbg_num_of_beats)
3810 - tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
3811 - ((cfg->dma_axi_dbg_num_of_beats - 1)
3812 - << DMA_MODE_AXI_DBG_SHIFT));
3814 - tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
3815 - DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
3816 - tmp_reg |= DMA_MODE_SECURE_PROT;
3817 - tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
3818 - tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
3820 - iowrite32be(tmp_reg, &dma_rg->fmdmmr);
3822 - /* configure thresholds register */
3823 - tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
3824 - DMA_THRESH_COMMQ_SHIFT);
3825 - tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
3826 - DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
3827 - tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
3828 - DMA_THRESH_WRITE_INT_BUF_MASK;
3830 - iowrite32be(tmp_reg, &dma_rg->fmdmtr);
3832 - /* configure hysteresis register */
3833 - tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
3834 - DMA_THRESH_COMMQ_SHIFT);
3835 - tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
3836 - DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
3837 - tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
3838 - DMA_THRESH_WRITE_INT_BUF_MASK;
3840 - iowrite32be(tmp_reg, &dma_rg->fmdmhy);
3842 - /* configure emergency threshold */
3843 - iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
3845 - /* configure Watchdog */
3846 - iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
3848 - iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
3850 - /* Allocate MURAM for CAM */
3852 - (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
3853 - fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
3854 - if (IS_ERR_VALUE(fman->cam_offset)) {
3855 - dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
3860 - if (fman->state->rev_info.major == 2) {
3861 - u32 __iomem *cam_base_addr;
3863 - fman_muram_free_mem(fman->muram, fman->cam_offset,
3866 - fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
3867 - fman->cam_offset = fman_muram_alloc(fman->muram,
3869 - if (IS_ERR_VALUE(fman->cam_offset)) {
3870 - dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
3875 - if (fman->cfg->dma_cam_num_of_entries % 8 ||
3876 - fman->cfg->dma_cam_num_of_entries > 32) {
3877 - dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
3882 - cam_base_addr = (u32 __iomem *)
3883 - fman_muram_offset_to_vbase(fman->muram,
3884 - fman->cam_offset);
3885 - iowrite32be(~((1 <<
3886 - (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
3890 - fman->cfg->cam_base_addr = fman->cam_offset;
3895 -static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
3900 - /* Init FPM Registers */
3902 - tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
3903 - iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
3905 - tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
3906 - ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
3907 - ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
3908 - ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
3909 - iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
3912 - (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
3913 - ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
3914 - ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
3915 - ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
3916 - iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
3918 - /* define exceptions and error behavior */
3920 - /* Clear events */
3921 - tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
3922 - FPM_EV_MASK_SINGLE_ECC);
3923 - /* enable interrupts */
3924 - if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
3925 - tmp_reg |= FPM_EV_MASK_STALL_EN;
3926 - if (cfg->exceptions & EX_FPM_SINGLE_ECC)
3927 - tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
3928 - if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
3929 - tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
3930 - tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
3931 - tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
3932 - /* FMan is not halted upon external halt activation */
3933 - tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
3934 - /* Man is not halted upon Unrecoverable ECC error behavior */
3935 - tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
3936 - iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
3938 - /* clear all fmCtls event registers */
3939 - for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
3940 - iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
3942 - /* RAM ECC - enable and clear events */
3943 - /* first we need to clear all parser memory,
3944 - * as it is uninitialized and may cause ECC errors
3947 - tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
3949 - iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
3952 - if (cfg->exceptions & EX_IRAM_ECC) {
3953 - tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
3954 - enable_rams_ecc(fpm_rg);
3956 - if (cfg->exceptions & EX_MURAM_ECC) {
3957 - tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
3958 - enable_rams_ecc(fpm_rg);
3960 - iowrite32be(tmp_reg, &fpm_rg->fm_rie);
3963 -static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
3964 - struct fman_cfg *cfg)
3968 - /* Init BMI Registers */
3970 - /* define common resources */
3971 - tmp_reg = cfg->fifo_base_addr;
3972 - tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
3974 - tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
3975 - BMI_CFG1_FIFO_SIZE_SHIFT);
3976 - iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
3978 - tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
3979 - BMI_CFG2_TASKS_SHIFT;
3980 - /* num of DMA's will be dynamically updated when each port is set */
3981 - iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
3983 - /* define unmaskable exceptions, enable and clear events */
3985 - iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
3986 - BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
3987 - BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
3988 - BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);
3990 - if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
3991 - tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
3992 - if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
3993 - tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
3994 - if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
3995 - tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
3996 - if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
3997 - tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
3998 - iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
4001 -static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
4002 - struct fman_cfg *cfg)
4006 - /* Init QMI Registers */
4008 - /* Clear error interrupt events */
4010 - iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
4011 - &qmi_rg->fmqm_eie);
4013 - if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
4014 - tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
4015 - if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
4016 - tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
4017 - /* enable events */
4018 - iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
4021 - /* Clear interrupt events */
4022 - iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
4023 - if (cfg->exceptions & EX_QMI_SINGLE_ECC)
4024 - tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
4025 - /* enable events */
4026 - iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
4029 -static int enable(struct fman *fman, struct fman_cfg *cfg)
4033 - /* Enable all modules */
4035 - /* clear&enable global counters - calculate reg and save for later,
4036 - * because it's the same reg for QMI enable
4038 - cfg_reg = QMI_CFG_EN_COUNTERS;
4040 - /* Set enqueue and dequeue thresholds */
4041 - cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
4043 - iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
4044 - iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
4045 - &fman->qmi_regs->fmqm_gc);
4050 -static int set_exception(struct fman *fman,
4051 - enum fman_exceptions exception, bool enable)
4055 - switch (exception) {
4056 - case FMAN_EX_DMA_BUS_ERROR:
4057 - tmp = ioread32be(&fman->dma_regs->fmdmmr);
4059 - tmp |= DMA_MODE_BER;
4061 - tmp &= ~DMA_MODE_BER;
4062 - /* disable bus error */
4063 - iowrite32be(tmp, &fman->dma_regs->fmdmmr);
4065 - case FMAN_EX_DMA_READ_ECC:
4066 - case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
4067 - case FMAN_EX_DMA_FM_WRITE_ECC:
4068 - tmp = ioread32be(&fman->dma_regs->fmdmmr);
4070 - tmp |= DMA_MODE_ECC;
4072 - tmp &= ~DMA_MODE_ECC;
4073 - iowrite32be(tmp, &fman->dma_regs->fmdmmr);
4075 - case FMAN_EX_FPM_STALL_ON_TASKS:
4076 - tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
4078 - tmp |= FPM_EV_MASK_STALL_EN;
4080 - tmp &= ~FPM_EV_MASK_STALL_EN;
4081 - iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
4083 - case FMAN_EX_FPM_SINGLE_ECC:
4084 - tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
4086 - tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
4088 - tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
4089 - iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
4091 - case FMAN_EX_FPM_DOUBLE_ECC:
4092 - tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
4094 - tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
4096 - tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
4097 - iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
4099 - case FMAN_EX_QMI_SINGLE_ECC:
4100 - tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
4102 - tmp |= QMI_INTR_EN_SINGLE_ECC;
4104 - tmp &= ~QMI_INTR_EN_SINGLE_ECC;
4105 - iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
4107 - case FMAN_EX_QMI_DOUBLE_ECC:
4108 - tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
4110 - tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
4112 - tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
4113 - iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
4115 - case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
4116 - tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
4118 - tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
4120 - tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
4121 - iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
4123 - case FMAN_EX_BMI_LIST_RAM_ECC:
4124 - tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
4126 - tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
4128 - tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
4129 - iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
4131 - case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
4132 - tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
4134 - tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
4136 - tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
4137 - iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
4139 - case FMAN_EX_BMI_STATISTICS_RAM_ECC:
4140 - tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
4142 - tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
4144 - tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
4145 - iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
4147 - case FMAN_EX_BMI_DISPATCH_RAM_ECC:
4148 - tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
4150 - tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
4152 - tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
4153 - iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
4155 - case FMAN_EX_IRAM_ECC:
4156 - tmp = ioread32be(&fman->fpm_regs->fm_rie);
4158 - /* enable ECC if not enabled */
4159 - enable_rams_ecc(fman->fpm_regs);
4160 - /* enable ECC interrupts */
4161 - tmp |= FPM_IRAM_ECC_ERR_EX_EN;
4163 - /* ECC mechanism may be disabled,
4164 - * depending on driver status
4166 - disable_rams_ecc(fman->fpm_regs);
4167 - tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
4169 - iowrite32be(tmp, &fman->fpm_regs->fm_rie);
4171 - case FMAN_EX_MURAM_ECC:
4172 - tmp = ioread32be(&fman->fpm_regs->fm_rie);
4174 - /* enable ECC if not enabled */
4175 - enable_rams_ecc(fman->fpm_regs);
4176 - /* enable ECC interrupts */
4177 - tmp |= FPM_MURAM_ECC_ERR_EX_EN;
4179 - /* ECC mechanism may be disabled,
4180 - * depending on driver status
4182 - disable_rams_ecc(fman->fpm_regs);
4183 - tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
4185 - iowrite32be(tmp, &fman->fpm_regs->fm_rie);
4193 -static void resume(struct fman_fpm_regs __iomem *fpm_rg)
4197 - tmp = ioread32be(&fpm_rg->fmfp_ee);
4198 - /* clear tmp_reg event bits in order not to clear standing events */
4199 - tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
4200 - FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
4201 - tmp |= FPM_EV_MASK_RELEASE_FM;
4203 - iowrite32be(tmp, &fpm_rg->fmfp_ee);
4206 -static int fill_soc_specific_params(struct fman_state_struct *state)
4208 - u8 minor = state->rev_info.minor;
4209 - /* P4080 - Major 2
4210 - * P2041/P3041/P5020/P5040 - Major 3
4213 - switch (state->rev_info.major) {
4215 - state->bmi_max_fifo_size = 160 * 1024;
4216 - state->fm_iram_size = 64 * 1024;
4217 - state->dma_thresh_max_commq = 31;
4218 - state->dma_thresh_max_buf = 127;
4219 - state->qmi_max_num_of_tnums = 64;
4220 - state->qmi_def_tnums_thresh = 48;
4221 - state->bmi_max_num_of_tasks = 128;
4222 - state->max_num_of_open_dmas = 32;
4223 - state->fm_port_num_of_cg = 256;
4224 - state->num_of_rx_ports = 6;
4225 - state->total_fifo_size = 122 * 1024;
4229 - state->bmi_max_fifo_size = 160 * 1024;
4230 - state->fm_iram_size = 64 * 1024;
4231 - state->dma_thresh_max_commq = 31;
4232 - state->dma_thresh_max_buf = 127;
4233 - state->qmi_max_num_of_tnums = 64;
4234 - state->qmi_def_tnums_thresh = 48;
4235 - state->bmi_max_num_of_tasks = 128;
4236 - state->max_num_of_open_dmas = 32;
4237 - state->fm_port_num_of_cg = 256;
4238 - state->num_of_rx_ports = 5;
4239 - state->total_fifo_size = 100 * 1024;
4243 - state->dma_thresh_max_commq = 83;
4244 - state->dma_thresh_max_buf = 127;
4245 - state->qmi_max_num_of_tnums = 64;
4246 - state->qmi_def_tnums_thresh = 32;
4247 - state->fm_port_num_of_cg = 256;
4250 - if (minor == 1 || minor == 4) {
4251 - state->bmi_max_fifo_size = 192 * 1024;
4252 - state->bmi_max_num_of_tasks = 64;
4253 - state->max_num_of_open_dmas = 32;
4254 - state->num_of_rx_ports = 5;
4256 - state->fm_iram_size = 32 * 1024;
4258 - state->fm_iram_size = 64 * 1024;
4259 - state->total_fifo_size = 156 * 1024;
4262 - else if (minor == 0 || minor == 2 || minor == 3) {
4263 - state->bmi_max_fifo_size = 384 * 1024;
4264 - state->fm_iram_size = 64 * 1024;
4265 - state->bmi_max_num_of_tasks = 128;
4266 - state->max_num_of_open_dmas = 84;
4267 - state->num_of_rx_ports = 8;
4268 - state->total_fifo_size = 295 * 1024;
4270 - pr_err("Unsupported FManv3 version\n");
4276 - pr_err("Unsupported FMan version\n");
4283 -static bool is_init_done(struct fman_cfg *cfg)
4285 - /* Checks if FMan driver parameters were initialized */
4292 -static void free_init_resources(struct fman *fman)
4294 - if (fman->cam_offset)
4295 - fman_muram_free_mem(fman->muram, fman->cam_offset,
4297 - if (fman->fifo_offset)
4298 - fman_muram_free_mem(fman->muram, fman->fifo_offset,
4302 -static irqreturn_t bmi_err_event(struct fman *fman)
4304 - u32 event, mask, force;
4305 - struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
4306 - irqreturn_t ret = IRQ_NONE;
4308 - event = ioread32be(&bmi_rg->fmbm_ievr);
4309 - mask = ioread32be(&bmi_rg->fmbm_ier);
4311 - /* clear the forced events */
4312 - force = ioread32be(&bmi_rg->fmbm_ifr);
4313 - if (force & event)
4314 - iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
4315 - /* clear the acknowledged events */
4316 - iowrite32be(event, &bmi_rg->fmbm_ievr);
4318 - if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
4319 - ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
4320 - if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
4321 - ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
4322 - if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
4323 - ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
4324 - if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
4325 - ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
4330 -static irqreturn_t qmi_err_event(struct fman *fman)
4332 - u32 event, mask, force;
4333 - struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
4334 - irqreturn_t ret = IRQ_NONE;
4336 - event = ioread32be(&qmi_rg->fmqm_eie);
4337 - mask = ioread32be(&qmi_rg->fmqm_eien);
4340 - /* clear the forced events */
4341 - force = ioread32be(&qmi_rg->fmqm_eif);
4342 - if (force & event)
4343 - iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
4344 - /* clear the acknowledged events */
4345 - iowrite32be(event, &qmi_rg->fmqm_eie);
4347 - if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
4348 - ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
4349 - if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
4350 - ret = fman->exception_cb(fman,
4351 - FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
4356 -static irqreturn_t dma_err_event(struct fman *fman)
4358 - u32 status, mask, com_id;
4359 - u8 tnum, port_id, relative_port_id;
4361 - struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
4362 - irqreturn_t ret = IRQ_NONE;
4364 - status = ioread32be(&dma_rg->fmdmsr);
4365 - mask = ioread32be(&dma_rg->fmdmmr);
4367 - /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
4368 - if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
4369 - status &= ~DMA_STATUS_BUS_ERR;
4371 - /* clear relevant bits if mask has no DMA_MODE_ECC */
4372 - if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
4373 - status &= ~(DMA_STATUS_FM_SPDAT_ECC |
4374 - DMA_STATUS_READ_ECC |
4375 - DMA_STATUS_SYSTEM_WRITE_ECC |
4376 - DMA_STATUS_FM_WRITE_ECC);
4378 - /* clear set events */
4379 - iowrite32be(status, &dma_rg->fmdmsr);
4381 - if (status & DMA_STATUS_BUS_ERR) {
4384 - addr = (u64)ioread32be(&dma_rg->fmdmtal);
4385 - addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);
4387 - com_id = ioread32be(&dma_rg->fmdmtcid);
4388 - port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
4389 - DMA_TRANSFER_PORTID_SHIFT));
4390 - relative_port_id =
4391 - hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
4392 - tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
4393 - DMA_TRANSFER_TNUM_SHIFT);
4394 - liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
4395 - ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
4398 - if (status & DMA_STATUS_FM_SPDAT_ECC)
4399 - ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
4400 - if (status & DMA_STATUS_READ_ECC)
4401 - ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
4402 - if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
4403 - ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
4404 - if (status & DMA_STATUS_FM_WRITE_ECC)
4405 - ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
4410 -static irqreturn_t fpm_err_event(struct fman *fman)
4413 - struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
4414 - irqreturn_t ret = IRQ_NONE;
4416 - event = ioread32be(&fpm_rg->fmfp_ee);
4417 - /* clear the all occurred events */
4418 - iowrite32be(event, &fpm_rg->fmfp_ee);
4420 - if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
4421 - (event & FPM_EV_MASK_DOUBLE_ECC_EN))
4422 - ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
4423 - if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
4424 - ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
4425 - if ((event & FPM_EV_MASK_SINGLE_ECC) &&
4426 - (event & FPM_EV_MASK_SINGLE_ECC_EN))
4427 - ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
4432 -static irqreturn_t muram_err_intr(struct fman *fman)
4435 - struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
4436 - irqreturn_t ret = IRQ_NONE;
4438 - event = ioread32be(&fpm_rg->fm_rcr);
4439 - mask = ioread32be(&fpm_rg->fm_rie);
4441 - /* clear MURAM event bit (do not clear IRAM event) */
4442 - iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
4444 - if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
4445 - ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
4450 -static irqreturn_t qmi_event(struct fman *fman)
4452 - u32 event, mask, force;
4453 - struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
4454 - irqreturn_t ret = IRQ_NONE;
4456 - event = ioread32be(&qmi_rg->fmqm_ie);
4457 - mask = ioread32be(&qmi_rg->fmqm_ien);
4459 - /* clear the forced events */
4460 - force = ioread32be(&qmi_rg->fmqm_if);
4461 - if (force & event)
4462 - iowrite32be(force & ~event, &qmi_rg->fmqm_if);
4463 - /* clear the acknowledged events */
4464 - iowrite32be(event, &qmi_rg->fmqm_ie);
4466 - if (event & QMI_INTR_EN_SINGLE_ECC)
4467 - ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
4472 -static void enable_time_stamp(struct fman *fman)
4474 - struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
4475 - u16 fm_clk_freq = fman->state->fm_clk_freq;
4476 - u32 tmp, intgr, ts_freq;
4479 - ts_freq = (u32)(1 << fman->state->count1_micro_bit);
4480 - /* configure timestamp so that bit 8 will count 1 microsecond
4481 - * Find effective count rate at TIMESTAMP least significant bits:
4482 - * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
4483 - * Find frequency ratio between effective count rate and the clock:
4484 - * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
4485 - * 256/600 = 0.4266666...
4488 - intgr = ts_freq / fm_clk_freq;
4489 - /* we multiply by 2^16 to keep the fraction of the division
4490 - * we do not div back, since we write this value as a fraction
4494 - frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
4495 - /* we check remainder of the division in order to round up if not int */
4496 - if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
4499 - tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
4500 - iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
4502 - /* enable timestamp with original clock */
4503 - iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
4504 - fman->state->enabled_time_stamp = true;
4507 -static int clear_iram(struct fman *fman)
4509 - struct fman_iram_regs __iomem *iram;
4512 - iram = fman->base_addr + IMEM_OFFSET;
4514 - /* Enable the auto-increment */
4515 - iowrite32be(IRAM_IADD_AIE, &iram->iadd);
4519 - } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
4523 - for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
4524 - iowrite32be(0xffffffff, &iram->idata);
4526 - iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
4530 - } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
4537 -static u32 get_exception_flag(enum fman_exceptions exception)
4541 - switch (exception) {
4542 - case FMAN_EX_DMA_BUS_ERROR:
4543 - bit_mask = EX_DMA_BUS_ERROR;
4545 - case FMAN_EX_DMA_SINGLE_PORT_ECC:
4546 - bit_mask = EX_DMA_SINGLE_PORT_ECC;
4548 - case FMAN_EX_DMA_READ_ECC:
4549 - bit_mask = EX_DMA_READ_ECC;
4551 - case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
4552 - bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
4554 - case FMAN_EX_DMA_FM_WRITE_ECC:
4555 - bit_mask = EX_DMA_FM_WRITE_ECC;
4557 - case FMAN_EX_FPM_STALL_ON_TASKS:
4558 - bit_mask = EX_FPM_STALL_ON_TASKS;
4560 - case FMAN_EX_FPM_SINGLE_ECC:
4561 - bit_mask = EX_FPM_SINGLE_ECC;
4563 - case FMAN_EX_FPM_DOUBLE_ECC:
4564 - bit_mask = EX_FPM_DOUBLE_ECC;
4566 - case FMAN_EX_QMI_SINGLE_ECC:
4567 - bit_mask = EX_QMI_SINGLE_ECC;
4569 - case FMAN_EX_QMI_DOUBLE_ECC:
4570 - bit_mask = EX_QMI_DOUBLE_ECC;
4572 - case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
4573 - bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
4575 - case FMAN_EX_BMI_LIST_RAM_ECC:
4576 - bit_mask = EX_BMI_LIST_RAM_ECC;
4578 - case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
4579 - bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
4581 - case FMAN_EX_BMI_STATISTICS_RAM_ECC:
4582 - bit_mask = EX_BMI_STATISTICS_RAM_ECC;
4584 - case FMAN_EX_BMI_DISPATCH_RAM_ECC:
4585 - bit_mask = EX_BMI_DISPATCH_RAM_ECC;
4587 - case FMAN_EX_MURAM_ECC:
4588 - bit_mask = EX_MURAM_ECC;
4598 -static int get_module_event(enum fman_event_modules module, u8 mod_id,
4599 - enum fman_intr_type intr_type)
4604 - case FMAN_MOD_MAC:
4605 - if (intr_type == FMAN_INTR_TYPE_ERR)
4606 - event = FMAN_EV_ERR_MAC0 + mod_id;
4608 - event = FMAN_EV_MAC0 + mod_id;
4610 - case FMAN_MOD_FMAN_CTRL:
4611 - if (intr_type == FMAN_INTR_TYPE_ERR)
4612 - event = FMAN_EV_CNT;
4614 - event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
4616 - case FMAN_MOD_DUMMY_LAST:
4617 - event = FMAN_EV_CNT;
4620 - event = FMAN_EV_CNT;
4627 -static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
4628 - u32 *extra_size_of_fifo)
4630 - struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
4631 - u32 fifo = *size_of_fifo;
4632 - u32 extra_fifo = *extra_size_of_fifo;
4635 - /* if this is the first time a port requires extra_fifo_pool_size,
4636 - * the total extra_fifo_pool_size must be initialized to 1 buffer per
4639 - if (extra_fifo && !fman->state->extra_fifo_pool_size)
4640 - fman->state->extra_fifo_pool_size =
4641 - fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;
4643 - fman->state->extra_fifo_pool_size =
4644 - max(fman->state->extra_fifo_pool_size, extra_fifo);
4646 - /* check that there are enough uncommitted fifo size */
4647 - if ((fman->state->accumulated_fifo_size + fifo) >
4648 - (fman->state->total_fifo_size -
4649 - fman->state->extra_fifo_pool_size)) {
4650 - dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
4655 - /* Read, modify and write to HW */
4656 - tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
4657 - ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
4658 - BMI_EXTRA_FIFO_SIZE_SHIFT);
4659 - iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
4661 - /* update accumulated */
4662 - fman->state->accumulated_fifo_size += fifo;
4667 -static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
4668 - u8 *num_of_extra_tasks)
4670 - struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
4671 - u8 tasks = *num_of_tasks;
4672 - u8 extra_tasks = *num_of_extra_tasks;
4676 - fman->state->extra_tasks_pool_size =
4677 - max(fman->state->extra_tasks_pool_size, extra_tasks);
4679 - /* check that there are enough uncommitted tasks */
4680 - if ((fman->state->accumulated_num_of_tasks + tasks) >
4681 - (fman->state->total_num_of_tasks -
4682 - fman->state->extra_tasks_pool_size)) {
4683 - dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
4684 - __func__, fman->state->fm_id);
4687 - /* update accumulated */
4688 - fman->state->accumulated_num_of_tasks += tasks;
4691 - tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
4692 - ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
4693 - tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
4694 - (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
4695 - iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
4700 -static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
4701 - u8 *num_of_open_dmas,
4702 - u8 *num_of_extra_open_dmas)
4704 - struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
4705 - u8 open_dmas = *num_of_open_dmas;
4706 - u8 extra_open_dmas = *num_of_extra_open_dmas;
4707 - u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
4711 - /* Configuration according to values in the HW.
4712 - * read the current number of open Dma's
4714 - tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
4715 - current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
4716 - BMI_EXTRA_NUM_OF_DMAS_SHIFT);
4718 - tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
4719 - current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
4720 - BMI_NUM_OF_DMAS_SHIFT) + 1);
4722 - /* This is the first configuration and user did not
4723 - * specify value (!open_dmas), reset values will be used
4724 - * and we just save these values for resource management
4726 - fman->state->extra_open_dmas_pool_size =
4727 - (u8)max(fman->state->extra_open_dmas_pool_size,
4728 - current_extra_val);
4729 - fman->state->accumulated_num_of_open_dmas += current_val;
4730 - *num_of_open_dmas = current_val;
4731 - *num_of_extra_open_dmas = current_extra_val;
4735 - if (extra_open_dmas > current_extra_val)
4736 - fman->state->extra_open_dmas_pool_size =
4737 - (u8)max(fman->state->extra_open_dmas_pool_size,
4740 - if ((fman->state->rev_info.major < 6) &&
4741 - (fman->state->accumulated_num_of_open_dmas - current_val +
4742 - open_dmas > fman->state->max_num_of_open_dmas)) {
4743 - dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
4744 - __func__, fman->state->fm_id);
4746 - } else if ((fman->state->rev_info.major >= 6) &&
4747 - !((fman->state->rev_info.major == 6) &&
4748 - (fman->state->rev_info.minor == 0)) &&
4749 - (fman->state->accumulated_num_of_open_dmas -
4750 - current_val + open_dmas >
4751 - fman->state->dma_thresh_max_commq + 1)) {
4752 - dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
4753 - __func__, fman->state->fm_id,
4754 - fman->state->dma_thresh_max_commq + 1);
4758 - WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
4759 - /* update acummulated */
4760 - fman->state->accumulated_num_of_open_dmas -= current_val;
4761 - fman->state->accumulated_num_of_open_dmas += open_dmas;
4763 - if (fman->state->rev_info.major < 6)
4765 - (u8)(fman->state->accumulated_num_of_open_dmas +
4766 - fman->state->extra_open_dmas_pool_size);
4768 - /* calculate reg */
4769 - tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
4770 - ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
4771 - tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
4772 - (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
4773 - iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
4775 - /* update total num of DMA's with committed number of open DMAS,
4776 - * and max uncommitted pool.
4778 - if (total_num_dmas) {
4779 - tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
4780 - tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
4781 - iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
4787 -static int fman_config(struct fman *fman)
4789 - void __iomem *base_addr;
4792 - base_addr = fman->dts_params.base_addr;
4794 - fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
4796 - goto err_fm_state;
4798 - /* Allocate the FM driver's parameters structure */
4799 - fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
4803 - /* Initialize MURAM block */
4805 - fman_muram_init(fman->dts_params.muram_res.start,
4806 - resource_size(&fman->dts_params.muram_res));
4808 - goto err_fm_soc_specific;
4810 - /* Initialize FM parameters which will be kept by the driver */
4811 - fman->state->fm_id = fman->dts_params.id;
4812 - fman->state->fm_clk_freq = fman->dts_params.clk_freq;
4813 - fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
4814 - fman->state->num_of_qman_channels =
4815 - fman->dts_params.num_of_qman_channels;
4816 - fman->state->res = fman->dts_params.res;
4817 - fman->exception_cb = fman_exceptions;
4818 - fman->bus_error_cb = fman_bus_error;
4819 - fman->fpm_regs = base_addr + FPM_OFFSET;
4820 - fman->bmi_regs = base_addr + BMI_OFFSET;
4821 - fman->qmi_regs = base_addr + QMI_OFFSET;
4822 - fman->dma_regs = base_addr + DMA_OFFSET;
4823 - fman->base_addr = base_addr;
4825 - spin_lock_init(&fman->spinlock);
4826 - fman_defconfig(fman->cfg);
4828 - fman->state->extra_fifo_pool_size = 0;
4829 - fman->state->exceptions = (EX_DMA_BUS_ERROR |
4831 - EX_DMA_SYSTEM_WRITE_ECC |
4832 - EX_DMA_FM_WRITE_ECC |
4833 - EX_FPM_STALL_ON_TASKS |
4834 - EX_FPM_SINGLE_ECC |
4835 - EX_FPM_DOUBLE_ECC |
4836 - EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
4837 - EX_BMI_LIST_RAM_ECC |
4838 - EX_BMI_STORAGE_PROFILE_ECC |
4839 - EX_BMI_STATISTICS_RAM_ECC |
4841 - EX_BMI_DISPATCH_RAM_ECC |
4842 - EX_QMI_DOUBLE_ECC |
4843 - EX_QMI_SINGLE_ECC);
4845 - /* Read FMan revision for future use*/
4846 - fman_get_revision(fman, &fman->state->rev_info);
4848 - err = fill_soc_specific_params(fman->state);
4850 - goto err_fm_soc_specific;
4852 - /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
4853 - if (fman->state->rev_info.major >= 6)
4854 - fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
4856 - fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
4858 - fman->state->total_num_of_tasks =
4859 - (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
4860 - fman->state->rev_info.minor,
4861 - fman->state->bmi_max_num_of_tasks);
4863 - if (fman->state->rev_info.major < 6) {
4864 - fman->cfg->dma_comm_qtsh_clr_emer =
4865 - (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
4866 - fman->state->dma_thresh_max_commq);
4868 - fman->cfg->dma_comm_qtsh_asrt_emer =
4869 - (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
4870 - fman->state->dma_thresh_max_commq);
4872 - fman->cfg->dma_cam_num_of_entries =
4873 - DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);
4875 - fman->cfg->dma_read_buf_tsh_clr_emer =
4876 - DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
4878 - fman->cfg->dma_read_buf_tsh_asrt_emer =
4879 - DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
4881 - fman->cfg->dma_write_buf_tsh_clr_emer =
4882 - DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
4884 - fman->cfg->dma_write_buf_tsh_asrt_emer =
4885 - DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
4887 - fman->cfg->dma_axi_dbg_num_of_beats =
4888 - DFLT_AXI_DBG_NUM_OF_BEATS;
4893 -err_fm_soc_specific:
4896 - kfree(fman->state);
4902 -static int fman_reset(struct fman *fman)
4907 - if (fman->state->rev_info.major < 6) {
4908 - iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
4909 - /* Wait for reset completion */
4913 - } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
4914 - FPM_RSTC_FM_RESET) && --count);
4920 - struct device_node *guts_node;
4921 - struct ccsr_guts __iomem *guts_regs;
4922 - u32 devdisr2, reg;
4924 - /* Errata A007273 */
4926 - of_find_compatible_node(NULL, NULL,
4927 - "fsl,qoriq-device-config-2.0");
4929 - dev_err(fman->dev, "%s: Couldn't find guts node\n",
4934 - guts_regs = of_iomap(guts_node, 0);
4936 - dev_err(fman->dev, "%s: Couldn't map %s regs\n",
4937 - __func__, guts_node->full_name);
4940 -#define FMAN1_ALL_MACS_MASK 0xFCC00000
4941 -#define FMAN2_ALL_MACS_MASK 0x000FCC00
4942 - /* Read current state */
4943 - devdisr2 = ioread32be(&guts_regs->devdisr2);
4944 - if (fman->dts_params.id == 0)
4945 - reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
4947 - reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
4949 - /* Enable all MACs */
4950 - iowrite32be(reg, &guts_regs->devdisr2);
4952 - /* Perform FMan reset */
4953 - iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
4955 - /* Wait for reset completion */
4959 - } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
4960 - FPM_RSTC_FM_RESET) && --count);
4962 - iounmap(guts_regs);
4963 - of_node_put(guts_node);
4968 - /* Restore devdisr2 value */
4969 - iowrite32be(devdisr2, &guts_regs->devdisr2);
4971 - iounmap(guts_regs);
4972 - of_node_put(guts_node);
4977 - of_node_put(guts_node);
4979 - dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
4986 -static int fman_init(struct fman *fman)
4988 - struct fman_cfg *cfg = NULL;
4989 - int err = 0, i, count;
4991 - if (is_init_done(fman->cfg))
4994 - fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
4998 - /* clear revision-dependent non existing exception */
4999 - if (fman->state->rev_info.major < 6)
5000 - fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC;
5002 - if (fman->state->rev_info.major >= 6)
5003 - fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC;
5006 - memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
5007 - fman->state->fm_port_num_of_cg);
5009 - /* Save LIODN info before FMan reset
5010 - * Skipping non-existent port 0 (i = 1)
5012 - for (i = 1; i < FMAN_LIODN_TBL; i++) {
5015 - fman->liodn_offset[i] =
5016 - ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
5017 - liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
5019 - /* FMDM_PLR LSB holds LIODN base for odd ports */
5020 - liodn_base &= DMA_LIODN_BASE_MASK;
5022 - /* FMDM_PLR MSB holds LIODN base for even ports */
5023 - liodn_base >>= DMA_LIODN_SHIFT;
5024 - liodn_base &= DMA_LIODN_BASE_MASK;
5026 - fman->liodn_base[i] = liodn_base;
5029 - err = fman_reset(fman);
5033 - if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
5034 - resume(fman->fpm_regs);
5035 - /* Wait until QMI is not in halt not busy state */
5039 - } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
5040 - QMI_GS_HALT_NOT_BUSY) && --count);
5042 - dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
5046 - if (clear_iram(fman) != 0)
5049 - cfg->exceptions = fman->state->exceptions;
5051 - /* Init DMA Registers */
5053 - err = dma_init(fman);
5055 - free_init_resources(fman);
5059 - /* Init FPM Registers */
5060 - fpm_init(fman->fpm_regs, fman->cfg);
5062 - /* define common resources */
5063 - /* allocate MURAM for FIFO according to total size */
5064 - fman->fifo_offset = fman_muram_alloc(fman->muram,
5065 - fman->state->total_fifo_size);
5066 - if (IS_ERR_VALUE(fman->fifo_offset)) {
5067 - free_init_resources(fman);
5068 - dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
5073 - cfg->fifo_base_addr = fman->fifo_offset;
5074 - cfg->total_fifo_size = fman->state->total_fifo_size;
5075 - cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
5076 - cfg->clk_freq = fman->state->fm_clk_freq;
5078 - /* Init BMI Registers */
5079 - bmi_init(fman->bmi_regs, fman->cfg);
5081 - /* Init QMI Registers */
5082 - qmi_init(fman->qmi_regs, fman->cfg);
5084 - err = enable(fman, cfg);
5088 - enable_time_stamp(fman);
5096 -static int fman_set_exception(struct fman *fman,
5097 - enum fman_exceptions exception, bool enable)
5101 - if (!is_init_done(fman->cfg))
5104 - bit_mask = get_exception_flag(exception);
5107 - fman->state->exceptions |= bit_mask;
5109 - fman->state->exceptions &= ~bit_mask;
5111 - dev_err(fman->dev, "%s: Undefined exception (%d)\n",
5112 - __func__, exception);
5116 - return set_exception(fman, exception, enable);
5120 - * fman_register_intr
5121 - * @fman: A Pointer to FMan device
5122 - * @mod: Calling module
5123 - * @mod_id: Module id (if more than 1 exists, '0' if not)
5124 - * @intr_type: Interrupt type (error/normal) selection.
5125 - * @f_isr: The interrupt service routine.
5126 - * @h_src_arg: Argument to be passed to f_isr.
5128 - * Used to register an event handler to be processed by FMan
5130 - * Return: 0 on success; Error code otherwise.
5132 -void fman_register_intr(struct fman *fman, enum fman_event_modules module,
5133 - u8 mod_id, enum fman_intr_type intr_type,
5134 - void (*isr_cb)(void *src_arg), void *src_arg)
5138 - event = get_module_event(module, mod_id, intr_type);
5139 - WARN_ON(event >= FMAN_EV_CNT);
5141 - /* register in local FM structure */
5142 - fman->intr_mng[event].isr_cb = isr_cb;
5143 - fman->intr_mng[event].src_handle = src_arg;
5145 -EXPORT_SYMBOL(fman_register_intr);
5148 - * fman_unregister_intr
5149 - * @fman: A Pointer to FMan device
5150 - * @mod: Calling module
5151 - * @mod_id: Module id (if more than 1 exists, '0' if not)
5152 - * @intr_type: Interrupt type (error/normal) selection.
5154 - * Used to unregister an event handler to be processed by FMan
5156 - * Return: 0 on success; Error code otherwise.
5158 -void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
5159 - u8 mod_id, enum fman_intr_type intr_type)
5163 - event = get_module_event(module, mod_id, intr_type);
5164 - WARN_ON(event >= FMAN_EV_CNT);
5166 - fman->intr_mng[event].isr_cb = NULL;
5167 - fman->intr_mng[event].src_handle = NULL;
5169 -EXPORT_SYMBOL(fman_unregister_intr);
5172 - * fman_set_port_params
5173 - * @fman: A Pointer to FMan device
5174 - * @port_params: Port parameters
5176 - * Used by FMan Port to pass parameters to the FMan
5178 - * Return: 0 on success; Error code otherwise.
5180 -int fman_set_port_params(struct fman *fman,
5181 - struct fman_port_init_params *port_params)
5184 - unsigned long flags;
5185 - u8 port_id = port_params->port_id, mac_id;
5187 - spin_lock_irqsave(&fman->spinlock, flags);
5189 - err = set_num_of_tasks(fman, port_params->port_id,
5190 - &port_params->num_of_tasks,
5191 - &port_params->num_of_extra_tasks);
5196 - if (port_params->port_type != FMAN_PORT_TYPE_RX) {
5197 - u32 enq_th, deq_th, reg;
5199 - /* update qmi ENQ/DEQ threshold */
5200 - fman->state->accumulated_num_of_deq_tnums +=
5201 - port_params->deq_pipeline_depth;
5202 - enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
5203 - QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
5204 - /* if enq_th is too big, we reduce it to the max value
5207 - if (enq_th >= (fman->state->qmi_max_num_of_tnums -
5208 - fman->state->accumulated_num_of_deq_tnums)) {
5210 - fman->state->qmi_max_num_of_tnums -
5211 - fman->state->accumulated_num_of_deq_tnums - 1;
5213 - reg = ioread32be(&fman->qmi_regs->fmqm_gc);
5214 - reg &= ~QMI_CFG_ENQ_MASK;
5215 - reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
5216 - iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
5219 - deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
5221 - /* if deq_th is too small, we enlarge it to the min
5222 - * value that is still 0.
5223 - * depTh may not be larger than 63
5224 - * (fman->state->qmi_max_num_of_tnums-1).
5226 - if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
5227 - (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
5228 - deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
5229 - reg = ioread32be(&fman->qmi_regs->fmqm_gc);
5230 - reg &= ~QMI_CFG_DEQ_MASK;
5232 - iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
5236 - err = set_size_of_fifo(fman, port_params->port_id,
5237 - &port_params->size_of_fifo,
5238 - &port_params->extra_size_of_fifo);
5242 - err = set_num_of_open_dmas(fman, port_params->port_id,
5243 - &port_params->num_of_open_dmas,
5244 - &port_params->num_of_extra_open_dmas);
5248 - set_port_liodn(fman, port_id, fman->liodn_base[port_id],
5249 - fman->liodn_offset[port_id]);
5251 - if (fman->state->rev_info.major < 6)
5252 - set_port_order_restoration(fman->fpm_regs, port_id);
5254 - mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
5256 - if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
5257 - fman->state->port_mfl[mac_id] = port_params->max_frame_length;
5259 - dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
5260 - __func__, port_id, mac_id);
5265 - spin_unlock_irqrestore(&fman->spinlock, flags);
5270 - spin_unlock_irqrestore(&fman->spinlock, flags);
5273 -EXPORT_SYMBOL(fman_set_port_params);
5277 - * @fman: A Pointer to FMan device
5278 - * @mac_id: MAC id to be reset
5280 - * Reset a specific MAC
5282 - * Return: 0 on success; Error code otherwise.
5284 -int fman_reset_mac(struct fman *fman, u8 mac_id)
5286 - struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
5287 - u32 msk, timeout = 100;
5289 - if (fman->state->rev_info.major >= 6) {
5290 - dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n",
5295 - /* Get the relevant bit mask */
5298 - msk = FPM_RSTC_MAC0_RESET;
5301 - msk = FPM_RSTC_MAC1_RESET;
5304 - msk = FPM_RSTC_MAC2_RESET;
5307 - msk = FPM_RSTC_MAC3_RESET;
5310 - msk = FPM_RSTC_MAC4_RESET;
5313 - msk = FPM_RSTC_MAC5_RESET;
5316 - msk = FPM_RSTC_MAC6_RESET;
5319 - msk = FPM_RSTC_MAC7_RESET;
5322 - msk = FPM_RSTC_MAC8_RESET;
5325 - msk = FPM_RSTC_MAC9_RESET;
5328 - dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
5329 - __func__, mac_id);
5334 - iowrite32be(msk, &fpm_rg->fm_rstc);
5335 - while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
5343 -EXPORT_SYMBOL(fman_reset_mac);
5346 - * fman_set_mac_max_frame
5347 - * @fman: A Pointer to FMan device
5349 - * @mfl: Maximum frame length
5351 - * Set maximum frame length of specific MAC in FMan driver
5353 - * Return: 0 on success; Error code otherwise.
5355 -int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
5357 - /* if port is already initialized, check that MaxFrameLength is smaller
5358 - * or equal to the port's max
5360 - if ((!fman->state->port_mfl[mac_id]) ||
5361 - (mfl <= fman->state->port_mfl[mac_id])) {
5362 - fman->state->mac_mfl[mac_id] = mfl;
5364 - dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
5370 -EXPORT_SYMBOL(fman_set_mac_max_frame);
5373 - * fman_get_clock_freq
5374 - * @fman: A Pointer to FMan device
5376 - * Get FMan clock frequency
5378 - * Return: FMan clock frequency
5380 -u16 fman_get_clock_freq(struct fman *fman)
5382 - return fman->state->fm_clk_freq;
5386 - * fman_get_bmi_max_fifo_size
5387 - * @fman: A Pointer to FMan device
5389 - * Get FMan maximum FIFO size
5391 - * Return: FMan Maximum FIFO size
5393 -u32 fman_get_bmi_max_fifo_size(struct fman *fman)
5395 - return fman->state->bmi_max_fifo_size;
5397 -EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
5400 - * fman_get_revision
5401 - * @fman - Pointer to the FMan module
5402 - * @rev_info - A structure of revision information parameters.
5404 - * Returns the FM revision
5406 - * Allowed only following fman_init().
5408 - * Return: 0 on success; Error code otherwise.
5410 -void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
5414 - tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
5415 - rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
5416 - FPM_REV1_MAJOR_SHIFT);
5417 - rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
5419 -EXPORT_SYMBOL(fman_get_revision);
5422 - * fman_get_qman_channel_id
5423 - * @fman: A Pointer to FMan device
5424 - * @port_id: Port id
5426 - * Get QMan channel ID associated to the Port id
5428 - * Return: QMan channel ID
5430 -u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
5434 - if (fman->state->rev_info.major >= 6) {
5435 - u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
5436 - 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
5437 - for (i = 0; i < fman->state->num_of_qman_channels; i++) {
5438 - if (port_ids[i] == port_id)
5442 - u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
5443 - 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
5444 - for (i = 0; i < fman->state->num_of_qman_channels; i++) {
5445 - if (port_ids[i] == port_id)
5450 - if (i == fman->state->num_of_qman_channels)
5453 - return fman->state->qman_channel_base + i;
5455 -EXPORT_SYMBOL(fman_get_qman_channel_id);
5458 - * fman_get_mem_region
5459 - * @fman: A Pointer to FMan device
5461 - * Get FMan memory region
5463 - * Return: A structure with FMan memory region information
5465 -struct resource *fman_get_mem_region(struct fman *fman)
5467 - return fman->state->res;
5469 -EXPORT_SYMBOL(fman_get_mem_region);
5471 -/* Bootargs defines */
5472 -/* Extra headroom for RX buffers - Default, min and max */
5473 -#define FSL_FM_RX_EXTRA_HEADROOM 64
5474 -#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16
5475 -#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384
5477 -/* Maximum frame length */
5478 -#define FSL_FM_MAX_FRAME_SIZE 1522
5479 -#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE 9600
5480 -#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE 64
5482 -/* Extra headroom for Rx buffers.
5483 - * FMan is instructed to allocate, on the Rx path, this amount of
5484 - * space at the beginning of a data buffer, beside the DPA private
5485 - * data area and the IC fields.
5486 - * Does not impact Tx buffer layout.
5487 - * Configurable from bootargs. 64 by default, it's needed on
5488 - * particular forwarding scenarios that add extra headers to the
5489 - * forwarded frame.
5491 -static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
5492 -module_param(fsl_fm_rx_extra_headroom, int, 0);
5493 -MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
5495 -/* Max frame size, across all interfaces.
5496 - * Configurable from bootargs, to avoid allocating oversized (socket)
5497 - * buffers when not using jumbo frames.
5498 - * Must be large enough to accommodate the network MTU, but small enough
5499 - * to avoid wasting skb memory.
5501 - * Could be overridden once, at boot-time, via the
5502 - * fm_set_max_frm() callback.
5504 -static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
5505 -module_param(fsl_fm_max_frm, int, 0);
5506 -MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
5509 - * fman_get_max_frm
5511 - * Return: Max frame length configured in the FM driver
5513 -u16 fman_get_max_frm(void)
5515 - static bool fm_check_mfl;
5517 - if (!fm_check_mfl) {
5518 - if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
5519 - fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
5520 - pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
5522 - FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
5523 - FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
5524 - FSL_FM_MAX_FRAME_SIZE);
5525 - fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
5527 - fm_check_mfl = true;
5530 - return fsl_fm_max_frm;
5532 -EXPORT_SYMBOL(fman_get_max_frm);
5535 - * fman_get_rx_extra_headroom
5537 - * Return: Extra headroom size configured in the FM driver
5539 -int fman_get_rx_extra_headroom(void)
5541 - static bool fm_check_rx_extra_headroom;
5543 - if (!fm_check_rx_extra_headroom) {
5544 - if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
5545 - fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
5546 - pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
5547 - fsl_fm_rx_extra_headroom,
5548 - FSL_FM_RX_EXTRA_HEADROOM_MIN,
5549 - FSL_FM_RX_EXTRA_HEADROOM_MAX,
5550 - FSL_FM_RX_EXTRA_HEADROOM);
5551 - fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
5554 - fm_check_rx_extra_headroom = true;
5555 - fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
5558 - return fsl_fm_rx_extra_headroom;
5560 -EXPORT_SYMBOL(fman_get_rx_extra_headroom);
5564 - * @dev: FMan OF device pointer
5566 - * Bind to a specific FMan device.
5568 - * Allowed only after the port was created.
5570 - * Return: A pointer to the FMan device
5572 -struct fman *fman_bind(struct device *fm_dev)
5574 - return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
5576 -EXPORT_SYMBOL(fman_bind);
5578 -static irqreturn_t fman_err_irq(int irq, void *handle)
5580 - struct fman *fman = (struct fman *)handle;
5582 - struct fman_fpm_regs __iomem *fpm_rg;
5583 - irqreturn_t single_ret, ret = IRQ_NONE;
5585 - if (!is_init_done(fman->cfg))
5588 - fpm_rg = fman->fpm_regs;
5590 - /* error interrupts */
5591 - pending = ioread32be(&fpm_rg->fm_epi);
5595 - if (pending & ERR_INTR_EN_BMI) {
5596 - single_ret = bmi_err_event(fman);
5597 - if (single_ret == IRQ_HANDLED)
5598 - ret = IRQ_HANDLED;
5600 - if (pending & ERR_INTR_EN_QMI) {
5601 - single_ret = qmi_err_event(fman);
5602 - if (single_ret == IRQ_HANDLED)
5603 - ret = IRQ_HANDLED;
5605 - if (pending & ERR_INTR_EN_FPM) {
5606 - single_ret = fpm_err_event(fman);
5607 - if (single_ret == IRQ_HANDLED)
5608 - ret = IRQ_HANDLED;
5610 - if (pending & ERR_INTR_EN_DMA) {
5611 - single_ret = dma_err_event(fman);
5612 - if (single_ret == IRQ_HANDLED)
5613 - ret = IRQ_HANDLED;
5615 - if (pending & ERR_INTR_EN_MURAM) {
5616 - single_ret = muram_err_intr(fman);
5617 - if (single_ret == IRQ_HANDLED)
5618 - ret = IRQ_HANDLED;
5621 - /* MAC error interrupts */
5622 - if (pending & ERR_INTR_EN_MAC0) {
5623 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
5624 - if (single_ret == IRQ_HANDLED)
5625 - ret = IRQ_HANDLED;
5627 - if (pending & ERR_INTR_EN_MAC1) {
5628 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
5629 - if (single_ret == IRQ_HANDLED)
5630 - ret = IRQ_HANDLED;
5632 - if (pending & ERR_INTR_EN_MAC2) {
5633 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
5634 - if (single_ret == IRQ_HANDLED)
5635 - ret = IRQ_HANDLED;
5637 - if (pending & ERR_INTR_EN_MAC3) {
5638 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
5639 - if (single_ret == IRQ_HANDLED)
5640 - ret = IRQ_HANDLED;
5642 - if (pending & ERR_INTR_EN_MAC4) {
5643 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
5644 - if (single_ret == IRQ_HANDLED)
5645 - ret = IRQ_HANDLED;
5647 - if (pending & ERR_INTR_EN_MAC5) {
5648 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
5649 - if (single_ret == IRQ_HANDLED)
5650 - ret = IRQ_HANDLED;
5652 - if (pending & ERR_INTR_EN_MAC6) {
5653 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
5654 - if (single_ret == IRQ_HANDLED)
5655 - ret = IRQ_HANDLED;
5657 - if (pending & ERR_INTR_EN_MAC7) {
5658 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
5659 - if (single_ret == IRQ_HANDLED)
5660 - ret = IRQ_HANDLED;
5662 - if (pending & ERR_INTR_EN_MAC8) {
5663 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
5664 - if (single_ret == IRQ_HANDLED)
5665 - ret = IRQ_HANDLED;
5667 - if (pending & ERR_INTR_EN_MAC9) {
5668 - single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
5669 - if (single_ret == IRQ_HANDLED)
5670 - ret = IRQ_HANDLED;
5676 -static irqreturn_t fman_irq(int irq, void *handle)
5678 - struct fman *fman = (struct fman *)handle;
5680 - struct fman_fpm_regs __iomem *fpm_rg;
5681 - irqreturn_t single_ret, ret = IRQ_NONE;
5683 - if (!is_init_done(fman->cfg))
5686 - fpm_rg = fman->fpm_regs;
5688 - /* normal interrupts */
5689 - pending = ioread32be(&fpm_rg->fm_npi);
5693 - if (pending & INTR_EN_QMI) {
5694 - single_ret = qmi_event(fman);
5695 - if (single_ret == IRQ_HANDLED)
5696 - ret = IRQ_HANDLED;
5699 - /* MAC interrupts */
5700 - if (pending & INTR_EN_MAC0) {
5701 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
5702 - if (single_ret == IRQ_HANDLED)
5703 - ret = IRQ_HANDLED;
5705 - if (pending & INTR_EN_MAC1) {
5706 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
5707 - if (single_ret == IRQ_HANDLED)
5708 - ret = IRQ_HANDLED;
5710 - if (pending & INTR_EN_MAC2) {
5711 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
5712 - if (single_ret == IRQ_HANDLED)
5713 - ret = IRQ_HANDLED;
5715 - if (pending & INTR_EN_MAC3) {
5716 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
5717 - if (single_ret == IRQ_HANDLED)
5718 - ret = IRQ_HANDLED;
5720 - if (pending & INTR_EN_MAC4) {
5721 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
5722 - if (single_ret == IRQ_HANDLED)
5723 - ret = IRQ_HANDLED;
5725 - if (pending & INTR_EN_MAC5) {
5726 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
5727 - if (single_ret == IRQ_HANDLED)
5728 - ret = IRQ_HANDLED;
5730 - if (pending & INTR_EN_MAC6) {
5731 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
5732 - if (single_ret == IRQ_HANDLED)
5733 - ret = IRQ_HANDLED;
5735 - if (pending & INTR_EN_MAC7) {
5736 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
5737 - if (single_ret == IRQ_HANDLED)
5738 - ret = IRQ_HANDLED;
5740 - if (pending & INTR_EN_MAC8) {
5741 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
5742 - if (single_ret == IRQ_HANDLED)
5743 - ret = IRQ_HANDLED;
5745 - if (pending & INTR_EN_MAC9) {
5746 - single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
5747 - if (single_ret == IRQ_HANDLED)
5748 - ret = IRQ_HANDLED;
5754 -static const struct of_device_id fman_muram_match[] = {
5756 - .compatible = "fsl,fman-muram"},
5759 -MODULE_DEVICE_TABLE(of, fman_muram_match);
5761 -static struct fman *read_dts_node(struct platform_device *of_dev)
5763 - struct fman *fman;
5764 - struct device_node *fm_node, *muram_node;
5765 - struct resource *res;
5766 - u32 val, range[2];
5770 - phys_addr_t phys_base_addr;
5771 - resource_size_t mem_size;
5773 - fman = kzalloc(sizeof(*fman), GFP_KERNEL);
5777 - fm_node = of_node_get(of_dev->dev.of_node);
5779 - err = of_property_read_u32(fm_node, "cell-index", &val);
5781 - dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
5782 - __func__, fm_node->full_name);
5783 - goto fman_node_put;
5785 - fman->dts_params.id = (u8)val;
5787 - /* Get the FM interrupt */
5788 - res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
5790 - dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
5792 - goto fman_node_put;
5796 - /* Get the FM error interrupt */
5797 - res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
5799 - dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
5801 - goto fman_node_put;
5803 - fman->dts_params.err_irq = res->start;
5805 - /* Get the FM address */
5806 - res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
5808 - dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
5810 - goto fman_node_put;
5813 - phys_base_addr = res->start;
5814 - mem_size = resource_size(res);
5816 - clk = of_clk_get(fm_node, 0);
5817 - if (IS_ERR(clk)) {
5818 - dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
5819 - __func__, fman->dts_params.id);
5820 - goto fman_node_put;
5823 - clk_rate = clk_get_rate(clk);
5825 - dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
5826 - __func__, fman->dts_params.id);
5827 - goto fman_node_put;
5829 - /* Rounding to MHz */
5830 - fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
5832 - err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
5835 - dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
5836 - __func__, fm_node->full_name);
5837 - goto fman_node_put;
5839 - fman->dts_params.qman_channel_base = range[0];
5840 - fman->dts_params.num_of_qman_channels = range[1];
5842 - /* Get the MURAM base address and size */
5843 - muram_node = of_find_matching_node(fm_node, fman_muram_match);
5844 - if (!muram_node) {
5845 - dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
5847 - goto fman_node_put;
5850 - err = of_address_to_resource(muram_node, 0,
5851 - &fman->dts_params.muram_res);
5853 - of_node_put(muram_node);
5854 - dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
5856 - goto fman_node_put;
5859 - of_node_put(muram_node);
5860 - of_node_put(fm_node);
5862 - err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
5864 - dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
5865 - __func__, irq, err);
5869 - if (fman->dts_params.err_irq != 0) {
5870 - err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
5871 - fman_err_irq, IRQF_SHARED,
5872 - "fman-err", fman);
5874 - dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
5875 - __func__, fman->dts_params.err_irq, err);
5880 - fman->dts_params.res =
5881 - devm_request_mem_region(&of_dev->dev, phys_base_addr,
5882 - mem_size, "fman");
5883 - if (!fman->dts_params.res) {
5884 - dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
5889 - fman->dts_params.base_addr =
5890 - devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
5891 - if (!fman->dts_params.base_addr) {
5892 - dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
5896 - fman->dev = &of_dev->dev;
5901 - of_node_put(fm_node);
5907 -static int fman_probe(struct platform_device *of_dev)
5909 - struct fman *fman;
5910 - struct device *dev;
5913 - dev = &of_dev->dev;
5915 - fman = read_dts_node(of_dev);
5919 - err = fman_config(fman);
5921 - dev_err(dev, "%s: FMan config failed\n", __func__);
5925 - if (fman_init(fman) != 0) {
5926 - dev_err(dev, "%s: FMan init failed\n", __func__);
5930 - if (fman->dts_params.err_irq == 0) {
5931 - fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
5932 - fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
5933 - fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
5934 - fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
5935 - fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
5936 - fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
5937 - fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
5938 - fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
5939 - fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
5940 - fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
5941 - fman_set_exception(fman,
5942 - FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
5943 - fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
5944 - fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
5946 - fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
5947 - fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
5950 - dev_set_drvdata(dev, fman);
5952 - dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
5957 -static const struct of_device_id fman_match[] = {
5959 - .compatible = "fsl,fman"},
5963 -MODULE_DEVICE_TABLE(of, fman_match);
5965 -static struct platform_driver fman_driver = {
5967 - .name = "fsl-fman",
5968 - .of_match_table = fman_match,
5970 - .probe = fman_probe,
5973 -static int __init fman_load(void)
5977 - pr_debug("FSL DPAA FMan driver\n");
5979 - err = platform_driver_register(&fman_driver);
5981 - pr_err("Error, platform_driver_register() = %d\n", err);
5985 -module_init(fman_load);
5987 -static void __exit fman_unload(void)
5989 - platform_driver_unregister(&fman_driver);
5991 -module_exit(fman_unload);
5993 -MODULE_LICENSE("Dual BSD/GPL");
5994 -MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
5995 diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
5996 deleted file mode 100644
5997 index 57aae8d..0000000
5998 --- a/drivers/net/ethernet/freescale/fman/fman.h
6002 - * Copyright 2008-2015 Freescale Semiconductor Inc.
6004 - * Redistribution and use in source and binary forms, with or without
6005 - * modification, are permitted provided that the following conditions are met:
6006 - * * Redistributions of source code must retain the above copyright
6007 - * notice, this list of conditions and the following disclaimer.
6008 - * * Redistributions in binary form must reproduce the above copyright
6009 - * notice, this list of conditions and the following disclaimer in the
6010 - * documentation and/or other materials provided with the distribution.
6011 - * * Neither the name of Freescale Semiconductor nor the
6012 - * names of its contributors may be used to endorse or promote products
6013 - * derived from this software without specific prior written permission.
6016 - * ALTERNATIVELY, this software may be distributed under the terms of the
6017 - * GNU General Public License ("GPL") as published by the Free Software
6018 - * Foundation, either version 2 of that License or (at your option) any
6021 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
6022 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
6023 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
6024 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
6025 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
6026 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
6027 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
6028 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
6029 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6030 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6036 -#include <linux/io.h>
6038 -/* FM Frame descriptor macros */
6039 -/* Frame queue Context Override */
6040 -#define FM_FD_CMD_FCO 0x80000000
6041 -#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
6042 -#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
6044 -/* TX-Port: Unsupported Format */
6045 -#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000
6046 -/* TX Port: Length Error */
6047 -#define FM_FD_ERR_LENGTH 0x02000000
6048 -#define FM_FD_ERR_DMA 0x01000000 /* DMA Data error */
6050 -/* IPR frame (not error) */
6051 -#define FM_FD_IPR 0x00000001
6052 -/* IPR non-consistent-sp */
6053 -#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR)
6055 -#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR)
6057 -#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR)
6058 -/* TX Port: Length Error */
6059 -#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
6061 -/* Rx FIFO overflow, FCS error, code error, running disparity error
6062 - * (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
6063 - * PHY error control character detected.
6065 -#define FM_FD_ERR_PHYSICAL 0x00080000
6066 -/* Frame too long OR Frame size exceeds max_length_frame */
6067 -#define FM_FD_ERR_SIZE 0x00040000
6068 -/* classification discard */
6069 -#define FM_FD_ERR_CLS_DISCARD 0x00020000
6070 -/* Extract Out of Frame */
6071 -#define FM_FD_ERR_EXTRACTION 0x00008000
6072 -/* No Scheme Selected */
6073 -#define FM_FD_ERR_NO_SCHEME 0x00004000
6074 -/* Keysize Overflow */
6075 -#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000
6076 -/* Frame color is red */
6077 -#define FM_FD_ERR_COLOR_RED 0x00000800
6078 -/* Frame color is yellow */
6079 -#define FM_FD_ERR_COLOR_YELLOW 0x00000400
6080 -/* Parser Time out Exceed */
6081 -#define FM_FD_ERR_PRS_TIMEOUT 0x00000080
6082 -/* Invalid Soft Parser instruction */
6083 -#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040
6084 -/* Header error was identified during parsing */
6085 -#define FM_FD_ERR_PRS_HDR_ERR 0x00000020
6086 -/* Frame parsed beyind 256 first bytes */
6087 -#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008
6089 -/* non Frame-Manager error */
6090 -#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000
6092 -/* FMan driver defines */
6093 -#define FMAN_BMI_FIFO_UNITS 0x100
6094 -#define OFFSET_UNITS 16
6097 -#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
6098 -#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
6100 -struct fman; /* FMan data */
6102 -/* Enum for defining port types */
6103 -enum fman_port_type {
6104 - FMAN_PORT_TYPE_TX = 0, /* TX Port */
6105 - FMAN_PORT_TYPE_RX, /* RX Port */
6108 -struct fman_rev_info {
6109 - u8 major; /* Major revision */
6110 - u8 minor; /* Minor revision */
6113 -enum fman_exceptions {
6114 - FMAN_EX_DMA_BUS_ERROR = 0, /* DMA bus error. */
6115 - FMAN_EX_DMA_READ_ECC, /* Read Buffer ECC error */
6116 - FMAN_EX_DMA_SYSTEM_WRITE_ECC, /* Write Buffer ECC err on sys side */
6117 - FMAN_EX_DMA_FM_WRITE_ECC, /* Write Buffer ECC error on FM side */
6118 - FMAN_EX_DMA_SINGLE_PORT_ECC, /* Single Port ECC error on FM side */
6119 - FMAN_EX_FPM_STALL_ON_TASKS, /* Stall of tasks on FPM */
6120 - FMAN_EX_FPM_SINGLE_ECC, /* Single ECC on FPM. */
6121 - FMAN_EX_FPM_DOUBLE_ECC, /* Double ECC error on FPM ram access */
6122 - FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. */
6123 - FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */
6124 - FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */
6125 - FMAN_EX_BMI_LIST_RAM_ECC, /* Linked List RAM ECC error */
6126 - FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */
6127 - FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */
6128 - FMAN_EX_BMI_DISPATCH_RAM_ECC, /* Dispatch RAM ECC Error Enable */
6129 - FMAN_EX_IRAM_ECC, /* Double bit ECC occurred on IRAM */
6130 - FMAN_EX_MURAM_ECC /* Double bit ECC occurred on MURAM */
6133 -/* Parse results memory layout */
6134 -struct fman_prs_result {
6135 - u8 lpid; /* Logical port id */
6136 - u8 shimr; /* Shim header result */
6137 - u16 l2r; /* Layer 2 result */
6138 - u16 l3r; /* Layer 3 result */
6139 - u8 l4r; /* Layer 4 result */
6140 - u8 cplan; /* Classification plan id */
6141 - u16 nxthdr; /* Next Header */
6142 - u16 cksum; /* Running-sum */
6143 - /* Flags&fragment-offset field of the last IP-header */
6144 - u16 flags_frag_off;
6145 - /* Routing type field of a IPV6 routing extension header */
6147 - /* Routing Extension Header Present; last bit is IP valid */
6149 - u8 shim_off[2]; /* Shim offset */
6150 - u8 ip_pid_off; /* IP PID (last IP-proto) offset */
6151 - u8 eth_off; /* ETH offset */
6152 - u8 llc_snap_off; /* LLC_SNAP offset */
6153 - u8 vlan_off[2]; /* VLAN offset */
6154 - u8 etype_off; /* ETYPE offset */
6155 - u8 pppoe_off; /* PPP offset */
6156 - u8 mpls_off[2]; /* MPLS offset */
6157 - u8 ip_off[2]; /* IP offset */
6158 - u8 gre_off; /* GRE offset */
6159 - u8 l4_off; /* Layer 4 offset */
6160 - u8 nxthdr_off; /* Parser end point */
6163 -/* A structure for defining buffer prefix area content. */
6164 -struct fman_buffer_prefix_content {
6165 - /* Number of bytes to be left at the beginning of the external
6166 - * buffer; Note that the private-area will start from the base
6167 - * of the buffer address.
6169 - u16 priv_data_size;
6170 - /* true to pass the parse result to/from the FM;
6171 - * User may use FM_PORT_GetBufferPrsResult() in
6172 - * order to get the parser-result from a buffer.
6174 - bool pass_prs_result;
6175 - /* true to pass the timeStamp to/from the FM User */
6176 - bool pass_time_stamp;
6177 - /* true to pass the KG hash result to/from the FM User may
6178 - * use FM_PORT_GetBufferHashResult() in order to get the
6179 - * parser-result from a buffer.
6181 - bool pass_hash_result;
6182 - /* Add all other Internal-Context information: AD,
6183 - * hash-result, key, etc.
6188 -/* A structure of information about each of the external
6189 - * buffer pools used by a port or storage-profile.
6191 -struct fman_ext_pool_params {
6192 - u8 id; /* External buffer pool id */
6193 - u16 size; /* External buffer pool buffer size */
6196 -/* A structure for informing the driver about the external
6197 - * buffer pools allocated in the BM and used by a port or a
6198 - * storage-profile.
6200 -struct fman_ext_pools {
6201 - u8 num_of_pools_used; /* Number of pools use by this port */
6202 - struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM];
6203 - /* Parameters for each port */
6206 -/* A structure for defining BM pool depletion criteria */
6207 -struct fman_buf_pool_depletion {
6208 - /* select mode in which pause frames will be sent after a
6209 - * number of pools (all together!) are depleted
6211 - bool pools_grp_mode_enable;
6212 - /* the number of depleted pools that will invoke pause
6213 - * frames transmission.
6216 - /* For each pool, true if it should be considered for
6217 - * depletion (Note - this pool must be used by this port!).
6219 - bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
6220 - /* select mode in which pause frames will be sent
6221 - * after a single-pool is depleted;
6223 - bool single_pool_mode_enable;
6224 - /* For each pool, true if it should be considered
6225 - * for depletion (Note - this pool must be used by this port!)
6227 - bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
6230 -/* Enum for inter-module interrupts registration */
6231 -enum fman_event_modules {
6232 - FMAN_MOD_MAC = 0, /* MAC event */
6233 - FMAN_MOD_FMAN_CTRL, /* FMAN Controller */
6234 - FMAN_MOD_DUMMY_LAST
6237 -/* Enum for interrupts types */
6238 -enum fman_intr_type {
6239 - FMAN_INTR_TYPE_ERR,
6240 - FMAN_INTR_TYPE_NORMAL
6243 -/* Enum for inter-module interrupts registration */
6244 -enum fman_inter_module_event {
6245 - FMAN_EV_ERR_MAC0 = 0, /* MAC 0 error event */
6246 - FMAN_EV_ERR_MAC1, /* MAC 1 error event */
6247 - FMAN_EV_ERR_MAC2, /* MAC 2 error event */
6248 - FMAN_EV_ERR_MAC3, /* MAC 3 error event */
6249 - FMAN_EV_ERR_MAC4, /* MAC 4 error event */
6250 - FMAN_EV_ERR_MAC5, /* MAC 5 error event */
6251 - FMAN_EV_ERR_MAC6, /* MAC 6 error event */
6252 - FMAN_EV_ERR_MAC7, /* MAC 7 error event */
6253 - FMAN_EV_ERR_MAC8, /* MAC 8 error event */
6254 - FMAN_EV_ERR_MAC9, /* MAC 9 error event */
6255 - FMAN_EV_MAC0, /* MAC 0 event (Magic packet detection) */
6256 - FMAN_EV_MAC1, /* MAC 1 event (Magic packet detection) */
6257 - FMAN_EV_MAC2, /* MAC 2 (Magic packet detection) */
6258 - FMAN_EV_MAC3, /* MAC 3 (Magic packet detection) */
6259 - FMAN_EV_MAC4, /* MAC 4 (Magic packet detection) */
6260 - FMAN_EV_MAC5, /* MAC 5 (Magic packet detection) */
6261 - FMAN_EV_MAC6, /* MAC 6 (Magic packet detection) */
6262 - FMAN_EV_MAC7, /* MAC 7 (Magic packet detection) */
6263 - FMAN_EV_MAC8, /* MAC 8 event (Magic packet detection) */
6264 - FMAN_EV_MAC9, /* MAC 9 event (Magic packet detection) */
6265 - FMAN_EV_FMAN_CTRL_0, /* Fman controller event 0 */
6266 - FMAN_EV_FMAN_CTRL_1, /* Fman controller event 1 */
6267 - FMAN_EV_FMAN_CTRL_2, /* Fman controller event 2 */
6268 - FMAN_EV_FMAN_CTRL_3, /* Fman controller event 3 */
6272 -struct fman_intr_src {
6273 - void (*isr_cb)(void *src_arg);
6277 -/* Structure for port-FM communication during fman_port_init. */
6278 -struct fman_port_init_params {
6279 - u8 port_id; /* port Id */
6280 - enum fman_port_type port_type; /* Port type */
6281 - u16 port_speed; /* Port speed */
6282 - u16 liodn_offset; /* Port's requested resource */
6283 - u8 num_of_tasks; /* Port's requested resource */
6284 - u8 num_of_extra_tasks; /* Port's requested resource */
6285 - u8 num_of_open_dmas; /* Port's requested resource */
6286 - u8 num_of_extra_open_dmas; /* Port's requested resource */
6287 - u32 size_of_fifo; /* Port's requested resource */
6288 - u32 extra_size_of_fifo; /* Port's requested resource */
6289 - u8 deq_pipeline_depth; /* Port's requested resource */
6290 - u16 max_frame_length; /* Port's max frame length. */
6292 - /* LIODN base for this port, to be used together with LIODN offset. */
6295 -void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
6297 -void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
6298 - u8 mod_id, enum fman_intr_type intr_type,
6299 - void (*f_isr)(void *h_src_arg), void *h_src_arg);
6301 -void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
6302 - u8 mod_id, enum fman_intr_type intr_type);
6304 -int fman_set_port_params(struct fman *fman,
6305 - struct fman_port_init_params *port_params);
6307 -int fman_reset_mac(struct fman *fman, u8 mac_id);
6309 -u16 fman_get_clock_freq(struct fman *fman);
6311 -u32 fman_get_bmi_max_fifo_size(struct fman *fman);
6313 -int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
6315 -u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
6317 -struct resource *fman_get_mem_region(struct fman *fman);
6319 -u16 fman_get_max_frm(void);
6321 -int fman_get_rx_extra_headroom(void);
6323 -struct fman *fman_bind(struct device *dev);
6325 -#endif /* __FM_H */
6326 diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
6327 deleted file mode 100644
6328 index c88918c..0000000
6329 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
6333 - * Copyright 2008-2015 Freescale Semiconductor Inc.
6335 - * Redistribution and use in source and binary forms, with or without
6336 - * modification, are permitted provided that the following conditions are met:
6337 - * * Redistributions of source code must retain the above copyright
6338 - * notice, this list of conditions and the following disclaimer.
6339 - * * Redistributions in binary form must reproduce the above copyright
6340 - * notice, this list of conditions and the following disclaimer in the
6341 - * documentation and/or other materials provided with the distribution.
6342 - * * Neither the name of Freescale Semiconductor nor the
6343 - * names of its contributors may be used to endorse or promote products
6344 - * derived from this software without specific prior written permission.
6347 - * ALTERNATIVELY, this software may be distributed under the terms of the
6348 - * GNU General Public License ("GPL") as published by the Free Software
6349 - * Foundation, either version 2 of that License or (at your option) any
6352 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
6353 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
6354 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
6355 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
6356 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
6357 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
6358 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
6359 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
6360 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6361 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6364 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6366 -#include "fman_dtsec.h"
6369 -#include <linux/slab.h>
6370 -#include <linux/bitrev.h>
6371 -#include <linux/io.h>
6372 -#include <linux/delay.h>
6373 -#include <linux/phy.h>
6374 -#include <linux/crc32.h>
6375 -#include <linux/of_mdio.h>
6376 -#include <linux/mii.h>
6378 -/* TBI register addresses */
6379 -#define MII_TBICON 0x11
6381 -/* TBICON register bit fields */
6382 -#define TBICON_SOFT_RESET 0x8000 /* Soft reset */
6383 -#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
6384 -#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
6385 -#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
6386 -#define TBICON_CLK_SELECT 0x0020 /* Clock select */
6387 -#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
6389 -#define TBIANA_SGMII 0x4001
6390 -#define TBIANA_1000X 0x01a0
6392 -/* Interrupt Mask Register (IMASK) */
6393 -#define DTSEC_IMASK_BREN 0x80000000
6394 -#define DTSEC_IMASK_RXCEN 0x40000000
6395 -#define DTSEC_IMASK_MSROEN 0x04000000
6396 -#define DTSEC_IMASK_GTSCEN 0x02000000
6397 -#define DTSEC_IMASK_BTEN 0x01000000
6398 -#define DTSEC_IMASK_TXCEN 0x00800000
6399 -#define DTSEC_IMASK_TXEEN 0x00400000
6400 -#define DTSEC_IMASK_LCEN 0x00040000
6401 -#define DTSEC_IMASK_CRLEN 0x00020000
6402 -#define DTSEC_IMASK_XFUNEN 0x00010000
6403 -#define DTSEC_IMASK_ABRTEN 0x00008000
6404 -#define DTSEC_IMASK_IFERREN 0x00004000
6405 -#define DTSEC_IMASK_MAGEN 0x00000800
6406 -#define DTSEC_IMASK_MMRDEN 0x00000400
6407 -#define DTSEC_IMASK_MMWREN 0x00000200
6408 -#define DTSEC_IMASK_GRSCEN 0x00000100
6409 -#define DTSEC_IMASK_TDPEEN 0x00000002
6410 -#define DTSEC_IMASK_RDPEEN 0x00000001
6412 -#define DTSEC_EVENTS_MASK \
6413 - ((u32)(DTSEC_IMASK_BREN | \
6414 - DTSEC_IMASK_RXCEN | \
6415 - DTSEC_IMASK_BTEN | \
6416 - DTSEC_IMASK_TXCEN | \
6417 - DTSEC_IMASK_TXEEN | \
6418 - DTSEC_IMASK_ABRTEN | \
6419 - DTSEC_IMASK_LCEN | \
6420 - DTSEC_IMASK_CRLEN | \
6421 - DTSEC_IMASK_XFUNEN | \
6422 - DTSEC_IMASK_IFERREN | \
6423 - DTSEC_IMASK_MAGEN | \
6424 - DTSEC_IMASK_TDPEEN | \
6425 - DTSEC_IMASK_RDPEEN))
6427 -/* dtsec timestamp event bits */
6428 -#define TMR_PEMASK_TSREEN 0x00010000
6429 -#define TMR_PEVENT_TSRE 0x00010000
6431 -/* Group address bit indication */
6432 -#define MAC_GROUP_ADDRESS 0x0000010000000000ULL
6435 -#define DEFAULT_HALFDUP_RETRANSMIT 0xf
6436 -#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
6437 -#define DEFAULT_TX_PAUSE_TIME 0xf000
6438 -#define DEFAULT_RX_PREPEND 0
6439 -#define DEFAULT_PREAMBLE_LEN 7
6440 -#define DEFAULT_TX_PAUSE_TIME_EXTD 0
6441 -#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
6442 -#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
6443 -#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
6444 -#define DEFAULT_BACK_TO_BACK_IPG 0x60
6445 -#define DEFAULT_MAXIMUM_FRAME 0x600
6447 -/* register related defines (bits, field offsets..) */
6448 -#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
6450 -#define DTSEC_ECNTRL_GMIIM 0x00000040
6451 -#define DTSEC_ECNTRL_TBIM 0x00000020
6452 -#define DTSEC_ECNTRL_SGMIIM 0x00000002
6453 -#define DTSEC_ECNTRL_RPM 0x00000010
6454 -#define DTSEC_ECNTRL_R100M 0x00000008
6455 -#define DTSEC_ECNTRL_QSGMIIM 0x00000001
6457 -#define DTSEC_TCTRL_GTS 0x00000020
6459 -#define RCTRL_PAL_MASK 0x001f0000
6460 -#define RCTRL_PAL_SHIFT 16
6461 -#define RCTRL_GHTX 0x00000400
6462 -#define RCTRL_GRS 0x00000020
6463 -#define RCTRL_MPROM 0x00000008
6464 -#define RCTRL_RSF 0x00000004
6465 -#define RCTRL_UPROM 0x00000001
6467 -#define MACCFG1_SOFT_RESET 0x80000000
6468 -#define MACCFG1_RX_FLOW 0x00000020
6469 -#define MACCFG1_TX_FLOW 0x00000010
6470 -#define MACCFG1_TX_EN 0x00000001
6471 -#define MACCFG1_RX_EN 0x00000004
6473 -#define MACCFG2_NIBBLE_MODE 0x00000100
6474 -#define MACCFG2_BYTE_MODE 0x00000200
6475 -#define MACCFG2_PAD_CRC_EN 0x00000004
6476 -#define MACCFG2_FULL_DUPLEX 0x00000001
6477 -#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
6478 -#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
6480 -#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
6481 -#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
6482 -#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
6484 -#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
6485 -#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
6486 -#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
6487 -#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
6489 -#define HAFDUP_EXCESS_DEFER 0x00010000
6490 -#define HAFDUP_COLLISION_WINDOW 0x000003ff
6491 -#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
6492 -#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
6494 -#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
6496 -#define PTV_PTE_MASK 0xffff0000
6497 -#define PTV_PT_MASK 0x0000ffff
6498 -#define PTV_PTE_SHIFT 16
6500 -#define MAX_PACKET_ALIGNMENT 31
6501 -#define MAX_INTER_PACKET_GAP 0x7f
6502 -#define MAX_RETRANSMISSION 0x0f
6503 -#define MAX_COLLISION_WINDOW 0x03ff
6505 -/* Hash table size (32 bits*8 regs) */
6506 -#define DTSEC_HASH_TABLE_SIZE 256
6507 -/* Extended Hash table size (32 bits*16 regs) */
6508 -#define EXTENDED_HASH_TABLE_SIZE 512
6510 -/* dTSEC Memory Map registers */
6511 -struct dtsec_regs {
6512 - /* dTSEC General Control and Status Registers */
6513 - u32 tsec_id; /* 0x000 ETSEC_ID register */
6514 - u32 tsec_id2; /* 0x004 ETSEC_ID2 register */
6515 - u32 ievent; /* 0x008 Interrupt event register */
6516 - u32 imask; /* 0x00C Interrupt mask register */
6517 - u32 reserved0010[1];
6518 - u32 ecntrl; /* 0x014 E control register */
6519 - u32 ptv; /* 0x018 Pause time value register */
6520 - u32 tbipa; /* 0x01C TBI PHY address register */
6521 - u32 tmr_ctrl; /* 0x020 Time-stamp Control register */
6522 - u32 tmr_pevent; /* 0x024 Time-stamp event register */
6523 - u32 tmr_pemask; /* 0x028 Timer event mask register */
6524 - u32 reserved002c[5];
6525 - u32 tctrl; /* 0x040 Transmit control register */
6526 - u32 reserved0044[3];
6527 - u32 rctrl; /* 0x050 Receive control register */
6528 - u32 reserved0054[11];
6529 - u32 igaddr[8]; /* 0x080-0x09C Individual/group address */
6530 - u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
6531 - u32 reserved00c0[16];
6532 - u32 maccfg1; /* 0x100 MAC configuration #1 */
6533 - u32 maccfg2; /* 0x104 MAC configuration #2 */
6534 - u32 ipgifg; /* 0x108 IPG/IFG */
6535 - u32 hafdup; /* 0x10C Half-duplex */
6536 - u32 maxfrm; /* 0x110 Maximum frame */
6537 - u32 reserved0114[10];
6538 - u32 ifstat; /* 0x13C Interface status */
6539 - u32 macstnaddr1; /* 0x140 Station Address,part 1 */
6540 - u32 macstnaddr2; /* 0x144 Station Address,part 2 */
6542 - u32 exact_match1; /* octets 1-4 */
6543 - u32 exact_match2; /* octets 5-6 */
6544 - } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
6545 - u32 reserved01c0[16];
6546 - u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */
6547 - u32 tr127; /* 0x204 Tx and Rx 65 to 127 byte frame counter */
6548 - u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */
6549 - u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */
6550 - u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
6551 - u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
6553 - /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
6554 - u32 rbyt; /* 0x21C receive byte counter */
6555 - u32 rpkt; /* 0x220 receive packet counter */
6556 - u32 rfcs; /* 0x224 receive FCS error counter */
6557 - u32 rmca; /* 0x228 RMCA Rx multicast packet counter */
6558 - u32 rbca; /* 0x22C Rx broadcast packet counter */
6559 - u32 rxcf; /* 0x230 Rx control frame packet counter */
6560 - u32 rxpf; /* 0x234 Rx pause frame packet counter */
6561 - u32 rxuo; /* 0x238 Rx unknown OP code counter */
6562 - u32 raln; /* 0x23C Rx alignment error counter */
6563 - u32 rflr; /* 0x240 Rx frame length error counter */
6564 - u32 rcde; /* 0x244 Rx code error counter */
6565 - u32 rcse; /* 0x248 Rx carrier sense error counter */
6566 - u32 rund; /* 0x24C Rx undersize packet counter */
6567 - u32 rovr; /* 0x250 Rx oversize packet counter */
6568 - u32 rfrg; /* 0x254 Rx fragments counter */
6569 - u32 rjbr; /* 0x258 Rx jabber counter */
6570 - u32 rdrp; /* 0x25C Rx drop */
6571 - u32 tbyt; /* 0x260 Tx byte counter */
6572 - u32 tpkt; /* 0x264 Tx packet counter */
6573 - u32 tmca; /* 0x268 Tx multicast packet counter */
6574 - u32 tbca; /* 0x26C Tx broadcast packet counter */
6575 - u32 txpf; /* 0x270 Tx pause control frame counter */
6576 - u32 tdfr; /* 0x274 Tx deferral packet counter */
6577 - u32 tedf; /* 0x278 Tx excessive deferral packet counter */
6578 - u32 tscl; /* 0x27C Tx single collision packet counter */
6579 - u32 tmcl; /* 0x280 Tx multiple collision packet counter */
6580 - u32 tlcl; /* 0x284 Tx late collision packet counter */
6581 - u32 txcl; /* 0x288 Tx excessive collision packet counter */
6582 - u32 tncl; /* 0x28C Tx total collision counter */
6583 - u32 reserved0290[1];
6584 - u32 tdrp; /* 0x294 Tx drop frame counter */
6585 - u32 tjbr; /* 0x298 Tx jabber frame counter */
6586 - u32 tfcs; /* 0x29C Tx FCS error counter */
6587 - u32 txcf; /* 0x2A0 Tx control frame counter */
6588 - u32 tovr; /* 0x2A4 Tx oversize frame counter */
6589 - u32 tund; /* 0x2A8 Tx undersize frame counter */
6590 - u32 tfrg; /* 0x2AC Tx fragments frame counter */
6591 - u32 car1; /* 0x2B0 carry register one register* */
6592 - u32 car2; /* 0x2B4 carry register two register* */
6593 - u32 cam1; /* 0x2B8 carry register one mask register */
6594 - u32 cam2; /* 0x2BC carry register two mask register */
6595 - u32 reserved02c0[848];
6598 -/* struct dtsec_cfg - dTSEC configuration
6599 - * Transmit half-duplex flow control, under software control for 10/100-Mbps
6600 - * half-duplex media. If set, back pressure is applied to media by raising
6602 - * halfdup_retransmit:
6603 - * Number of retransmission attempts following a collision.
6604 - * If this is exceeded dTSEC aborts transmission due to excessive collisions.
6605 - * The standard specifies the attempt limit to be 15.
6606 - * halfdup_coll_window:
6607 - * The number of bytes of the frame during which collisions may occur.
6608 - * The default value of 55 corresponds to the frame byte at the end of the
6609 - * standard 512-bit slot time window. If collisions are detected after this
6610 - * byte, the late collision event is asserted and transmission of current
6611 - * frame is aborted.
6613 - * Pad and append CRC. If set, the MAC pads all ransmitted short frames and
6614 - * appends a CRC to every frame regardless of padding requirement.
6616 - * Transmit pause time value. This pause value is used as part of the pause
6617 - * frame to be sent when a transmit pause frame is initiated.
6618 - * If set to 0 this disables transmission of pause frames.
6620 - * Length, in bytes, of the preamble field preceding each Ethernet
6621 - * start-of-frame delimiter byte. The default value of 0x7 should be used in
6622 - * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
6624 - * Packet alignment padding length. The specified number of bytes (1-31)
6625 - * of zero padding are inserted before the start of each received frame.
6626 - * For Ethernet, where optional preamble extraction is enabled, the padding
6627 - * appears before the preamble, otherwise the padding precedes the
6630 - * This structure contains basic dTSEC configuration and must be passed to
6631 - * init() function. A default set of configuration values can be
6632 - * obtained by calling set_dflts().
6635 - u16 halfdup_retransmit;
6636 - u16 halfdup_coll_window;
6638 - u16 tx_pause_time;
6640 - bool ptp_exception_en;
6643 - u16 tx_pause_time_extd;
6644 - u16 maximum_frame;
6645 - u32 non_back_to_back_ipg1;
6646 - u32 non_back_to_back_ipg2;
6647 - u32 min_ifg_enforcement;
6648 - u32 back_to_back_ipg;
6652 - /* pointer to dTSEC memory mapped registers */
6653 - struct dtsec_regs __iomem *regs;
6654 - /* MAC address of device */
6656 - /* Ethernet physical interface */
6657 - phy_interface_t phy_if;
6659 - void *dev_id; /* device cookie used by the exception cbs */
6660 - fman_mac_exception_cb *exception_cb;
6661 - fman_mac_exception_cb *event_cb;
6662 - /* Number of individual addresses in registers for this station */
6663 - u8 num_of_ind_addr_in_regs;
6664 - /* pointer to driver's global address hash table */
6665 - struct eth_hash_t *multicast_addr_hash;
6666 - /* pointer to driver's individual address hash table */
6667 - struct eth_hash_t *unicast_addr_hash;
6670 - bool ptp_tsu_enabled;
6671 - bool en_tsu_err_exeption;
6672 - struct dtsec_cfg *dtsec_drv_param;
6674 - struct fman_rev_info fm_rev_info;
6676 - struct phy_device *tbiphy;
6679 -static void set_dflts(struct dtsec_cfg *cfg)
6681 - cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
6682 - cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
6683 - cfg->tx_pad_crc = true;
6684 - cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
6685 - /* PHY address 0 is reserved (DPAA RM) */
6686 - cfg->rx_prepend = DEFAULT_RX_PREPEND;
6687 - cfg->ptp_tsu_en = true;
6688 - cfg->ptp_exception_en = true;
6689 - cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
6690 - cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
6691 - cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
6692 - cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
6693 - cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
6694 - cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
6695 - cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
6698 -static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
6699 - phy_interface_t iface, u16 iface_speed, u8 *macaddr,
6700 - u32 exception_mask, u8 tbi_addr)
6702 - bool is_rgmii, is_sgmii, is_qsgmii;
6707 - iowrite32be(MACCFG1_SOFT_RESET, ®s->maccfg1);
6708 - iowrite32be(0, ®s->maccfg1);
6711 - tmp = ioread32be(®s->tsec_id2);
6713 - /* check RGMII support */
6714 - if (iface == PHY_INTERFACE_MODE_RGMII ||
6715 - iface == PHY_INTERFACE_MODE_RMII)
6716 - if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
6719 - if (iface == PHY_INTERFACE_MODE_SGMII ||
6720 - iface == PHY_INTERFACE_MODE_MII)
6721 - if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
6724 - is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
6725 - is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
6726 - is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
6729 - if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
6730 - tmp |= DTSEC_ECNTRL_GMIIM;
6732 - tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
6734 - tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
6735 - DTSEC_ECNTRL_QSGMIIM);
6737 - tmp |= DTSEC_ECNTRL_RPM;
6738 - if (iface_speed == SPEED_100)
6739 - tmp |= DTSEC_ECNTRL_R100M;
6741 - iowrite32be(tmp, ®s->ecntrl);
6745 - if (cfg->tx_pause_time)
6746 - tmp |= cfg->tx_pause_time;
6747 - if (cfg->tx_pause_time_extd)
6748 - tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
6749 - iowrite32be(tmp, ®s->ptv);
6752 - tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
6753 - /* Accept short frames */
6756 - iowrite32be(tmp, ®s->rctrl);
6758 - /* Assign a Phy Address to the TBI (TBIPA).
6759 - * Done also in cases where TBI is not selected to avoid conflict with
6760 - * the external PHY's Physical address
6762 - iowrite32be(tbi_addr, ®s->tbipa);
6764 - iowrite32be(0, ®s->tmr_ctrl);
6766 - if (cfg->ptp_tsu_en) {
6768 - tmp |= TMR_PEVENT_TSRE;
6769 - iowrite32be(tmp, ®s->tmr_pevent);
6771 - if (cfg->ptp_exception_en) {
6773 - tmp |= TMR_PEMASK_TSREEN;
6774 - iowrite32be(tmp, ®s->tmr_pemask);
6779 - tmp |= MACCFG1_RX_FLOW;
6780 - tmp |= MACCFG1_TX_FLOW;
6781 - iowrite32be(tmp, ®s->maccfg1);
6785 - if (iface_speed < SPEED_1000)
6786 - tmp |= MACCFG2_NIBBLE_MODE;
6787 - else if (iface_speed == SPEED_1000)
6788 - tmp |= MACCFG2_BYTE_MODE;
6790 - tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
6791 - MACCFG2_PREAMBLE_LENGTH_MASK;
6792 - if (cfg->tx_pad_crc)
6793 - tmp |= MACCFG2_PAD_CRC_EN;
6795 - tmp |= MACCFG2_FULL_DUPLEX;
6796 - iowrite32be(tmp, ®s->maccfg2);
6798 - tmp = (((cfg->non_back_to_back_ipg1 <<
6799 - IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
6800 - & IPGIFG_NON_BACK_TO_BACK_IPG_1)
6801 - | ((cfg->non_back_to_back_ipg2 <<
6802 - IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
6803 - & IPGIFG_NON_BACK_TO_BACK_IPG_2)
6804 - | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
6805 - & IPGIFG_MIN_IFG_ENFORCEMENT)
6806 - | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
6807 - iowrite32be(tmp, ®s->ipgifg);
6810 - tmp |= HAFDUP_EXCESS_DEFER;
6811 - tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
6812 - & HAFDUP_RETRANSMISSION_MAX);
6813 - tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
6815 - iowrite32be(tmp, ®s->hafdup);
6817 - /* Initialize Maximum frame length */
6818 - iowrite32be(cfg->maximum_frame, ®s->maxfrm);
6820 - iowrite32be(0xffffffff, ®s->cam1);
6821 - iowrite32be(0xffffffff, ®s->cam2);
6823 - iowrite32be(exception_mask, ®s->imask);
6825 - iowrite32be(0xffffffff, ®s->ievent);
6827 - tmp = (u32)((macaddr[5] << 24) |
6828 - (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
6829 - iowrite32be(tmp, ®s->macstnaddr1);
6831 - tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
6832 - iowrite32be(tmp, ®s->macstnaddr2);
6835 - for (i = 0; i < NUM_OF_HASH_REGS; i++) {
6836 - /* Initialize IADDRx */
6837 - iowrite32be(0, ®s->igaddr[i]);
6838 - /* Initialize GADDRx */
6839 - iowrite32be(0, ®s->gaddr[i]);
6845 -static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
6849 - tmp = (u32)((adr[5] << 24) |
6850 - (adr[4] << 16) | (adr[3] << 8) | adr[2]);
6851 - iowrite32be(tmp, ®s->macstnaddr1);
6853 - tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
6854 - iowrite32be(tmp, ®s->macstnaddr2);
6857 -static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
6860 - int reg_idx = (bucket >> 5) & 0xf;
6861 - int bit_idx = bucket & 0x1f;
6862 - u32 bit_mask = 0x80000000 >> bit_idx;
6866 - reg = ®s->gaddr[reg_idx - 8];
6868 - reg = ®s->igaddr[reg_idx];
6871 - iowrite32be(ioread32be(reg) | bit_mask, reg);
6873 - iowrite32be(ioread32be(reg) & (~bit_mask), reg);
6876 -static int check_init_parameters(struct fman_mac *dtsec)
6878 - if (dtsec->max_speed >= SPEED_10000) {
6879 - pr_err("1G MAC driver supports 1G or lower speeds\n");
6882 - if (dtsec->addr == 0) {
6883 - pr_err("Ethernet MAC Must have a valid MAC Address\n");
6886 - if ((dtsec->dtsec_drv_param)->rx_prepend >
6887 - MAX_PACKET_ALIGNMENT) {
6888 - pr_err("packetAlignmentPadding can't be > than %d\n",
6889 - MAX_PACKET_ALIGNMENT);
6892 - if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
6893 - MAX_INTER_PACKET_GAP) ||
6894 - ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
6895 - MAX_INTER_PACKET_GAP) ||
6896 - ((dtsec->dtsec_drv_param)->back_to_back_ipg >
6897 - MAX_INTER_PACKET_GAP)) {
6898 - pr_err("Inter packet gap can't be greater than %d\n",
6899 - MAX_INTER_PACKET_GAP);
6902 - if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
6903 - MAX_RETRANSMISSION) {
6904 - pr_err("maxRetransmission can't be greater than %d\n",
6905 - MAX_RETRANSMISSION);
6908 - if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
6909 - MAX_COLLISION_WINDOW) {
6910 - pr_err("collisionWindow can't be greater than %d\n",
6911 - MAX_COLLISION_WINDOW);
6913 - /* If Auto negotiation process is disabled, need to set up the PHY
6914 - * using the MII Management Interface
6917 - if (!dtsec->exception_cb) {
6918 - pr_err("uninitialized exception_cb\n");
6921 - if (!dtsec->event_cb) {
6922 - pr_err("uninitialized event_cb\n");
6929 -static int get_exception_flag(enum fman_mac_exceptions exception)
6933 - switch (exception) {
6934 - case FM_MAC_EX_1G_BAB_RX:
6935 - bit_mask = DTSEC_IMASK_BREN;
6937 - case FM_MAC_EX_1G_RX_CTL:
6938 - bit_mask = DTSEC_IMASK_RXCEN;
6940 - case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
6941 - bit_mask = DTSEC_IMASK_GTSCEN;
6943 - case FM_MAC_EX_1G_BAB_TX:
6944 - bit_mask = DTSEC_IMASK_BTEN;
6946 - case FM_MAC_EX_1G_TX_CTL:
6947 - bit_mask = DTSEC_IMASK_TXCEN;
6949 - case FM_MAC_EX_1G_TX_ERR:
6950 - bit_mask = DTSEC_IMASK_TXEEN;
6952 - case FM_MAC_EX_1G_LATE_COL:
6953 - bit_mask = DTSEC_IMASK_LCEN;
6955 - case FM_MAC_EX_1G_COL_RET_LMT:
6956 - bit_mask = DTSEC_IMASK_CRLEN;
6958 - case FM_MAC_EX_1G_TX_FIFO_UNDRN:
6959 - bit_mask = DTSEC_IMASK_XFUNEN;
6961 - case FM_MAC_EX_1G_MAG_PCKT:
6962 - bit_mask = DTSEC_IMASK_MAGEN;
6964 - case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
6965 - bit_mask = DTSEC_IMASK_MMRDEN;
6967 - case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
6968 - bit_mask = DTSEC_IMASK_MMWREN;
6970 - case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
6971 - bit_mask = DTSEC_IMASK_GRSCEN;
6973 - case FM_MAC_EX_1G_DATA_ERR:
6974 - bit_mask = DTSEC_IMASK_TDPEEN;
6976 - case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
6977 - bit_mask = DTSEC_IMASK_MSROEN;
6987 -static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
6989 - /* Checks if dTSEC driver parameters were initialized */
6990 - if (!dtsec_drv_params)
6996 -static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
6998 - struct dtsec_regs __iomem *regs = dtsec->regs;
7000 - if (is_init_done(dtsec->dtsec_drv_param))
7003 - return (u16)ioread32be(®s->maxfrm);
7006 -static void dtsec_isr(void *handle)
7008 - struct fman_mac *dtsec = (struct fman_mac *)handle;
7009 - struct dtsec_regs __iomem *regs = dtsec->regs;
7012 - /* do not handle MDIO events */
7013 - event = ioread32be(®s->ievent) &
7014 - (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
7016 - event &= ioread32be(®s->imask);
7018 - iowrite32be(event, ®s->ievent);
7020 - if (event & DTSEC_IMASK_BREN)
7021 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
7022 - if (event & DTSEC_IMASK_RXCEN)
7023 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
7024 - if (event & DTSEC_IMASK_GTSCEN)
7025 - dtsec->exception_cb(dtsec->dev_id,
7026 - FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
7027 - if (event & DTSEC_IMASK_BTEN)
7028 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
7029 - if (event & DTSEC_IMASK_TXCEN)
7030 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
7031 - if (event & DTSEC_IMASK_TXEEN)
7032 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
7033 - if (event & DTSEC_IMASK_LCEN)
7034 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
7035 - if (event & DTSEC_IMASK_CRLEN)
7036 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
7037 - if (event & DTSEC_IMASK_XFUNEN) {
7038 - /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
7039 - if (dtsec->fm_rev_info.major == 2) {
7040 - u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
7041 - /* a. Write 0x00E0_0C00 to DTSEC_ID
7042 - * This is a read only register
7043 - * b. Read and save the value of TPKT
7045 - tpkt1 = ioread32be(®s->tpkt);
7047 - /* c. Read the register at dTSEC address offset 0x32C */
7048 - tmp_reg1 = ioread32be(®s->reserved02c0[27]);
7050 - /* d. Compare bits [9:15] to bits [25:31] of the
7051 - * register at address offset 0x32C.
7053 - if ((tmp_reg1 & 0x007F0000) !=
7054 - (tmp_reg1 & 0x0000007F)) {
7055 - /* If they are not equal, save the value of
7056 - * this register and wait for at least
7059 - usleep_range((u32)(min
7060 - (dtsec_get_max_frame_length(dtsec) *
7061 - 16 / 1000, 1)), (u32)
7062 - (min(dtsec_get_max_frame_length
7063 - (dtsec) * 16 / 1000, 1) + 1));
7066 - /* e. Read and save TPKT again and read the register
7067 - * at dTSEC address offset 0x32C again
7069 - tpkt2 = ioread32be(®s->tpkt);
7070 - tmp_reg2 = ioread32be(®s->reserved02c0[27]);
7072 - /* f. Compare the value of TPKT saved in step b to
7073 - * value read in step e. Also compare bits [9:15] of
7074 - * the register at offset 0x32C saved in step d to the
7075 - * value of bits [9:15] saved in step e. If the two
7076 - * registers values are unchanged, then the transmit
7077 - * portion of the dTSEC controller is locked up and
7078 - * the user should proceed to the recover sequence.
7080 - if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
7081 - (tmp_reg2 & 0x007F0000))) {
7082 - /* recover sequence */
7084 - /* a.Write a 1 to RCTRL[GRS] */
7086 - iowrite32be(ioread32be(®s->rctrl) |
7087 - RCTRL_GRS, ®s->rctrl);
7089 - /* b.Wait until IEVENT[GRSC]=1, or at least
7090 - * 100 us has elapsed.
7092 - for (i = 0; i < 100; i++) {
7093 - if (ioread32be(®s->ievent) &
7094 - DTSEC_IMASK_GRSCEN)
7098 - if (ioread32be(®s->ievent) &
7099 - DTSEC_IMASK_GRSCEN)
7100 - iowrite32be(DTSEC_IMASK_GRSCEN,
7103 - pr_debug("Rx lockup due to Tx lockup\n");
7105 - /* c.Write a 1 to bit n of FM_RSTC
7106 - * (offset 0x0CC of FPM)
7108 - fman_reset_mac(dtsec->fm, dtsec->mac_id);
7110 - /* d.Wait 4 Tx clocks (32 ns) */
7113 - /* e.Write a 0 to bit n of FM_RSTC. */
7114 - /* cleared by FMAN
7119 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
7121 - if (event & DTSEC_IMASK_MAGEN)
7122 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
7123 - if (event & DTSEC_IMASK_GRSCEN)
7124 - dtsec->exception_cb(dtsec->dev_id,
7125 - FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
7126 - if (event & DTSEC_IMASK_TDPEEN)
7127 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
7128 - if (event & DTSEC_IMASK_RDPEEN)
7129 - dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
7131 - /* masked interrupts */
7132 - WARN_ON(event & DTSEC_IMASK_ABRTEN);
7133 - WARN_ON(event & DTSEC_IMASK_IFERREN);
7136 -static void dtsec_1588_isr(void *handle)
7138 - struct fman_mac *dtsec = (struct fman_mac *)handle;
7139 - struct dtsec_regs __iomem *regs = dtsec->regs;
7142 - if (dtsec->ptp_tsu_enabled) {
7143 - event = ioread32be(®s->tmr_pevent);
7144 - event &= ioread32be(®s->tmr_pemask);
7147 - iowrite32be(event, ®s->tmr_pevent);
7148 - WARN_ON(event & TMR_PEVENT_TSRE);
7149 - dtsec->exception_cb(dtsec->dev_id,
7150 - FM_MAC_EX_1G_1588_TS_RX_ERR);
7155 -static void free_init_resources(struct fman_mac *dtsec)
7157 - fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
7158 - FMAN_INTR_TYPE_ERR);
7159 - fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
7160 - FMAN_INTR_TYPE_NORMAL);
7162 - /* release the driver's group hash table */
7163 - free_hash_table(dtsec->multicast_addr_hash);
7164 - dtsec->multicast_addr_hash = NULL;
7166 - /* release the driver's individual hash table */
7167 - free_hash_table(dtsec->unicast_addr_hash);
7168 - dtsec->unicast_addr_hash = NULL;
7171 -int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
7173 - if (is_init_done(dtsec->dtsec_drv_param))
7176 - dtsec->dtsec_drv_param->maximum_frame = new_val;
7181 -int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
7183 - if (is_init_done(dtsec->dtsec_drv_param))
7186 - dtsec->dtsec_drv_param->tx_pad_crc = new_val;
7191 -int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
7193 - struct dtsec_regs __iomem *regs = dtsec->regs;
7196 - if (!is_init_done(dtsec->dtsec_drv_param))
7200 - tmp = ioread32be(®s->maccfg1);
7201 - if (mode & COMM_MODE_RX)
7202 - tmp |= MACCFG1_RX_EN;
7203 - if (mode & COMM_MODE_TX)
7204 - tmp |= MACCFG1_TX_EN;
7206 - iowrite32be(tmp, ®s->maccfg1);
7208 - /* Graceful start - clear the graceful receive stop bit */
7209 - if (mode & COMM_MODE_TX)
7210 - iowrite32be(ioread32be(®s->tctrl) & ~DTSEC_TCTRL_GTS,
7212 - if (mode & COMM_MODE_RX)
7213 - iowrite32be(ioread32be(®s->rctrl) & ~RCTRL_GRS,
7219 -int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
7221 - struct dtsec_regs __iomem *regs = dtsec->regs;
7224 - if (!is_init_done(dtsec->dtsec_drv_param))
7227 - /* Gracefull stop - Assert the graceful transmit stop bit */
7228 - if (mode & COMM_MODE_RX) {
7229 - tmp = ioread32be(®s->rctrl) | RCTRL_GRS;
7230 - iowrite32be(tmp, ®s->rctrl);
7232 - if (dtsec->fm_rev_info.major == 2)
7233 - usleep_range(100, 200);
7238 - if (mode & COMM_MODE_TX) {
7239 - if (dtsec->fm_rev_info.major == 2)
7240 - pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
7242 - pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
7245 - tmp = ioread32be(®s->maccfg1);
7246 - if (mode & COMM_MODE_RX)
7247 - tmp &= ~MACCFG1_RX_EN;
7248 - if (mode & COMM_MODE_TX)
7249 - tmp &= ~MACCFG1_TX_EN;
7251 - iowrite32be(tmp, ®s->maccfg1);
7256 -int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
7257 - u8 __maybe_unused priority,
7258 - u16 pause_time, u16 __maybe_unused thresh_time)
7260 - struct dtsec_regs __iomem *regs = dtsec->regs;
7263 - if (!is_init_done(dtsec->dtsec_drv_param))
7267 - /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
7268 - if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
7269 - pr_warn("pause-time: %d illegal.Should be > 320\n",
7274 - ptv = ioread32be(®s->ptv);
7275 - ptv &= PTV_PTE_MASK;
7276 - ptv |= pause_time & PTV_PT_MASK;
7277 - iowrite32be(ptv, ®s->ptv);
7279 - /* trigger the transmission of a flow-control pause frame */
7280 - iowrite32be(ioread32be(®s->maccfg1) | MACCFG1_TX_FLOW,
7283 - iowrite32be(ioread32be(®s->maccfg1) & ~MACCFG1_TX_FLOW,
7289 -int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
7291 - struct dtsec_regs __iomem *regs = dtsec->regs;
7294 - if (!is_init_done(dtsec->dtsec_drv_param))
7297 - tmp = ioread32be(®s->maccfg1);
7299 - tmp |= MACCFG1_RX_FLOW;
7301 - tmp &= ~MACCFG1_RX_FLOW;
7302 - iowrite32be(tmp, ®s->maccfg1);
7307 -int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
7309 - if (!is_init_done(dtsec->dtsec_drv_param))
7312 - /* Initialize MAC Station Address registers (1 & 2)
7313 - * Station address have to be swapped (big endian to little endian
7315 - dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
7316 - set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
7321 -int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
7323 - struct dtsec_regs __iomem *regs = dtsec->regs;
7324 - struct eth_hash_entry *hash_entry;
7327 - u32 crc = 0xFFFFFFFF;
7330 - if (!is_init_done(dtsec->dtsec_drv_param))
7333 - addr = ENET_ADDR_TO_UINT64(*eth_addr);
7335 - ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false);
7336 - mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
7338 - /* Cannot handle unicast mac addr when GHTX is on */
7339 - if (ghtx && !mcast) {
7340 - pr_err("Could not compute hash bucket\n");
7343 - crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
7344 - crc = bitrev32(crc);
7346 - /* considering the 9 highest order bits in crc H[8:0]:
7347 - *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
7348 - *and H[5:1] (next 5 bits) identify the hash bit
7349 - *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register
7350 - *and H[4:0] (next 5 bits) identify the hash bit.
7352 - *In bucket index output the low 5 bits identify the hash register
7353 - *bit, while the higher 4 bits identify the hash register
7357 - bucket = (s32)((crc >> 23) & 0x1ff);
7359 - bucket = (s32)((crc >> 24) & 0xff);
7360 - /* if !ghtx and mcast the bit must be set in gaddr instead of
7367 - set_bucket(dtsec->regs, bucket, true);
7369 - /* Create element to be added to the driver hash table */
7370 - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
7373 - hash_entry->addr = addr;
7374 - INIT_LIST_HEAD(&hash_entry->node);
7376 - if (addr & MAC_GROUP_ADDRESS)
7377 - /* Group Address */
7378 - list_add_tail(&hash_entry->node,
7379 - &dtsec->multicast_addr_hash->lsts[bucket]);
7381 - list_add_tail(&hash_entry->node,
7382 - &dtsec->unicast_addr_hash->lsts[bucket]);
7387 -int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
7389 - struct dtsec_regs __iomem *regs = dtsec->regs;
7390 - struct list_head *pos;
7391 - struct eth_hash_entry *hash_entry = NULL;
7394 - u32 crc = 0xFFFFFFFF;
7397 - if (!is_init_done(dtsec->dtsec_drv_param))
7400 - addr = ENET_ADDR_TO_UINT64(*eth_addr);
7402 - ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false);
7403 - mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
7405 - /* Cannot handle unicast mac addr when GHTX is on */
7406 - if (ghtx && !mcast) {
7407 - pr_err("Could not compute hash bucket\n");
7410 - crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
7411 - crc = bitrev32(crc);
7414 - bucket = (s32)((crc >> 23) & 0x1ff);
7416 - bucket = (s32)((crc >> 24) & 0xff);
7417 - /* if !ghtx and mcast the bit must be set
7418 - * in gaddr instead of igaddr.
7424 - if (addr & MAC_GROUP_ADDRESS) {
7425 - /* Group Address */
7426 - list_for_each(pos,
7427 - &dtsec->multicast_addr_hash->lsts[bucket]) {
7428 - hash_entry = ETH_HASH_ENTRY_OBJ(pos);
7429 - if (hash_entry->addr == addr) {
7430 - list_del_init(&hash_entry->node);
7431 - kfree(hash_entry);
7435 - if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
7436 - set_bucket(dtsec->regs, bucket, false);
7438 - /* Individual Address */
7439 - list_for_each(pos,
7440 - &dtsec->unicast_addr_hash->lsts[bucket]) {
7441 - hash_entry = ETH_HASH_ENTRY_OBJ(pos);
7442 - if (hash_entry->addr == addr) {
7443 - list_del_init(&hash_entry->node);
7444 - kfree(hash_entry);
7448 - if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
7449 - set_bucket(dtsec->regs, bucket, false);
7452 - /* address does not exist */
7453 - WARN_ON(!hash_entry);
7458 -int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
7460 - struct dtsec_regs __iomem *regs = dtsec->regs;
7463 - if (!is_init_done(dtsec->dtsec_drv_param))
7466 - /* Set unicast promiscuous */
7467 - tmp = ioread32be(®s->rctrl);
7469 - tmp |= RCTRL_UPROM;
7471 - tmp &= ~RCTRL_UPROM;
7473 - iowrite32be(tmp, ®s->rctrl);
7475 - /* Set multicast promiscuous */
7476 - tmp = ioread32be(®s->rctrl);
7478 - tmp |= RCTRL_MPROM;
7480 - tmp &= ~RCTRL_MPROM;
7482 - iowrite32be(tmp, ®s->rctrl);
7487 -int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
7489 - struct dtsec_regs __iomem *regs = dtsec->regs;
7492 - if (!is_init_done(dtsec->dtsec_drv_param))
7495 - tmp = ioread32be(®s->maccfg2);
7498 - tmp |= MACCFG2_FULL_DUPLEX;
7500 - tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
7501 - if (speed < SPEED_1000)
7502 - tmp |= MACCFG2_NIBBLE_MODE;
7503 - else if (speed == SPEED_1000)
7504 - tmp |= MACCFG2_BYTE_MODE;
7505 - iowrite32be(tmp, ®s->maccfg2);
7507 - tmp = ioread32be(®s->ecntrl);
7508 - if (speed == SPEED_100)
7509 - tmp |= DTSEC_ECNTRL_R100M;
7511 - tmp &= ~DTSEC_ECNTRL_R100M;
7512 - iowrite32be(tmp, ®s->ecntrl);
7517 -int dtsec_restart_autoneg(struct fman_mac *dtsec)
7521 - if (!is_init_done(dtsec->dtsec_drv_param))
7524 - tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
7526 - tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
7527 - tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
7528 - BMCR_FULLDPLX | BMCR_SPEED1000);
7530 - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
7535 -int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
7537 - struct dtsec_regs __iomem *regs = dtsec->regs;
7539 - if (!is_init_done(dtsec->dtsec_drv_param))
7542 - *mac_version = ioread32be(®s->tsec_id);
7547 -int dtsec_set_exception(struct fman_mac *dtsec,
7548 - enum fman_mac_exceptions exception, bool enable)
7550 - struct dtsec_regs __iomem *regs = dtsec->regs;
7553 - if (!is_init_done(dtsec->dtsec_drv_param))
7556 - if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
7557 - bit_mask = get_exception_flag(exception);
7560 - dtsec->exceptions |= bit_mask;
7562 - dtsec->exceptions &= ~bit_mask;
7564 - pr_err("Undefined exception\n");
7568 - iowrite32be(ioread32be(®s->imask) | bit_mask,
7571 - iowrite32be(ioread32be(®s->imask) & ~bit_mask,
7574 - if (!dtsec->ptp_tsu_enabled) {
7575 - pr_err("Exception valid for 1588 only\n");
7578 - switch (exception) {
7579 - case FM_MAC_EX_1G_1588_TS_RX_ERR:
7581 - dtsec->en_tsu_err_exeption = true;
7582 - iowrite32be(ioread32be(®s->tmr_pemask) |
7583 - TMR_PEMASK_TSREEN,
7584 - ®s->tmr_pemask);
7586 - dtsec->en_tsu_err_exeption = false;
7587 - iowrite32be(ioread32be(®s->tmr_pemask) &
7588 - ~TMR_PEMASK_TSREEN,
7589 - ®s->tmr_pemask);
7593 - pr_err("Undefined exception\n");
7601 -int dtsec_init(struct fman_mac *dtsec)
7603 - struct dtsec_regs __iomem *regs = dtsec->regs;
7604 - struct dtsec_cfg *dtsec_drv_param;
7607 - enet_addr_t eth_addr;
7609 - if (is_init_done(dtsec->dtsec_drv_param))
7612 - if (DEFAULT_RESET_ON_INIT &&
7613 - (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
7614 - pr_err("Can't reset MAC!\n");
7618 - err = check_init_parameters(dtsec);
7622 - dtsec_drv_param = dtsec->dtsec_drv_param;
7624 - MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
7626 - err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
7627 - dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
7628 - dtsec->tbiphy->mdio.addr);
7630 - free_init_resources(dtsec);
7631 - pr_err("DTSEC version doesn't support this i/f mode\n");
7635 - if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
7638 - /* Configure the TBI PHY Control Register */
7639 - tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
7640 - phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
7642 - tmp_reg16 = TBICON_CLK_SELECT;
7643 - phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
7645 - tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
7646 - BMCR_FULLDPLX | BMCR_SPEED1000);
7647 - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
7649 - if (dtsec->basex_if)
7650 - tmp_reg16 = TBIANA_1000X;
7652 - tmp_reg16 = TBIANA_SGMII;
7653 - phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
7655 - tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
7656 - BMCR_FULLDPLX | BMCR_SPEED1000);
7658 - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
7661 - /* Max Frame Length */
7662 - max_frm_ln = (u16)ioread32be(®s->maxfrm);
7663 - err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
7665 - pr_err("Setting max frame length failed\n");
7666 - free_init_resources(dtsec);
7670 - dtsec->multicast_addr_hash =
7671 - alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
7672 - if (!dtsec->multicast_addr_hash) {
7673 - free_init_resources(dtsec);
7674 - pr_err("MC hash table is failed\n");
7678 - dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
7679 - if (!dtsec->unicast_addr_hash) {
7680 - free_init_resources(dtsec);
7681 - pr_err("UC hash table is failed\n");
7685 - /* register err intr handler for dtsec to FPM (err) */
7686 - fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
7687 - FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
7688 - /* register 1588 intr handler for TMR to FPM (normal) */
7689 - fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
7690 - FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
7692 - kfree(dtsec_drv_param);
7693 - dtsec->dtsec_drv_param = NULL;
7698 -int dtsec_free(struct fman_mac *dtsec)
7700 - free_init_resources(dtsec);
7702 - kfree(dtsec->dtsec_drv_param);
7703 - dtsec->dtsec_drv_param = NULL;
7709 -struct fman_mac *dtsec_config(struct fman_mac_params *params)
7711 - struct fman_mac *dtsec;
7712 - struct dtsec_cfg *dtsec_drv_param;
7713 - void __iomem *base_addr;
7715 - base_addr = params->base_addr;
7717 - /* allocate memory for the UCC GETH data structure. */
7718 - dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
7722 - /* allocate memory for the d_tsec driver parameters data structure. */
7723 - dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
7724 - if (!dtsec_drv_param)
7727 - /* Plant parameter structure pointer */
7728 - dtsec->dtsec_drv_param = dtsec_drv_param;
7730 - set_dflts(dtsec_drv_param);
7732 - dtsec->regs = base_addr;
7733 - dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
7734 - dtsec->max_speed = params->max_speed;
7735 - dtsec->phy_if = params->phy_if;
7736 - dtsec->mac_id = params->mac_id;
7737 - dtsec->exceptions = (DTSEC_IMASK_BREN |
7738 - DTSEC_IMASK_RXCEN |
7739 - DTSEC_IMASK_BTEN |
7740 - DTSEC_IMASK_TXCEN |
7741 - DTSEC_IMASK_TXEEN |
7742 - DTSEC_IMASK_ABRTEN |
7743 - DTSEC_IMASK_LCEN |
7744 - DTSEC_IMASK_CRLEN |
7745 - DTSEC_IMASK_XFUNEN |
7746 - DTSEC_IMASK_IFERREN |
7747 - DTSEC_IMASK_MAGEN |
7748 - DTSEC_IMASK_TDPEEN |
7749 - DTSEC_IMASK_RDPEEN);
7750 - dtsec->exception_cb = params->exception_cb;
7751 - dtsec->event_cb = params->event_cb;
7752 - dtsec->dev_id = params->dev_id;
7753 - dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
7754 - dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
7756 - dtsec->fm = params->fm;
7757 - dtsec->basex_if = params->basex_if;
7759 - if (!params->internal_phy_node) {
7760 - pr_err("TBI PHY node is not available\n");
7761 - goto err_dtsec_drv_param;
7764 - dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
7765 - if (!dtsec->tbiphy) {
7766 - pr_err("of_phy_find_device (TBI PHY) failed\n");
7767 - goto err_dtsec_drv_param;
7770 - put_device(&dtsec->tbiphy->mdio.dev);
7772 - /* Save FMan revision */
7773 - fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
7777 -err_dtsec_drv_param:
7778 - kfree(dtsec_drv_param);
7783 diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
7784 deleted file mode 100644
7785 index c4467c0..0000000
7786 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
7790 - * Copyright 2008-2015 Freescale Semiconductor Inc.
7792 - * Redistribution and use in source and binary forms, with or without
7793 - * modification, are permitted provided that the following conditions are met:
7794 - * * Redistributions of source code must retain the above copyright
7795 - * notice, this list of conditions and the following disclaimer.
7796 - * * Redistributions in binary form must reproduce the above copyright
7797 - * notice, this list of conditions and the following disclaimer in the
7798 - * documentation and/or other materials provided with the distribution.
7799 - * * Neither the name of Freescale Semiconductor nor the
7800 - * names of its contributors may be used to endorse or promote products
7801 - * derived from this software without specific prior written permission.
7804 - * ALTERNATIVELY, this software may be distributed under the terms of the
7805 - * GNU General Public License ("GPL") as published by the Free Software
7806 - * Foundation, either version 2 of that License or (at your option) any
7809 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7810 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7811 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7812 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7813 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7814 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7815 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7816 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7817 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7818 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7824 -#include "fman_mac.h"
7826 -struct fman_mac *dtsec_config(struct fman_mac_params *params);
7827 -int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
7828 -int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
7829 -int dtsec_adjust_link(struct fman_mac *dtsec,
7831 -int dtsec_restart_autoneg(struct fman_mac *dtsec);
7832 -int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
7833 -int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
7834 -int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
7835 -int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
7836 -int dtsec_init(struct fman_mac *dtsec);
7837 -int dtsec_free(struct fman_mac *dtsec);
7838 -int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
7839 -int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
7840 - u16 pause_time, u16 thresh_time);
7841 -int dtsec_set_exception(struct fman_mac *dtsec,
7842 - enum fman_mac_exceptions exception, bool enable);
7843 -int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
7844 -int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
7845 -int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
7847 -#endif /* __DTSEC_H */
7848 diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
7849 deleted file mode 100644
7850 index dd6d052..0000000
7851 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h
7855 - * Copyright 2008-2015 Freescale Semiconductor Inc.
7857 - * Redistribution and use in source and binary forms, with or without
7858 - * modification, are permitted provided that the following conditions are met:
7859 - * * Redistributions of source code must retain the above copyright
7860 - * notice, this list of conditions and the following disclaimer.
7861 - * * Redistributions in binary form must reproduce the above copyright
7862 - * notice, this list of conditions and the following disclaimer in the
7863 - * documentation and/or other materials provided with the distribution.
7864 - * * Neither the name of Freescale Semiconductor nor the
7865 - * names of its contributors may be used to endorse or promote products
7866 - * derived from this software without specific prior written permission.
7869 - * ALTERNATIVELY, this software may be distributed under the terms of the
7870 - * GNU General Public License ("GPL") as published by the Free Software
7871 - * Foundation, either version 2 of that License or (at your option) any
7874 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7875 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7876 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7877 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7878 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7879 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7880 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7881 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7882 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7883 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7892 -#include <linux/slab.h>
7893 -#include <linux/phy.h>
7894 -#include <linux/if_ether.h>
7898 -/* Ethernet Address */
7899 -typedef u8 enet_addr_t[ETH_ALEN];
7901 -#define ENET_ADDR_TO_UINT64(_enet_addr) \
7902 - (u64)(((u64)(_enet_addr)[0] << 40) | \
7903 - ((u64)(_enet_addr)[1] << 32) | \
7904 - ((u64)(_enet_addr)[2] << 24) | \
7905 - ((u64)(_enet_addr)[3] << 16) | \
7906 - ((u64)(_enet_addr)[4] << 8) | \
7907 - ((u64)(_enet_addr)[5]))
7909 -#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
7912 - for (i = 0; i < ETH_ALEN; i++) \
7913 - (_enet_addr)[i] = \
7914 - (u8)((_addr64) >> ((5 - i) * 8)); \
7918 -#define DEFAULT_RESET_ON_INIT false
7921 -#define FSL_FM_PAUSE_TIME_ENABLE 0xf000
7922 -#define FSL_FM_PAUSE_TIME_DISABLE 0
7923 -#define FSL_FM_PAUSE_THRESH_DEFAULT 0
7925 -#define FM_MAC_NO_PFC 0xff
7928 -#define ETH_HASH_ENTRY_OBJ(ptr) \
7929 - hlist_entry_safe(ptr, struct eth_hash_entry, node)
7931 -/* Enumeration (bit flags) of communication modes (Transmit,
7932 - * receive or both).
7935 - COMM_MODE_NONE = 0, /* No transmit/receive communication */
7936 - COMM_MODE_RX = 1, /* Only receive communication */
7937 - COMM_MODE_TX = 2, /* Only transmit communication */
7938 - COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
7941 -/* FM MAC Exceptions */
7942 -enum fman_mac_exceptions {
7943 - FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
7944 - /* 10GEC MDIO scan event interrupt */
7945 - , FM_MAC_EX_10G_MDIO_CMD_CMPL
7946 - /* 10GEC MDIO command completion interrupt */
7947 - , FM_MAC_EX_10G_REM_FAULT
7948 - /* 10GEC, mEMAC Remote fault interrupt */
7949 - , FM_MAC_EX_10G_LOC_FAULT
7950 - /* 10GEC, mEMAC Local fault interrupt */
7951 - , FM_MAC_EX_10G_TX_ECC_ER
7952 - /* 10GEC, mEMAC Transmit frame ECC error interrupt */
7953 - , FM_MAC_EX_10G_TX_FIFO_UNFL
7954 - /* 10GEC, mEMAC Transmit FIFO underflow interrupt */
7955 - , FM_MAC_EX_10G_TX_FIFO_OVFL
7956 - /* 10GEC, mEMAC Transmit FIFO overflow interrupt */
7957 - , FM_MAC_EX_10G_TX_ER
7958 - /* 10GEC Transmit frame error interrupt */
7959 - , FM_MAC_EX_10G_RX_FIFO_OVFL
7960 - /* 10GEC, mEMAC Receive FIFO overflow interrupt */
7961 - , FM_MAC_EX_10G_RX_ECC_ER
7962 - /* 10GEC, mEMAC Receive frame ECC error interrupt */
7963 - , FM_MAC_EX_10G_RX_JAB_FRM
7964 - /* 10GEC Receive jabber frame interrupt */
7965 - , FM_MAC_EX_10G_RX_OVRSZ_FRM
7966 - /* 10GEC Receive oversized frame interrupt */
7967 - , FM_MAC_EX_10G_RX_RUNT_FRM
7968 - /* 10GEC Receive runt frame interrupt */
7969 - , FM_MAC_EX_10G_RX_FRAG_FRM
7970 - /* 10GEC Receive fragment frame interrupt */
7971 - , FM_MAC_EX_10G_RX_LEN_ER
7972 - /* 10GEC Receive payload length error interrupt */
7973 - , FM_MAC_EX_10G_RX_CRC_ER
7974 - /* 10GEC Receive CRC error interrupt */
7975 - , FM_MAC_EX_10G_RX_ALIGN_ER
7976 - /* 10GEC Receive alignment error interrupt */
7977 - , FM_MAC_EX_1G_BAB_RX
7978 - /* dTSEC Babbling receive error */
7979 - , FM_MAC_EX_1G_RX_CTL
7980 - /* dTSEC Receive control (pause frame) interrupt */
7981 - , FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET
7982 - /* dTSEC Graceful transmit stop complete */
7983 - , FM_MAC_EX_1G_BAB_TX
7984 - /* dTSEC Babbling transmit error */
7985 - , FM_MAC_EX_1G_TX_CTL
7986 - /* dTSEC Transmit control (pause frame) interrupt */
7987 - , FM_MAC_EX_1G_TX_ERR
7988 - /* dTSEC Transmit error */
7989 - , FM_MAC_EX_1G_LATE_COL
7990 - /* dTSEC Late collision */
7991 - , FM_MAC_EX_1G_COL_RET_LMT
7992 - /* dTSEC Collision retry limit */
7993 - , FM_MAC_EX_1G_TX_FIFO_UNDRN
7994 - /* dTSEC Transmit FIFO underrun */
7995 - , FM_MAC_EX_1G_MAG_PCKT
7996 - /* dTSEC Magic Packet detection */
7997 - , FM_MAC_EX_1G_MII_MNG_RD_COMPLET
7998 - /* dTSEC MII management read completion */
7999 - , FM_MAC_EX_1G_MII_MNG_WR_COMPLET
8000 - /* dTSEC MII management write completion */
8001 - , FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET
8002 - /* dTSEC Graceful receive stop complete */
8003 - , FM_MAC_EX_1G_DATA_ERR
8004 - /* dTSEC Internal data error on transmit */
8005 - , FM_MAC_1G_RX_DATA_ERR
8006 - /* dTSEC Internal data error on receive */
8007 - , FM_MAC_EX_1G_1588_TS_RX_ERR
8008 - /* dTSEC Time-Stamp Receive Error */
8009 - , FM_MAC_EX_1G_RX_MIB_CNT_OVFL
8010 - /* dTSEC MIB counter overflow */
8011 - , FM_MAC_EX_TS_FIFO_ECC_ERR
8012 - /* mEMAC Time-stamp FIFO ECC error interrupt;
8013 - * not supported on T4240/B4860 rev1 chips
8015 - , FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT
8016 - /* mEMAC Magic Packet Indication Interrupt */
8019 -struct eth_hash_entry {
8020 - u64 addr; /* Ethernet Address */
8021 - struct list_head node;
8024 -typedef void (fman_mac_exception_cb)(void *dev_id,
8025 - enum fman_mac_exceptions exceptions);
8027 -/* FMan MAC config input */
8028 -struct fman_mac_params {
8029 - /* Base of memory mapped FM MAC registers */
8030 - void __iomem *base_addr;
8031 - /* MAC address of device; First octet is sent first */
8033 - /* MAC ID; numbering of dTSEC and 1G-mEMAC:
8034 - * 0 - FM_MAX_NUM_OF_1G_MACS;
8035 - * numbering of 10G-MAC (TGEC) and 10G-mEMAC:
8036 - * 0 - FM_MAX_NUM_OF_10G_MACS
8039 - /* PHY interface */
8040 - phy_interface_t phy_if;
8041 - /* Note that the speed should indicate the maximum rate that
8042 - * this MAC should support rather than the actual speed;
8045 - /* A handle to the FM object this port related to */
8047 - void *dev_id; /* device cookie used by the exception cbs */
8048 - fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
8049 - fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
8050 - /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
8051 - * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
8052 - * to interface between MAC and phy/backplane, SGMII phy can still
8053 - * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
8056 - /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
8057 - struct device_node *internal_phy_node;
8060 -struct eth_hash_t {
8062 - struct list_head *lsts;
8065 -static inline struct eth_hash_entry
8066 -*dequeue_addr_from_hash_entry(struct list_head *addr_lst)
8068 - struct eth_hash_entry *hash_entry = NULL;
8070 - if (!list_empty(addr_lst)) {
8071 - hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next);
8072 - list_del_init(&hash_entry->node);
8074 - return hash_entry;
8077 -static inline void free_hash_table(struct eth_hash_t *hash)
8079 - struct eth_hash_entry *hash_entry;
8084 - for (i = 0; i < hash->size; i++) {
8086 - dequeue_addr_from_hash_entry(&hash->lsts[i]);
8087 - while (hash_entry) {
8088 - kfree(hash_entry);
8090 - dequeue_addr_from_hash_entry(&hash->
8095 - kfree(hash->lsts);
8102 -static inline struct eth_hash_t *alloc_hash_table(u16 size)
8105 - struct eth_hash_t *hash;
8107 - /* Allocate address hash table */
8108 - hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL);
8112 - hash->size = size;
8114 - hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head),
8116 - if (!hash->lsts) {
8121 - for (i = 0; i < hash->size; i++)
8122 - INIT_LIST_HEAD(&hash->lsts[i]);
8127 -#endif /* __FM_MAC_H */
8128 diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
8129 deleted file mode 100644
8130 index 71a5ded..0000000
8131 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c
8135 - * Copyright 2008-2015 Freescale Semiconductor Inc.
8137 - * Redistribution and use in source and binary forms, with or without
8138 - * modification, are permitted provided that the following conditions are met:
8139 - * * Redistributions of source code must retain the above copyright
8140 - * notice, this list of conditions and the following disclaimer.
8141 - * * Redistributions in binary form must reproduce the above copyright
8142 - * notice, this list of conditions and the following disclaimer in the
8143 - * documentation and/or other materials provided with the distribution.
8144 - * * Neither the name of Freescale Semiconductor nor the
8145 - * names of its contributors may be used to endorse or promote products
8146 - * derived from this software without specific prior written permission.
8149 - * ALTERNATIVELY, this software may be distributed under the terms of the
8150 - * GNU General Public License ("GPL") as published by the Free Software
8151 - * Foundation, either version 2 of that License or (at your option) any
8154 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
8155 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
8156 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
8157 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
8158 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
8159 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
8160 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
8161 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
8162 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
8163 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8166 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8168 -#include "fman_memac.h"
8171 -#include <linux/slab.h>
8172 -#include <linux/io.h>
8173 -#include <linux/phy.h>
8174 -#include <linux/of_mdio.h>
8176 -/* PCS registers */
8177 -#define MDIO_SGMII_CR 0x00
8178 -#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
8179 -#define MDIO_SGMII_LINK_TMR_L 0x12
8180 -#define MDIO_SGMII_LINK_TMR_H 0x13
8181 -#define MDIO_SGMII_IF_MODE 0x14
8183 -/* SGMII Control defines */
8184 -#define SGMII_CR_AN_EN 0x1000
8185 -#define SGMII_CR_RESTART_AN 0x0200
8186 -#define SGMII_CR_FD 0x0100
8187 -#define SGMII_CR_SPEED_SEL1_1G 0x0040
8188 -#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
8189 - SGMII_CR_SPEED_SEL1_1G)
8191 -/* SGMII Device Ability for SGMII defines */
8192 -#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
8193 -#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
8195 -/* Link timer define */
8196 -#define LINK_TMR_L 0xa120
8197 -#define LINK_TMR_H 0x0007
8198 -#define LINK_TMR_L_BASEX 0xaf08
8199 -#define LINK_TMR_H_BASEX 0x002f
8201 -/* SGMII IF Mode defines */
8202 -#define IF_MODE_USE_SGMII_AN 0x0002
8203 -#define IF_MODE_SGMII_EN 0x0001
8204 -#define IF_MODE_SGMII_SPEED_100M 0x0004
8205 -#define IF_MODE_SGMII_SPEED_1G 0x0008
8206 -#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
8208 -/* Num of additional exact match MAC adr regs */
8209 -#define MEMAC_NUM_OF_PADDRS 7
8211 -/* Control and Configuration Register (COMMAND_CONFIG) */
8212 -#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
8213 -#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
8214 -#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
8215 -#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
8216 -#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
8217 -#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
8218 -#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
8219 -#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
8220 -#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
8221 -#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
8222 -#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
8223 -#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
8225 -/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
8226 -#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000
8227 -#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF
8228 -#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000
8229 -#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000
8230 -#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019
8231 -#define TX_FIFO_SECTIONS_TX_AVAIL_1G 0x00000020
8232 -#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060
8234 -#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \
8236 - _val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
8237 - ((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
8238 - (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\
8239 - (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\
8242 -/* Interface Mode Register (IF_MODE) */
8244 -#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
8245 -#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
8246 -#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
8247 -#define IF_MODE_RGMII 0x00000004
8248 -#define IF_MODE_RGMII_AUTO 0x00008000
8249 -#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */
8250 -#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */
8251 -#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */
8252 -#define IF_MODE_RGMII_SP_MASK 0x00006000 /* Setsp mask bits */
8253 -#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */
8254 -#define IF_MODE_HD 0x00000040 /* Half duplex operation */
8256 -/* Hash table Control Register (HASHTABLE_CTRL) */
8257 -#define HASH_CTRL_MCAST_EN 0x00000100
8258 -/* 26-31 Hash table address code */
8259 -#define HASH_CTRL_ADDR_MASK 0x0000003F
8260 -/* MAC mcast indication */
8261 -#define GROUP_ADDRESS 0x0000010000000000LL
8262 -#define HASH_TABLE_SIZE 64 /* Hash tbl size */
8264 -/* Interrupt Mask Register (IMASK) */
8265 -#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */
8266 -#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */
8267 -#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */
8268 -#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */
8270 -#define MEMAC_ALL_ERRS_IMASK \
8271 - ((u32)(MEMAC_IMASK_TSECC_ER | \
8272 - MEMAC_IMASK_TECC_ER | \
8273 - MEMAC_IMASK_RECC_ER | \
8276 -#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). Link sync (G) */
8277 -#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */
8278 -#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */
8279 -#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */
8280 -#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error*/
8281 -#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */
8282 -#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
8283 -#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */
8284 -#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */
8285 -#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */
8286 -#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */
8287 -#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */
8288 -#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */
8289 -#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */
8290 -#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */
8291 -#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */
8292 -#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */
8294 -#define DEFAULT_PAUSE_QUANTA 0xf000
8295 -#define DEFAULT_FRAME_LENGTH 0x600
8296 -#define DEFAULT_TX_IPG_LENGTH 12
8298 -#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF
8299 -#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000
8300 -#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF
8301 -#define CLXY_PAUSE_THRESH_CLY_QTH 0xFFFF0000
8304 - /* Lower 32 bits of 48-bit MAC address */
8306 - /* Upper 16 bits of 48-bit MAC address */
8311 -struct memac_regs {
8312 - u32 res0000[2]; /* General Control and Status */
8313 - u32 command_config; /* 0x008 Ctrl and cfg */
8314 - struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */
8315 - u32 maxfrm; /* 0x014 Max frame length */
8317 - u32 rx_fifo_sections; /* Receive FIFO configuration reg */
8318 - u32 tx_fifo_sections; /* Transmit FIFO configuration reg */
8320 - u32 hashtable_ctrl; /* 0x02C Hash table control */
8322 - u32 ievent; /* 0x040 Interrupt event */
8323 - u32 tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */
8325 - u32 imask; /* 0x04C Interrupt mask */
8327 - u32 pause_quanta[4]; /* 0x054 Pause quanta */
8328 - u32 pause_thresh[4]; /* 0x064 Pause quanta threshold */
8329 - u32 rx_pause_status; /* 0x074 Receive pause status */
8331 - struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */
8332 - u32 lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */
8333 - u32 sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */
8335 - u32 statn_config; /* 0x0E0 Statistics configuration */
8337 - /* Rx Statistics Counter */
8391 - /* Tx Statistics Counter */
8436 - /* Line Interface Control */
8437 - u32 if_mode; /* 0x300 Interface Mode Control */
8438 - u32 if_status; /* 0x304 Interface Status */
8441 - u32 hg_config; /* 0x340 Control and cfg */
8443 - u32 hg_pause_quanta; /* 0x350 Pause quanta */
8445 - u32 hg_pause_thresh; /* 0x360 Pause quanta threshold */
8447 - u32 hgrx_pause_status; /* 0x370 Receive pause status */
8448 - u32 hg_fifos_status; /* 0x374 fifos status */
8449 - u32 rhm; /* 0x378 rx messages counter */
8450 - u32 thm; /* 0x37C tx messages counter */
8454 - bool reset_on_init;
8455 - bool pause_ignore;
8456 - bool promiscuous_mode_enable;
8457 - struct fixed_phy_status *fixed_link;
8458 - u16 max_frame_length;
8460 - u32 tx_ipg_length;
8464 - /* Pointer to MAC memory mapped registers */
8465 - struct memac_regs __iomem *regs;
8466 - /* MAC address of device */
8468 - /* Ethernet physical interface */
8469 - phy_interface_t phy_if;
8471 - void *dev_id; /* device cookie used by the exception cbs */
8472 - fman_mac_exception_cb *exception_cb;
8473 - fman_mac_exception_cb *event_cb;
8474 - /* Pointer to driver's global address hash table */
8475 - struct eth_hash_t *multicast_addr_hash;
8476 - /* Pointer to driver's individual address hash table */
8477 - struct eth_hash_t *unicast_addr_hash;
8480 - struct memac_cfg *memac_drv_param;
8482 - struct fman_rev_info fm_rev_info;
8484 - struct phy_device *pcsphy;
8487 -static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
8492 - tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
8493 - tmp1 = (u32)(adr[4] | adr[5] << 8);
8495 - if (paddr_num == 0) {
8496 - iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l);
8497 - iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u);
8499 - iowrite32be(tmp0, &regs->mac_addr[paddr_num - 1].mac_addr_l);
8500 - iowrite32be(tmp1, &regs->mac_addr[paddr_num - 1].mac_addr_u);
8504 -static int reset(struct memac_regs __iomem *regs)
8509 - tmp = ioread32be(&regs->command_config);
8511 - tmp |= CMD_CFG_SW_RESET;
8513 - iowrite32be(tmp, &regs->command_config);
8518 - } while ((ioread32be(&regs->command_config) & CMD_CFG_SW_RESET) &&
8527 -static void set_exception(struct memac_regs __iomem *regs, u32 val,
8532 - tmp = ioread32be(&regs->imask);
8538 - iowrite32be(tmp, &regs->imask);
8541 -static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
8542 - phy_interface_t phy_if, u16 speed, bool slow_10g_if,
8549 - if (cfg->promiscuous_mode_enable)
8550 - tmp |= CMD_CFG_PROMIS_EN;
8551 - if (cfg->pause_ignore)
8552 - tmp |= CMD_CFG_PAUSE_IGNORE;
8554 - /* Payload length check disable */
8555 - tmp |= CMD_CFG_NO_LEN_CHK;
8556 - /* Enable padding of frames in transmit direction */
8557 - tmp |= CMD_CFG_TX_PAD_EN;
8559 - tmp |= CMD_CFG_CRC_FWD;
8561 - iowrite32be(tmp, &regs->command_config);
8563 - /* Max Frame Length */
8564 - iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
8567 - iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
8568 - iowrite32be((u32)0, &regs->pause_thresh[0]);
8573 - case PHY_INTERFACE_MODE_XGMII:
8574 - tmp |= IF_MODE_XGMII;
8577 - tmp |= IF_MODE_GMII;
8578 - if (phy_if == PHY_INTERFACE_MODE_RGMII)
8579 - tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
8581 - iowrite32be(tmp, &regs->if_mode);
8583 - /* TX_FIFO_SECTIONS */
8585 - if (phy_if == PHY_INTERFACE_MODE_XGMII) {
8586 - if (slow_10g_if) {
8587 - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
8588 - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
8590 - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
8591 - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
8594 - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
8595 - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
8597 - iowrite32be(tmp, &regs->tx_fifo_sections);
8599 - /* clear all pending events and set-up interrupts */
8600 - iowrite32be(0xffffffff, &regs->ievent);
8601 - set_exception(regs, exceptions, true);
8606 -static void set_dflts(struct memac_cfg *cfg)
8608 - cfg->reset_on_init = false;
8609 - cfg->promiscuous_mode_enable = false;
8610 - cfg->pause_ignore = false;
8611 - cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
8612 - cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
8613 - cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
8616 -static u32 get_mac_addr_hash_code(u64 eth_addr)
8622 - for (i = 0; i < 6; i++) {
8623 - mask1 = eth_addr & (u64)0x01;
8626 - for (j = 0; j < 7; j++) {
8627 - mask2 = eth_addr & (u64)0x01;
8632 - xor_val |= (mask1 << (5 - i));
8638 -static void setup_sgmii_internal_phy(struct fman_mac *memac,
8639 - struct fixed_phy_status *fixed_link)
8643 - if (WARN_ON(!memac->pcsphy))
8647 - tmp_reg16 = IF_MODE_SGMII_EN;
8650 - tmp_reg16 |= IF_MODE_USE_SGMII_AN;
8652 - switch (fixed_link->speed) {
8654 - /* For 10M: IF_MODE[SPEED_10M] = 0 */
8657 - tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
8659 - case 1000: /* fallthrough */
8661 - tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
8664 - if (!fixed_link->duplex)
8665 - tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
8667 - phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
8669 - /* Device ability according to SGMII specification */
8670 - tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
8671 - phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
8673 - /* Adjust link timer for SGMII -
8674 - * According to Cisco SGMII specification the timer should be 1.6 ms.
8675 - * The link_timer register is configured in units of the clock.
8676 - * - When running as 1G SGMII, Serdes clock is 125 MHz, so
8677 - * unit = 1 / (125*10^6 Hz) = 8 ns.
8678 - * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
8679 - * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
8680 - * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
8681 - * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
8682 - * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
8683 - * we always set up here a value of 2.5 SGMII.
8685 - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
8686 - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
8690 - tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
8693 - tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
8694 - phy_write(memac->pcsphy, 0x0, tmp_reg16);
8697 -static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
8701 - /* AN Device capability */
8702 - tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
8703 - phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
8705 - /* Adjust link timer for SGMII -
8706 - * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
8707 - * The link_timer register is configured in units of the clock.
8708 - * - When running as 1G SGMII, Serdes clock is 125 MHz, so
8709 - * unit = 1 / (125*10^6 Hz) = 8 ns.
8710 - * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
8711 - * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
8712 - * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
8713 - * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
8714 - * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
8715 - * we always set up here a value of 2.5 SGMII.
8717 - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
8718 - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
8721 - tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
8722 - phy_write(memac->pcsphy, 0x0, tmp_reg16);
8725 -static int check_init_parameters(struct fman_mac *memac)
8727 - if (memac->addr == 0) {
8728 - pr_err("Ethernet MAC must have a valid MAC address\n");
8731 - if (!memac->exception_cb) {
8732 - pr_err("Uninitialized exception handler\n");
8735 - if (!memac->event_cb) {
8736 - pr_warn("Uninitialize event handler\n");
8743 -static int get_exception_flag(enum fman_mac_exceptions exception)
8747 - switch (exception) {
8748 - case FM_MAC_EX_10G_TX_ECC_ER:
8749 - bit_mask = MEMAC_IMASK_TECC_ER;
8751 - case FM_MAC_EX_10G_RX_ECC_ER:
8752 - bit_mask = MEMAC_IMASK_RECC_ER;
8754 - case FM_MAC_EX_TS_FIFO_ECC_ERR:
8755 - bit_mask = MEMAC_IMASK_TSECC_ER;
8757 - case FM_MAC_EX_MAGIC_PACKET_INDICATION:
8758 - bit_mask = MEMAC_IMASK_MGI;
8768 -static void memac_err_exception(void *handle)
8770 - struct fman_mac *memac = (struct fman_mac *)handle;
8771 - struct memac_regs __iomem *regs = memac->regs;
8774 - event = ioread32be(&regs->ievent);
8775 - imask = ioread32be(&regs->imask);
8777 - /* Imask include both error and notification/event bits.
8778 - * Leaving only error bits enabled by imask.
8779 - * The imask error bits are shifted by 16 bits offset from
8780 - * their corresponding location in the ievent - hence the >> 16
8782 - event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
8784 - iowrite32be(event, &regs->ievent);
8786 - if (event & MEMAC_IEVNT_TS_ECC_ER)
8787 - memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR);
8788 - if (event & MEMAC_IEVNT_TX_ECC_ER)
8789 - memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
8790 - if (event & MEMAC_IEVNT_RX_ECC_ER)
8791 - memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
8794 -static void memac_exception(void *handle)
8796 - struct fman_mac *memac = (struct fman_mac *)handle;
8797 - struct memac_regs __iomem *regs = memac->regs;
8800 - event = ioread32be(&regs->ievent);
8801 - imask = ioread32be(&regs->imask);
8803 - /* Imask include both error and notification/event bits.
8804 - * Leaving only error bits enabled by imask.
8805 - * The imask error bits are shifted by 16 bits offset from
8806 - * their corresponding location in the ievent - hence the >> 16
8808 - event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
8810 - iowrite32be(event, &regs->ievent);
8812 - if (event & MEMAC_IEVNT_MGI)
8813 - memac->exception_cb(memac->dev_id,
8814 - FM_MAC_EX_MAGIC_PACKET_INDICATION);
8817 -static void free_init_resources(struct fman_mac *memac)
8819 - fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
8820 - FMAN_INTR_TYPE_ERR);
8822 - fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
8823 - FMAN_INTR_TYPE_NORMAL);
8825 - /* release the driver's group hash table */
8826 - free_hash_table(memac->multicast_addr_hash);
8827 - memac->multicast_addr_hash = NULL;
8829 - /* release the driver's individual hash table */
8830 - free_hash_table(memac->unicast_addr_hash);
8831 - memac->unicast_addr_hash = NULL;
8834 -static bool is_init_done(struct memac_cfg *memac_drv_params)
8836 - /* Checks if mEMAC driver parameters were initialized */
8837 - if (!memac_drv_params)
8843 -int memac_enable(struct fman_mac *memac, enum comm_mode mode)
8845 - struct memac_regs __iomem *regs = memac->regs;
8848 - if (!is_init_done(memac->memac_drv_param))
8851 - tmp = ioread32be(&regs->command_config);
8852 - if (mode & COMM_MODE_RX)
8853 - tmp |= CMD_CFG_RX_EN;
8854 - if (mode & COMM_MODE_TX)
8855 - tmp |= CMD_CFG_TX_EN;
8857 - iowrite32be(tmp, &regs->command_config);
8862 -int memac_disable(struct fman_mac *memac, enum comm_mode mode)
8864 - struct memac_regs __iomem *regs = memac->regs;
8867 - if (!is_init_done(memac->memac_drv_param))
8870 - tmp = ioread32be(&regs->command_config);
8871 - if (mode & COMM_MODE_RX)
8872 - tmp &= ~CMD_CFG_RX_EN;
8873 - if (mode & COMM_MODE_TX)
8874 - tmp &= ~CMD_CFG_TX_EN;
8876 - iowrite32be(tmp, &regs->command_config);
8881 -int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
8883 - struct memac_regs __iomem *regs = memac->regs;
8886 - if (!is_init_done(memac->memac_drv_param))
8889 - tmp = ioread32be(&regs->command_config);
8891 - tmp |= CMD_CFG_PROMIS_EN;
8893 - tmp &= ~CMD_CFG_PROMIS_EN;
8895 - iowrite32be(tmp, &regs->command_config);
8900 -int memac_adjust_link(struct fman_mac *memac, u16 speed)
8902 - struct memac_regs __iomem *regs = memac->regs;
8905 - if (!is_init_done(memac->memac_drv_param))
8908 - tmp = ioread32be(&regs->if_mode);
8910 - /* Set full duplex */
8911 - tmp &= ~IF_MODE_HD;
8913 - if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
8914 - /* Configure RGMII in manual mode */
8915 - tmp &= ~IF_MODE_RGMII_AUTO;
8916 - tmp &= ~IF_MODE_RGMII_SP_MASK;
8918 - tmp |= IF_MODE_RGMII_FD;
8922 - tmp |= IF_MODE_RGMII_1000;
8925 - tmp |= IF_MODE_RGMII_100;
8928 - tmp |= IF_MODE_RGMII_10;
8935 - iowrite32be(tmp, &regs->if_mode);
8940 -int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
8942 - if (is_init_done(memac->memac_drv_param))
8945 - memac->memac_drv_param->max_frame_length = new_val;
8950 -int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
8952 - if (is_init_done(memac->memac_drv_param))
8955 - memac->memac_drv_param->reset_on_init = enable;
8960 -int memac_cfg_fixed_link(struct fman_mac *memac,
8961 - struct fixed_phy_status *fixed_link)
8963 - if (is_init_done(memac->memac_drv_param))
8966 - memac->memac_drv_param->fixed_link = fixed_link;
8971 -int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
8972 - u16 pause_time, u16 thresh_time)
8974 - struct memac_regs __iomem *regs = memac->regs;
8977 - if (!is_init_done(memac->memac_drv_param))
8980 - tmp = ioread32be(&regs->tx_fifo_sections);
8982 - GET_TX_EMPTY_DEFAULT_VALUE(tmp);
8983 - iowrite32be(tmp, &regs->tx_fifo_sections);
8985 - tmp = ioread32be(&regs->command_config);
8986 - tmp &= ~CMD_CFG_PFC_MODE;
8989 - iowrite32be(tmp, &regs->command_config);
8991 - tmp = ioread32be(&regs->pause_quanta[priority / 2]);
8993 - tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
8995 - tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
8996 - tmp |= ((u32)pause_time << (16 * (priority % 2)));
8997 - iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
8999 - tmp = ioread32be(&regs->pause_thresh[priority / 2]);
9001 - tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
9003 - tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
9004 - tmp |= ((u32)thresh_time << (16 * (priority % 2)));
9005 - iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
9010 -int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
9012 - struct memac_regs __iomem *regs = memac->regs;
9015 - if (!is_init_done(memac->memac_drv_param))
9018 - tmp = ioread32be(&regs->command_config);
9020 - tmp &= ~CMD_CFG_PAUSE_IGNORE;
9022 - tmp |= CMD_CFG_PAUSE_IGNORE;
9024 - iowrite32be(tmp, &regs->command_config);
9029 -int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
9031 - if (!is_init_done(memac->memac_drv_param))
9034 - add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
9039 -int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
9041 - struct memac_regs __iomem *regs = memac->regs;
9042 - struct eth_hash_entry *hash_entry;
9046 - if (!is_init_done(memac->memac_drv_param))
9049 - addr = ENET_ADDR_TO_UINT64(*eth_addr);
9051 - if (!(addr & GROUP_ADDRESS)) {
9052 - /* Unicast addresses not supported in hash */
9053 - pr_err("Unicast Address\n");
9056 - hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
9058 - /* Create element to be added to the driver hash table */
9059 - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
9062 - hash_entry->addr = addr;
9063 - INIT_LIST_HEAD(&hash_entry->node);
9065 - list_add_tail(&hash_entry->node,
9066 - &memac->multicast_addr_hash->lsts[hash]);
9067 - iowrite32be(hash | HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
9072 -int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
9074 - struct memac_regs __iomem *regs = memac->regs;
9075 - struct eth_hash_entry *hash_entry = NULL;
9076 - struct list_head *pos;
9080 - if (!is_init_done(memac->memac_drv_param))
9083 - addr = ENET_ADDR_TO_UINT64(*eth_addr);
9085 - hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
9087 - list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
9088 - hash_entry = ETH_HASH_ENTRY_OBJ(pos);
9089 - if (hash_entry->addr == addr) {
9090 - list_del_init(&hash_entry->node);
9091 - kfree(hash_entry);
9095 - if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
9096 - iowrite32be(hash & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
9101 -int memac_set_exception(struct fman_mac *memac,
9102 - enum fman_mac_exceptions exception, bool enable)
9106 - if (!is_init_done(memac->memac_drv_param))
9109 - bit_mask = get_exception_flag(exception);
9112 - memac->exceptions |= bit_mask;
9114 - memac->exceptions &= ~bit_mask;
9116 - pr_err("Undefined exception\n");
9119 - set_exception(memac->regs, bit_mask, enable);
9124 -int memac_init(struct fman_mac *memac)
9126 - struct memac_cfg *memac_drv_param;
9128 - enet_addr_t eth_addr;
9129 - bool slow_10g_if = false;
9130 - struct fixed_phy_status *fixed_link;
9134 - if (is_init_done(memac->memac_drv_param))
9137 - err = check_init_parameters(memac);
9141 - memac_drv_param = memac->memac_drv_param;
9143 - if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
9144 - slow_10g_if = true;
9146 - /* First, reset the MAC if desired. */
9147 - if (memac_drv_param->reset_on_init) {
9148 - err = reset(memac->regs);
9150 - pr_err("mEMAC reset failed\n");
9156 - MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
9157 - add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
9159 - fixed_link = memac_drv_param->fixed_link;
9161 - init(memac->regs, memac->memac_drv_param, memac->phy_if,
9162 - memac->max_speed, slow_10g_if, memac->exceptions);
9164 - /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
9165 - * Exists only in FMan 6.0 and 6.3.
9167 - if ((memac->fm_rev_info.major == 6) &&
9168 - ((memac->fm_rev_info.minor == 0) ||
9169 - (memac->fm_rev_info.minor == 3))) {
9170 - /* MAC strips CRC from received frames - this workaround
9171 - * should decrease the likelihood of bug appearance
9173 - reg32 = ioread32be(&memac->regs->command_config);
9174 - reg32 &= ~CMD_CFG_CRC_FWD;
9175 - iowrite32be(reg32, &memac->regs->command_config);
9178 - if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
9179 - /* Configure internal SGMII PHY */
9180 - if (memac->basex_if)
9181 - setup_sgmii_internal_phy_base_x(memac);
9183 - setup_sgmii_internal_phy(memac, fixed_link);
9184 - } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
9185 - /* Configure 4 internal SGMII PHYs */
9186 - for (i = 0; i < 4; i++) {
9187 - u8 qsmgii_phy_addr, phy_addr;
9188 - /* QSGMII PHY address occupies 3 upper bits of 5-bit
9189 - * phy_address; the lower 2 bits are used to extend
9190 - * register address space and access each one of 4
9191 - * ports inside QSGMII.
9193 - phy_addr = memac->pcsphy->mdio.addr;
9194 - qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
9195 - memac->pcsphy->mdio.addr = qsmgii_phy_addr;
9196 - if (memac->basex_if)
9197 - setup_sgmii_internal_phy_base_x(memac);
9199 - setup_sgmii_internal_phy(memac, fixed_link);
9201 - memac->pcsphy->mdio.addr = phy_addr;
9205 - /* Max Frame Length */
9206 - err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
9207 - memac_drv_param->max_frame_length);
9209 - pr_err("settings Mac max frame length is FAILED\n");
9213 - memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
9214 - if (!memac->multicast_addr_hash) {
9215 - free_init_resources(memac);
9216 - pr_err("allocation hash table is FAILED\n");
9220 - memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
9221 - if (!memac->unicast_addr_hash) {
9222 - free_init_resources(memac);
9223 - pr_err("allocation hash table is FAILED\n");
9227 - fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
9228 - FMAN_INTR_TYPE_ERR, memac_err_exception, memac);
9230 - fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
9231 - FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
9233 - kfree(memac_drv_param);
9234 - memac->memac_drv_param = NULL;
9239 -int memac_free(struct fman_mac *memac)
9241 - free_init_resources(memac);
9243 - if (memac->pcsphy)
9244 - put_device(&memac->pcsphy->mdio.dev);
9246 - kfree(memac->memac_drv_param);
9252 -struct fman_mac *memac_config(struct fman_mac_params *params)
9254 - struct fman_mac *memac;
9255 - struct memac_cfg *memac_drv_param;
9256 - void __iomem *base_addr;
9258 - base_addr = params->base_addr;
9259 - /* allocate memory for the m_emac data structure */
9260 - memac = kzalloc(sizeof(*memac), GFP_KERNEL);
9264 - /* allocate memory for the m_emac driver parameters data structure */
9265 - memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL);
9266 - if (!memac_drv_param) {
9267 - memac_free(memac);
9271 - /* Plant parameter structure pointer */
9272 - memac->memac_drv_param = memac_drv_param;
9274 - set_dflts(memac_drv_param);
9276 - memac->addr = ENET_ADDR_TO_UINT64(params->addr);
9278 - memac->regs = base_addr;
9279 - memac->max_speed = params->max_speed;
9280 - memac->phy_if = params->phy_if;
9281 - memac->mac_id = params->mac_id;
9282 - memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
9283 - MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
9284 - memac->exception_cb = params->exception_cb;
9285 - memac->event_cb = params->event_cb;
9286 - memac->dev_id = params->dev_id;
9287 - memac->fm = params->fm;
9288 - memac->basex_if = params->basex_if;
9290 - /* Save FMan revision */
9291 - fman_get_revision(memac->fm, &memac->fm_rev_info);
9293 - if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
9294 - memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
9295 - if (!params->internal_phy_node) {
9296 - pr_err("PCS PHY node is not available\n");
9297 - memac_free(memac);
9301 - memac->pcsphy = of_phy_find_device(params->internal_phy_node);
9302 - if (!memac->pcsphy) {
9303 - pr_err("of_phy_find_device (PCS PHY) failed\n");
9304 - memac_free(memac);
9311 diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
9312 deleted file mode 100644
9313 index 173d8e0..0000000
9314 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h
9318 - * Copyright 2008-2015 Freescale Semiconductor Inc.
9320 - * Redistribution and use in source and binary forms, with or without
9321 - * modification, are permitted provided that the following conditions are met:
9322 - * * Redistributions of source code must retain the above copyright
9323 - * notice, this list of conditions and the following disclaimer.
9324 - * * Redistributions in binary form must reproduce the above copyright
9325 - * notice, this list of conditions and the following disclaimer in the
9326 - * documentation and/or other materials provided with the distribution.
9327 - * * Neither the name of Freescale Semiconductor nor the
9328 - * names of its contributors may be used to endorse or promote products
9329 - * derived from this software without specific prior written permission.
9332 - * ALTERNATIVELY, this software may be distributed under the terms of the
9333 - * GNU General Public License ("GPL") as published by the Free Software
9334 - * Foundation, either version 2 of that License or (at your option) any
9337 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9338 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9339 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9340 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9341 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9342 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9343 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9344 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9345 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9346 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9352 -#include "fman_mac.h"
9354 -#include <linux/netdevice.h>
9356 -struct fman_mac *memac_config(struct fman_mac_params *params);
9357 -int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
9358 -int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
9359 -int memac_adjust_link(struct fman_mac *memac, u16 speed);
9360 -int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
9361 -int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
9362 -int memac_cfg_fixed_link(struct fman_mac *memac,
9363 - struct fixed_phy_status *fixed_link);
9364 -int memac_enable(struct fman_mac *memac, enum comm_mode mode);
9365 -int memac_disable(struct fman_mac *memac, enum comm_mode mode);
9366 -int memac_init(struct fman_mac *memac);
9367 -int memac_free(struct fman_mac *memac);
9368 -int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
9369 -int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
9370 - u16 pause_time, u16 thresh_time);
9371 -int memac_set_exception(struct fman_mac *memac,
9372 - enum fman_mac_exceptions exception, bool enable);
9373 -int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
9374 -int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
9376 -#endif /* __MEMAC_H */
9377 diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
9378 deleted file mode 100644
9379 index 5ec94d2..0000000
9380 --- a/drivers/net/ethernet/freescale/fman/fman_muram.c
9384 - * Copyright 2008-2015 Freescale Semiconductor Inc.
9386 - * Redistribution and use in source and binary forms, with or without
9387 - * modification, are permitted provided that the following conditions are met:
9388 - * * Redistributions of source code must retain the above copyright
9389 - * notice, this list of conditions and the following disclaimer.
9390 - * * Redistributions in binary form must reproduce the above copyright
9391 - * notice, this list of conditions and the following disclaimer in the
9392 - * documentation and/or other materials provided with the distribution.
9393 - * * Neither the name of Freescale Semiconductor nor the
9394 - * names of its contributors may be used to endorse or promote products
9395 - * derived from this software without specific prior written permission.
9398 - * ALTERNATIVELY, this software may be distributed under the terms of the
9399 - * GNU General Public License ("GPL") as published by the Free Software
9400 - * Foundation, either version 2 of that License or (at your option) any
9403 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9404 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9405 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9406 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9407 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9408 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9409 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9410 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9411 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9412 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9415 -#include "fman_muram.h"
9417 -#include <linux/io.h>
9418 -#include <linux/slab.h>
9419 -#include <linux/genalloc.h>
9421 -struct muram_info {
9422 - struct gen_pool *pool;
9423 - void __iomem *vbase;
9425 - phys_addr_t pbase;
9428 -static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram,
9429 - unsigned long vaddr)
9431 - return vaddr - (unsigned long)muram->vbase;
9436 - * @base: Pointer to base of memory mapped FM-MURAM.
9437 - * @size: Size of the FM-MURAM partition.
9439 - * Creates partition in the MURAM.
9440 - * The routine returns a pointer to the MURAM partition.
9441 - * This pointer must be passed as to all other FM-MURAM function calls.
9442 - * No actual initialization or configuration of FM_MURAM hardware is done by
9445 - * Return: pointer to FM-MURAM object, or NULL for Failure.
9447 -struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
9449 - struct muram_info *muram;
9450 - void __iomem *vaddr;
9453 - muram = kzalloc(sizeof(*muram), GFP_KERNEL);
9457 - muram->pool = gen_pool_create(ilog2(64), -1);
9458 - if (!muram->pool) {
9459 - pr_err("%s(): MURAM pool create failed\n", __func__);
9463 - vaddr = ioremap(base, size);
9465 - pr_err("%s(): MURAM ioremap failed\n", __func__);
9466 - goto pool_destroy;
9469 - ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
9472 - pr_err("%s(): MURAM pool add failed\n", __func__);
9474 - goto pool_destroy;
9477 - memset_io(vaddr, 0, (int)size);
9479 - muram->vbase = vaddr;
9480 - muram->pbase = base;
9484 - gen_pool_destroy(muram->pool);
9491 - * fman_muram_offset_to_vbase
9492 - * @muram: FM-MURAM module pointer.
9493 - * @offset: the offset of the memory block
9495 - * Gives the address of the memory region from specific offset
9497 - * Return: The address of the memory block
9499 -unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
9500 - unsigned long offset)
9502 - return offset + (unsigned long)muram->vbase;
9506 - * fman_muram_alloc
9507 - * @muram: FM-MURAM module pointer.
9508 - * @size: Size of the memory to be allocated.
9510 - * Allocate some memory from FM-MURAM partition.
9512 - * Return: address of the allocated memory; NULL otherwise.
9514 -unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
9516 - unsigned long vaddr;
9518 - vaddr = gen_pool_alloc(muram->pool, size);
9522 - memset_io((void __iomem *)vaddr, 0, size);
9524 - return fman_muram_vbase_to_offset(muram, vaddr);
9528 - * fman_muram_free_mem
9529 - * muram: FM-MURAM module pointer.
9530 - * offset: offset of the memory region to be freed.
9531 - * size: size of the memory to be freed.
9533 - * Free an allocated memory from FM-MURAM partition.
9535 -void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
9538 - unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
9540 - gen_pool_free(muram->pool, addr, size);
9542 diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
9543 deleted file mode 100644
9544 index 453bf84..0000000
9545 --- a/drivers/net/ethernet/freescale/fman/fman_muram.h
9549 - * Copyright 2008-2015 Freescale Semiconductor Inc.
9551 - * Redistribution and use in source and binary forms, with or without
9552 - * modification, are permitted provided that the following conditions are met:
9553 - * * Redistributions of source code must retain the above copyright
9554 - * notice, this list of conditions and the following disclaimer.
9555 - * * Redistributions in binary form must reproduce the above copyright
9556 - * notice, this list of conditions and the following disclaimer in the
9557 - * documentation and/or other materials provided with the distribution.
9558 - * * Neither the name of Freescale Semiconductor nor the
9559 - * names of its contributors may be used to endorse or promote products
9560 - * derived from this software without specific prior written permission.
9563 - * ALTERNATIVELY, this software may be distributed under the terms of the
9564 - * GNU General Public License ("GPL") as published by the Free Software
9565 - * Foundation, either version 2 of that License or (at your option) any
9568 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9569 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9570 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9571 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9572 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9573 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9574 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9575 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9576 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9577 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9579 -#ifndef __FM_MURAM_EXT
9580 -#define __FM_MURAM_EXT
9582 -#include <linux/types.h>
9584 -#define FM_MURAM_INVALID_ALLOCATION -1
9586 -/* Structure for FM MURAM information */
9589 -struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
9591 -unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
9592 - unsigned long offset);
9594 -unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
9596 -void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
9599 -#endif /* __FM_MURAM_EXT */
9600 diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
9601 deleted file mode 100644
9602 index 9f3bb50..0000000
9603 --- a/drivers/net/ethernet/freescale/fman/fman_port.c
9607 - * Copyright 2008 - 2015 Freescale Semiconductor Inc.
9609 - * Redistribution and use in source and binary forms, with or without
9610 - * modification, are permitted provided that the following conditions are met:
9611 - * * Redistributions of source code must retain the above copyright
9612 - * notice, this list of conditions and the following disclaimer.
9613 - * * Redistributions in binary form must reproduce the above copyright
9614 - * notice, this list of conditions and the following disclaimer in the
9615 - * documentation and/or other materials provided with the distribution.
9616 - * * Neither the name of Freescale Semiconductor nor the
9617 - * names of its contributors may be used to endorse or promote products
9618 - * derived from this software without specific prior written permission.
9621 - * ALTERNATIVELY, this software may be distributed under the terms of the
9622 - * GNU General Public License ("GPL") as published by the Free Software
9623 - * Foundation, either version 2 of that License or (at your option) any
9626 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9627 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9628 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9629 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9630 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9631 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9632 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9633 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9634 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9635 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9638 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9640 -#include "fman_port.h"
9642 -#include "fman_sp.h"
9644 -#include <linux/io.h>
9645 -#include <linux/slab.h>
9646 -#include <linux/module.h>
9647 -#include <linux/interrupt.h>
9648 -#include <linux/of_platform.h>
9649 -#include <linux/of_address.h>
9650 -#include <linux/delay.h>
9651 -#include <linux/libfdt_env.h>
9654 -#define DFLT_FQ_ID 0x00FFFFFF
9656 -/* General defines */
9657 -#define PORT_BMI_FIFO_UNITS 0x100
9659 -#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) \
9660 - min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS)
9662 -#define PORT_CG_MAP_NUM 8
9663 -#define PORT_PRS_RESULT_WORDS_NUM 8
9664 -#define PORT_IC_OFFSET_UNITS 0x10
9666 -#define MIN_EXT_BUF_SIZE 64
9668 -#define BMI_PORT_REGS_OFFSET 0
9669 -#define QMI_PORT_REGS_OFFSET 0x400
9671 -/* Default values */
9672 -#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
9673 - DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN
9675 -#define DFLT_PORT_CUT_BYTES_FROM_END 4
9677 -#define DFLT_PORT_ERRORS_TO_DISCARD FM_PORT_FRM_ERR_CLS_DISCARD
9678 -#define DFLT_PORT_MAX_FRAME_LENGTH 9600
9680 -#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size) \
9681 - MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)
9683 -#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size) \
9685 - MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) : \
9686 - (MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4)) \
9688 -#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS 0
9691 -#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f
9693 -#define QMI_PORT_CFG_EN 0x80000000
9694 -#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
9696 -#define QMI_DEQ_CFG_PRI 0x80000000
9697 -#define QMI_DEQ_CFG_TYPE1 0x10000000
9698 -#define QMI_DEQ_CFG_TYPE2 0x20000000
9699 -#define QMI_DEQ_CFG_TYPE3 0x30000000
9700 -#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000
9701 -#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000
9702 -#define QMI_DEQ_CFG_SP_MASK 0xf
9703 -#define QMI_DEQ_CFG_SP_SHIFT 20
9705 -#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type) \
9706 - (_type == FMAN_PORT_TYPE_TX ? 0x1400 : 0x400)
9709 -#define BMI_EBD_EN 0x80000000
9711 -#define BMI_PORT_CFG_EN 0x80000000
9713 -#define BMI_PORT_STATUS_BSY 0x80000000
9715 -#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
9716 -#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
9718 -#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
9719 -#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000
9721 -#define BMI_FRAME_END_CS_IGNORE_SHIFT 24
9722 -#define BMI_FRAME_END_CS_IGNORE_MASK 0x0000001f
9724 -#define BMI_RX_FRAME_END_CUT_SHIFT 16
9725 -#define BMI_RX_FRAME_END_CUT_MASK 0x0000001f
9727 -#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT
9728 -#define BMI_IC_TO_EXT_MASK 0x0000001f
9729 -#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT
9730 -#define BMI_IC_FROM_INT_MASK 0x0000000f
9731 -#define BMI_IC_SIZE_MASK 0x0000001f
9733 -#define BMI_INT_BUF_MARG_SHIFT 28
9734 -#define BMI_INT_BUF_MARG_MASK 0x0000000f
9735 -#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT
9736 -#define BMI_EXT_BUF_MARG_START_MASK 0x000001ff
9737 -#define BMI_EXT_BUF_MARG_END_MASK 0x000001ff
9739 -#define BMI_CMD_MR_LEAC 0x00200000
9740 -#define BMI_CMD_MR_SLEAC 0x00100000
9741 -#define BMI_CMD_MR_MA 0x00080000
9742 -#define BMI_CMD_MR_DEAS 0x00040000
9743 -#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
9744 - BMI_CMD_MR_SLEAC | \
9747 -#define BMI_CMD_TX_MR_DEF 0
9749 -#define BMI_CMD_ATTR_ORDER 0x80000000
9750 -#define BMI_CMD_ATTR_SYNC 0x02000000
9751 -#define BMI_CMD_ATTR_COLOR_SHIFT 26
9753 -#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12
9754 -#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000000f
9755 -#define BMI_NEXT_ENG_FD_BITS_SHIFT 24
9757 -#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID
9758 -#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER
9759 -#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP
9760 -#define BMI_EXT_BUF_POOL_ID_SHIFT 16
9761 -#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
9762 -#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16
9764 -#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
9766 -#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
9767 -#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
9769 -#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \
9770 - ((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
9772 -#define RX_ERRS_TO_ENQ \
9773 - (FM_PORT_FRM_ERR_DMA | \
9774 - FM_PORT_FRM_ERR_PHYSICAL | \
9775 - FM_PORT_FRM_ERR_SIZE | \
9776 - FM_PORT_FRM_ERR_EXTRACTION | \
9777 - FM_PORT_FRM_ERR_NO_SCHEME | \
9778 - FM_PORT_FRM_ERR_PRS_TIMEOUT | \
9779 - FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
9780 - FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
9781 - FM_PORT_FRM_ERR_PRS_HDR_ERR | \
9782 - FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \
9783 - FM_PORT_FRM_ERR_IPRE)
9786 -#define NIA_ORDER_RESTOR 0x00800000
9787 -#define NIA_ENG_BMI 0x00500000
9788 -#define NIA_ENG_QMI_ENQ 0x00540000
9789 -#define NIA_ENG_QMI_DEQ 0x00580000
9791 -#define NIA_BMI_AC_ENQ_FRAME 0x00000002
9792 -#define NIA_BMI_AC_TX_RELEASE 0x000002C0
9793 -#define NIA_BMI_AC_RELEASE 0x000000C0
9794 -#define NIA_BMI_AC_TX 0x00000274
9795 -#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c
9798 -#define TX_10G_PORT_BASE 0x30
9799 -#define RX_10G_PORT_BASE 0x10
9801 -/* BMI Rx port register map */
9802 -struct fman_port_rx_bmi_regs {
9803 - u32 fmbm_rcfg; /* Rx Configuration */
9804 - u32 fmbm_rst; /* Rx Status */
9805 - u32 fmbm_rda; /* Rx DMA attributes */
9806 - u32 fmbm_rfp; /* Rx FIFO Parameters */
9807 - u32 fmbm_rfed; /* Rx Frame End Data */
9808 - u32 fmbm_ricp; /* Rx Internal Context Parameters */
9809 - u32 fmbm_rim; /* Rx Internal Buffer Margins */
9810 - u32 fmbm_rebm; /* Rx External Buffer Margins */
9811 - u32 fmbm_rfne; /* Rx Frame Next Engine */
9812 - u32 fmbm_rfca; /* Rx Frame Command Attributes. */
9813 - u32 fmbm_rfpne; /* Rx Frame Parser Next Engine */
9814 - u32 fmbm_rpso; /* Rx Parse Start Offset */
9815 - u32 fmbm_rpp; /* Rx Policer Profile */
9816 - u32 fmbm_rccb; /* Rx Coarse Classification Base */
9817 - u32 fmbm_reth; /* Rx Excessive Threshold */
9818 - u32 reserved003c[1]; /* (0x03C 0x03F) */
9819 - u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM];
9820 - /* Rx Parse Results Array Init */
9821 - u32 fmbm_rfqid; /* Rx Frame Queue ID */
9822 - u32 fmbm_refqid; /* Rx Error Frame Queue ID */
9823 - u32 fmbm_rfsdm; /* Rx Frame Status Discard Mask */
9824 - u32 fmbm_rfsem; /* Rx Frame Status Error Mask */
9825 - u32 fmbm_rfene; /* Rx Frame Enqueue Next Engine */
9826 - u32 reserved0074[0x2]; /* (0x074-0x07C) */
9827 - u32 fmbm_rcmne; /* Rx Frame Continuous Mode Next Engine */
9828 - u32 reserved0080[0x20]; /* (0x080 0x0FF) */
9829 - u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
9830 - /* Buffer Manager pool Information- */
9831 - u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM]; /* Allocate Counter- */
9832 - u32 reserved0130[8]; /* 0x130/0x140 - 0x15F reserved - */
9833 - u32 fmbm_rcgm[PORT_CG_MAP_NUM]; /* Congestion Group Map */
9834 - u32 fmbm_mpd; /* BM Pool Depletion */
9835 - u32 reserved0184[0x1F]; /* (0x184 0x1FF) */
9836 - u32 fmbm_rstc; /* Rx Statistics Counters */
9837 - u32 fmbm_rfrc; /* Rx Frame Counter */
9838 - u32 fmbm_rfbc; /* Rx Bad Frames Counter */
9839 - u32 fmbm_rlfc; /* Rx Large Frames Counter */
9840 - u32 fmbm_rffc; /* Rx Filter Frames Counter */
9841 - u32 fmbm_rfdc; /* Rx Frame Discard Counter */
9842 - u32 fmbm_rfldec; /* Rx Frames List DMA Error Counter */
9843 - u32 fmbm_rodc; /* Rx Out of Buffers Discard nntr */
9844 - u32 fmbm_rbdc; /* Rx Buffers Deallocate Counter */
9845 - u32 fmbm_rpec; /* RX Prepare to enqueue Counte */
9846 - u32 reserved0224[0x16]; /* (0x224 0x27F) */
9847 - u32 fmbm_rpc; /* Rx Performance Counters */
9848 - u32 fmbm_rpcp; /* Rx Performance Count Parameters */
9849 - u32 fmbm_rccn; /* Rx Cycle Counter */
9850 - u32 fmbm_rtuc; /* Rx Tasks Utilization Counter */
9851 - u32 fmbm_rrquc; /* Rx Receive Queue Utilization cntr */
9852 - u32 fmbm_rduc; /* Rx DMA Utilization Counter */
9853 - u32 fmbm_rfuc; /* Rx FIFO Utilization Counter */
9854 - u32 fmbm_rpac; /* Rx Pause Activation Counter */
9855 - u32 reserved02a0[0x18]; /* (0x2A0 0x2FF) */
9856 - u32 fmbm_rdcfg[0x3]; /* Rx Debug Configuration */
9857 - u32 fmbm_rgpr; /* Rx General Purpose Register */
9858 - u32 reserved0310[0x3a];
9861 -/* BMI Tx port register map */
9862 -struct fman_port_tx_bmi_regs {
9863 - u32 fmbm_tcfg; /* Tx Configuration */
9864 - u32 fmbm_tst; /* Tx Status */
9865 - u32 fmbm_tda; /* Tx DMA attributes */
9866 - u32 fmbm_tfp; /* Tx FIFO Parameters */
9867 - u32 fmbm_tfed; /* Tx Frame End Data */
9868 - u32 fmbm_ticp; /* Tx Internal Context Parameters */
9869 - u32 fmbm_tfdne; /* Tx Frame Dequeue Next Engine. */
9870 - u32 fmbm_tfca; /* Tx Frame Command attribute. */
9871 - u32 fmbm_tcfqid; /* Tx Confirmation Frame Queue ID. */
9872 - u32 fmbm_tefqid; /* Tx Frame Error Queue ID */
9873 - u32 fmbm_tfene; /* Tx Frame Enqueue Next Engine */
9874 - u32 fmbm_trlmts; /* Tx Rate Limiter Scale */
9875 - u32 fmbm_trlmt; /* Tx Rate Limiter */
9876 - u32 reserved0034[0x0e]; /* (0x034-0x6c) */
9877 - u32 fmbm_tccb; /* Tx Coarse Classification base */
9878 - u32 fmbm_tfne; /* Tx Frame Next Engine */
9879 - u32 fmbm_tpfcm[0x02];
9880 - /* Tx Priority based Flow Control (PFC) Mapping */
9881 - u32 fmbm_tcmne; /* Tx Frame Continuous Mode Next Engine */
9882 - u32 reserved0080[0x60]; /* (0x080-0x200) */
9883 - u32 fmbm_tstc; /* Tx Statistics Counters */
9884 - u32 fmbm_tfrc; /* Tx Frame Counter */
9885 - u32 fmbm_tfdc; /* Tx Frames Discard Counter */
9886 - u32 fmbm_tfledc; /* Tx Frame len error discard cntr */
9887 - u32 fmbm_tfufdc; /* Tx Frame unsprt frmt discard cntr */
9888 - u32 fmbm_tbdc; /* Tx Buffers Deallocate Counter */
9889 - u32 reserved0218[0x1A]; /* (0x218-0x280) */
9890 - u32 fmbm_tpc; /* Tx Performance Counters */
9891 - u32 fmbm_tpcp; /* Tx Performance Count Parameters */
9892 - u32 fmbm_tccn; /* Tx Cycle Counter */
9893 - u32 fmbm_ttuc; /* Tx Tasks Utilization Counter */
9894 - u32 fmbm_ttcquc; /* Tx Transmit conf Q util Counter */
9895 - u32 fmbm_tduc; /* Tx DMA Utilization Counter */
9896 - u32 fmbm_tfuc; /* Tx FIFO Utilization Counter */
9897 - u32 reserved029c[16]; /* (0x29C-0x2FF) */
9898 - u32 fmbm_tdcfg[0x3]; /* Tx Debug Configuration */
9899 - u32 fmbm_tgpr; /* Tx General Purpose Register */
9900 - u32 reserved0310[0x3a]; /* (0x310-0x3FF) */
9903 -/* BMI port register map */
9904 -union fman_port_bmi_regs {
9905 - struct fman_port_rx_bmi_regs rx;
9906 - struct fman_port_tx_bmi_regs tx;
9909 -/* QMI port register map */
9910 -struct fman_port_qmi_regs {
9911 - u32 fmqm_pnc; /* PortID n Configuration Register */
9912 - u32 fmqm_pns; /* PortID n Status Register */
9913 - u32 fmqm_pnts; /* PortID n Task Status Register */
9914 - u32 reserved00c[4]; /* 0xn00C - 0xn01B */
9915 - u32 fmqm_pnen; /* PortID n Enqueue NIA Register */
9916 - u32 fmqm_pnetfc; /* PortID n Enq Total Frame Counter */
9917 - u32 reserved024[2]; /* 0xn024 - 0x02B */
9918 - u32 fmqm_pndn; /* PortID n Dequeue NIA Register */
9919 - u32 fmqm_pndc; /* PortID n Dequeue Config Register */
9920 - u32 fmqm_pndtfc; /* PortID n Dequeue tot Frame cntr */
9921 - u32 fmqm_pndfdc; /* PortID n Dequeue FQID Dflt Cntr */
9922 - u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
9925 -/* QMI dequeue prefetch modes */
9926 -enum fman_port_deq_prefetch {
9927 - FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
9928 - FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */
9929 - FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */
9932 -/* A structure for defining FM port resources */
9933 -struct fman_port_rsrc {
9934 - u32 num; /* Committed required resource */
9935 - u32 extra; /* Extra (not committed) required resource */
9938 -enum fman_port_dma_swap {
9939 - FMAN_PORT_DMA_NO_SWAP, /* No swap, transfer data as is */
9940 - FMAN_PORT_DMA_SWAP_LE,
9941 - /* The transferred data should be swapped in PPC Little Endian mode */
9942 - FMAN_PORT_DMA_SWAP_BE
9943 - /* The transferred data should be swapped in Big Endian mode */
9946 -/* Default port color */
9947 -enum fman_port_color {
9948 - FMAN_PORT_COLOR_GREEN, /* Default port color is green */
9949 - FMAN_PORT_COLOR_YELLOW, /* Default port color is yellow */
9950 - FMAN_PORT_COLOR_RED, /* Default port color is red */
9951 - FMAN_PORT_COLOR_OVERRIDE /* Ignore color */
9954 -/* QMI dequeue from the SP channel - types */
9955 -enum fman_port_deq_type {
9956 - FMAN_PORT_DEQ_BY_PRI,
9957 - /* Priority precedence and Intra-Class scheduling */
9958 - FMAN_PORT_DEQ_ACTIVE_FQ,
9959 - /* Active FQ precedence and Intra-Class scheduling */
9960 - FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
9961 - /* Active FQ precedence and override Intra-Class scheduling */
9964 -/* External buffer pools configuration */
9965 -struct fman_port_bpools {
9966 - u8 count; /* Num of pools to set up */
9967 - bool counters_enable; /* Enable allocate counters */
9968 - u8 grp_bp_depleted_num;
9969 - /* Number of depleted pools - if reached the BMI indicates
9970 - * the MAC to send a pause frame
9973 - u8 bpid; /* BM pool ID */
9975 - /* Pool's size - must be in ascending order */
9977 - /* If this is a backup pool */
9978 - bool grp_bp_depleted;
9979 - /* Consider this buffer in multiple pools depletion criteria */
9980 - bool single_bp_depleted;
9981 - /* Consider this buffer in single pool depletion criteria */
9982 - } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
9985 -struct fman_port_cfg {
9989 - bool deq_high_priority;
9990 - enum fman_port_deq_type deq_type;
9991 - enum fman_port_deq_prefetch deq_prefetch_option;
9993 - u8 cheksum_last_bytes_ignore;
9994 - u8 rx_cut_end_bytes;
9995 - struct fman_buf_pool_depletion buf_pool_depletion;
9996 - struct fman_ext_pools ext_buf_pools;
9997 - u32 tx_fifo_min_level;
9998 - u32 tx_fifo_low_comf_level;
9999 - u32 rx_pri_elevation;
10001 - struct fman_sp_buf_margins buf_margins;
10002 - u32 int_buf_start_margin;
10003 - struct fman_sp_int_context_data_copy int_context;
10004 - u32 discard_mask;
10006 - struct fman_buffer_prefix_content buffer_prefix_content;
10007 - bool dont_release_buf;
10010 - u32 tx_fifo_deq_pipeline_depth;
10011 - bool errata_A006320;
10012 - bool excessive_threshold_register;
10013 - bool fmbm_tfne_has_features;
10015 - enum fman_port_dma_swap dma_swap_data;
10016 - enum fman_port_color color;
10019 -struct fman_port_rx_pools_params {
10021 - u16 second_largest_buf_size;
10022 - u16 largest_buf_size;
10025 -struct fman_port_dts_params {
10026 - void __iomem *base_addr; /* FMan port virtual memory */
10027 - enum fman_port_type type; /* Port type */
10028 - u16 speed; /* Port speed */
10029 - u8 id; /* HW Port Id */
10030 - u32 qman_channel_id; /* QMan channel id (non RX only) */
10031 - struct fman *fman; /* FMan Handle */
10034 -struct fman_port {
10036 - struct device *dev;
10037 - struct fman_rev_info rev_info;
10039 - enum fman_port_type port_type;
10042 - union fman_port_bmi_regs __iomem *bmi_regs;
10043 - struct fman_port_qmi_regs __iomem *qmi_regs;
10045 - struct fman_sp_buffer_offsets buffer_offsets;
10047 - u8 internal_buf_offset;
10048 - struct fman_ext_pools ext_buf_pools;
10050 - u16 max_frame_length;
10051 - struct fman_port_rsrc open_dmas;
10052 - struct fman_port_rsrc tasks;
10053 - struct fman_port_rsrc fifo_bufs;
10054 - struct fman_port_rx_pools_params rx_pools_params;
10056 - struct fman_port_cfg *cfg;
10057 - struct fman_port_dts_params dts_params;
10059 - u8 ext_pools_num;
10060 - u32 max_port_fifo_size;
10061 - u32 max_num_of_ext_pools;
10062 - u32 max_num_of_sub_portals;
10063 - u32 bm_max_num_of_pools;
10066 -static int init_bmi_rx(struct fman_port *port)
10068 - struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx;
10069 - struct fman_port_cfg *cfg = port->cfg;
10072 - /* DMA attributes */
10073 - tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
10074 - /* Enable write optimization */
10075 - tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
10076 - iowrite32be(tmp, ®s->fmbm_rda);
10078 - /* Rx FIFO parameters */
10079 - tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) <<
10080 - BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
10081 - tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1;
10082 - iowrite32be(tmp, ®s->fmbm_rfp);
10084 - if (cfg->excessive_threshold_register)
10085 - /* always allow access to the extra resources */
10086 - iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, ®s->fmbm_reth);
10088 - /* Frame end data */
10089 - tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
10090 - BMI_FRAME_END_CS_IGNORE_SHIFT;
10091 - tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) <<
10092 - BMI_RX_FRAME_END_CUT_SHIFT;
10093 - if (cfg->errata_A006320)
10094 - tmp &= 0xffe0ffff;
10095 - iowrite32be(tmp, ®s->fmbm_rfed);
10097 - /* Internal context parameters */
10098 - tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
10099 - BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
10100 - tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
10101 - BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
10102 - tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
10103 - BMI_IC_SIZE_MASK;
10104 - iowrite32be(tmp, ®s->fmbm_ricp);
10106 - /* Internal buffer offset */
10107 - tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) &
10108 - BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT;
10109 - iowrite32be(tmp, ®s->fmbm_rim);
10111 - /* External buffer margins */
10112 - tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
10113 - BMI_EXT_BUF_MARG_START_SHIFT;
10114 - tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
10115 - iowrite32be(tmp, ®s->fmbm_rebm);
10117 - /* Frame attributes */
10118 - tmp = BMI_CMD_RX_MR_DEF;
10119 - tmp |= BMI_CMD_ATTR_ORDER;
10120 - tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
10121 - /* Synchronization request */
10122 - tmp |= BMI_CMD_ATTR_SYNC;
10124 - iowrite32be(tmp, ®s->fmbm_rfca);
10127 - tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
10129 - tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
10130 - iowrite32be(tmp, ®s->fmbm_rfne);
10132 - /* Enqueue NIA */
10133 - iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, ®s->fmbm_rfene);
10135 - /* Default/error queues */
10136 - iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), ®s->fmbm_rfqid);
10137 - iowrite32be((cfg->err_fqid & DFLT_FQ_ID), ®s->fmbm_refqid);
10139 - /* Discard/error masks */
10140 - iowrite32be(cfg->discard_mask, ®s->fmbm_rfsdm);
10141 - iowrite32be(cfg->err_mask, ®s->fmbm_rfsem);
10146 -static int init_bmi_tx(struct fman_port *port)
10148 - struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx;
10149 - struct fman_port_cfg *cfg = port->cfg;
10152 - /* Tx Configuration register */
10154 - iowrite32be(tmp, ®s->fmbm_tcfg);
10156 - /* DMA attributes */
10157 - tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
10158 - iowrite32be(tmp, ®s->fmbm_tda);
10160 - /* Tx FIFO parameters */
10161 - tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) <<
10162 - BMI_TX_FIFO_MIN_FILL_SHIFT;
10163 - tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) &
10164 - BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
10165 - tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1;
10166 - iowrite32be(tmp, ®s->fmbm_tfp);
10168 - /* Frame end data */
10169 - tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
10170 - BMI_FRAME_END_CS_IGNORE_SHIFT;
10171 - iowrite32be(tmp, ®s->fmbm_tfed);
10173 - /* Internal context parameters */
10174 - tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
10175 - BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
10176 - tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
10177 - BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
10178 - tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
10179 - BMI_IC_SIZE_MASK;
10180 - iowrite32be(tmp, ®s->fmbm_ticp);
10182 - /* Frame attributes */
10183 - tmp = BMI_CMD_TX_MR_DEF;
10184 - tmp |= BMI_CMD_ATTR_ORDER;
10185 - tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
10186 - iowrite32be(tmp, ®s->fmbm_tfca);
10188 - /* Dequeue NIA + enqueue NIA */
10189 - iowrite32be(NIA_ENG_QMI_DEQ, ®s->fmbm_tfdne);
10190 - iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, ®s->fmbm_tfene);
10191 - if (cfg->fmbm_tfne_has_features)
10192 - iowrite32be(!cfg->dflt_fqid ?
10193 - BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
10194 - NIA_BMI_AC_FETCH_ALL_FRAME, ®s->fmbm_tfne);
10195 - if (!cfg->dflt_fqid && cfg->dont_release_buf) {
10196 - iowrite32be(DFLT_FQ_ID, ®s->fmbm_tcfqid);
10197 - iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
10198 - ®s->fmbm_tfene);
10199 - if (cfg->fmbm_tfne_has_features)
10200 - iowrite32be(ioread32be(®s->fmbm_tfne) & ~BMI_EBD_EN,
10201 - ®s->fmbm_tfne);
10204 - /* Confirmation/error queues */
10205 - if (cfg->dflt_fqid || !cfg->dont_release_buf)
10206 - iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, ®s->fmbm_tcfqid);
10207 - iowrite32be((cfg->err_fqid & DFLT_FQ_ID), ®s->fmbm_tefqid);
10212 -static int init_qmi(struct fman_port *port)
10214 - struct fman_port_qmi_regs __iomem *regs = port->qmi_regs;
10215 - struct fman_port_cfg *cfg = port->cfg;
10218 - /* Rx port configuration */
10219 - if (port->port_type == FMAN_PORT_TYPE_RX) {
10220 - /* Enqueue NIA */
10221 - iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, ®s->fmqm_pnen);
10225 - /* Continue with Tx port configuration */
10226 - if (port->port_type == FMAN_PORT_TYPE_TX) {
10227 - /* Enqueue NIA */
10228 - iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
10229 - ®s->fmqm_pnen);
10230 - /* Dequeue NIA */
10231 - iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, ®s->fmqm_pndn);
10234 - /* Dequeue Configuration register */
10236 - if (cfg->deq_high_priority)
10237 - tmp |= QMI_DEQ_CFG_PRI;
10239 - switch (cfg->deq_type) {
10240 - case FMAN_PORT_DEQ_BY_PRI:
10241 - tmp |= QMI_DEQ_CFG_TYPE1;
10243 - case FMAN_PORT_DEQ_ACTIVE_FQ:
10244 - tmp |= QMI_DEQ_CFG_TYPE2;
10246 - case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
10247 - tmp |= QMI_DEQ_CFG_TYPE3;
10253 - switch (cfg->deq_prefetch_option) {
10254 - case FMAN_PORT_DEQ_NO_PREFETCH:
10256 - case FMAN_PORT_DEQ_PART_PREFETCH:
10257 - tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
10259 - case FMAN_PORT_DEQ_FULL_PREFETCH:
10260 - tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
10266 - tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
10267 - tmp |= cfg->deq_byte_cnt;
10268 - iowrite32be(tmp, ®s->fmqm_pndc);
10273 -static int init(struct fman_port *port)
10277 - /* Init BMI registers */
10278 - switch (port->port_type) {
10279 - case FMAN_PORT_TYPE_RX:
10280 - err = init_bmi_rx(port);
10282 - case FMAN_PORT_TYPE_TX:
10283 - err = init_bmi_tx(port);
10292 - /* Init QMI registers */
10293 - err = init_qmi(port);
10299 -static int set_bpools(const struct fman_port *port,
10300 - const struct fman_port_bpools *bp)
10302 - u32 __iomem *bp_reg, *bp_depl_reg;
10304 - u8 i, max_bp_num;
10305 - bool grp_depl_used = false, rx_port;
10307 - switch (port->port_type) {
10308 - case FMAN_PORT_TYPE_RX:
10309 - max_bp_num = port->ext_pools_num;
10311 - bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
10312 - bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
10319 - /* Check buffers are provided in ascending order */
10320 - for (i = 0; (i < (bp->count - 1) &&
10321 - (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
10322 - if (bp->bpool[i].size > bp->bpool[i + 1].size)
10327 - /* Set up external buffers pools */
10328 - for (i = 0; i < bp->count; i++) {
10329 - tmp = BMI_EXT_BUF_POOL_VALID;
10330 - tmp |= ((u32)bp->bpool[i].bpid <<
10331 - BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
10334 - if (bp->counters_enable)
10335 - tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
10337 - if (bp->bpool[i].is_backup)
10338 - tmp |= BMI_EXT_BUF_POOL_BACKUP;
10340 - tmp |= (u32)bp->bpool[i].size;
10343 - iowrite32be(tmp, &bp_reg[i]);
10346 - /* Clear unused pools */
10347 - for (i = bp->count; i < max_bp_num; i++)
10348 - iowrite32be(0, &bp_reg[i]);
10350 - /* Pools depletion */
10352 - for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
10353 - if (bp->bpool[i].grp_bp_depleted) {
10354 - grp_depl_used = true;
10355 - tmp |= 0x80000000 >> i;
10358 - if (bp->bpool[i].single_bp_depleted)
10359 - tmp |= 0x80 >> i;
10362 - if (grp_depl_used)
10363 - tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
10364 - BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
10366 - iowrite32be(tmp, bp_depl_reg);
10370 -static bool is_init_done(struct fman_port_cfg *cfg)
10372 - /* Checks if FMan port driver parameters were initialized */
10379 -static int verify_size_of_fifo(struct fman_port *port)
10381 - u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0;
10384 - if (port->port_type == FMAN_PORT_TYPE_TX) {
10385 - min_fifo_size_required = (u32)
10386 - (roundup(port->max_frame_length,
10387 - FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS));
10389 - min_fifo_size_required +=
10390 - port->cfg->tx_fifo_deq_pipeline_depth *
10391 - FMAN_BMI_FIFO_UNITS;
10393 - opt_fifo_size_for_b2b = min_fifo_size_required;
10395 - /* Add some margin for back-to-back capability to improve
10396 - * performance, allows the hardware to pipeline new frame dma
10397 - * while the previous frame not yet transmitted.
10399 - if (port->port_speed == 10000)
10400 - opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
10402 - opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS;
10406 - else if (port->port_type == FMAN_PORT_TYPE_RX) {
10407 - if (port->rev_info.major >= 6)
10408 - min_fifo_size_required = (u32)
10409 - (roundup(port->max_frame_length,
10410 - FMAN_BMI_FIFO_UNITS) +
10411 - (5 * FMAN_BMI_FIFO_UNITS));
10412 - /* 4 according to spec + 1 for FOF>0 */
10414 - min_fifo_size_required = (u32)
10415 - (roundup(min(port->max_frame_length,
10416 - port->rx_pools_params.largest_buf_size),
10417 - FMAN_BMI_FIFO_UNITS) +
10418 - (7 * FMAN_BMI_FIFO_UNITS));
10420 - opt_fifo_size_for_b2b = min_fifo_size_required;
10422 - /* Add some margin for back-to-back capability to improve
10423 - * performance,allows the hardware to pipeline new frame dma
10424 - * while the previous frame not yet transmitted.
10426 - if (port->port_speed == 10000)
10427 - opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
10429 - opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
10432 - WARN_ON(min_fifo_size_required <= 0);
10433 - WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required);
10435 - /* Verify the size */
10436 - if (port->fifo_bufs.num < min_fifo_size_required)
10437 - dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
10438 - __func__, min_fifo_size_required);
10439 - else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
10440 - dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n",
10441 - __func__, opt_fifo_size_for_b2b);
10446 -static int set_ext_buffer_pools(struct fman_port *port)
10448 - struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools;
10449 - struct fman_buf_pool_depletion *buf_pool_depletion =
10450 - &port->cfg->buf_pool_depletion;
10451 - u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM];
10452 - u16 sizes_array[BM_MAX_NUM_OF_POOLS];
10453 - int i = 0, j = 0, err;
10454 - struct fman_port_bpools bpools;
10456 - memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM);
10457 - memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS);
10458 - memcpy(&port->ext_buf_pools, ext_buf_pools,
10459 - sizeof(struct fman_ext_pools));
10461 - fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools,
10465 - memset(&bpools, 0, sizeof(struct fman_port_bpools));
10466 - bpools.count = ext_buf_pools->num_of_pools_used;
10467 - bpools.counters_enable = true;
10468 - for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
10469 - bpools.bpool[i].bpid = ordered_array[i];
10470 - bpools.bpool[i].size = sizes_array[ordered_array[i]];
10473 - /* save pools parameters for later use */
10474 - port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
10475 - port->rx_pools_params.largest_buf_size =
10476 - sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
10477 - port->rx_pools_params.second_largest_buf_size =
10478 - sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
10480 - /* FMBM_RMPD reg. - pool depletion */
10481 - if (buf_pool_depletion->pools_grp_mode_enable) {
10482 - bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
10483 - for (i = 0; i < port->bm_max_num_of_pools; i++) {
10484 - if (buf_pool_depletion->pools_to_consider[i]) {
10485 - for (j = 0; j < ext_buf_pools->
10486 - num_of_pools_used; j++) {
10487 - if (i == ordered_array[j]) {
10489 - grp_bp_depleted = true;
10497 - if (buf_pool_depletion->single_pool_mode_enable) {
10498 - for (i = 0; i < port->bm_max_num_of_pools; i++) {
10499 - if (buf_pool_depletion->
10500 - pools_to_consider_for_single_mode[i]) {
10501 - for (j = 0; j < ext_buf_pools->
10502 - num_of_pools_used; j++) {
10503 - if (i == ordered_array[j]) {
10505 - single_bp_depleted = true;
10513 - err = set_bpools(port, &bpools);
10515 - dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
10522 -static int init_low_level_driver(struct fman_port *port)
10524 - struct fman_port_cfg *cfg = port->cfg;
10527 - switch (port->port_type) {
10528 - case FMAN_PORT_TYPE_RX:
10529 - cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
10535 - tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ?
10536 - (port->internal_buf_offset / OFFSET_UNITS + 1) :
10537 - (port->internal_buf_offset / OFFSET_UNITS));
10538 - port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS);
10539 - port->cfg->int_buf_start_margin = port->internal_buf_offset;
10541 - if (init(port) != 0) {
10542 - dev_err(port->dev, "%s: fman port initialization failed\n",
10547 - /* The code bellow is a trick so the FM will not release the buffer
10548 - * to BM nor will try to enqueue the frame to QM
10550 - if (port->port_type == FMAN_PORT_TYPE_TX) {
10551 - if (!cfg->dflt_fqid && cfg->dont_release_buf) {
10552 - /* override fmbm_tcfqid 0 with a false non-0 value.
10553 - * This will force FM to act according to tfene.
10554 - * Otherwise, if fmbm_tcfqid is 0 the FM will release
10555 - * buffers to BM regardless of fmbm_tfene
10557 - iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
10558 - iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
10559 - &port->bmi_regs->tx.fmbm_tfene);
10566 -static int fill_soc_specific_params(struct fman_port *port)
10568 - u32 bmi_max_fifo_size;
10570 - bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm);
10571 - port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size);
10572 - port->bm_max_num_of_pools = 64;
10574 - /* P4080 - Major 2
10575 - * P2041/P3041/P5020/P5040 - Major 3
10576 - * Tx/Bx - Major 6
10578 - switch (port->rev_info.major) {
10581 - port->max_num_of_ext_pools = 4;
10582 - port->max_num_of_sub_portals = 12;
10586 - port->max_num_of_ext_pools = 8;
10587 - port->max_num_of_sub_portals = 16;
10591 - dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
10598 -static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type,
10602 - case FMAN_PORT_TYPE_RX:
10603 - case FMAN_PORT_TYPE_TX:
10620 -static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type,
10624 - case FMAN_PORT_TYPE_RX:
10625 - case FMAN_PORT_TYPE_TX:
10642 -static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type,
10646 - case FMAN_PORT_TYPE_RX:
10652 - if (speed == 10000)
10656 - case FMAN_PORT_TYPE_TX:
10662 -static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type,
10667 - if (major >= 6) {
10669 - case FMAN_PORT_TYPE_TX:
10670 - if (speed == 10000)
10675 - case FMAN_PORT_TYPE_RX:
10676 - if (speed == 10000)
10686 - case FMAN_PORT_TYPE_TX:
10687 - case FMAN_PORT_TYPE_RX:
10688 - if (speed == 10000)
10701 -static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type,
10710 - case FMAN_PORT_TYPE_RX:
10711 - case FMAN_PORT_TYPE_TX:
10712 - if (speed == 10000)
10721 -static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type,
10726 - if (major >= 6) {
10728 - case FMAN_PORT_TYPE_TX:
10729 - if (speed == 10000)
10734 - case FMAN_PORT_TYPE_RX:
10735 - if (speed == 10000)
10745 - case FMAN_PORT_TYPE_TX:
10746 - if (speed == 10000)
10751 - case FMAN_PORT_TYPE_RX:
10752 - if (speed == 10000)
10765 -static void set_dflt_cfg(struct fman_port *port,
10766 - struct fman_port_params *port_params)
10768 - struct fman_port_cfg *cfg = port->cfg;
10770 - cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
10771 - cfg->color = FMAN_PORT_COLOR_GREEN;
10772 - cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
10773 - cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
10774 - cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
10775 - cfg->tx_fifo_low_comf_level = (5 * 1024);
10776 - cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
10777 - cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
10778 - cfg->tx_fifo_deq_pipeline_depth =
10779 - BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
10780 - cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type);
10782 - cfg->rx_pri_elevation =
10783 - DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size);
10784 - port->cfg->rx_fifo_thr =
10785 - DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major,
10786 - port->max_port_fifo_size);
10788 - if ((port->rev_info.major == 6) &&
10789 - ((port->rev_info.minor == 0) || (port->rev_info.minor == 3)))
10790 - cfg->errata_A006320 = true;
10792 - /* Excessive Threshold register - exists for pre-FMv3 chips only */
10793 - if (port->rev_info.major < 6)
10794 - cfg->excessive_threshold_register = true;
10796 - cfg->fmbm_tfne_has_features = true;
10798 - cfg->buffer_prefix_content.data_align =
10799 - DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
10802 -static void set_rx_dflt_cfg(struct fman_port *port,
10803 - struct fman_port_params *port_params)
10805 - port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD;
10807 - memcpy(&port->cfg->ext_buf_pools,
10808 - &port_params->specific_params.rx_params.ext_buf_pools,
10809 - sizeof(struct fman_ext_pools));
10810 - port->cfg->err_fqid =
10811 - port_params->specific_params.rx_params.err_fqid;
10812 - port->cfg->dflt_fqid =
10813 - port_params->specific_params.rx_params.dflt_fqid;
10816 -static void set_tx_dflt_cfg(struct fman_port *port,
10817 - struct fman_port_params *port_params,
10818 - struct fman_port_dts_params *dts_params)
10820 - port->cfg->tx_fifo_deq_pipeline_depth =
10821 - get_dflt_fifo_deq_pipeline_depth(port->rev_info.major,
10823 - port->port_speed);
10824 - port->cfg->err_fqid =
10825 - port_params->specific_params.non_rx_params.err_fqid;
10826 - port->cfg->deq_sp =
10827 - (u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK);
10828 - port->cfg->dflt_fqid =
10829 - port_params->specific_params.non_rx_params.dflt_fqid;
10830 - port->cfg->deq_high_priority = true;
10834 - * fman_port_config
10835 - * @port: Pointer to the port structure
10836 - * @params: Pointer to data structure of parameters
10838 - * Creates a descriptor for the FM PORT module.
10839 - * The routine returns a pointer to the FM PORT object.
10840 - * This descriptor must be passed as first parameter to all other FM PORT
10841 - * function calls.
10842 - * No actual initialization or configuration of FM hardware is done by this
10845 - * Return: 0 on success; Error code otherwise.
10847 -int fman_port_config(struct fman_port *port, struct fman_port_params *params)
10849 - void __iomem *base_addr = port->dts_params.base_addr;
10852 - /* Allocate the FM driver's parameters structure */
10853 - port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
10857 - /* Initialize FM port parameters which will be kept by the driver */
10858 - port->port_type = port->dts_params.type;
10859 - port->port_speed = port->dts_params.speed;
10860 - port->port_id = port->dts_params.id;
10861 - port->fm = port->dts_params.fman;
10862 - port->ext_pools_num = (u8)8;
10864 - /* get FM revision */
10865 - fman_get_revision(port->fm, &port->rev_info);
10867 - err = fill_soc_specific_params(port);
10869 - goto err_port_cfg;
10871 - switch (port->port_type) {
10872 - case FMAN_PORT_TYPE_RX:
10873 - set_rx_dflt_cfg(port, params);
10874 - case FMAN_PORT_TYPE_TX:
10875 - set_tx_dflt_cfg(port, params, &port->dts_params);
10877 - set_dflt_cfg(port, params);
10880 - /* Continue with other parameters */
10881 - /* set memory map pointers */
10882 - port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
10883 - port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
10885 - port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
10886 - /* resource distribution. */
10888 - port->fifo_bufs.num =
10889 - get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type,
10890 - port->port_speed) * FMAN_BMI_FIFO_UNITS;
10891 - port->fifo_bufs.extra =
10892 - DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS;
10894 - port->open_dmas.num =
10895 - get_dflt_num_of_open_dmas(port->rev_info.major,
10896 - port->port_type, port->port_speed);
10897 - port->open_dmas.extra =
10898 - get_dflt_extra_num_of_open_dmas(port->rev_info.major,
10899 - port->port_type, port->port_speed);
10900 - port->tasks.num =
10901 - get_dflt_num_of_tasks(port->rev_info.major,
10902 - port->port_type, port->port_speed);
10903 - port->tasks.extra =
10904 - get_dflt_extra_num_of_tasks(port->rev_info.major,
10905 - port->port_type, port->port_speed);
10907 - /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata
10910 - if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) &&
10911 - (((port->port_type == FMAN_PORT_TYPE_TX) &&
10912 - (port->port_speed == 1000)))) {
10913 - port->open_dmas.num = 16;
10914 - port->open_dmas.extra = 0;
10917 - if (port->rev_info.major >= 6 &&
10918 - port->port_type == FMAN_PORT_TYPE_TX &&
10919 - port->port_speed == 1000) {
10920 - /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
10923 - if (port->rev_info.major >= 6) {
10926 - reg = 0x00001013;
10927 - iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
10934 - kfree(port->cfg);
10939 -EXPORT_SYMBOL(fman_port_config);
10943 - * port: A pointer to a FM Port module.
10944 - * Initializes the FM PORT module by defining the software structure and
10945 - * configuring the hardware registers.
10947 - * Return: 0 on success; Error code otherwise.
10949 -int fman_port_init(struct fman_port *port)
10951 - struct fman_port_cfg *cfg;
10953 - struct fman_port_init_params params;
10955 - if (is_init_done(port->cfg))
10958 - err = fman_sp_build_buffer_struct(&port->cfg->int_context,
10959 - &port->cfg->buffer_prefix_content,
10960 - &port->cfg->buf_margins,
10961 - &port->buffer_offsets,
10962 - &port->internal_buf_offset);
10968 - if (port->port_type == FMAN_PORT_TYPE_RX) {
10969 - /* Call the external Buffer routine which also checks fifo
10970 - * size and updates it if necessary
10972 - /* define external buffer pools and pool depletion */
10973 - err = set_ext_buffer_pools(port);
10976 - /* check if the largest external buffer pool is large enough */
10977 - if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
10978 - cfg->buf_margins.end_margins >
10979 - port->rx_pools_params.largest_buf_size) {
10980 - dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
10981 - __func__, cfg->buf_margins.start_margins,
10982 - cfg->buf_margins.end_margins,
10983 - port->rx_pools_params.largest_buf_size);
10988 - /* Call FM module routine for communicating parameters */
10989 - memset(¶ms, 0, sizeof(params));
10990 - params.port_id = port->port_id;
10991 - params.port_type = port->port_type;
10992 - params.port_speed = port->port_speed;
10993 - params.num_of_tasks = (u8)port->tasks.num;
10994 - params.num_of_extra_tasks = (u8)port->tasks.extra;
10995 - params.num_of_open_dmas = (u8)port->open_dmas.num;
10996 - params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
10998 - if (port->fifo_bufs.num) {
10999 - err = verify_size_of_fifo(port);
11003 - params.size_of_fifo = port->fifo_bufs.num;
11004 - params.extra_size_of_fifo = port->fifo_bufs.extra;
11005 - params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
11006 - params.max_frame_length = port->max_frame_length;
11008 - err = fman_set_port_params(port->fm, ¶ms);
11012 - err = init_low_level_driver(port);
11016 - kfree(port->cfg);
11017 - port->cfg = NULL;
11021 -EXPORT_SYMBOL(fman_port_init);
11024 - * fman_port_cfg_buf_prefix_content
11025 - * @port A pointer to a FM Port module.
11026 - * @buffer_prefix_content A structure of parameters describing
11027 - * the structure of the buffer.
11029 - * Start margin - offset of data from
11030 - * start of external buffer.
11031 - * Defines the structure, size and content of the application buffer.
11032 - * The prefix, in Tx ports, if 'pass_prs_result', the application should set
11033 - * a value to their offsets in the prefix of the FM will save the first
11034 - * 'priv_data_size', than, depending on 'pass_prs_result' and
11035 - * 'pass_time_stamp', copy parse result and timeStamp, and the packet itself
11036 - * (in this order), to the application buffer, and to offset.
11037 - * Calling this routine changes the buffer margins definitions in the internal
11038 - * driver data base from its default configuration:
11039 - * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
11040 - * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
11041 - * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
11042 - * May be used for all ports
11044 - * Allowed only following fman_port_config() and before fman_port_init().
11046 - * Return: 0 on success; Error code otherwise.
11048 -int fman_port_cfg_buf_prefix_content(struct fman_port *port,
11049 - struct fman_buffer_prefix_content *
11050 - buffer_prefix_content)
11052 - if (is_init_done(port->cfg))
11055 - memcpy(&port->cfg->buffer_prefix_content,
11056 - buffer_prefix_content,
11057 - sizeof(struct fman_buffer_prefix_content));
11058 - /* if data_align was not initialized by user,
11059 - * we return to driver's default
11061 - if (!port->cfg->buffer_prefix_content.data_align)
11062 - port->cfg->buffer_prefix_content.data_align =
11063 - DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
11067 -EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
11070 - * fman_port_disable
11071 - * port: A pointer to a FM Port module.
11073 - * Gracefully disable an FM port. The port will not start new tasks after all
11074 - * tasks associated with the port are terminated.
11076 - * This is a blocking routine, it returns after port is gracefully stopped,
11077 - * i.e. the port will not except new frames, but it will finish all frames
11078 - * or tasks which were already began.
11079 - * Allowed only following fman_port_init().
11081 - * Return: 0 on success; Error code otherwise.
11083 -int fman_port_disable(struct fman_port *port)
11085 - u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
11087 - bool rx_port, failure = false;
11090 - if (!is_init_done(port->cfg))
11093 - switch (port->port_type) {
11094 - case FMAN_PORT_TYPE_RX:
11095 - bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
11096 - bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
11099 - case FMAN_PORT_TYPE_TX:
11100 - bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
11101 - bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
11108 - /* Disable QMI */
11110 - tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
11111 - iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
11113 - /* Wait for QMI to finish FD handling */
11117 - tmp = ioread32be(&port->qmi_regs->fmqm_pns);
11118 - } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
11120 - if (count == 0) {
11126 - /* Disable BMI */
11127 - tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
11128 - iowrite32be(tmp, bmi_cfg_reg);
11130 - /* Wait for graceful stop end */
11134 - tmp = ioread32be(bmi_status_reg);
11135 - } while ((tmp & BMI_PORT_STATUS_BSY) && --count);
11137 - if (count == 0) {
11143 - dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
11144 - __func__, port->port_id);
11148 -EXPORT_SYMBOL(fman_port_disable);
11151 - * fman_port_enable
11152 - * port: A pointer to a FM Port module.
11154 - * A runtime routine provided to allow disable/enable of port.
11156 - * Allowed only following fman_port_init().
11158 - * Return: 0 on success; Error code otherwise.
11160 -int fman_port_enable(struct fman_port *port)
11162 - u32 __iomem *bmi_cfg_reg;
11166 - if (!is_init_done(port->cfg))
11169 - switch (port->port_type) {
11170 - case FMAN_PORT_TYPE_RX:
11171 - bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
11174 - case FMAN_PORT_TYPE_TX:
11175 - bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
11184 - tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
11185 - iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
11189 - tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
11190 - iowrite32be(tmp, bmi_cfg_reg);
11194 -EXPORT_SYMBOL(fman_port_enable);
11198 - * dev: FMan Port OF device pointer
11200 - * Bind to a specific FMan Port.
11202 - * Allowed only after the port was created.
11204 - * Return: A pointer to the FMan port device.
11206 -struct fman_port *fman_port_bind(struct device *dev)
11208 - return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
11210 -EXPORT_SYMBOL(fman_port_bind);
11213 - * fman_port_get_qman_channel_id
11214 - * port: Pointer to the FMan port devuce
11216 - * Get the QMan channel ID for the specific port
11218 - * Return: QMan channel ID
11220 -u32 fman_port_get_qman_channel_id(struct fman_port *port)
11222 - return port->dts_params.qman_channel_id;
11224 -EXPORT_SYMBOL(fman_port_get_qman_channel_id);
11226 -static int fman_port_probe(struct platform_device *of_dev)
11228 - struct fman_port *port;
11229 - struct fman *fman;
11230 - struct device_node *fm_node, *port_node;
11231 - struct resource res;
11232 - struct resource *dev_res;
11234 - int err = 0, lenp;
11235 - enum fman_port_type port_type;
11239 - port = kzalloc(sizeof(*port), GFP_KERNEL);
11243 - port->dev = &of_dev->dev;
11245 - port_node = of_node_get(of_dev->dev.of_node);
11247 - /* Get the FM node */
11248 - fm_node = of_get_parent(port_node);
11250 - dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
11255 - fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
11256 - of_node_put(fm_node);
11262 - err = of_property_read_u32(port_node, "cell-index", &val);
11264 - dev_err(port->dev, "%s: reading cell-index for %s failed\n",
11265 - __func__, port_node->full_name);
11269 - port_id = (u8)val;
11270 - port->dts_params.id = port_id;
11272 - if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
11273 - port_type = FMAN_PORT_TYPE_TX;
11274 - port_speed = 1000;
11275 - if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
11276 - port_speed = 10000;
11278 - } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
11279 - if (port_id >= TX_10G_PORT_BASE)
11280 - port_speed = 10000;
11282 - port_speed = 1000;
11283 - port_type = FMAN_PORT_TYPE_TX;
11285 - } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
11286 - port_type = FMAN_PORT_TYPE_RX;
11287 - port_speed = 1000;
11288 - if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
11289 - port_speed = 10000;
11291 - } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
11292 - if (port_id >= RX_10G_PORT_BASE)
11293 - port_speed = 10000;
11295 - port_speed = 1000;
11296 - port_type = FMAN_PORT_TYPE_RX;
11299 - dev_err(port->dev, "%s: Illegal port type\n", __func__);
11304 - port->dts_params.type = port_type;
11305 - port->dts_params.speed = port_speed;
11307 - if (port_type == FMAN_PORT_TYPE_TX) {
11308 - u32 qman_channel_id;
11310 - qman_channel_id = fman_get_qman_channel_id(fman, port_id);
11311 - if (qman_channel_id == 0) {
11312 - dev_err(port->dev, "%s: incorrect qman-channel-id\n",
11317 - port->dts_params.qman_channel_id = qman_channel_id;
11320 - err = of_address_to_resource(port_node, 0, &res);
11322 - dev_err(port->dev, "%s: of_address_to_resource() failed\n",
11328 - port->dts_params.fman = fman;
11330 - of_node_put(port_node);
11332 - dev_res = __devm_request_region(port->dev, &res, res.start,
11333 - resource_size(&res), "fman-port");
11335 - dev_err(port->dev, "%s: __devm_request_region() failed\n",
11341 - port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
11342 - resource_size(&res));
11343 - if (!port->dts_params.base_addr)
11344 - dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
11346 - dev_set_drvdata(&of_dev->dev, port);
11351 - of_node_put(port_node);
11357 -static const struct of_device_id fman_port_match[] = {
11358 - {.compatible = "fsl,fman-v3-port-rx"},
11359 - {.compatible = "fsl,fman-v2-port-rx"},
11360 - {.compatible = "fsl,fman-v3-port-tx"},
11361 - {.compatible = "fsl,fman-v2-port-tx"},
11365 -MODULE_DEVICE_TABLE(of, fman_port_match);
11367 -static struct platform_driver fman_port_driver = {
11369 - .name = "fsl-fman-port",
11370 - .of_match_table = fman_port_match,
11372 - .probe = fman_port_probe,
11375 -static int __init fman_port_load(void)
11379 - pr_debug("FSL DPAA FMan driver\n");
11381 - err = platform_driver_register(&fman_port_driver);
11383 - pr_err("Error, platform_driver_register() = %d\n", err);
11387 -module_init(fman_port_load);
11389 -static void __exit fman_port_unload(void)
11391 - platform_driver_unregister(&fman_port_driver);
11393 -module_exit(fman_port_unload);
11395 -MODULE_LICENSE("Dual BSD/GPL");
11396 -MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
11397 diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
11398 deleted file mode 100644
11399 index 8ba9017..0000000
11400 --- a/drivers/net/ethernet/freescale/fman/fman_port.h
11404 - * Copyright 2008 - 2015 Freescale Semiconductor Inc.
11406 - * Redistribution and use in source and binary forms, with or without
11407 - * modification, are permitted provided that the following conditions are met:
11408 - * * Redistributions of source code must retain the above copyright
11409 - * notice, this list of conditions and the following disclaimer.
11410 - * * Redistributions in binary form must reproduce the above copyright
11411 - * notice, this list of conditions and the following disclaimer in the
11412 - * documentation and/or other materials provided with the distribution.
11413 - * * Neither the name of Freescale Semiconductor nor the
11414 - * names of its contributors may be used to endorse or promote products
11415 - * derived from this software without specific prior written permission.
11418 - * ALTERNATIVELY, this software may be distributed under the terms of the
11419 - * GNU General Public License ("GPL") as published by the Free Software
11420 - * Foundation, either version 2 of that License or (at your option) any
11423 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11424 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11425 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11426 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11427 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11428 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11429 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11430 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11431 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11432 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11435 -#ifndef __FMAN_PORT_H
11436 -#define __FMAN_PORT_H
11441 - * The FM uses a general module called "port" to represent a Tx port (MAC),
11442 - * an Rx port (MAC).
11443 - * The number of ports in an FM varies between SOCs.
11444 - * The SW driver manages these ports as sub-modules of the FM,i.e. after an
11445 - * FM is initialized, its ports may be initialized and operated upon.
11446 - * The port is initialized aware of its type, but other functions on a port
11447 - * may be indifferent to its type. When necessary, the driver verifies
11448 - * coherence and returns error if applicable.
11449 - * On initialization, user specifies the port type and it's index (relative
11450 - * to the port's type) - always starting at 0.
11453 -/* FM Frame error */
11454 -/* Frame Descriptor errors */
11455 -/* Not for Rx-Port! Unsupported Format */
11456 -#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT
11457 -/* Not for Rx-Port! Length Error */
11458 -#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH
11459 -/* DMA Data error */
11460 -#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA
11461 -/* non Frame-Manager error; probably come from SEC that was chained to FM */
11462 -#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM
11464 -#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
11465 -/* IPR non-consistent-sp */
11466 -#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & \
11469 -/* Rx FIFO overflow, FCS error, code error, running disparity
11470 - * error (SGMII and TBI modes), FIFO parity error.
11471 - * PHY Sequence error, PHY error control character detected.
11473 -#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL
11474 -/* Frame too long OR Frame size exceeds max_length_frame */
11475 -#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE
11476 -/* indicates a classifier "drop" operation */
11477 -#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD
11478 -/* Extract Out of Frame */
11479 -#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION
11480 -/* No Scheme Selected */
11481 -#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME
11482 -/* Keysize Overflow */
11483 -#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW
11484 -/* Frame color is red */
11485 -#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED
11486 -/* Frame color is yellow */
11487 -#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW
11488 -/* Parser Time out Exceed */
11489 -#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT
11490 -/* Invalid Soft Parser instruction */
11491 -#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT
11492 -/* Header error was identified during parsing */
11493 -#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR
11494 -/* Frame parsed beyind 256 first bytes */
11495 -#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
11496 -/* FPM Frame Processing Timeout Exceeded */
11497 -#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001
11501 -/* A structure for additional Rx port parameters */
11502 -struct fman_port_rx_params {
11503 - u32 err_fqid; /* Error Queue Id. */
11504 - u32 dflt_fqid; /* Default Queue Id. */
11505 - /* Which external buffer pools are used
11506 - * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes.
11508 - struct fman_ext_pools ext_buf_pools;
11511 -/* A structure for additional non-Rx port parameters */
11512 -struct fman_port_non_rx_params {
11513 - /* Error Queue Id. */
11515 - /* For Tx - Default Confirmation queue, 0 means no Tx confirmation
11516 - * for processed frames. For OP port - default Rx queue.
11521 -/* A union for additional parameters depending on port type */
11522 -union fman_port_specific_params {
11523 - /* Rx port parameters structure */
11524 - struct fman_port_rx_params rx_params;
11525 - /* Non-Rx port parameters structure */
11526 - struct fman_port_non_rx_params non_rx_params;
11529 -/* A structure representing FM initialization parameters */
11530 -struct fman_port_params {
11531 - /* Virtual Address of memory mapped FM Port registers. */
11533 - union fman_port_specific_params specific_params;
11534 - /* Additional parameters depending on port type. */
11537 -int fman_port_config(struct fman_port *port, struct fman_port_params *params);
11539 -int fman_port_init(struct fman_port *port);
11541 -int fman_port_cfg_buf_prefix_content(struct fman_port *port,
11542 - struct fman_buffer_prefix_content
11543 - *buffer_prefix_content);
11545 -int fman_port_disable(struct fman_port *port);
11547 -int fman_port_enable(struct fman_port *port);
11549 -u32 fman_port_get_qman_channel_id(struct fman_port *port);
11551 -struct fman_port *fman_port_bind(struct device *dev);
11553 -#endif /* __FMAN_PORT_H */
11554 diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
11555 deleted file mode 100644
11556 index 248f5bc..0000000
11557 --- a/drivers/net/ethernet/freescale/fman/fman_sp.c
11561 - * Copyright 2008 - 2015 Freescale Semiconductor Inc.
11563 - * Redistribution and use in source and binary forms, with or without
11564 - * modification, are permitted provided that the following conditions are met:
11565 - * * Redistributions of source code must retain the above copyright
11566 - * notice, this list of conditions and the following disclaimer.
11567 - * * Redistributions in binary form must reproduce the above copyright
11568 - * notice, this list of conditions and the following disclaimer in the
11569 - * documentation and/or other materials provided with the distribution.
11570 - * * Neither the name of Freescale Semiconductor nor the
11571 - * names of its contributors may be used to endorse or promote products
11572 - * derived from this software without specific prior written permission.
11575 - * ALTERNATIVELY, this software may be distributed under the terms of the
11576 - * GNU General Public License ("GPL") as published by the Free Software
11577 - * Foundation, either version 2 of that License or (at your option) any
11580 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11581 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11582 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11583 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11584 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11585 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11586 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11587 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11588 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11589 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11592 -#include "fman_sp.h"
11595 -void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
11597 - u8 *ordered_array,
11598 - u16 *sizes_array)
11600 - u16 buf_size = 0;
11601 - int i = 0, j = 0, k = 0;
11603 - /* First we copy the external buffers pools information
11604 - * to an ordered local array
11606 - for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) {
11607 - /* get pool size */
11608 - buf_size = fm_ext_pools->ext_buf_pool[i].size;
11610 - /* keep sizes in an array according to poolId
11611 - * for direct access
11613 - sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size;
11615 - /* save poolId in an ordered array according to size */
11616 - for (j = 0; j <= i; j++) {
11617 - /* this is the next free place in the array */
11619 - ordered_array[i] =
11620 - fm_ext_pools->ext_buf_pool[i].id;
11622 - /* find the right place for this poolId */
11623 - if (buf_size < sizes_array[ordered_array[j]]) {
11624 - /* move the pool_ids one place ahead
11625 - * to make room for this poolId
11627 - for (k = i; k > j; k--)
11628 - ordered_array[k] =
11629 - ordered_array[k - 1];
11631 - /* now k==j, this is the place for
11634 - ordered_array[k] =
11635 - fm_ext_pools->ext_buf_pool[i].id;
11642 -EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
11644 -int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
11645 - int_context_data_copy,
11646 - struct fman_buffer_prefix_content *
11647 - buffer_prefix_content,
11648 - struct fman_sp_buf_margins *buf_margins,
11649 - struct fman_sp_buffer_offsets *buffer_offsets,
11650 - u8 *internal_buf_offset)
11654 - /* Align start of internal context data to 16 byte */
11655 - int_context_data_copy->ext_buf_offset = (u16)
11656 - ((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ?
11657 - ((buffer_prefix_content->priv_data_size + OFFSET_UNITS) &
11658 - ~(u16)(OFFSET_UNITS - 1)) :
11659 - buffer_prefix_content->priv_data_size);
11661 - /* Translate margin and int_context params to FM parameters */
11662 - /* Initialize with illegal value. Later we'll set legal values. */
11663 - buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE;
11664 - buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE;
11665 - buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE;
11667 - /* Internally the driver supports 4 options
11668 - * 1. prsResult/timestamp/hashResult selection (in fact 8 options,
11669 - * but for simplicity we'll
11670 - * relate to it as 1).
11671 - * 2. All IC context (from AD) not including debug.
11674 - /* This case covers the options under 1 */
11675 - /* Copy size must be in 16-byte granularity. */
11676 - int_context_data_copy->size =
11677 - (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) +
11678 - ((buffer_prefix_content->pass_time_stamp ||
11679 - buffer_prefix_content->pass_hash_result) ? 16 : 0));
11681 - /* Align start of internal context data to 16 byte */
11682 - int_context_data_copy->int_context_offset =
11683 - (u8)(buffer_prefix_content->pass_prs_result ? 32 :
11684 - ((buffer_prefix_content->pass_time_stamp ||
11685 - buffer_prefix_content->pass_hash_result) ? 64 : 0));
11687 - if (buffer_prefix_content->pass_prs_result)
11688 - buffer_offsets->prs_result_offset =
11689 - int_context_data_copy->ext_buf_offset;
11690 - if (buffer_prefix_content->pass_time_stamp)
11691 - buffer_offsets->time_stamp_offset =
11692 - buffer_prefix_content->pass_prs_result ?
11693 - (int_context_data_copy->ext_buf_offset +
11694 - sizeof(struct fman_prs_result)) :
11695 - int_context_data_copy->ext_buf_offset;
11696 - if (buffer_prefix_content->pass_hash_result)
11697 - /* If PR is not requested, whether TS is
11698 - * requested or not, IC will be copied from TS
11700 - buffer_offsets->hash_result_offset =
11701 - buffer_prefix_content->pass_prs_result ?
11702 - (int_context_data_copy->ext_buf_offset +
11703 - sizeof(struct fman_prs_result) + 8) :
11704 - int_context_data_copy->ext_buf_offset + 8;
11706 - if (int_context_data_copy->size)
11707 - buf_margins->start_margins =
11708 - (u16)(int_context_data_copy->ext_buf_offset +
11709 - int_context_data_copy->size);
11711 - /* No Internal Context passing, STartMargin is
11712 - * immediately after private_info
11714 - buf_margins->start_margins =
11715 - buffer_prefix_content->priv_data_size;
11717 - /* align data start */
11718 - tmp = (u32)(buf_margins->start_margins %
11719 - buffer_prefix_content->data_align);
11721 - buf_margins->start_margins +=
11722 - (buffer_prefix_content->data_align - tmp);
11723 - buffer_offsets->data_offset = buf_margins->start_margins;
11727 -EXPORT_SYMBOL(fman_sp_build_buffer_struct);
11729 diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
11730 deleted file mode 100644
11731 index 820b7f6..0000000
11732 --- a/drivers/net/ethernet/freescale/fman/fman_sp.h
11736 - * Copyright 2008 - 2015 Freescale Semiconductor Inc.
11738 - * Redistribution and use in source and binary forms, with or without
11739 - * modification, are permitted provided that the following conditions are met:
11740 - * * Redistributions of source code must retain the above copyright
11741 - * notice, this list of conditions and the following disclaimer.
11742 - * * Redistributions in binary form must reproduce the above copyright
11743 - * notice, this list of conditions and the following disclaimer in the
11744 - * documentation and/or other materials provided with the distribution.
11745 - * * Neither the name of Freescale Semiconductor nor the
11746 - * names of its contributors may be used to endorse or promote products
11747 - * derived from this software without specific prior written permission.
11749 - * ALTERNATIVELY, this software may be distributed under the terms of the
11750 - * GNU General Public License ("GPL") as published by the Free Software
11751 - * Foundation, either version 2 of that License or (at your option) any
11754 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11755 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11756 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11757 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11758 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11759 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11760 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11761 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11762 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11763 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11770 -#include <linux/types.h>
11772 -#define ILLEGAL_BASE (~0)
11775 -#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN 64
11777 -/* Registers bit fields */
11778 -#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000
11779 -#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000
11780 -#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000
11781 -#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000
11782 -#define FMAN_SP_SG_DISABLE 0x80000000
11785 -#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
11786 -#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30
11787 -#define FMAN_SP_IC_TO_EXT_SHIFT 16
11788 -#define FMAN_SP_IC_FROM_INT_SHIFT 8
11790 -/* structure for defining internal context copying */
11791 -struct fman_sp_int_context_data_copy {
11792 - /* < Offset in External buffer to which internal
11793 - * context is copied to (Rx) or taken from (Tx, Op).
11795 - u16 ext_buf_offset;
11796 - /* Offset within internal context to copy from
11797 - * (Rx) or to copy to (Tx, Op).
11799 - u8 int_context_offset;
11800 - /* Internal offset size to be copied */
11804 -/* struct for defining external buffer margins */
11805 -struct fman_sp_buf_margins {
11806 - /* Number of bytes to be left at the beginning
11807 - * of the external buffer (must be divisible by 16)
11809 - u16 start_margins;
11810 - /* number of bytes to be left at the end
11811 - * of the external buffer(must be divisible by 16)
11816 -struct fman_sp_buffer_offsets {
11818 - u32 prs_result_offset;
11819 - u32 time_stamp_offset;
11820 - u32 hash_result_offset;
11823 -int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy
11824 - *int_context_data_copy,
11825 - struct fman_buffer_prefix_content
11826 - *buffer_prefix_content,
11827 - struct fman_sp_buf_margins *buf_margins,
11828 - struct fman_sp_buffer_offsets
11830 - u8 *internal_buf_offset);
11832 -void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
11834 - u8 *ordered_array,
11835 - u16 *sizes_array);
11837 -#endif /* __FM_SP_H */
11838 diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
11839 deleted file mode 100644
11840 index 4b0f3a5..0000000
11841 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
11845 - * Copyright 2008-2015 Freescale Semiconductor Inc.
11847 - * Redistribution and use in source and binary forms, with or without
11848 - * modification, are permitted provided that the following conditions are met:
11849 - * * Redistributions of source code must retain the above copyright
11850 - * notice, this list of conditions and the following disclaimer.
11851 - * * Redistributions in binary form must reproduce the above copyright
11852 - * notice, this list of conditions and the following disclaimer in the
11853 - * documentation and/or other materials provided with the distribution.
11854 - * * Neither the name of Freescale Semiconductor nor the
11855 - * names of its contributors may be used to endorse or promote products
11856 - * derived from this software without specific prior written permission.
11859 - * ALTERNATIVELY, this software may be distributed under the terms of the
11860 - * GNU General Public License ("GPL") as published by the Free Software
11861 - * Foundation, either version 2 of that License or (at your option) any
11864 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11865 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11866 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11867 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11868 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11869 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11870 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11871 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11872 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11873 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11876 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11878 -#include "fman_tgec.h"
11881 -#include <linux/slab.h>
11882 -#include <linux/bitrev.h>
11883 -#include <linux/io.h>
11884 -#include <linux/crc32.h>
11886 -/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
11887 -#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
11889 -/* Command and Configuration Register (COMMAND_CONFIG) */
11890 -#define CMD_CFG_NO_LEN_CHK 0x00020000
11891 -#define CMD_CFG_PAUSE_IGNORE 0x00000100
11892 -#define CMF_CFG_CRC_FWD 0x00000040
11893 -#define CMD_CFG_PROMIS_EN 0x00000010
11894 -#define CMD_CFG_RX_EN 0x00000002
11895 -#define CMD_CFG_TX_EN 0x00000001
11897 -/* Interrupt Mask Register (IMASK) */
11898 -#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000
11899 -#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000
11900 -#define TGEC_IMASK_REM_FAULT 0x00004000
11901 -#define TGEC_IMASK_LOC_FAULT 0x00002000
11902 -#define TGEC_IMASK_TX_ECC_ER 0x00001000
11903 -#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800
11904 -#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400
11905 -#define TGEC_IMASK_TX_ER 0x00000200
11906 -#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100
11907 -#define TGEC_IMASK_RX_ECC_ER 0x00000080
11908 -#define TGEC_IMASK_RX_JAB_FRM 0x00000040
11909 -#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020
11910 -#define TGEC_IMASK_RX_RUNT_FRM 0x00000010
11911 -#define TGEC_IMASK_RX_FRAG_FRM 0x00000008
11912 -#define TGEC_IMASK_RX_LEN_ER 0x00000004
11913 -#define TGEC_IMASK_RX_CRC_ER 0x00000002
11914 -#define TGEC_IMASK_RX_ALIGN_ER 0x00000001
11916 -/* Hashtable Control Register (HASHTABLE_CTRL) */
11917 -#define TGEC_HASH_MCAST_SHIFT 23
11918 -#define TGEC_HASH_MCAST_EN 0x00000200
11919 -#define TGEC_HASH_ADR_MSK 0x000001ff
11921 -#define DEFAULT_TX_IPG_LENGTH 12
11922 -#define DEFAULT_MAX_FRAME_LENGTH 0x600
11923 -#define DEFAULT_PAUSE_QUANT 0xf000
11925 -/* number of pattern match registers (entries) */
11926 -#define TGEC_NUM_OF_PADDRS 1
11928 -/* Group address bit indication */
11929 -#define GROUP_ADDRESS 0x0000010000000000LL
11931 -/* Hash table size (= 32 bits*8 regs) */
11932 -#define TGEC_HASH_TABLE_SIZE 512
11934 -/* tGEC memory map */
11935 -struct tgec_regs {
11936 - u32 tgec_id; /* 0x000 Controller ID */
11937 - u32 reserved001[1]; /* 0x004 */
11938 - u32 command_config; /* 0x008 Control and configuration */
11939 - u32 mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */
11940 - u32 mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */
11941 - u32 maxfrm; /* 0x014 Maximum frame length */
11942 - u32 pause_quant; /* 0x018 Pause quanta */
11943 - u32 rx_fifo_sections; /* 0x01c */
11944 - u32 tx_fifo_sections; /* 0x020 */
11945 - u32 rx_fifo_almost_f_e; /* 0x024 */
11946 - u32 tx_fifo_almost_f_e; /* 0x028 */
11947 - u32 hashtable_ctrl; /* 0x02c Hash table control */
11948 - u32 mdio_cfg_status; /* 0x030 */
11949 - u32 mdio_command; /* 0x034 */
11950 - u32 mdio_data; /* 0x038 */
11951 - u32 mdio_regaddr; /* 0x03c */
11952 - u32 status; /* 0x040 */
11953 - u32 tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */
11954 - u32 mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */
11955 - u32 mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */
11956 - u32 rx_fifo_ptr_rd; /* 0x050 */
11957 - u32 rx_fifo_ptr_wr; /* 0x054 */
11958 - u32 tx_fifo_ptr_rd; /* 0x058 */
11959 - u32 tx_fifo_ptr_wr; /* 0x05c */
11960 - u32 imask; /* 0x060 Interrupt mask */
11961 - u32 ievent; /* 0x064 Interrupt event */
11962 - u32 udp_port; /* 0x068 Defines a UDP Port number */
11963 - u32 type_1588v2; /* 0x06c Type field for 1588v2 */
11964 - u32 reserved070[4]; /* 0x070 */
11965 - /* 10Ge Statistics Counter */
11966 - u32 tfrm_u; /* 80 aFramesTransmittedOK */
11967 - u32 tfrm_l; /* 84 aFramesTransmittedOK */
11968 - u32 rfrm_u; /* 88 aFramesReceivedOK */
11969 - u32 rfrm_l; /* 8c aFramesReceivedOK */
11970 - u32 rfcs_u; /* 90 aFrameCheckSequenceErrors */
11971 - u32 rfcs_l; /* 94 aFrameCheckSequenceErrors */
11972 - u32 raln_u; /* 98 aAlignmentErrors */
11973 - u32 raln_l; /* 9c aAlignmentErrors */
11974 - u32 txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */
11975 - u32 txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */
11976 - u32 rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */
11977 - u32 rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */
11978 - u32 rlong_u; /* B0 aFrameTooLongErrors */
11979 - u32 rlong_l; /* B4 aFrameTooLongErrors */
11980 - u32 rflr_u; /* B8 aInRangeLengthErrors */
11981 - u32 rflr_l; /* Bc aInRangeLengthErrors */
11982 - u32 tvlan_u; /* C0 VLANTransmittedOK */
11983 - u32 tvlan_l; /* C4 VLANTransmittedOK */
11984 - u32 rvlan_u; /* C8 VLANReceivedOK */
11985 - u32 rvlan_l; /* Cc VLANReceivedOK */
11986 - u32 toct_u; /* D0 if_out_octets */
11987 - u32 toct_l; /* D4 if_out_octets */
11988 - u32 roct_u; /* D8 if_in_octets */
11989 - u32 roct_l; /* Dc if_in_octets */
11990 - u32 ruca_u; /* E0 if_in_ucast_pkts */
11991 - u32 ruca_l; /* E4 if_in_ucast_pkts */
11992 - u32 rmca_u; /* E8 ifInMulticastPkts */
11993 - u32 rmca_l; /* Ec ifInMulticastPkts */
11994 - u32 rbca_u; /* F0 ifInBroadcastPkts */
11995 - u32 rbca_l; /* F4 ifInBroadcastPkts */
11996 - u32 terr_u; /* F8 if_out_errors */
11997 - u32 terr_l; /* Fc if_out_errors */
11998 - u32 reserved100[2]; /* 100-108 */
11999 - u32 tuca_u; /* 108 if_out_ucast_pkts */
12000 - u32 tuca_l; /* 10c if_out_ucast_pkts */
12001 - u32 tmca_u; /* 110 ifOutMulticastPkts */
12002 - u32 tmca_l; /* 114 ifOutMulticastPkts */
12003 - u32 tbca_u; /* 118 ifOutBroadcastPkts */
12004 - u32 tbca_l; /* 11c ifOutBroadcastPkts */
12005 - u32 rdrp_u; /* 120 etherStatsDropEvents */
12006 - u32 rdrp_l; /* 124 etherStatsDropEvents */
12007 - u32 reoct_u; /* 128 etherStatsOctets */
12008 - u32 reoct_l; /* 12c etherStatsOctets */
12009 - u32 rpkt_u; /* 130 etherStatsPkts */
12010 - u32 rpkt_l; /* 134 etherStatsPkts */
12011 - u32 trund_u; /* 138 etherStatsUndersizePkts */
12012 - u32 trund_l; /* 13c etherStatsUndersizePkts */
12013 - u32 r64_u; /* 140 etherStatsPkts64Octets */
12014 - u32 r64_l; /* 144 etherStatsPkts64Octets */
12015 - u32 r127_u; /* 148 etherStatsPkts65to127Octets */
12016 - u32 r127_l; /* 14c etherStatsPkts65to127Octets */
12017 - u32 r255_u; /* 150 etherStatsPkts128to255Octets */
12018 - u32 r255_l; /* 154 etherStatsPkts128to255Octets */
12019 - u32 r511_u; /* 158 etherStatsPkts256to511Octets */
12020 - u32 r511_l; /* 15c etherStatsPkts256to511Octets */
12021 - u32 r1023_u; /* 160 etherStatsPkts512to1023Octets */
12022 - u32 r1023_l; /* 164 etherStatsPkts512to1023Octets */
12023 - u32 r1518_u; /* 168 etherStatsPkts1024to1518Octets */
12024 - u32 r1518_l; /* 16c etherStatsPkts1024to1518Octets */
12025 - u32 r1519x_u; /* 170 etherStatsPkts1519toX */
12026 - u32 r1519x_l; /* 174 etherStatsPkts1519toX */
12027 - u32 trovr_u; /* 178 etherStatsOversizePkts */
12028 - u32 trovr_l; /* 17c etherStatsOversizePkts */
12029 - u32 trjbr_u; /* 180 etherStatsJabbers */
12030 - u32 trjbr_l; /* 184 etherStatsJabbers */
12031 - u32 trfrg_u; /* 188 etherStatsFragments */
12032 - u32 trfrg_l; /* 18C etherStatsFragments */
12033 - u32 rerr_u; /* 190 if_in_errors */
12034 - u32 rerr_l; /* 194 if_in_errors */
12038 - bool pause_ignore;
12039 - bool promiscuous_mode_enable;
12040 - u16 max_frame_length;
12042 - u32 tx_ipg_length;
12046 - /* Pointer to the memory mapped registers. */
12047 - struct tgec_regs __iomem *regs;
12048 - /* MAC address of device; */
12051 - void *dev_id; /* device cookie used by the exception cbs */
12052 - fman_mac_exception_cb *exception_cb;
12053 - fman_mac_exception_cb *event_cb;
12054 - /* pointer to driver's global address hash table */
12055 - struct eth_hash_t *multicast_addr_hash;
12056 - /* pointer to driver's individual address hash table */
12057 - struct eth_hash_t *unicast_addr_hash;
12060 - struct tgec_cfg *cfg;
12062 - struct fman_rev_info fm_rev_info;
12065 -static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
12069 - tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
12070 - tmp1 = (u32)(adr[4] | adr[5] << 8);
12071 - iowrite32be(tmp0, ®s->mac_addr_0);
12072 - iowrite32be(tmp1, ®s->mac_addr_1);
12075 -static void set_dflts(struct tgec_cfg *cfg)
12077 - cfg->promiscuous_mode_enable = false;
12078 - cfg->pause_ignore = false;
12079 - cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
12080 - cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
12081 - cfg->pause_quant = DEFAULT_PAUSE_QUANT;
12084 -static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
12085 - u32 exception_mask)
12090 - tmp = CMF_CFG_CRC_FWD;
12091 - if (cfg->promiscuous_mode_enable)
12092 - tmp |= CMD_CFG_PROMIS_EN;
12093 - if (cfg->pause_ignore)
12094 - tmp |= CMD_CFG_PAUSE_IGNORE;
12095 - /* Payload length check disable */
12096 - tmp |= CMD_CFG_NO_LEN_CHK;
12097 - iowrite32be(tmp, ®s->command_config);
12099 - /* Max Frame Length */
12100 - iowrite32be((u32)cfg->max_frame_length, ®s->maxfrm);
12102 - iowrite32be(cfg->pause_quant, ®s->pause_quant);
12104 - /* clear all pending events and set-up interrupts */
12105 - iowrite32be(0xffffffff, ®s->ievent);
12106 - iowrite32be(ioread32be(®s->imask) | exception_mask, ®s->imask);
12111 -static int check_init_parameters(struct fman_mac *tgec)
12113 - if (tgec->max_speed < SPEED_10000) {
12114 - pr_err("10G MAC driver only support 10G speed\n");
12117 - if (tgec->addr == 0) {
12118 - pr_err("Ethernet 10G MAC Must have valid MAC Address\n");
12121 - if (!tgec->exception_cb) {
12122 - pr_err("uninitialized exception_cb\n");
12125 - if (!tgec->event_cb) {
12126 - pr_err("uninitialized event_cb\n");
12133 -static int get_exception_flag(enum fman_mac_exceptions exception)
12137 - switch (exception) {
12138 - case FM_MAC_EX_10G_MDIO_SCAN_EVENT:
12139 - bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT;
12141 - case FM_MAC_EX_10G_MDIO_CMD_CMPL:
12142 - bit_mask = TGEC_IMASK_MDIO_CMD_CMPL;
12144 - case FM_MAC_EX_10G_REM_FAULT:
12145 - bit_mask = TGEC_IMASK_REM_FAULT;
12147 - case FM_MAC_EX_10G_LOC_FAULT:
12148 - bit_mask = TGEC_IMASK_LOC_FAULT;
12150 - case FM_MAC_EX_10G_TX_ECC_ER:
12151 - bit_mask = TGEC_IMASK_TX_ECC_ER;
12153 - case FM_MAC_EX_10G_TX_FIFO_UNFL:
12154 - bit_mask = TGEC_IMASK_TX_FIFO_UNFL;
12156 - case FM_MAC_EX_10G_TX_FIFO_OVFL:
12157 - bit_mask = TGEC_IMASK_TX_FIFO_OVFL;
12159 - case FM_MAC_EX_10G_TX_ER:
12160 - bit_mask = TGEC_IMASK_TX_ER;
12162 - case FM_MAC_EX_10G_RX_FIFO_OVFL:
12163 - bit_mask = TGEC_IMASK_RX_FIFO_OVFL;
12165 - case FM_MAC_EX_10G_RX_ECC_ER:
12166 - bit_mask = TGEC_IMASK_RX_ECC_ER;
12168 - case FM_MAC_EX_10G_RX_JAB_FRM:
12169 - bit_mask = TGEC_IMASK_RX_JAB_FRM;
12171 - case FM_MAC_EX_10G_RX_OVRSZ_FRM:
12172 - bit_mask = TGEC_IMASK_RX_OVRSZ_FRM;
12174 - case FM_MAC_EX_10G_RX_RUNT_FRM:
12175 - bit_mask = TGEC_IMASK_RX_RUNT_FRM;
12177 - case FM_MAC_EX_10G_RX_FRAG_FRM:
12178 - bit_mask = TGEC_IMASK_RX_FRAG_FRM;
12180 - case FM_MAC_EX_10G_RX_LEN_ER:
12181 - bit_mask = TGEC_IMASK_RX_LEN_ER;
12183 - case FM_MAC_EX_10G_RX_CRC_ER:
12184 - bit_mask = TGEC_IMASK_RX_CRC_ER;
12186 - case FM_MAC_EX_10G_RX_ALIGN_ER:
12187 - bit_mask = TGEC_IMASK_RX_ALIGN_ER;
12197 -static void tgec_err_exception(void *handle)
12199 - struct fman_mac *tgec = (struct fman_mac *)handle;
12200 - struct tgec_regs __iomem *regs = tgec->regs;
12203 - /* do not handle MDIO events */
12204 - event = ioread32be(®s->ievent) &
12205 - ~(TGEC_IMASK_MDIO_SCAN_EVENT |
12206 - TGEC_IMASK_MDIO_CMD_CMPL);
12208 - event &= ioread32be(®s->imask);
12210 - iowrite32be(event, ®s->ievent);
12212 - if (event & TGEC_IMASK_REM_FAULT)
12213 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT);
12214 - if (event & TGEC_IMASK_LOC_FAULT)
12215 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT);
12216 - if (event & TGEC_IMASK_TX_ECC_ER)
12217 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
12218 - if (event & TGEC_IMASK_TX_FIFO_UNFL)
12219 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL);
12220 - if (event & TGEC_IMASK_TX_FIFO_OVFL)
12221 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL);
12222 - if (event & TGEC_IMASK_TX_ER)
12223 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER);
12224 - if (event & TGEC_IMASK_RX_FIFO_OVFL)
12225 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL);
12226 - if (event & TGEC_IMASK_RX_ECC_ER)
12227 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
12228 - if (event & TGEC_IMASK_RX_JAB_FRM)
12229 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM);
12230 - if (event & TGEC_IMASK_RX_OVRSZ_FRM)
12231 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM);
12232 - if (event & TGEC_IMASK_RX_RUNT_FRM)
12233 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM);
12234 - if (event & TGEC_IMASK_RX_FRAG_FRM)
12235 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM);
12236 - if (event & TGEC_IMASK_RX_LEN_ER)
12237 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER);
12238 - if (event & TGEC_IMASK_RX_CRC_ER)
12239 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER);
12240 - if (event & TGEC_IMASK_RX_ALIGN_ER)
12241 - tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER);
12244 -static void free_init_resources(struct fman_mac *tgec)
12246 - fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
12247 - FMAN_INTR_TYPE_ERR);
12249 - /* release the driver's group hash table */
12250 - free_hash_table(tgec->multicast_addr_hash);
12251 - tgec->multicast_addr_hash = NULL;
12253 - /* release the driver's individual hash table */
12254 - free_hash_table(tgec->unicast_addr_hash);
12255 - tgec->unicast_addr_hash = NULL;
12258 -static bool is_init_done(struct tgec_cfg *cfg)
12260 - /* Checks if tGEC driver parameters were initialized */
12267 -int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
12269 - struct tgec_regs __iomem *regs = tgec->regs;
12272 - if (!is_init_done(tgec->cfg))
12275 - tmp = ioread32be(®s->command_config);
12276 - if (mode & COMM_MODE_RX)
12277 - tmp |= CMD_CFG_RX_EN;
12278 - if (mode & COMM_MODE_TX)
12279 - tmp |= CMD_CFG_TX_EN;
12280 - iowrite32be(tmp, ®s->command_config);
12285 -int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
12287 - struct tgec_regs __iomem *regs = tgec->regs;
12290 - if (!is_init_done(tgec->cfg))
12293 - tmp = ioread32be(®s->command_config);
12294 - if (mode & COMM_MODE_RX)
12295 - tmp &= ~CMD_CFG_RX_EN;
12296 - if (mode & COMM_MODE_TX)
12297 - tmp &= ~CMD_CFG_TX_EN;
12298 - iowrite32be(tmp, ®s->command_config);
12303 -int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
12305 - struct tgec_regs __iomem *regs = tgec->regs;
12308 - if (!is_init_done(tgec->cfg))
12311 - tmp = ioread32be(®s->command_config);
12313 - tmp |= CMD_CFG_PROMIS_EN;
12315 - tmp &= ~CMD_CFG_PROMIS_EN;
12316 - iowrite32be(tmp, ®s->command_config);
12321 -int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
12323 - if (is_init_done(tgec->cfg))
12326 - tgec->cfg->max_frame_length = new_val;
12331 -int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
12332 - u16 pause_time, u16 __maybe_unused thresh_time)
12334 - struct tgec_regs __iomem *regs = tgec->regs;
12336 - if (!is_init_done(tgec->cfg))
12339 - iowrite32be((u32)pause_time, ®s->pause_quant);
12344 -int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
12346 - struct tgec_regs __iomem *regs = tgec->regs;
12349 - if (!is_init_done(tgec->cfg))
12352 - tmp = ioread32be(®s->command_config);
12354 - tmp |= CMD_CFG_PAUSE_IGNORE;
12356 - tmp &= ~CMD_CFG_PAUSE_IGNORE;
12357 - iowrite32be(tmp, ®s->command_config);
12362 -int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
12364 - if (!is_init_done(tgec->cfg))
12367 - tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
12368 - set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
12373 -int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
12375 - struct tgec_regs __iomem *regs = tgec->regs;
12376 - struct eth_hash_entry *hash_entry;
12377 - u32 crc = 0xFFFFFFFF, hash;
12380 - if (!is_init_done(tgec->cfg))
12383 - addr = ENET_ADDR_TO_UINT64(*eth_addr);
12385 - if (!(addr & GROUP_ADDRESS)) {
12386 - /* Unicast addresses not supported in hash */
12387 - pr_err("Unicast Address\n");
12390 - /* CRC calculation */
12391 - crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
12392 - crc = bitrev32(crc);
12393 - /* Take 9 MSB bits */
12394 - hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
12396 - /* Create element to be added to the driver hash table */
12397 - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
12400 - hash_entry->addr = addr;
12401 - INIT_LIST_HEAD(&hash_entry->node);
12403 - list_add_tail(&hash_entry->node,
12404 - &tgec->multicast_addr_hash->lsts[hash]);
12405 - iowrite32be((hash | TGEC_HASH_MCAST_EN), ®s->hashtable_ctrl);
12410 -int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
12412 - struct tgec_regs __iomem *regs = tgec->regs;
12413 - struct eth_hash_entry *hash_entry = NULL;
12414 - struct list_head *pos;
12415 - u32 crc = 0xFFFFFFFF, hash;
12418 - if (!is_init_done(tgec->cfg))
12421 - addr = ((*(u64 *)eth_addr) >> 16);
12423 - /* CRC calculation */
12424 - crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
12425 - crc = bitrev32(crc);
12426 - /* Take 9 MSB bits */
12427 - hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
12429 - list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
12430 - hash_entry = ETH_HASH_ENTRY_OBJ(pos);
12431 - if (hash_entry->addr == addr) {
12432 - list_del_init(&hash_entry->node);
12433 - kfree(hash_entry);
12437 - if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
12438 - iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
12439 - ®s->hashtable_ctrl);
12444 -int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
12446 - struct tgec_regs __iomem *regs = tgec->regs;
12448 - if (!is_init_done(tgec->cfg))
12451 - *mac_version = ioread32be(®s->tgec_id);
12456 -int tgec_set_exception(struct fman_mac *tgec,
12457 - enum fman_mac_exceptions exception, bool enable)
12459 - struct tgec_regs __iomem *regs = tgec->regs;
12460 - u32 bit_mask = 0;
12462 - if (!is_init_done(tgec->cfg))
12465 - bit_mask = get_exception_flag(exception);
12468 - tgec->exceptions |= bit_mask;
12470 - tgec->exceptions &= ~bit_mask;
12472 - pr_err("Undefined exception\n");
12476 - iowrite32be(ioread32be(®s->imask) | bit_mask, ®s->imask);
12478 - iowrite32be(ioread32be(®s->imask) & ~bit_mask, ®s->imask);
12483 -int tgec_init(struct fman_mac *tgec)
12485 - struct tgec_cfg *cfg;
12486 - enet_addr_t eth_addr;
12489 - if (is_init_done(tgec->cfg))
12492 - if (DEFAULT_RESET_ON_INIT &&
12493 - (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
12494 - pr_err("Can't reset MAC!\n");
12498 - err = check_init_parameters(tgec);
12504 - MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
12505 - set_mac_address(tgec->regs, (u8 *)eth_addr);
12508 - /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
12509 - if (tgec->fm_rev_info.major <= 2)
12510 - tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT |
12511 - TGEC_IMASK_LOC_FAULT);
12513 - err = init(tgec->regs, cfg, tgec->exceptions);
12515 - free_init_resources(tgec);
12516 - pr_err("TGEC version doesn't support this i/f mode\n");
12520 - /* Max Frame Length */
12521 - err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id,
12522 - cfg->max_frame_length);
12524 - pr_err("Setting max frame length FAILED\n");
12525 - free_init_resources(tgec);
12529 - /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */
12530 - if (tgec->fm_rev_info.major == 2) {
12531 - struct tgec_regs __iomem *regs = tgec->regs;
12534 - /* restore the default tx ipg Length */
12535 - tmp = (ioread32be(®s->tx_ipg_len) &
12536 - ~TGEC_TX_IPG_LENGTH_MASK) | 12;
12538 - iowrite32be(tmp, ®s->tx_ipg_len);
12541 - tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
12542 - if (!tgec->multicast_addr_hash) {
12543 - free_init_resources(tgec);
12544 - pr_err("allocation hash table is FAILED\n");
12548 - tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
12549 - if (!tgec->unicast_addr_hash) {
12550 - free_init_resources(tgec);
12551 - pr_err("allocation hash table is FAILED\n");
12555 - fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
12556 - FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec);
12559 - tgec->cfg = NULL;
12564 -int tgec_free(struct fman_mac *tgec)
12566 - free_init_resources(tgec);
12568 - kfree(tgec->cfg);
12574 -struct fman_mac *tgec_config(struct fman_mac_params *params)
12576 - struct fman_mac *tgec;
12577 - struct tgec_cfg *cfg;
12578 - void __iomem *base_addr;
12580 - base_addr = params->base_addr;
12581 - /* allocate memory for the UCC GETH data structure. */
12582 - tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
12586 - /* allocate memory for the 10G MAC driver parameters data structure. */
12587 - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
12593 - /* Plant parameter structure pointer */
12598 - tgec->regs = base_addr;
12599 - tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
12600 - tgec->max_speed = params->max_speed;
12601 - tgec->mac_id = params->mac_id;
12602 - tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
12603 - TGEC_IMASK_REM_FAULT |
12604 - TGEC_IMASK_LOC_FAULT |
12605 - TGEC_IMASK_TX_ECC_ER |
12606 - TGEC_IMASK_TX_FIFO_UNFL |
12607 - TGEC_IMASK_TX_FIFO_OVFL |
12608 - TGEC_IMASK_TX_ER |
12609 - TGEC_IMASK_RX_FIFO_OVFL |
12610 - TGEC_IMASK_RX_ECC_ER |
12611 - TGEC_IMASK_RX_JAB_FRM |
12612 - TGEC_IMASK_RX_OVRSZ_FRM |
12613 - TGEC_IMASK_RX_RUNT_FRM |
12614 - TGEC_IMASK_RX_FRAG_FRM |
12615 - TGEC_IMASK_RX_CRC_ER |
12616 - TGEC_IMASK_RX_ALIGN_ER);
12617 - tgec->exception_cb = params->exception_cb;
12618 - tgec->event_cb = params->event_cb;
12619 - tgec->dev_id = params->dev_id;
12620 - tgec->fm = params->fm;
12622 - /* Save FMan revision */
12623 - fman_get_revision(tgec->fm, &tgec->fm_rev_info);
12627 diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
12628 deleted file mode 100644
12629 index 514bba9..0000000
12630 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
12634 - * Copyright 2008-2015 Freescale Semiconductor Inc.
12636 - * Redistribution and use in source and binary forms, with or without
12637 - * modification, are permitted provided that the following conditions are met:
12638 - * * Redistributions of source code must retain the above copyright
12639 - * notice, this list of conditions and the following disclaimer.
12640 - * * Redistributions in binary form must reproduce the above copyright
12641 - * notice, this list of conditions and the following disclaimer in the
12642 - * documentation and/or other materials provided with the distribution.
12643 - * * Neither the name of Freescale Semiconductor nor the
12644 - * names of its contributors may be used to endorse or promote products
12645 - * derived from this software without specific prior written permission.
12648 - * ALTERNATIVELY, this software may be distributed under the terms of the
12649 - * GNU General Public License ("GPL") as published by the Free Software
12650 - * Foundation, either version 2 of that License or (at your option) any
12653 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12654 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12655 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12656 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12657 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12658 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12659 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12660 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12661 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12662 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12668 -#include "fman_mac.h"
12670 -struct fman_mac *tgec_config(struct fman_mac_params *params);
12671 -int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
12672 -int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
12673 -int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
12674 -int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
12675 -int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
12676 -int tgec_init(struct fman_mac *tgec);
12677 -int tgec_free(struct fman_mac *tgec);
12678 -int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
12679 -int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
12680 - u16 pause_time, u16 thresh_time);
12681 -int tgec_set_exception(struct fman_mac *tgec,
12682 - enum fman_mac_exceptions exception, bool enable);
12683 -int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
12684 -int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
12685 -int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
12687 -#endif /* __TGEC_H */
12688 diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
12689 deleted file mode 100644
12690 index 736db9d..0000000
12691 --- a/drivers/net/ethernet/freescale/fman/mac.c
12694 -/* Copyright 2008-2015 Freescale Semiconductor, Inc.
12696 - * Redistribution and use in source and binary forms, with or without
12697 - * modification, are permitted provided that the following conditions are met:
12698 - * * Redistributions of source code must retain the above copyright
12699 - * notice, this list of conditions and the following disclaimer.
12700 - * * Redistributions in binary form must reproduce the above copyright
12701 - * notice, this list of conditions and the following disclaimer in the
12702 - * documentation and/or other materials provided with the distribution.
12703 - * * Neither the name of Freescale Semiconductor nor the
12704 - * names of its contributors may be used to endorse or promote products
12705 - * derived from this software without specific prior written permission.
12708 - * ALTERNATIVELY, this software may be distributed under the terms of the
12709 - * GNU General Public License ("GPL") as published by the Free Software
12710 - * Foundation, either version 2 of that License or (at your option) any
12713 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12714 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12715 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12716 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12717 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12718 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12719 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12720 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12721 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12722 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12725 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12727 -#include <linux/init.h>
12728 -#include <linux/module.h>
12729 -#include <linux/of_address.h>
12730 -#include <linux/of_platform.h>
12731 -#include <linux/of_net.h>
12732 -#include <linux/of_mdio.h>
12733 -#include <linux/device.h>
12734 -#include <linux/phy.h>
12735 -#include <linux/netdevice.h>
12736 -#include <linux/phy_fixed.h>
12737 -#include <linux/etherdevice.h>
12738 -#include <linux/libfdt_env.h>
12741 -#include "fman_mac.h"
12742 -#include "fman_dtsec.h"
12743 -#include "fman_tgec.h"
12744 -#include "fman_memac.h"
12746 -MODULE_LICENSE("Dual BSD/GPL");
12747 -MODULE_DESCRIPTION("FSL FMan MAC API based driver");
12749 -struct mac_priv_s {
12750 - struct device *dev;
12751 - void __iomem *vaddr;
12753 - phy_interface_t phy_if;
12754 - struct fman *fman;
12755 - struct device_node *phy_node;
12756 - struct device_node *internal_phy_node;
12757 - /* List of multicast addresses */
12758 - struct list_head mc_addr_list;
12759 - struct platform_device *eth_dev;
12760 - struct fixed_phy_status *fixed_link;
12764 - int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
12765 - int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
12768 -struct mac_address {
12769 - u8 addr[ETH_ALEN];
12770 - struct list_head list;
12773 -static void mac_exception(void *handle, enum fman_mac_exceptions ex)
12775 - struct mac_device *mac_dev;
12776 - struct mac_priv_s *priv;
12778 - mac_dev = handle;
12779 - priv = mac_dev->priv;
12781 - if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
12782 - /* don't flag RX FIFO after the first */
12783 - mac_dev->set_exception(mac_dev->fman_mac,
12784 - FM_MAC_EX_10G_RX_FIFO_OVFL, false);
12785 - dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
12788 - dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
12792 -static void set_fman_mac_params(struct mac_device *mac_dev,
12793 - struct fman_mac_params *params)
12795 - struct mac_priv_s *priv = mac_dev->priv;
12797 - params->base_addr = (typeof(params->base_addr))
12798 - devm_ioremap(priv->dev, mac_dev->res->start,
12799 - resource_size(mac_dev->res));
12800 - memcpy(¶ms->addr, mac_dev->addr, sizeof(mac_dev->addr));
12801 - params->max_speed = priv->max_speed;
12802 - params->phy_if = priv->phy_if;
12803 - params->basex_if = false;
12804 - params->mac_id = priv->cell_index;
12805 - params->fm = (void *)priv->fman;
12806 - params->exception_cb = mac_exception;
12807 - params->event_cb = mac_exception;
12808 - params->dev_id = mac_dev;
12809 - params->internal_phy_node = priv->internal_phy_node;
12812 -static int tgec_initialization(struct mac_device *mac_dev)
12815 - struct mac_priv_s *priv;
12816 - struct fman_mac_params params;
12819 - priv = mac_dev->priv;
12821 - set_fman_mac_params(mac_dev, ¶ms);
12823 - mac_dev->fman_mac = tgec_config(¶ms);
12824 - if (!mac_dev->fman_mac) {
12829 - err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
12831 - goto _return_fm_mac_free;
12833 - err = tgec_init(mac_dev->fman_mac);
12835 - goto _return_fm_mac_free;
12837 - /* For 10G MAC, disable Tx ECC exception */
12838 - err = mac_dev->set_exception(mac_dev->fman_mac,
12839 - FM_MAC_EX_10G_TX_ECC_ER, false);
12841 - goto _return_fm_mac_free;
12843 - err = tgec_get_version(mac_dev->fman_mac, &version);
12845 - goto _return_fm_mac_free;
12847 - dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
12851 -_return_fm_mac_free:
12852 - tgec_free(mac_dev->fman_mac);
12858 -static int dtsec_initialization(struct mac_device *mac_dev)
12861 - struct mac_priv_s *priv;
12862 - struct fman_mac_params params;
12865 - priv = mac_dev->priv;
12867 - set_fman_mac_params(mac_dev, ¶ms);
12869 - mac_dev->fman_mac = dtsec_config(¶ms);
12870 - if (!mac_dev->fman_mac) {
12875 - err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
12877 - goto _return_fm_mac_free;
12879 - err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
12881 - goto _return_fm_mac_free;
12883 - err = dtsec_init(mac_dev->fman_mac);
12885 - goto _return_fm_mac_free;
12887 - /* For 1G MAC, disable by default the MIB counters overflow interrupt */
12888 - err = mac_dev->set_exception(mac_dev->fman_mac,
12889 - FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
12891 - goto _return_fm_mac_free;
12893 - err = dtsec_get_version(mac_dev->fman_mac, &version);
12895 - goto _return_fm_mac_free;
12897 - dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
12901 -_return_fm_mac_free:
12902 - dtsec_free(mac_dev->fman_mac);
12908 -static int memac_initialization(struct mac_device *mac_dev)
12911 - struct mac_priv_s *priv;
12912 - struct fman_mac_params params;
12914 - priv = mac_dev->priv;
12916 - set_fman_mac_params(mac_dev, ¶ms);
12918 - if (priv->max_speed == SPEED_10000)
12919 - params.phy_if = PHY_INTERFACE_MODE_XGMII;
12921 - mac_dev->fman_mac = memac_config(¶ms);
12922 - if (!mac_dev->fman_mac) {
12927 - err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
12929 - goto _return_fm_mac_free;
12931 - err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
12933 - goto _return_fm_mac_free;
12935 - err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
12937 - goto _return_fm_mac_free;
12939 - err = memac_init(mac_dev->fman_mac);
12941 - goto _return_fm_mac_free;
12943 - dev_info(priv->dev, "FMan MEMAC\n");
12947 -_return_fm_mac_free:
12948 - memac_free(mac_dev->fman_mac);
12954 -static int start(struct mac_device *mac_dev)
12957 - struct phy_device *phy_dev = mac_dev->phy_dev;
12958 - struct mac_priv_s *priv = mac_dev->priv;
12960 - err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
12961 - if (!err && phy_dev)
12962 - phy_start(phy_dev);
12967 -static int stop(struct mac_device *mac_dev)
12969 - struct mac_priv_s *priv = mac_dev->priv;
12971 - if (mac_dev->phy_dev)
12972 - phy_stop(mac_dev->phy_dev);
12974 - return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
12977 -static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
12979 - struct mac_priv_s *priv;
12980 - struct mac_address *old_addr, *tmp;
12981 - struct netdev_hw_addr *ha;
12983 - enet_addr_t *addr;
12985 - priv = mac_dev->priv;
12987 - /* Clear previous address list */
12988 - list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
12989 - addr = (enet_addr_t *)old_addr->addr;
12990 - err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
12994 - list_del(&old_addr->list);
12998 - /* Add all the addresses from the new list */
12999 - netdev_for_each_mc_addr(ha, net_dev) {
13000 - addr = (enet_addr_t *)ha->addr;
13001 - err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
13005 - tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
13009 - ether_addr_copy(tmp->addr, ha->addr);
13010 - list_add(&tmp->list, &priv->mc_addr_list);
13016 - * fman_set_mac_active_pause
13017 - * @mac_dev: A pointer to the MAC device
13018 - * @rx: Pause frame setting for RX
13019 - * @tx: Pause frame setting for TX
13021 - * Set the MAC RX/TX PAUSE frames settings
13023 - * Avoid redundant calls to FMD, if the MAC driver already contains the desired
13024 - * active PAUSE settings. Otherwise, the new active settings should be reflected
13027 - * Return: 0 on success; Error code otherwise.
13029 -int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
13031 - struct fman_mac *fman_mac = mac_dev->fman_mac;
13034 - if (rx != mac_dev->rx_pause_active) {
13035 - err = mac_dev->set_rx_pause(fman_mac, rx);
13036 - if (likely(err == 0))
13037 - mac_dev->rx_pause_active = rx;
13040 - if (tx != mac_dev->tx_pause_active) {
13041 - u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
13042 - FSL_FM_PAUSE_TIME_DISABLE);
13044 - err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
13046 - if (likely(err == 0))
13047 - mac_dev->tx_pause_active = tx;
13052 -EXPORT_SYMBOL(fman_set_mac_active_pause);
13055 - * fman_get_pause_cfg
13056 - * @mac_dev: A pointer to the MAC device
13057 - * @rx: Return value for RX setting
13058 - * @tx: Return value for TX setting
13060 - * Determine the MAC RX/TX PAUSE frames settings based on PHY
13061 - * autonegotiation or values set by eththool.
13063 - * Return: Pointer to FMan device.
13065 -void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
13068 - struct phy_device *phy_dev = mac_dev->phy_dev;
13069 - u16 lcl_adv, rmt_adv;
13072 - *rx_pause = *tx_pause = false;
13074 - if (!phy_dev->duplex)
13077 - /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
13078 - * are those set by ethtool.
13080 - if (!mac_dev->autoneg_pause) {
13081 - *rx_pause = mac_dev->rx_pause_req;
13082 - *tx_pause = mac_dev->tx_pause_req;
13086 - /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
13087 - * settings depend on the result of the link negotiation.
13090 - /* get local capabilities */
13092 - if (phy_dev->advertising & ADVERTISED_Pause)
13093 - lcl_adv |= ADVERTISE_PAUSE_CAP;
13094 - if (phy_dev->advertising & ADVERTISED_Asym_Pause)
13095 - lcl_adv |= ADVERTISE_PAUSE_ASYM;
13097 - /* get link partner capabilities */
13099 - if (phy_dev->pause)
13100 - rmt_adv |= LPA_PAUSE_CAP;
13101 - if (phy_dev->asym_pause)
13102 - rmt_adv |= LPA_PAUSE_ASYM;
13104 - /* Calculate TX/RX settings based on local and peer advertised
13105 - * symmetric/asymmetric PAUSE capabilities.
13107 - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
13108 - if (flowctrl & FLOW_CTRL_RX)
13109 - *rx_pause = true;
13110 - if (flowctrl & FLOW_CTRL_TX)
13111 - *tx_pause = true;
13113 -EXPORT_SYMBOL(fman_get_pause_cfg);
13115 -static void adjust_link_void(struct net_device *net_dev)
13119 -static void adjust_link_dtsec(struct net_device *net_dev)
13121 - struct device *dev = net_dev->dev.parent;
13122 - struct dpaa_eth_data *eth_data = dev->platform_data;
13123 - struct mac_device *mac_dev = eth_data->mac_dev;
13124 - struct phy_device *phy_dev = mac_dev->phy_dev;
13125 - struct fman_mac *fman_mac;
13126 - bool rx_pause, tx_pause;
13129 - fman_mac = mac_dev->fman_mac;
13130 - if (!phy_dev->link) {
13131 - dtsec_restart_autoneg(fman_mac);
13136 - dtsec_adjust_link(fman_mac, phy_dev->speed);
13137 - fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
13138 - err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
13140 - netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
13143 -static void adjust_link_memac(struct net_device *net_dev)
13145 - struct device *dev = net_dev->dev.parent;
13146 - struct dpaa_eth_data *eth_data = dev->platform_data;
13147 - struct mac_device *mac_dev = eth_data->mac_dev;
13148 - struct phy_device *phy_dev = mac_dev->phy_dev;
13149 - struct fman_mac *fman_mac;
13150 - bool rx_pause, tx_pause;
13153 - fman_mac = mac_dev->fman_mac;
13154 - memac_adjust_link(fman_mac, phy_dev->speed);
13156 - fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
13157 - err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
13159 - netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
13162 -/* Initializes driver's PHY state, and attaches to the PHY.
13163 - * Returns 0 on success.
13165 -static struct phy_device *init_phy(struct net_device *net_dev,
13166 - struct mac_device *mac_dev,
13167 - void (*adj_lnk)(struct net_device *))
13169 - struct phy_device *phy_dev;
13170 - struct mac_priv_s *priv = mac_dev->priv;
13172 - phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0,
13175 - netdev_err(net_dev, "Could not connect to PHY\n");
13179 - /* Remove any features not supported by the controller */
13180 - phy_dev->supported &= mac_dev->if_support;
13181 - /* Enable the symmetric and asymmetric PAUSE frame advertisements,
13182 - * as most of the PHY drivers do not enable them by default.
13184 - phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
13185 - phy_dev->advertising = phy_dev->supported;
13187 - mac_dev->phy_dev = phy_dev;
13192 -static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
13193 - struct mac_device *mac_dev)
13195 - return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
13198 -static struct phy_device *tgec_init_phy(struct net_device *net_dev,
13199 - struct mac_device *mac_dev)
13201 - return init_phy(net_dev, mac_dev, adjust_link_void);
13204 -static struct phy_device *memac_init_phy(struct net_device *net_dev,
13205 - struct mac_device *mac_dev)
13207 - return init_phy(net_dev, mac_dev, &adjust_link_memac);
13210 -static void setup_dtsec(struct mac_device *mac_dev)
13212 - mac_dev->init_phy = dtsec_init_phy;
13213 - mac_dev->init = dtsec_initialization;
13214 - mac_dev->set_promisc = dtsec_set_promiscuous;
13215 - mac_dev->change_addr = dtsec_modify_mac_address;
13216 - mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
13217 - mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
13218 - mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
13219 - mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
13220 - mac_dev->set_exception = dtsec_set_exception;
13221 - mac_dev->set_multi = set_multi;
13222 - mac_dev->start = start;
13223 - mac_dev->stop = stop;
13225 - mac_dev->priv->enable = dtsec_enable;
13226 - mac_dev->priv->disable = dtsec_disable;
13229 -static void setup_tgec(struct mac_device *mac_dev)
13231 - mac_dev->init_phy = tgec_init_phy;
13232 - mac_dev->init = tgec_initialization;
13233 - mac_dev->set_promisc = tgec_set_promiscuous;
13234 - mac_dev->change_addr = tgec_modify_mac_address;
13235 - mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
13236 - mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
13237 - mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
13238 - mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
13239 - mac_dev->set_exception = tgec_set_exception;
13240 - mac_dev->set_multi = set_multi;
13241 - mac_dev->start = start;
13242 - mac_dev->stop = stop;
13244 - mac_dev->priv->enable = tgec_enable;
13245 - mac_dev->priv->disable = tgec_disable;
13248 -static void setup_memac(struct mac_device *mac_dev)
13250 - mac_dev->init_phy = memac_init_phy;
13251 - mac_dev->init = memac_initialization;
13252 - mac_dev->set_promisc = memac_set_promiscuous;
13253 - mac_dev->change_addr = memac_modify_mac_address;
13254 - mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
13255 - mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
13256 - mac_dev->set_tx_pause = memac_set_tx_pause_frames;
13257 - mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
13258 - mac_dev->set_exception = memac_set_exception;
13259 - mac_dev->set_multi = set_multi;
13260 - mac_dev->start = start;
13261 - mac_dev->stop = stop;
13263 - mac_dev->priv->enable = memac_enable;
13264 - mac_dev->priv->disable = memac_disable;
13267 -#define DTSEC_SUPPORTED \
13268 - (SUPPORTED_10baseT_Half \
13269 - | SUPPORTED_10baseT_Full \
13270 - | SUPPORTED_100baseT_Half \
13271 - | SUPPORTED_100baseT_Full \
13272 - | SUPPORTED_Autoneg \
13273 - | SUPPORTED_Pause \
13274 - | SUPPORTED_Asym_Pause \
13277 -static DEFINE_MUTEX(eth_lock);
13279 -static const u16 phy2speed[] = {
13280 - [PHY_INTERFACE_MODE_MII] = SPEED_100,
13281 - [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
13282 - [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
13283 - [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
13284 - [PHY_INTERFACE_MODE_RMII] = SPEED_100,
13285 - [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
13286 - [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
13287 - [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
13288 - [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
13289 - [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
13290 - [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
13293 -static struct platform_device *dpaa_eth_add_device(int fman_id,
13294 - struct mac_device *mac_dev,
13295 - struct device_node *node)
13297 - struct platform_device *pdev;
13298 - struct dpaa_eth_data data;
13299 - struct mac_priv_s *priv;
13300 - static int dpaa_eth_dev_cnt;
13303 - priv = mac_dev->priv;
13305 - data.mac_dev = mac_dev;
13306 - data.mac_hw_id = priv->cell_index;
13307 - data.fman_hw_id = fman_id;
13308 - data.mac_node = node;
13310 - mutex_lock(ð_lock);
13312 - pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
13318 - ret = platform_device_add_data(pdev, &data, sizeof(data));
13322 - ret = platform_device_add(pdev);
13326 - dpaa_eth_dev_cnt++;
13327 - mutex_unlock(ð_lock);
13332 - platform_device_put(pdev);
13334 - mutex_unlock(ð_lock);
13336 - return ERR_PTR(ret);
13339 -static const struct of_device_id mac_match[] = {
13340 - { .compatible = "fsl,fman-dtsec" },
13341 - { .compatible = "fsl,fman-xgec" },
13342 - { .compatible = "fsl,fman-memac" },
13345 -MODULE_DEVICE_TABLE(of, mac_match);
13347 -static int mac_probe(struct platform_device *_of_dev)
13350 - struct device *dev;
13351 - struct device_node *mac_node, *dev_node;
13352 - struct mac_device *mac_dev;
13353 - struct platform_device *of_dev;
13354 - struct resource res;
13355 - struct mac_priv_s *priv;
13356 - const u8 *mac_addr;
13361 - dev = &_of_dev->dev;
13362 - mac_node = dev->of_node;
13364 - mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
13367 - dev_err(dev, "devm_kzalloc() = %d\n", err);
13370 - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
13376 - /* Save private information */
13377 - mac_dev->priv = priv;
13380 - if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
13381 - setup_dtsec(mac_dev);
13382 - priv->internal_phy_node = of_parse_phandle(mac_node,
13383 - "tbi-handle", 0);
13384 - } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
13385 - setup_tgec(mac_dev);
13386 - } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
13387 - setup_memac(mac_dev);
13388 - priv->internal_phy_node = of_parse_phandle(mac_node,
13389 - "pcsphy-handle", 0);
13391 - dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
13392 - mac_node->full_name);
13397 - /* Register mac_dev */
13398 - dev_set_drvdata(dev, mac_dev);
13400 - INIT_LIST_HEAD(&priv->mc_addr_list);
13402 - /* Get the FM node */
13403 - dev_node = of_get_parent(mac_node);
13405 - dev_err(dev, "of_get_parent(%s) failed\n",
13406 - mac_node->full_name);
13408 - goto _return_dev_set_drvdata;
13411 - of_dev = of_find_device_by_node(dev_node);
13413 - dev_err(dev, "of_find_device_by_node(%s) failed\n",
13414 - dev_node->full_name);
13416 - goto _return_of_node_put;
13419 - /* Get the FMan cell-index */
13420 - err = of_property_read_u32(dev_node, "cell-index", &val);
13422 - dev_err(dev, "failed to read cell-index for %s\n",
13423 - dev_node->full_name);
13425 - goto _return_of_node_put;
13427 - /* cell-index 0 => FMan id 1 */
13428 - fman_id = (u8)(val + 1);
13430 - priv->fman = fman_bind(&of_dev->dev);
13431 - if (!priv->fman) {
13432 - dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name);
13434 - goto _return_of_node_put;
13437 - of_node_put(dev_node);
13439 - /* Get the address of the memory mapped registers */
13440 - err = of_address_to_resource(mac_node, 0, &res);
13442 - dev_err(dev, "of_address_to_resource(%s) = %d\n",
13443 - mac_node->full_name, err);
13444 - goto _return_dev_set_drvdata;
13447 - mac_dev->res = __devm_request_region(dev,
13448 - fman_get_mem_region(priv->fman),
13449 - res.start, res.end + 1 - res.start,
13451 - if (!mac_dev->res) {
13452 - dev_err(dev, "__devm_request_mem_region(mac) failed\n");
13454 - goto _return_dev_set_drvdata;
13457 - priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
13458 - mac_dev->res->end + 1 - mac_dev->res->start);
13459 - if (!priv->vaddr) {
13460 - dev_err(dev, "devm_ioremap() failed\n");
13462 - goto _return_dev_set_drvdata;
13465 - if (!of_device_is_available(mac_node)) {
13466 - devm_iounmap(dev, priv->vaddr);
13467 - __devm_release_region(dev, fman_get_mem_region(priv->fman),
13468 - res.start, res.end + 1 - res.start);
13469 - devm_kfree(dev, mac_dev);
13470 - dev_set_drvdata(dev, NULL);
13474 - /* Get the cell-index */
13475 - err = of_property_read_u32(mac_node, "cell-index", &val);
13477 - dev_err(dev, "failed to read cell-index for %s\n",
13478 - mac_node->full_name);
13480 - goto _return_dev_set_drvdata;
13482 - priv->cell_index = (u8)val;
13484 - /* Get the MAC address */
13485 - mac_addr = of_get_mac_address(mac_node);
13487 - dev_err(dev, "of_get_mac_address(%s) failed\n",
13488 - mac_node->full_name);
13490 - goto _return_dev_set_drvdata;
13492 - memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
13494 - /* Get the port handles */
13495 - nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
13496 - if (unlikely(nph < 0)) {
13497 - dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n",
13498 - mac_node->full_name);
13500 - goto _return_dev_set_drvdata;
13503 - if (nph != ARRAY_SIZE(mac_dev->port)) {
13504 - dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n",
13505 - mac_node->full_name);
13507 - goto _return_dev_set_drvdata;
13510 - for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
13511 - /* Find the port node */
13512 - dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
13514 - dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
13515 - mac_node->full_name);
13517 - goto _return_of_node_put;
13520 - of_dev = of_find_device_by_node(dev_node);
13522 - dev_err(dev, "of_find_device_by_node(%s) failed\n",
13523 - dev_node->full_name);
13525 - goto _return_of_node_put;
13528 - mac_dev->port[i] = fman_port_bind(&of_dev->dev);
13529 - if (!mac_dev->port[i]) {
13530 - dev_err(dev, "dev_get_drvdata(%s) failed\n",
13531 - dev_node->full_name);
13533 - goto _return_of_node_put;
13535 - of_node_put(dev_node);
13538 - /* Get the PHY connection type */
13539 - phy_if = of_get_phy_mode(mac_node);
13540 - if (phy_if < 0) {
13542 - "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
13543 - mac_node->full_name);
13544 - phy_if = PHY_INTERFACE_MODE_SGMII;
13546 - priv->phy_if = phy_if;
13548 - priv->speed = phy2speed[priv->phy_if];
13549 - priv->max_speed = priv->speed;
13550 - mac_dev->if_support = DTSEC_SUPPORTED;
13551 - /* We don't support half-duplex in SGMII mode */
13552 - if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
13553 - mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
13554 - SUPPORTED_100baseT_Half);
13556 - /* Gigabit support (no half-duplex) */
13557 - if (priv->max_speed == 1000)
13558 - mac_dev->if_support |= SUPPORTED_1000baseT_Full;
13560 - /* The 10G interface only supports one mode */
13561 - if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
13562 - mac_dev->if_support = SUPPORTED_10000baseT_Full;
13564 - /* Get the rest of the PHY information */
13565 - priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
13566 - if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) {
13567 - struct phy_device *phy;
13569 - err = of_phy_register_fixed_link(mac_node);
13571 - goto _return_dev_set_drvdata;
13573 - priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
13575 - if (!priv->fixed_link)
13576 - goto _return_dev_set_drvdata;
13578 - priv->phy_node = of_node_get(mac_node);
13579 - phy = of_phy_find_device(priv->phy_node);
13581 - goto _return_dev_set_drvdata;
13583 - priv->fixed_link->link = phy->link;
13584 - priv->fixed_link->speed = phy->speed;
13585 - priv->fixed_link->duplex = phy->duplex;
13586 - priv->fixed_link->pause = phy->pause;
13587 - priv->fixed_link->asym_pause = phy->asym_pause;
13589 - put_device(&phy->mdio.dev);
13592 - err = mac_dev->init(mac_dev);
13594 - dev_err(dev, "mac_dev->init() = %d\n", err);
13595 - of_node_put(priv->phy_node);
13596 - goto _return_dev_set_drvdata;
13599 - /* pause frame autonegotiation enabled */
13600 - mac_dev->autoneg_pause = true;
13602 - /* By intializing the values to false, force FMD to enable PAUSE frames
13605 - mac_dev->rx_pause_req = true;
13606 - mac_dev->tx_pause_req = true;
13607 - mac_dev->rx_pause_active = false;
13608 - mac_dev->tx_pause_active = false;
13609 - err = fman_set_mac_active_pause(mac_dev, true, true);
13611 - dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
13613 - dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
13614 - mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
13615 - mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
13617 - priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node);
13618 - if (IS_ERR(priv->eth_dev)) {
13619 - dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
13620 - priv->cell_index);
13621 - priv->eth_dev = NULL;
13626 -_return_of_node_put:
13627 - of_node_put(dev_node);
13628 -_return_dev_set_drvdata:
13629 - kfree(priv->fixed_link);
13630 - dev_set_drvdata(dev, NULL);
13635 -static struct platform_driver mac_driver = {
13637 - .name = KBUILD_MODNAME,
13638 - .of_match_table = mac_match,
13640 - .probe = mac_probe,
13643 -builtin_platform_driver(mac_driver);
13644 diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
13645 deleted file mode 100644
13646 index d7313f0..0000000
13647 --- a/drivers/net/ethernet/freescale/fman/mac.h
13650 -/* Copyright 2008-2015 Freescale Semiconductor, Inc.
13652 - * Redistribution and use in source and binary forms, with or without
13653 - * modification, are permitted provided that the following conditions are met:
13654 - * * Redistributions of source code must retain the above copyright
13655 - * notice, this list of conditions and the following disclaimer.
13656 - * * Redistributions in binary form must reproduce the above copyright
13657 - * notice, this list of conditions and the following disclaimer in the
13658 - * documentation and/or other materials provided with the distribution.
13659 - * * Neither the name of Freescale Semiconductor nor the
13660 - * names of its contributors may be used to endorse or promote products
13661 - * derived from this software without specific prior written permission.
13664 - * ALTERNATIVELY, this software may be distributed under the terms of the
13665 - * GNU General Public License ("GPL") as published by the Free Software
13666 - * Foundation, either version 2 of that License or (at your option) any
13669 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
13670 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
13671 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
13672 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
13673 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
13674 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
13675 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
13676 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
13677 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13678 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13684 -#include <linux/device.h>
13685 -#include <linux/if_ether.h>
13686 -#include <linux/phy.h>
13687 -#include <linux/list.h>
13689 -#include "fman_port.h"
13691 -#include "fman_mac.h"
13694 -struct mac_priv_s;
13696 -struct mac_device {
13697 - struct resource *res;
13698 - u8 addr[ETH_ALEN];
13699 - struct fman_port *port[2];
13701 - struct phy_device *phy_dev;
13703 - bool autoneg_pause;
13704 - bool rx_pause_req;
13705 - bool tx_pause_req;
13706 - bool rx_pause_active;
13707 - bool tx_pause_active;
13710 - struct phy_device *(*init_phy)(struct net_device *net_dev,
13711 - struct mac_device *mac_dev);
13712 - int (*init)(struct mac_device *mac_dev);
13713 - int (*start)(struct mac_device *mac_dev);
13714 - int (*stop)(struct mac_device *mac_dev);
13715 - int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
13716 - int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
13717 - int (*set_multi)(struct net_device *net_dev,
13718 - struct mac_device *mac_dev);
13719 - int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
13720 - int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
13721 - u16 pause_time, u16 thresh_time);
13722 - int (*set_exception)(struct fman_mac *mac_dev,
13723 - enum fman_mac_exceptions exception, bool enable);
13724 - int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
13725 - enet_addr_t *eth_addr);
13726 - int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
13727 - enet_addr_t *eth_addr);
13729 - struct fman_mac *fman_mac;
13730 - struct mac_priv_s *priv;
13733 -struct dpaa_eth_data {
13734 - struct device_node *mac_node;
13735 - struct mac_device *mac_dev;
13740 -extern const char *mac_driver_description;
13742 -int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
13744 -void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
13747 -#endif /* __MAC_H */
13748 diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
13749 index 4b86260..9b3639e 100644
13750 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
13751 +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
13752 @@ -60,9 +60,6 @@ module_param(fs_enet_debug, int, 0);
13753 MODULE_PARM_DESC(fs_enet_debug,
13754 "Freescale bitmapped debugging message enable value");
13756 -#define RX_RING_SIZE 32
13757 -#define TX_RING_SIZE 64
13759 #ifdef CONFIG_NET_POLL_CONTROLLER
13760 static void fs_enet_netpoll(struct net_device *dev);
13762 @@ -82,113 +79,20 @@ static void skb_align(struct sk_buff *skb, int align)
13763 skb_reserve(skb, align - off);
13766 -/* NAPI function */
13767 -static int fs_enet_napi(struct napi_struct *napi, int budget)
13768 +/* NAPI receive function */
13769 +static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
13771 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
13772 struct net_device *dev = fep->ndev;
13773 const struct fs_platform_info *fpi = fep->fpi;
13774 cbd_t __iomem *bdp;
13775 - struct sk_buff *skb, *skbn;
13776 + struct sk_buff *skb, *skbn, *skbt;
13780 - int dirtyidx, do_wake, do_restart;
13781 - int tx_left = TX_RING_SIZE;
13783 - spin_lock(&fep->tx_lock);
13784 - bdp = fep->dirty_tx;
13786 - /* clear status bits for napi*/
13787 - (*fep->ops->napi_clear_event)(dev);
13789 - do_wake = do_restart = 0;
13790 - while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
13791 - dirtyidx = bdp - fep->tx_bd_base;
13793 - if (fep->tx_free == fep->tx_ring)
13796 - skb = fep->tx_skbuff[dirtyidx];
13799 - * Check for errors.
13801 - if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
13802 - BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
13804 - if (sc & BD_ENET_TX_HB) /* No heartbeat */
13805 - fep->stats.tx_heartbeat_errors++;
13806 - if (sc & BD_ENET_TX_LC) /* Late collision */
13807 - fep->stats.tx_window_errors++;
13808 - if (sc & BD_ENET_TX_RL) /* Retrans limit */
13809 - fep->stats.tx_aborted_errors++;
13810 - if (sc & BD_ENET_TX_UN) /* Underrun */
13811 - fep->stats.tx_fifo_errors++;
13812 - if (sc & BD_ENET_TX_CSL) /* Carrier lost */
13813 - fep->stats.tx_carrier_errors++;
13815 - if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
13816 - fep->stats.tx_errors++;
13820 - fep->stats.tx_packets++;
13822 - if (sc & BD_ENET_TX_READY) {
13823 - dev_warn(fep->dev,
13824 - "HEY! Enet xmit interrupt and TX_READY.\n");
13828 - * Deferred means some collisions occurred during transmit,
13829 - * but we eventually sent the packet OK.
13831 - if (sc & BD_ENET_TX_DEF)
13832 - fep->stats.collisions++;
13835 - if (fep->mapped_as_page[dirtyidx])
13836 - dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
13837 - CBDR_DATLEN(bdp), DMA_TO_DEVICE);
13839 - dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
13840 - CBDR_DATLEN(bdp), DMA_TO_DEVICE);
13843 - * Free the sk buffer associated with this last transmit.
13846 - dev_kfree_skb(skb);
13847 - fep->tx_skbuff[dirtyidx] = NULL;
13851 - * Update pointer to next buffer descriptor to be transmitted.
13853 - if ((sc & BD_ENET_TX_WRAP) == 0)
13856 - bdp = fep->tx_bd_base;
13859 - * Since we have freed up a buffer, the ring is no longer
13862 - if (++fep->tx_free == MAX_SKB_FRAGS)
13867 - fep->dirty_tx = bdp;
13870 - (*fep->ops->tx_restart)(dev);
13872 - spin_unlock(&fep->tx_lock);
13875 - netif_wake_queue(dev);
13880 * First, grab all of the stats for the incoming packet.
13881 @@ -196,8 +100,10 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13885 - while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
13886 - received < budget) {
13887 + /* clear RX status bits for napi */
13888 + (*fep->ops->napi_clear_rx_event)(dev);
13890 + while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
13891 curidx = bdp - fep->rx_bd_base;
13894 @@ -226,10 +132,21 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13895 if (sc & BD_ENET_RX_OV)
13896 fep->stats.rx_crc_errors++;
13898 - skbn = fep->rx_skbuff[curidx];
13899 + skb = fep->rx_skbuff[curidx];
13901 + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
13902 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13903 + DMA_FROM_DEVICE);
13908 skb = fep->rx_skbuff[curidx];
13910 + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
13911 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13912 + DMA_FROM_DEVICE);
13915 * Process the incoming frame.
13917 @@ -244,31 +161,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13918 skb_reserve(skbn, 2); /* align IP header */
13919 skb_copy_from_linear_data(skb,
13920 skbn->data, pkt_len);
13922 - dma_sync_single_for_cpu(fep->dev,
13923 - CBDR_BUFADDR(bdp),
13924 - L1_CACHE_ALIGN(pkt_len),
13925 - DMA_FROM_DEVICE);
13932 skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
13938 skb_align(skbn, ENET_RX_ALIGN);
13940 - dma_unmap_single(fep->dev,
13941 - CBDR_BUFADDR(bdp),
13942 - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13943 - DMA_FROM_DEVICE);
13945 - dma = dma_map_single(fep->dev,
13947 - L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13948 - DMA_FROM_DEVICE);
13949 - CBDW_BUFADDR(bdp, dma);
13953 if (skbn != NULL) {
13954 @@ -283,6 +185,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13957 fep->rx_skbuff[curidx] = skbn;
13958 + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
13959 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13960 + DMA_FROM_DEVICE));
13961 CBDW_DATLEN(bdp, 0);
13962 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
13964 @@ -295,19 +200,134 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13965 bdp = fep->rx_bd_base;
13967 (*fep->ops->rx_bd_done)(dev);
13969 + if (received >= budget)
13975 - if (received < budget && tx_left) {
13976 + if (received < budget) {
13978 napi_complete(napi);
13979 - (*fep->ops->napi_enable)(dev);
13980 + (*fep->ops->napi_enable_rx)(dev);
13986 +static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
13988 + struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
13990 + struct net_device *dev = fep->ndev;
13991 + cbd_t __iomem *bdp;
13992 + struct sk_buff *skb;
13993 + int dirtyidx, do_wake, do_restart;
13995 + int has_tx_work = 0;
13997 + spin_lock(&fep->tx_lock);
13998 + bdp = fep->dirty_tx;
14000 + /* clear TX status bits for napi */
14001 + (*fep->ops->napi_clear_tx_event)(dev);
14003 + do_wake = do_restart = 0;
14004 + while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
14005 + dirtyidx = bdp - fep->tx_bd_base;
14007 + if (fep->tx_free == fep->tx_ring)
14010 + skb = fep->tx_skbuff[dirtyidx];
14013 + * Check for errors.
14015 + if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
14016 + BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
14018 + if (sc & BD_ENET_TX_HB) /* No heartbeat */
14019 + fep->stats.tx_heartbeat_errors++;
14020 + if (sc & BD_ENET_TX_LC) /* Late collision */
14021 + fep->stats.tx_window_errors++;
14022 + if (sc & BD_ENET_TX_RL) /* Retrans limit */
14023 + fep->stats.tx_aborted_errors++;
14024 + if (sc & BD_ENET_TX_UN) /* Underrun */
14025 + fep->stats.tx_fifo_errors++;
14026 + if (sc & BD_ENET_TX_CSL) /* Carrier lost */
14027 + fep->stats.tx_carrier_errors++;
14029 + if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
14030 + fep->stats.tx_errors++;
14034 + fep->stats.tx_packets++;
14036 + if (sc & BD_ENET_TX_READY) {
14037 + dev_warn(fep->dev,
14038 + "HEY! Enet xmit interrupt and TX_READY.\n");
14042 + * Deferred means some collisions occurred during transmit,
14043 + * but we eventually sent the packet OK.
14045 + if (sc & BD_ENET_TX_DEF)
14046 + fep->stats.collisions++;
14049 + if (fep->mapped_as_page[dirtyidx])
14050 + dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
14051 + CBDR_DATLEN(bdp), DMA_TO_DEVICE);
14053 + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
14054 + CBDR_DATLEN(bdp), DMA_TO_DEVICE);
14057 + * Free the sk buffer associated with this last transmit.
14060 + dev_kfree_skb(skb);
14061 + fep->tx_skbuff[dirtyidx] = NULL;
14065 + * Update pointer to next buffer descriptor to be transmitted.
14067 + if ((sc & BD_ENET_TX_WRAP) == 0)
14070 + bdp = fep->tx_bd_base;
14073 + * Since we have freed up a buffer, the ring is no longer
14076 + if (++fep->tx_free >= MAX_SKB_FRAGS)
14081 + fep->dirty_tx = bdp;
14084 + (*fep->ops->tx_restart)(dev);
14086 + if (!has_tx_work) {
14087 + napi_complete(napi);
14088 + (*fep->ops->napi_enable_tx)(dev);
14092 + spin_unlock(&fep->tx_lock);
14095 + netif_wake_queue(dev);
14103 @@ -333,18 +353,18 @@ fs_enet_interrupt(int irq, void *dev_id)
14106 int_clr_events = int_events;
14107 - int_clr_events &= ~fep->ev_napi;
14108 + int_clr_events &= ~fep->ev_napi_rx;
14110 (*fep->ops->clear_int_events)(dev, int_clr_events);
14112 if (int_events & fep->ev_err)
14113 (*fep->ops->ev_error)(dev, int_events);
14115 - if (int_events & fep->ev) {
14116 + if (int_events & fep->ev_rx) {
14117 napi_ok = napi_schedule_prep(&fep->napi);
14119 - (*fep->ops->napi_disable)(dev);
14120 - (*fep->ops->clear_int_events)(dev, fep->ev_napi);
14121 + (*fep->ops->napi_disable_rx)(dev);
14122 + (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
14124 /* NOTE: it is possible for FCCs in NAPI mode */
14125 /* to submit a spurious interrupt while in poll */
14126 @@ -352,6 +372,17 @@ fs_enet_interrupt(int irq, void *dev_id)
14127 __napi_schedule(&fep->napi);
14130 + if (int_events & fep->ev_tx) {
14131 + napi_ok = napi_schedule_prep(&fep->napi_tx);
14133 + (*fep->ops->napi_disable_tx)(dev);
14134 + (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
14136 + /* NOTE: it is possible for FCCs in NAPI mode */
14137 + /* to submit a spurious interrupt while in poll */
14139 + __napi_schedule(&fep->napi_tx);
14144 @@ -459,9 +490,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
14146 struct sk_buff *new_skb;
14148 - if (skb_linearize(skb))
14151 /* Alloc new skb */
14152 new_skb = netdev_alloc_skb(dev, skb->len + 4);
14154 @@ -487,27 +515,12 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
14155 cbd_t __iomem *bdp;
14159 + int nr_frags = skb_shinfo(skb)->nr_frags;
14162 -#ifdef CONFIG_FS_ENET_MPC5121_FEC
14163 - int is_aligned = 1;
14166 - if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
14169 - nr_frags = skb_shinfo(skb)->nr_frags;
14170 - frag = skb_shinfo(skb)->frags;
14171 - for (i = 0; i < nr_frags; i++, frag++) {
14172 - if (!IS_ALIGNED(frag->page_offset, 4)) {
14179 - if (!is_aligned) {
14180 +#ifdef CONFIG_FS_ENET_MPC5121_FEC
14181 + if (((unsigned long)skb->data) & 0x3) {
14182 skb = tx_skb_align_workaround(dev, skb);
14185 @@ -519,7 +532,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
14190 spin_lock(&fep->tx_lock);
14193 @@ -527,7 +539,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
14197 - nr_frags = skb_shinfo(skb)->nr_frags;
14198 if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
14199 netif_stop_queue(dev);
14200 spin_unlock(&fep->tx_lock);
14201 @@ -558,8 +569,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
14202 frag = skb_shinfo(skb)->frags;
14205 - BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
14207 + BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
14208 CBDS_SC(bdp, BD_ENET_TX_READY);
14210 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
14211 @@ -624,15 +634,14 @@ static void fs_timeout(struct net_device *dev)
14212 spin_lock_irqsave(&fep->lock, flags);
14214 if (dev->flags & IFF_UP) {
14215 - phy_stop(dev->phydev);
14216 + phy_stop(fep->phydev);
14217 (*fep->ops->stop)(dev);
14218 (*fep->ops->restart)(dev);
14219 - phy_start(dev->phydev);
14220 + phy_start(fep->phydev);
14223 - phy_start(dev->phydev);
14224 - wake = fep->tx_free >= MAX_SKB_FRAGS &&
14225 - !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
14226 + phy_start(fep->phydev);
14227 + wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
14228 spin_unlock_irqrestore(&fep->lock, flags);
14231 @@ -645,7 +654,7 @@ static void fs_timeout(struct net_device *dev)
14232 static void generic_adjust_link(struct net_device *dev)
14234 struct fs_enet_private *fep = netdev_priv(dev);
14235 - struct phy_device *phydev = dev->phydev;
14236 + struct phy_device *phydev = fep->phydev;
14239 if (phydev->link) {
14240 @@ -714,6 +723,8 @@ static int fs_init_phy(struct net_device *dev)
14244 + fep->phydev = phydev;
14249 @@ -724,10 +735,11 @@ static int fs_enet_open(struct net_device *dev)
14252 /* to initialize the fep->cur_rx,... */
14253 - /* not doing this, will cause a crash in fs_enet_napi */
14254 + /* not doing this, will cause a crash in fs_enet_rx_napi */
14255 fs_init_bds(fep->ndev);
14257 napi_enable(&fep->napi);
14258 + napi_enable(&fep->napi_tx);
14260 /* Install our interrupt handler. */
14261 r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
14262 @@ -735,6 +747,7 @@ static int fs_enet_open(struct net_device *dev)
14264 dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
14265 napi_disable(&fep->napi);
14266 + napi_disable(&fep->napi_tx);
14270 @@ -742,9 +755,10 @@ static int fs_enet_open(struct net_device *dev)
14272 free_irq(fep->interrupt, dev);
14273 napi_disable(&fep->napi);
14274 + napi_disable(&fep->napi_tx);
14277 - phy_start(dev->phydev);
14278 + phy_start(fep->phydev);
14280 netif_start_queue(dev);
14282 @@ -759,7 +773,8 @@ static int fs_enet_close(struct net_device *dev)
14283 netif_stop_queue(dev);
14284 netif_carrier_off(dev);
14285 napi_disable(&fep->napi);
14286 - phy_stop(dev->phydev);
14287 + napi_disable(&fep->napi_tx);
14288 + phy_stop(fep->phydev);
14290 spin_lock_irqsave(&fep->lock, flags);
14291 spin_lock(&fep->tx_lock);
14292 @@ -768,7 +783,8 @@ static int fs_enet_close(struct net_device *dev)
14293 spin_unlock_irqrestore(&fep->lock, flags);
14295 /* release any irqs */
14296 - phy_disconnect(dev->phydev);
14297 + phy_disconnect(fep->phydev);
14298 + fep->phydev = NULL;
14299 free_irq(fep->interrupt, dev);
14302 @@ -813,82 +829,64 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
14306 -static int fs_nway_reset(struct net_device *dev)
14307 +static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
14310 + struct fs_enet_private *fep = netdev_priv(dev);
14312 + if (!fep->phydev)
14315 + return phy_ethtool_gset(fep->phydev, cmd);
14318 -static u32 fs_get_msglevel(struct net_device *dev)
14319 +static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
14321 struct fs_enet_private *fep = netdev_priv(dev);
14322 - return fep->msg_enable;
14324 + if (!fep->phydev)
14327 + return phy_ethtool_sset(fep->phydev, cmd);
14330 -static void fs_set_msglevel(struct net_device *dev, u32 value)
14331 +static int fs_nway_reset(struct net_device *dev)
14333 - struct fs_enet_private *fep = netdev_priv(dev);
14334 - fep->msg_enable = value;
14338 -static int fs_get_tunable(struct net_device *dev,
14339 - const struct ethtool_tunable *tuna, void *data)
14340 +static u32 fs_get_msglevel(struct net_device *dev)
14342 struct fs_enet_private *fep = netdev_priv(dev);
14343 - struct fs_platform_info *fpi = fep->fpi;
14346 - switch (tuna->id) {
14347 - case ETHTOOL_RX_COPYBREAK:
14348 - *(u32 *)data = fpi->rx_copybreak;
14356 + return fep->msg_enable;
14359 -static int fs_set_tunable(struct net_device *dev,
14360 - const struct ethtool_tunable *tuna, const void *data)
14361 +static void fs_set_msglevel(struct net_device *dev, u32 value)
14363 struct fs_enet_private *fep = netdev_priv(dev);
14364 - struct fs_platform_info *fpi = fep->fpi;
14367 - switch (tuna->id) {
14368 - case ETHTOOL_RX_COPYBREAK:
14369 - fpi->rx_copybreak = *(u32 *)data;
14377 + fep->msg_enable = value;
14380 static const struct ethtool_ops fs_ethtool_ops = {
14381 .get_drvinfo = fs_get_drvinfo,
14382 .get_regs_len = fs_get_regs_len,
14383 + .get_settings = fs_get_settings,
14384 + .set_settings = fs_set_settings,
14385 .nway_reset = fs_nway_reset,
14386 .get_link = ethtool_op_get_link,
14387 .get_msglevel = fs_get_msglevel,
14388 .set_msglevel = fs_set_msglevel,
14389 .get_regs = fs_get_regs,
14390 .get_ts_info = ethtool_op_get_ts_info,
14391 - .get_link_ksettings = phy_ethtool_get_link_ksettings,
14392 - .set_link_ksettings = phy_ethtool_set_link_ksettings,
14393 - .get_tunable = fs_get_tunable,
14394 - .set_tunable = fs_set_tunable,
14397 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
14399 + struct fs_enet_private *fep = netdev_priv(dev);
14401 if (!netif_running(dev))
14404 - return phy_mii_ioctl(dev->phydev, rq, cmd);
14405 + return phy_mii_ioctl(fep->phydev, rq, cmd);
14408 extern int fs_mii_connect(struct net_device *dev);
14409 @@ -948,8 +946,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
14410 fpi->cp_command = *data;
14413 - fpi->rx_ring = RX_RING_SIZE;
14414 - fpi->tx_ring = TX_RING_SIZE;
14415 + fpi->rx_ring = 32;
14416 + fpi->tx_ring = 64;
14417 fpi->rx_copybreak = 240;
14418 fpi->napi_weight = 17;
14419 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
14420 @@ -980,7 +978,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
14421 err = clk_prepare_enable(clk);
14424 - goto out_deregister_fixed_link;
14425 + goto out_free_fpi;
14427 fpi->clk_per = clk;
14429 @@ -1033,7 +1031,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
14431 ndev->netdev_ops = &fs_enet_netdev_ops;
14432 ndev->watchdog_timeo = 2 * HZ;
14433 - netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
14434 + netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
14435 + netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
14437 ndev->ethtool_ops = &fs_ethtool_ops;
14439 @@ -1061,9 +1060,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
14440 of_node_put(fpi->phy_node);
14442 clk_disable_unprepare(fpi->clk_per);
14443 -out_deregister_fixed_link:
14444 - if (of_phy_is_fixed_link(ofdev->dev.of_node))
14445 - of_phy_deregister_fixed_link(ofdev->dev.of_node);
14449 @@ -1082,8 +1078,6 @@ static int fs_enet_remove(struct platform_device *ofdev)
14450 of_node_put(fep->fpi->phy_node);
14451 if (fep->fpi->clk_per)
14452 clk_disable_unprepare(fep->fpi->clk_per);
14453 - if (of_phy_is_fixed_link(ofdev->dev.of_node))
14454 - of_phy_deregister_fixed_link(ofdev->dev.of_node);
14458 diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
14459 index fee24c8..f184d8f 100644
14460 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
14461 +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
14462 @@ -81,9 +81,12 @@ struct fs_ops {
14463 void (*adjust_link)(struct net_device *dev);
14464 void (*restart)(struct net_device *dev);
14465 void (*stop)(struct net_device *dev);
14466 - void (*napi_clear_event)(struct net_device *dev);
14467 - void (*napi_enable)(struct net_device *dev);
14468 - void (*napi_disable)(struct net_device *dev);
14469 + void (*napi_clear_rx_event)(struct net_device *dev);
14470 + void (*napi_enable_rx)(struct net_device *dev);
14471 + void (*napi_disable_rx)(struct net_device *dev);
14472 + void (*napi_clear_tx_event)(struct net_device *dev);
14473 + void (*napi_enable_tx)(struct net_device *dev);
14474 + void (*napi_disable_tx)(struct net_device *dev);
14475 void (*rx_bd_done)(struct net_device *dev);
14476 void (*tx_kickstart)(struct net_device *dev);
14477 u32 (*get_int_events)(struct net_device *dev);
14478 @@ -119,6 +122,7 @@ struct phy_info {
14480 struct fs_enet_private {
14481 struct napi_struct napi;
14482 + struct napi_struct napi_tx;
14483 struct device *dev; /* pointer back to the device (must be initialized first) */
14484 struct net_device *ndev;
14485 spinlock_t lock; /* during all ops except TX pckt processing */
14486 @@ -145,11 +149,14 @@ struct fs_enet_private {
14487 unsigned int last_mii_status;
14490 + struct phy_device *phydev;
14491 int oldduplex, oldspeed, oldlink; /* current settings */
14494 - u32 ev_napi; /* mask of NAPI events */
14495 - u32 ev; /* event mask */
14496 + u32 ev_napi_rx; /* mask of NAPI rx events */
14497 + u32 ev_napi_tx; /* mask of NAPI tx events */
14498 + u32 ev_rx; /* rx event mask */
14499 + u32 ev_tx; /* tx event mask */
14500 u32 ev_err; /* error event mask */
14502 u16 bd_rx_empty; /* mask of BD rx empty */
14503 diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
14504 index 120c758..08f5b91 100644
14505 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
14506 +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
14507 @@ -90,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
14510 fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
14511 - if (!fep->interrupt)
14512 + if (fep->interrupt == NO_IRQ)
14515 fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
14516 @@ -124,8 +124,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
14520 -#define FCC_NAPI_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB | FCC_ENET_TXB)
14521 -#define FCC_EVENT (FCC_ENET_RXF | FCC_ENET_TXB)
14522 +#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
14523 +#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
14524 +#define FCC_RX_EVENT (FCC_ENET_RXF)
14525 +#define FCC_TX_EVENT (FCC_ENET_TXB)
14526 #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
14528 static int setup_data(struct net_device *dev)
14529 @@ -135,8 +137,10 @@ static int setup_data(struct net_device *dev)
14530 if (do_pd_setup(fep) != 0)
14533 - fep->ev_napi = FCC_NAPI_EVENT_MSK;
14534 - fep->ev = FCC_EVENT;
14535 + fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
14536 + fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
14537 + fep->ev_rx = FCC_RX_EVENT;
14538 + fep->ev_tx = FCC_TX_EVENT;
14539 fep->ev_err = FCC_ERR_EVENT_MSK;
14542 @@ -366,7 +370,7 @@ static void restart(struct net_device *dev)
14544 /* adjust to speed (for RMII mode) */
14545 if (fpi->use_rmii) {
14546 - if (dev->phydev->speed == 100)
14547 + if (fep->phydev->speed == 100)
14548 C8(fcccp, fcc_gfemr, 0x20);
14550 S8(fcccp, fcc_gfemr, 0x20);
14551 @@ -392,7 +396,7 @@ static void restart(struct net_device *dev)
14552 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
14554 /* adjust to duplex mode */
14555 - if (dev->phydev->duplex)
14556 + if (fep->phydev->duplex)
14557 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
14559 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
14560 @@ -420,28 +424,52 @@ static void stop(struct net_device *dev)
14561 fs_cleanup_bds(dev);
14564 -static void napi_clear_event_fs(struct net_device *dev)
14565 +static void napi_clear_rx_event(struct net_device *dev)
14567 struct fs_enet_private *fep = netdev_priv(dev);
14568 fcc_t __iomem *fccp = fep->fcc.fccp;
14570 - W16(fccp, fcc_fcce, FCC_NAPI_EVENT_MSK);
14571 + W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
14574 -static void napi_enable_fs(struct net_device *dev)
14575 +static void napi_enable_rx(struct net_device *dev)
14577 struct fs_enet_private *fep = netdev_priv(dev);
14578 fcc_t __iomem *fccp = fep->fcc.fccp;
14580 - S16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
14581 + S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
14584 -static void napi_disable_fs(struct net_device *dev)
14585 +static void napi_disable_rx(struct net_device *dev)
14587 struct fs_enet_private *fep = netdev_priv(dev);
14588 fcc_t __iomem *fccp = fep->fcc.fccp;
14590 - C16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
14591 + C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
14594 +static void napi_clear_tx_event(struct net_device *dev)
14596 + struct fs_enet_private *fep = netdev_priv(dev);
14597 + fcc_t __iomem *fccp = fep->fcc.fccp;
14599 + W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
14602 +static void napi_enable_tx(struct net_device *dev)
14604 + struct fs_enet_private *fep = netdev_priv(dev);
14605 + fcc_t __iomem *fccp = fep->fcc.fccp;
14607 + S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
14610 +static void napi_disable_tx(struct net_device *dev)
14612 + struct fs_enet_private *fep = netdev_priv(dev);
14613 + fcc_t __iomem *fccp = fep->fcc.fccp;
14615 + C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
14618 static void rx_bd_done(struct net_device *dev)
14619 @@ -524,7 +552,7 @@ static void tx_restart(struct net_device *dev)
14620 cbd_t __iomem *prev_bd;
14621 cbd_t __iomem *last_tx_bd;
14623 - last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
14624 + last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
14626 /* get the current bd held in TBPTR and scan back from this point */
14627 recheck_bd = curr_tbptr = (cbd_t __iomem *)
14628 @@ -567,9 +595,12 @@ const struct fs_ops fs_fcc_ops = {
14629 .set_multicast_list = set_multicast_list,
14630 .restart = restart,
14632 - .napi_clear_event = napi_clear_event_fs,
14633 - .napi_enable = napi_enable_fs,
14634 - .napi_disable = napi_disable_fs,
14635 + .napi_clear_rx_event = napi_clear_rx_event,
14636 + .napi_enable_rx = napi_enable_rx,
14637 + .napi_disable_rx = napi_disable_rx,
14638 + .napi_clear_tx_event = napi_clear_tx_event,
14639 + .napi_enable_tx = napi_enable_tx,
14640 + .napi_disable_tx = napi_disable_tx,
14641 .rx_bd_done = rx_bd_done,
14642 .tx_kickstart = tx_kickstart,
14643 .get_int_events = get_int_events,
14644 diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
14645 index 777beff..b34214e 100644
14646 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
14647 +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
14648 @@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
14649 struct platform_device *ofdev = to_platform_device(fep->dev);
14651 fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
14652 - if (!fep->interrupt)
14653 + if (fep->interrupt == NO_IRQ)
14656 fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
14657 @@ -109,8 +109,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
14661 -#define FEC_NAPI_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
14662 -#define FEC_EVENT (FEC_ENET_RXF | FEC_ENET_TXF)
14663 +#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
14664 +#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
14665 +#define FEC_RX_EVENT (FEC_ENET_RXF)
14666 +#define FEC_TX_EVENT (FEC_ENET_TXF)
14667 #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
14668 FEC_ENET_BABT | FEC_ENET_EBERR)
14670 @@ -124,8 +126,10 @@ static int setup_data(struct net_device *dev)
14674 - fep->ev_napi = FEC_NAPI_EVENT_MSK;
14675 - fep->ev = FEC_EVENT;
14676 + fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
14677 + fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
14678 + fep->ev_rx = FEC_RX_EVENT;
14679 + fep->ev_tx = FEC_TX_EVENT;
14680 fep->ev_err = FEC_ERR_EVENT_MSK;
14683 @@ -250,7 +254,7 @@ static void restart(struct net_device *dev)
14685 u32 addrhi, addrlo;
14687 - struct mii_bus *mii = dev->phydev->mdio.bus;
14688 + struct mii_bus* mii = fep->phydev->bus;
14689 struct fec_info* fec_inf = mii->priv;
14691 r = whack_reset(fep->fec.fecp);
14692 @@ -329,7 +333,7 @@ static void restart(struct net_device *dev)
14694 * adjust to duplex mode
14696 - if (dev->phydev->duplex) {
14697 + if (fep->phydev->duplex) {
14698 FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
14699 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
14701 @@ -359,7 +363,7 @@ static void stop(struct net_device *dev)
14702 const struct fs_platform_info *fpi = fep->fpi;
14703 struct fec __iomem *fecp = fep->fec.fecp;
14705 - struct fec_info *feci = dev->phydev->mdio.bus->priv;
14706 + struct fec_info* feci= fep->phydev->bus->priv;
14710 @@ -392,28 +396,52 @@ static void stop(struct net_device *dev)
14714 -static void napi_clear_event_fs(struct net_device *dev)
14715 +static void napi_clear_rx_event(struct net_device *dev)
14717 struct fs_enet_private *fep = netdev_priv(dev);
14718 struct fec __iomem *fecp = fep->fec.fecp;
14720 - FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
14721 + FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
14724 -static void napi_enable_fs(struct net_device *dev)
14725 +static void napi_enable_rx(struct net_device *dev)
14727 struct fs_enet_private *fep = netdev_priv(dev);
14728 struct fec __iomem *fecp = fep->fec.fecp;
14730 - FS(fecp, imask, FEC_NAPI_EVENT_MSK);
14731 + FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
14734 -static void napi_disable_fs(struct net_device *dev)
14735 +static void napi_disable_rx(struct net_device *dev)
14737 struct fs_enet_private *fep = netdev_priv(dev);
14738 struct fec __iomem *fecp = fep->fec.fecp;
14740 - FC(fecp, imask, FEC_NAPI_EVENT_MSK);
14741 + FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
14744 +static void napi_clear_tx_event(struct net_device *dev)
14746 + struct fs_enet_private *fep = netdev_priv(dev);
14747 + struct fec __iomem *fecp = fep->fec.fecp;
14749 + FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
14752 +static void napi_enable_tx(struct net_device *dev)
14754 + struct fs_enet_private *fep = netdev_priv(dev);
14755 + struct fec __iomem *fecp = fep->fec.fecp;
14757 + FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
14760 +static void napi_disable_tx(struct net_device *dev)
14762 + struct fs_enet_private *fep = netdev_priv(dev);
14763 + struct fec __iomem *fecp = fep->fec.fecp;
14765 + FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
14768 static void rx_bd_done(struct net_device *dev)
14769 @@ -485,9 +513,12 @@ const struct fs_ops fs_fec_ops = {
14770 .set_multicast_list = set_multicast_list,
14771 .restart = restart,
14773 - .napi_clear_event = napi_clear_event_fs,
14774 - .napi_enable = napi_enable_fs,
14775 - .napi_disable = napi_disable_fs,
14776 + .napi_clear_rx_event = napi_clear_rx_event,
14777 + .napi_enable_rx = napi_enable_rx,
14778 + .napi_disable_rx = napi_disable_rx,
14779 + .napi_clear_tx_event = napi_clear_tx_event,
14780 + .napi_enable_tx = napi_enable_tx,
14781 + .napi_disable_tx = napi_disable_tx,
14782 .rx_bd_done = rx_bd_done,
14783 .tx_kickstart = tx_kickstart,
14784 .get_int_events = get_int_events,
14785 diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
14786 index 15abd37..7a184e8 100644
14787 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
14788 +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
14789 @@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
14790 struct platform_device *ofdev = to_platform_device(fep->dev);
14792 fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
14793 - if (!fep->interrupt)
14794 + if (fep->interrupt == NO_IRQ)
14797 fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
14798 @@ -115,8 +115,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
14802 -#define SCC_NAPI_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
14803 -#define SCC_EVENT (SCCE_ENET_RXF | SCCE_ENET_TXB)
14804 +#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
14805 +#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
14806 +#define SCC_RX_EVENT (SCCE_ENET_RXF)
14807 +#define SCC_TX_EVENT (SCCE_ENET_TXB)
14808 #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
14810 static int setup_data(struct net_device *dev)
14811 @@ -128,8 +130,10 @@ static int setup_data(struct net_device *dev)
14815 - fep->ev_napi = SCC_NAPI_EVENT_MSK;
14816 - fep->ev = SCC_EVENT | SCCE_ENET_TXE;
14817 + fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
14818 + fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
14819 + fep->ev_rx = SCC_RX_EVENT;
14820 + fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
14821 fep->ev_err = SCC_ERR_EVENT_MSK;
14824 @@ -348,7 +352,7 @@ static void restart(struct net_device *dev)
14825 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
14827 /* Set full duplex mode if needed */
14828 - if (dev->phydev->duplex)
14829 + if (fep->phydev->duplex)
14830 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
14832 /* Restore multicast and promiscuous settings */
14833 @@ -375,28 +379,52 @@ static void stop(struct net_device *dev)
14834 fs_cleanup_bds(dev);
14837 -static void napi_clear_event_fs(struct net_device *dev)
14838 +static void napi_clear_rx_event(struct net_device *dev)
14840 struct fs_enet_private *fep = netdev_priv(dev);
14841 scc_t __iomem *sccp = fep->scc.sccp;
14843 - W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
14844 + W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
14847 -static void napi_enable_fs(struct net_device *dev)
14848 +static void napi_enable_rx(struct net_device *dev)
14850 struct fs_enet_private *fep = netdev_priv(dev);
14851 scc_t __iomem *sccp = fep->scc.sccp;
14853 - S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
14854 + S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
14857 -static void napi_disable_fs(struct net_device *dev)
14858 +static void napi_disable_rx(struct net_device *dev)
14860 struct fs_enet_private *fep = netdev_priv(dev);
14861 scc_t __iomem *sccp = fep->scc.sccp;
14863 - C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
14864 + C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
14867 +static void napi_clear_tx_event(struct net_device *dev)
14869 + struct fs_enet_private *fep = netdev_priv(dev);
14870 + scc_t __iomem *sccp = fep->scc.sccp;
14872 + W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
14875 +static void napi_enable_tx(struct net_device *dev)
14877 + struct fs_enet_private *fep = netdev_priv(dev);
14878 + scc_t __iomem *sccp = fep->scc.sccp;
14880 + S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
14883 +static void napi_disable_tx(struct net_device *dev)
14885 + struct fs_enet_private *fep = netdev_priv(dev);
14886 + scc_t __iomem *sccp = fep->scc.sccp;
14888 + C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
14891 static void rx_bd_done(struct net_device *dev)
14892 @@ -469,9 +497,12 @@ const struct fs_ops fs_scc_ops = {
14893 .set_multicast_list = set_multicast_list,
14894 .restart = restart,
14896 - .napi_clear_event = napi_clear_event_fs,
14897 - .napi_enable = napi_enable_fs,
14898 - .napi_disable = napi_disable_fs,
14899 + .napi_clear_rx_event = napi_clear_rx_event,
14900 + .napi_enable_rx = napi_enable_rx,
14901 + .napi_disable_rx = napi_disable_rx,
14902 + .napi_clear_tx_event = napi_clear_tx_event,
14903 + .napi_enable_tx = napi_enable_tx,
14904 + .napi_disable_tx = napi_disable_tx,
14905 .rx_bd_done = rx_bd_done,
14906 .tx_kickstart = tx_kickstart,
14907 .get_int_events = get_int_events,
14908 diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
14909 index 1f015ed..68a428d 100644
14910 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
14911 +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
14912 @@ -172,16 +172,23 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
14915 new_bus->phy_mask = ~0;
14916 + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
14917 + if (!new_bus->irq) {
14919 + goto out_unmap_regs;
14922 new_bus->parent = &ofdev->dev;
14923 platform_set_drvdata(ofdev, new_bus);
14925 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
14927 - goto out_unmap_regs;
14928 + goto out_free_irqs;
14933 + kfree(new_bus->irq);
14935 iounmap(bitbang->dir);
14937 @@ -198,6 +205,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
14938 struct bb_info *bitbang = bus->priv;
14940 mdiobus_unregister(bus);
14942 free_mdio_bitbang(bus);
14943 iounmap(bitbang->dir);
14945 diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
14946 index a89267b..2be383e 100644
14947 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
14948 +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
14949 @@ -166,16 +166,23 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
14950 clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed);
14952 new_bus->phy_mask = ~0;
14953 + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
14954 + if (!new_bus->irq) {
14956 + goto out_unmap_regs;
14959 new_bus->parent = &ofdev->dev;
14960 platform_set_drvdata(ofdev, new_bus);
14962 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
14964 - goto out_unmap_regs;
14965 + goto out_free_irqs;
14970 + kfree(new_bus->irq);
14972 iounmap(fec->fecp);
14974 @@ -193,6 +200,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
14975 struct fec_info *fec = bus->priv;
14977 mdiobus_unregister(bus);
14979 iounmap(fec->fecp);
14982 diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
14983 index 446c7b3..3c40f6b 100644
14984 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
14985 +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
14988 #include <asm/io.h>
14989 #if IS_ENABLED(CONFIG_UCC_GETH)
14990 -#include <soc/fsl/qe/ucc.h>
14991 +#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
14994 #include "gianfar.h"
14995 @@ -69,6 +69,7 @@ struct fsl_pq_mdio {
14996 struct fsl_pq_mdio_priv {
14998 struct fsl_pq_mii __iomem *regs;
14999 + int irqs[PHY_MAX_ADDR];
15003 @@ -195,15 +196,13 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
15007 -#if IS_ENABLED(CONFIG_GIANFAR)
15008 +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
15010 - * Return the TBIPA address, starting from the address
15011 - * of the mapped GFAR MDIO registers (struct gfar)
15012 * This is mildly evil, but so is our hardware for doing this.
15013 * Also, we have to cast back to struct gfar because of
15014 * definition weirdness done in gianfar.h.
15016 -static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
15017 +static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
15019 struct gfar __iomem *enet_regs = p;
15021 @@ -211,15 +210,6 @@ static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
15025 - * Return the TBIPA address, starting from the address
15026 - * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
15028 -static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
15030 - return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
15034 * Return the TBIPAR address for an eTSEC2 node
15036 static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
15037 @@ -228,14 +218,13 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
15041 -#if IS_ENABLED(CONFIG_UCC_GETH)
15042 +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
15044 - * Return the TBIPAR address for a QE MDIO node, starting from the address
15045 - * of the mapped MII registers (struct fsl_pq_mii)
15046 + * Return the TBIPAR address for a QE MDIO node
15048 static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
15050 - struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
15051 + struct fsl_pq_mdio __iomem *mdio = p;
15053 return &mdio->utbipar;
15055 @@ -306,19 +295,19 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
15058 static const struct of_device_id fsl_pq_mdio_match[] = {
15059 -#if IS_ENABLED(CONFIG_GIANFAR)
15060 +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
15062 .compatible = "fsl,gianfar-tbi",
15063 .data = &(struct fsl_pq_mdio_data) {
15065 - .get_tbipa = get_gfar_tbipa_from_mii,
15066 + .get_tbipa = get_gfar_tbipa,
15070 .compatible = "fsl,gianfar-mdio",
15071 .data = &(struct fsl_pq_mdio_data) {
15073 - .get_tbipa = get_gfar_tbipa_from_mii,
15074 + .get_tbipa = get_gfar_tbipa,
15078 @@ -326,7 +315,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
15079 .compatible = "gianfar",
15080 .data = &(struct fsl_pq_mdio_data) {
15081 .mii_offset = offsetof(struct fsl_pq_mdio, mii),
15082 - .get_tbipa = get_gfar_tbipa_from_mdio,
15083 + .get_tbipa = get_gfar_tbipa,
15087 @@ -344,7 +333,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
15091 -#if IS_ENABLED(CONFIG_UCC_GETH)
15092 +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
15094 .compatible = "fsl,ucc-mdio",
15095 .data = &(struct fsl_pq_mdio_data) {
15096 @@ -400,6 +389,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
15097 new_bus->read = &fsl_pq_mdio_read;
15098 new_bus->write = &fsl_pq_mdio_write;
15099 new_bus->reset = &fsl_pq_mdio_reset;
15100 + new_bus->irq = priv->irqs;
15102 err = of_address_to_resource(np, 0, &res);
15104 @@ -455,16 +445,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
15106 tbipa = data->get_tbipa(priv->map);
15109 - * Add consistency check to make sure TBI is contained
15110 - * within the mapped range (not because we would get a
15111 - * segfault, rather to catch bugs in computing TBI
15112 - * address). Print error message but continue anyway.
15114 - if ((void *)tbipa > priv->map + resource_size(&res) - 4)
15115 - dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
15116 - ((void *)tbipa - priv->map) + 4);
15118 iowrite32be(be32_to_cpup(prop), tbipa);
15121 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
15122 index 9061c2f..4ee080d 100644
15123 --- a/drivers/net/ethernet/freescale/gianfar.c
15124 +++ b/drivers/net/ethernet/freescale/gianfar.c
15125 @@ -107,17 +107,17 @@
15127 #include "gianfar.h"
15129 -#define TX_TIMEOUT (5*HZ)
15130 +#define TX_TIMEOUT (1*HZ)
15132 -const char gfar_driver_version[] = "2.0";
15133 +const char gfar_driver_version[] = "1.3";
15135 static int gfar_enet_open(struct net_device *dev);
15136 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
15137 static void gfar_reset_task(struct work_struct *work);
15138 static void gfar_timeout(struct net_device *dev);
15139 static int gfar_close(struct net_device *dev);
15140 -static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
15142 +static struct sk_buff *gfar_new_skb(struct net_device *dev,
15143 + dma_addr_t *bufaddr);
15144 static int gfar_set_mac_address(struct net_device *dev);
15145 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
15146 static irqreturn_t gfar_error(int irq, void *dev_id);
15147 @@ -141,7 +141,8 @@ static void gfar_netpoll(struct net_device *dev);
15149 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
15150 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
15151 -static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
15152 +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
15153 + int amount_pull, struct napi_struct *napi);
15154 static void gfar_halt_nodisable(struct gfar_private *priv);
15155 static void gfar_clear_exact_match(struct net_device *dev);
15156 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
15157 @@ -168,15 +169,17 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
15158 bdp->lstatus = cpu_to_be32(lstatus);
15161 -static void gfar_init_bds(struct net_device *ndev)
15162 +static int gfar_init_bds(struct net_device *ndev)
15164 struct gfar_private *priv = netdev_priv(ndev);
15165 struct gfar __iomem *regs = priv->gfargrp[0].regs;
15166 struct gfar_priv_tx_q *tx_queue = NULL;
15167 struct gfar_priv_rx_q *rx_queue = NULL;
15168 struct txbd8 *txbdp;
15169 + struct rxbd8 *rxbdp;
15170 u32 __iomem *rfbptr;
15172 + dma_addr_t bufaddr;
15174 for (i = 0; i < priv->num_tx_queues; i++) {
15175 tx_queue = priv->tx_queue[i];
15176 @@ -204,26 +207,40 @@ static void gfar_init_bds(struct net_device *ndev)
15177 rfbptr = ®s->rfbptr0;
15178 for (i = 0; i < priv->num_rx_queues; i++) {
15179 rx_queue = priv->rx_queue[i];
15180 + rx_queue->cur_rx = rx_queue->rx_bd_base;
15181 + rx_queue->skb_currx = 0;
15182 + rxbdp = rx_queue->rx_bd_base;
15184 - rx_queue->next_to_clean = 0;
15185 - rx_queue->next_to_use = 0;
15186 - rx_queue->next_to_alloc = 0;
15187 + for (j = 0; j < rx_queue->rx_ring_size; j++) {
15188 + struct sk_buff *skb = rx_queue->rx_skbuff[j];
15190 - /* make sure next_to_clean != next_to_use after this
15191 - * by leaving at least 1 unused descriptor
15193 - gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
15195 + bufaddr = be32_to_cpu(rxbdp->bufPtr);
15197 + skb = gfar_new_skb(ndev, &bufaddr);
15199 + netdev_err(ndev, "Can't allocate RX buffers\n");
15202 + rx_queue->rx_skbuff[j] = skb;
15205 + gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
15209 rx_queue->rfbptr = rfbptr;
15216 static int gfar_alloc_skb_resources(struct net_device *ndev)
15222 struct gfar_private *priv = netdev_priv(ndev);
15223 struct device *dev = priv->dev;
15224 struct gfar_priv_tx_q *tx_queue = NULL;
15225 @@ -262,8 +279,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
15226 rx_queue = priv->rx_queue[i];
15227 rx_queue->rx_bd_base = vaddr;
15228 rx_queue->rx_bd_dma_base = addr;
15229 - rx_queue->ndev = ndev;
15230 - rx_queue->dev = dev;
15231 + rx_queue->dev = ndev;
15232 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
15233 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
15235 @@ -278,20 +294,25 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
15236 if (!tx_queue->tx_skbuff)
15239 - for (j = 0; j < tx_queue->tx_ring_size; j++)
15240 - tx_queue->tx_skbuff[j] = NULL;
15241 + for (k = 0; k < tx_queue->tx_ring_size; k++)
15242 + tx_queue->tx_skbuff[k] = NULL;
15245 for (i = 0; i < priv->num_rx_queues; i++) {
15246 rx_queue = priv->rx_queue[i];
15247 - rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
15248 - sizeof(*rx_queue->rx_buff),
15250 - if (!rx_queue->rx_buff)
15251 + rx_queue->rx_skbuff =
15252 + kmalloc_array(rx_queue->rx_ring_size,
15253 + sizeof(*rx_queue->rx_skbuff),
15255 + if (!rx_queue->rx_skbuff)
15258 + for (j = 0; j < rx_queue->rx_ring_size; j++)
15259 + rx_queue->rx_skbuff[j] = NULL;
15262 - gfar_init_bds(ndev);
15263 + if (gfar_init_bds(ndev))
15268 @@ -333,16 +354,28 @@ static void gfar_init_rqprm(struct gfar_private *priv)
15272 -static void gfar_rx_offload_en(struct gfar_private *priv)
15273 +static void gfar_rx_buff_size_config(struct gfar_private *priv)
15275 + int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
15277 /* set this when rx hw offload (TOE) functions are being used */
15278 priv->uses_rxfcb = 0;
15280 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
15281 priv->uses_rxfcb = 1;
15283 - if (priv->hwts_rx_en || priv->rx_filer_enable)
15284 + if (priv->hwts_rx_en)
15285 priv->uses_rxfcb = 1;
15287 + if (priv->uses_rxfcb)
15288 + frame_size += GMAC_FCB_LEN;
15290 + frame_size += priv->padding;
15292 + frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
15293 + INCREMENTAL_BUFFER_SIZE;
15295 + priv->rx_buffer_size = frame_size;
15298 static void gfar_mac_rx_config(struct gfar_private *priv)
15299 @@ -351,7 +384,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
15302 if (priv->rx_filer_enable) {
15303 - rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
15304 + rctrl |= RCTRL_FILREN;
15305 /* Program the RIR0 reg with the required distribution */
15306 if (priv->poll_mode == GFAR_SQ_POLLING)
15307 gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0);
15308 @@ -483,15 +516,6 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
15309 return &dev->stats;
15312 -static int gfar_set_mac_addr(struct net_device *dev, void *p)
15314 - eth_mac_addr(dev, p);
15316 - gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
15321 static const struct net_device_ops gfar_netdev_ops = {
15322 .ndo_open = gfar_enet_open,
15323 .ndo_start_xmit = gfar_start_xmit,
15324 @@ -502,7 +526,7 @@ static const struct net_device_ops gfar_netdev_ops = {
15325 .ndo_tx_timeout = gfar_timeout,
15326 .ndo_do_ioctl = gfar_ioctl,
15327 .ndo_get_stats = gfar_get_stats,
15328 - .ndo_set_mac_address = gfar_set_mac_addr,
15329 + .ndo_set_mac_address = eth_mac_addr,
15330 .ndo_validate_addr = eth_validate_addr,
15331 #ifdef CONFIG_NET_POLL_CONTROLLER
15332 .ndo_poll_controller = gfar_netpoll,
15333 @@ -532,6 +556,22 @@ static void gfar_ints_enable(struct gfar_private *priv)
15337 +static void lock_tx_qs(struct gfar_private *priv)
15341 + for (i = 0; i < priv->num_tx_queues; i++)
15342 + spin_lock(&priv->tx_queue[i]->txlock);
15345 +static void unlock_tx_qs(struct gfar_private *priv)
15349 + for (i = 0; i < priv->num_tx_queues; i++)
15350 + spin_unlock(&priv->tx_queue[i]->txlock);
15353 static int gfar_alloc_tx_queues(struct gfar_private *priv)
15356 @@ -560,8 +600,9 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
15357 if (!priv->rx_queue[i])
15360 + priv->rx_queue[i]->rx_skbuff = NULL;
15361 priv->rx_queue[i]->qindex = i;
15362 - priv->rx_queue[i]->ndev = priv->ndev;
15363 + priv->rx_queue[i]->dev = priv->ndev;
15367 @@ -647,9 +688,9 @@ static int gfar_parse_group(struct device_node *np,
15368 if (model && strcasecmp(model, "FEC")) {
15369 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
15370 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
15371 - if (!gfar_irq(grp, TX)->irq ||
15372 - !gfar_irq(grp, RX)->irq ||
15373 - !gfar_irq(grp, ER)->irq)
15374 + if (gfar_irq(grp, TX)->irq == NO_IRQ ||
15375 + gfar_irq(grp, RX)->irq == NO_IRQ ||
15376 + gfar_irq(grp, ER)->irq == NO_IRQ)
15380 @@ -738,6 +779,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
15381 struct gfar_private *priv = NULL;
15382 struct device_node *np = ofdev->dev.of_node;
15383 struct device_node *child = NULL;
15384 + struct property *stash;
15387 unsigned int num_tx_qs, num_rx_qs;
15388 @@ -853,7 +895,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
15392 - if (of_property_read_bool(np, "bd-stash")) {
15393 + stash = of_find_property(np, "bd-stash", NULL);
15396 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
15397 priv->bd_stash_en = 1;
15399 @@ -891,8 +935,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
15400 FSL_GIANFAR_DEV_HAS_VLAN |
15401 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
15402 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
15403 - FSL_GIANFAR_DEV_HAS_TIMER |
15404 - FSL_GIANFAR_DEV_HAS_RX_FILER;
15405 + FSL_GIANFAR_DEV_HAS_TIMER;
15407 err = of_property_read_string(np, "phy-connection-type", &ctype);
15409 @@ -905,9 +948,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
15410 if (of_find_property(np, "fsl,magic-packet", NULL))
15411 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
15413 - if (of_get_property(np, "fsl,wake-on-filer", NULL))
15414 - priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
15416 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
15418 /* In the case of a fixed PHY, the DT node associated
15419 @@ -999,7 +1039,7 @@ static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
15421 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
15423 - struct phy_device *phydev = dev->phydev;
15424 + struct gfar_private *priv = netdev_priv(dev);
15426 if (!netif_running(dev))
15428 @@ -1009,10 +1049,10 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
15429 if (cmd == SIOCGHWTSTAMP)
15430 return gfar_hwtstamp_get(dev, rq);
15433 + if (!priv->phydev)
15436 - return phy_mii_ioctl(phydev, rq, cmd);
15437 + return phy_mii_ioctl(priv->phydev, rq, cmd);
15440 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
15441 @@ -1111,10 +1151,8 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
15443 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
15444 priv->errata |= GFAR_ERRATA_12;
15445 - /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
15446 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
15447 - ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
15448 - ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
15449 + ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
15450 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
15453 @@ -1156,11 +1194,12 @@ void gfar_mac_reset(struct gfar_private *priv)
15457 - gfar_rx_offload_en(priv);
15458 + /* Compute rx_buff_size based on config flags */
15459 + gfar_rx_buff_size_config(priv);
15461 /* Initialize the max receive frame/buffer lengths */
15462 - gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE);
15463 - gfar_write(®s->mrblr, GFAR_RXB_SIZE);
15464 + gfar_write(®s->maxfrm, priv->rx_buffer_size);
15465 + gfar_write(®s->mrblr, priv->rx_buffer_size);
15467 /* Initialize the Minimum Frame Length Register */
15468 gfar_write(®s->minflr, MINFLR_INIT_SETTINGS);
15469 @@ -1168,11 +1207,12 @@ void gfar_mac_reset(struct gfar_private *priv)
15470 /* Initialize MACCFG2. */
15471 tempval = MACCFG2_INIT_SETTINGS;
15473 - /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
15474 - * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
15475 - * and by checking RxBD[LG] and discarding larger than MAXFRM.
15476 + /* If the mtu is larger than the max size for standard
15477 + * ethernet frames (ie, a jumbo frame), then set maccfg2
15478 + * to allow huge frames, and to check the length
15480 - if (gfar_has_errata(priv, GFAR_ERRATA_74))
15481 + if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
15482 + gfar_has_errata(priv, GFAR_ERRATA_74))
15483 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
15485 gfar_write(®s->maccfg2, tempval);
15486 @@ -1312,7 +1352,6 @@ static void gfar_init_addr_hash_table(struct gfar_private *priv)
15488 static int gfar_probe(struct platform_device *ofdev)
15490 - struct device_node *np = ofdev->dev.of_node;
15491 struct net_device *dev = NULL;
15492 struct gfar_private *priv = NULL;
15494 @@ -1328,6 +1367,7 @@ static int gfar_probe(struct platform_device *ofdev)
15495 priv->dev = &ofdev->dev;
15496 SET_NETDEV_DEV(dev, &ofdev->dev);
15498 + spin_lock_init(&priv->bflock);
15499 INIT_WORK(&priv->reset_task, gfar_reset_task);
15501 platform_set_drvdata(ofdev, priv);
15502 @@ -1348,12 +1388,12 @@ static int gfar_probe(struct platform_device *ofdev)
15503 if (priv->poll_mode == GFAR_SQ_POLLING) {
15504 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
15505 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
15506 - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
15507 + netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
15508 gfar_poll_tx_sq, 2);
15510 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
15511 gfar_poll_rx, GFAR_DEV_WEIGHT);
15512 - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
15513 + netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
15517 @@ -1371,8 +1411,6 @@ static int gfar_probe(struct platform_device *ofdev)
15518 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
15521 - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
15523 gfar_init_addr_hash_table(priv);
15525 /* Insert receive time stamps into padding alignment bytes */
15526 @@ -1383,6 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev)
15527 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
15528 dev->needed_headroom = GMAC_FCB_LEN;
15530 + priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
15532 /* Initializing some of the rx/tx queue level parameters */
15533 for (i = 0; i < priv->num_tx_queues; i++) {
15534 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
15535 @@ -1397,9 +1437,8 @@ static int gfar_probe(struct platform_device *ofdev)
15536 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
15539 - /* Always enable rx filer if available */
15540 - priv->rx_filer_enable =
15541 - (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
15542 + /* always enable rx filer */
15543 + priv->rx_filer_enable = 1;
15544 /* Enable most messages by default */
15545 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
15546 /* use pritority h/w tx queue scheduling for single queue devices */
15547 @@ -1420,14 +1459,9 @@ static int gfar_probe(struct platform_device *ofdev)
15548 goto register_fail;
15551 - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
15552 - priv->wol_supported |= GFAR_WOL_MAGIC;
15554 - if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
15555 - priv->rx_filer_enable)
15556 - priv->wol_supported |= GFAR_WOL_FILER_UCAST;
15558 - device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
15559 + device_init_wakeup(&dev->dev,
15560 + priv->device_flags &
15561 + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
15563 /* fill out IRQ number and name fields */
15564 for (i = 0; i < priv->num_grps; i++) {
15565 @@ -1463,8 +1497,6 @@ static int gfar_probe(struct platform_device *ofdev)
15569 - if (of_phy_is_fixed_link(np))
15570 - of_phy_deregister_fixed_link(np);
15571 unmap_group_regs(priv);
15572 gfar_free_rx_queues(priv);
15573 gfar_free_tx_queues(priv);
15574 @@ -1477,16 +1509,11 @@ static int gfar_probe(struct platform_device *ofdev)
15575 static int gfar_remove(struct platform_device *ofdev)
15577 struct gfar_private *priv = platform_get_drvdata(ofdev);
15578 - struct device_node *np = ofdev->dev.of_node;
15580 of_node_put(priv->phy_node);
15581 of_node_put(priv->tbi_node);
15583 unregister_netdev(priv->ndev);
15585 - if (of_phy_is_fixed_link(np))
15586 - of_phy_deregister_fixed_link(np);
15588 unmap_group_regs(priv);
15589 gfar_free_rx_queues(priv);
15590 gfar_free_tx_queues(priv);
15591 @@ -1497,153 +1524,53 @@ static int gfar_remove(struct platform_device *ofdev)
15595 -static void __gfar_filer_disable(struct gfar_private *priv)
15597 - struct gfar __iomem *regs = priv->gfargrp[0].regs;
15600 - temp = gfar_read(®s->rctrl);
15601 - temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
15602 - gfar_write(®s->rctrl, temp);
15605 -static void __gfar_filer_enable(struct gfar_private *priv)
15607 - struct gfar __iomem *regs = priv->gfargrp[0].regs;
15610 - temp = gfar_read(®s->rctrl);
15611 - temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
15612 - gfar_write(®s->rctrl, temp);
15615 -/* Filer rules implementing wol capabilities */
15616 -static void gfar_filer_config_wol(struct gfar_private *priv)
15621 - __gfar_filer_disable(priv);
15623 - /* clear the filer table, reject any packet by default */
15624 - rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
15625 - for (i = 0; i <= MAX_FILER_IDX; i++)
15626 - gfar_write_filer(priv, i, rqfcr, 0);
15629 - if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
15630 - /* unicast packet, accept it */
15631 - struct net_device *ndev = priv->ndev;
15632 - /* get the default rx queue index */
15633 - u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
15634 - u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
15635 - (ndev->dev_addr[1] << 8) |
15636 - ndev->dev_addr[2];
15638 - rqfcr = (qindex << 10) | RQFCR_AND |
15639 - RQFCR_CMP_EXACT | RQFCR_PID_DAH;
15641 - gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
15643 - dest_mac_addr = (ndev->dev_addr[3] << 16) |
15644 - (ndev->dev_addr[4] << 8) |
15645 - ndev->dev_addr[5];
15646 - rqfcr = (qindex << 10) | RQFCR_GPI |
15647 - RQFCR_CMP_EXACT | RQFCR_PID_DAL;
15648 - gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
15651 - __gfar_filer_enable(priv);
15654 -static void gfar_filer_restore_table(struct gfar_private *priv)
15656 - u32 rqfcr, rqfpr;
15659 - __gfar_filer_disable(priv);
15661 - for (i = 0; i <= MAX_FILER_IDX; i++) {
15662 - rqfcr = priv->ftp_rqfcr[i];
15663 - rqfpr = priv->ftp_rqfpr[i];
15664 - gfar_write_filer(priv, i, rqfcr, rqfpr);
15667 - __gfar_filer_enable(priv);
15670 -/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
15671 -static void gfar_start_wol_filer(struct gfar_private *priv)
15673 - struct gfar __iomem *regs = priv->gfargrp[0].regs;
15677 - /* Enable Rx hw queues */
15678 - gfar_write(®s->rqueue, priv->rqueue);
15680 - /* Initialize DMACTRL to have WWR and WOP */
15681 - tempval = gfar_read(®s->dmactrl);
15682 - tempval |= DMACTRL_INIT_SETTINGS;
15683 - gfar_write(®s->dmactrl, tempval);
15685 - /* Make sure we aren't stopped */
15686 - tempval = gfar_read(®s->dmactrl);
15687 - tempval &= ~DMACTRL_GRS;
15688 - gfar_write(®s->dmactrl, tempval);
15690 - for (i = 0; i < priv->num_grps; i++) {
15691 - regs = priv->gfargrp[i].regs;
15692 - /* Clear RHLT, so that the DMA starts polling now */
15693 - gfar_write(®s->rstat, priv->gfargrp[i].rstat);
15694 - /* enable the Filer General Purpose Interrupt */
15695 - gfar_write(®s->imask, IMASK_FGPI);
15698 - /* Enable Rx DMA */
15699 - tempval = gfar_read(®s->maccfg1);
15700 - tempval |= MACCFG1_RX_EN;
15701 - gfar_write(®s->maccfg1, tempval);
15704 static int gfar_suspend(struct device *dev)
15706 struct gfar_private *priv = dev_get_drvdata(dev);
15707 struct net_device *ndev = priv->ndev;
15708 struct gfar __iomem *regs = priv->gfargrp[0].regs;
15709 + unsigned long flags;
15711 - u16 wol = priv->wol_opts;
15713 - if (!netif_running(ndev))
15715 + int magic_packet = priv->wol_en &&
15716 + (priv->device_flags &
15717 + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
15719 - disable_napi(priv);
15720 - netif_tx_lock(ndev);
15721 netif_device_detach(ndev);
15722 - netif_tx_unlock(ndev);
15725 + if (netif_running(ndev)) {
15727 - if (wol & GFAR_WOL_MAGIC) {
15728 - /* Enable interrupt on Magic Packet */
15729 - gfar_write(®s->imask, IMASK_MAG);
15730 + local_irq_save(flags);
15731 + lock_tx_qs(priv);
15733 - /* Enable Magic Packet mode */
15734 - tempval = gfar_read(®s->maccfg2);
15735 - tempval |= MACCFG2_MPEN;
15736 - gfar_write(®s->maccfg2, tempval);
15737 + gfar_halt_nodisable(priv);
15739 - /* re-enable the Rx block */
15740 + /* Disable Tx, and Rx if wake-on-LAN is disabled. */
15741 tempval = gfar_read(®s->maccfg1);
15742 - tempval |= MACCFG1_RX_EN;
15744 + tempval &= ~MACCFG1_TX_EN;
15746 + if (!magic_packet)
15747 + tempval &= ~MACCFG1_RX_EN;
15749 gfar_write(®s->maccfg1, tempval);
15751 - } else if (wol & GFAR_WOL_FILER_UCAST) {
15752 - gfar_filer_config_wol(priv);
15753 - gfar_start_wol_filer(priv);
15754 + unlock_tx_qs(priv);
15755 + local_irq_restore(flags);
15758 - phy_stop(ndev->phydev);
15759 + disable_napi(priv);
15761 + if (magic_packet) {
15762 + /* Enable interrupt on Magic Packet */
15763 + gfar_write(®s->imask, IMASK_MAG);
15765 + /* Enable Magic Packet mode */
15766 + tempval = gfar_read(®s->maccfg2);
15767 + tempval |= MACCFG2_MPEN;
15768 + gfar_write(®s->maccfg2, tempval);
15770 + phy_stop(priv->phydev);
15775 @@ -1654,30 +1581,37 @@ static int gfar_resume(struct device *dev)
15776 struct gfar_private *priv = dev_get_drvdata(dev);
15777 struct net_device *ndev = priv->ndev;
15778 struct gfar __iomem *regs = priv->gfargrp[0].regs;
15779 + unsigned long flags;
15781 - u16 wol = priv->wol_opts;
15782 + int magic_packet = priv->wol_en &&
15783 + (priv->device_flags &
15784 + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
15786 - if (!netif_running(ndev))
15787 + if (!netif_running(ndev)) {
15788 + netif_device_attach(ndev);
15792 - if (wol & GFAR_WOL_MAGIC) {
15793 - /* Disable Magic Packet mode */
15794 - tempval = gfar_read(®s->maccfg2);
15795 - tempval &= ~MACCFG2_MPEN;
15796 - gfar_write(®s->maccfg2, tempval);
15797 + if (!magic_packet && priv->phydev)
15798 + phy_start(priv->phydev);
15800 - } else if (wol & GFAR_WOL_FILER_UCAST) {
15801 - /* need to stop rx only, tx is already down */
15803 - gfar_filer_restore_table(priv);
15804 + /* Disable Magic Packet mode, in case something
15805 + * else woke us up.
15807 + local_irq_save(flags);
15808 + lock_tx_qs(priv);
15811 - phy_start(ndev->phydev);
15813 + tempval = gfar_read(®s->maccfg2);
15814 + tempval &= ~MACCFG2_MPEN;
15815 + gfar_write(®s->maccfg2, tempval);
15819 + unlock_tx_qs(priv);
15820 + local_irq_restore(flags);
15822 netif_device_attach(ndev);
15827 @@ -1694,7 +1628,10 @@ static int gfar_restore(struct device *dev)
15831 - gfar_init_bds(ndev);
15832 + if (gfar_init_bds(ndev)) {
15833 + free_skb_resources(priv);
15837 gfar_mac_reset(priv);
15839 @@ -1706,8 +1643,8 @@ static int gfar_restore(struct device *dev)
15840 priv->oldspeed = 0;
15841 priv->oldduplex = -1;
15843 - if (ndev->phydev)
15844 - phy_start(ndev->phydev);
15845 + if (priv->phydev)
15846 + phy_start(priv->phydev);
15848 netif_device_attach(ndev);
15850 @@ -1786,7 +1723,6 @@ static int init_phy(struct net_device *dev)
15851 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
15852 GFAR_SUPPORTED_GBIT : 0;
15853 phy_interface_t interface;
15854 - struct phy_device *phydev;
15857 priv->oldspeed = 0;
15858 @@ -1794,9 +1730,9 @@ static int init_phy(struct net_device *dev)
15860 interface = gfar_get_interface(dev);
15862 - phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
15865 + priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
15867 + if (!priv->phydev) {
15868 dev_err(&dev->dev, "could not attach to PHY\n");
15871 @@ -1805,11 +1741,11 @@ static int init_phy(struct net_device *dev)
15872 gfar_configure_serdes(dev);
15874 /* Remove any features not supported by the controller */
15875 - phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
15876 - phydev->advertising = phydev->supported;
15877 + priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
15878 + priv->phydev->advertising = priv->phydev->supported;
15880 /* Add support for flow control, but don't advertise it by default */
15881 - phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
15882 + priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
15886 @@ -1844,10 +1780,8 @@ static void gfar_configure_serdes(struct net_device *dev)
15887 * everything for us? Resetting it takes the link down and requires
15888 * several seconds for it to come back.
15890 - if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
15891 - put_device(&tbiphy->mdio.dev);
15892 + if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
15896 /* Single clk mode, mii mode off(for serdes communication) */
15897 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
15898 @@ -1859,8 +1793,6 @@ static void gfar_configure_serdes(struct net_device *dev)
15899 phy_write(tbiphy, MII_BMCR,
15900 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
15903 - put_device(&tbiphy->mdio.dev);
15906 static int __gfar_is_rx_idle(struct gfar_private *priv)
15907 @@ -1953,7 +1885,7 @@ void stop_gfar(struct net_device *dev)
15908 /* disable ints and gracefully shut down Rx/Tx DMA */
15911 - phy_stop(dev->phydev);
15912 + phy_stop(priv->phydev);
15914 free_skb_resources(priv);
15916 @@ -1990,32 +1922,26 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
15918 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
15920 + struct rxbd8 *rxbdp;
15921 + struct gfar_private *priv = netdev_priv(rx_queue->dev);
15924 - struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
15926 - if (rx_queue->skb)
15927 - dev_kfree_skb(rx_queue->skb);
15928 + rxbdp = rx_queue->rx_bd_base;
15930 for (i = 0; i < rx_queue->rx_ring_size; i++) {
15931 - struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
15933 + if (rx_queue->rx_skbuff[i]) {
15934 + dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
15935 + priv->rx_buffer_size,
15936 + DMA_FROM_DEVICE);
15937 + dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
15938 + rx_queue->rx_skbuff[i] = NULL;
15940 rxbdp->lstatus = 0;
15947 - dma_unmap_single(rx_queue->dev, rxb->dma,
15948 - PAGE_SIZE, DMA_FROM_DEVICE);
15949 - __free_page(rxb->page);
15951 - rxb->page = NULL;
15954 - kfree(rx_queue->rx_buff);
15955 - rx_queue->rx_buff = NULL;
15956 + kfree(rx_queue->rx_skbuff);
15957 + rx_queue->rx_skbuff = NULL;
15960 /* If there are any tx skbs or rx skbs still around, free them.
15961 @@ -2040,7 +1966,7 @@ static void free_skb_resources(struct gfar_private *priv)
15963 for (i = 0; i < priv->num_rx_queues; i++) {
15964 rx_queue = priv->rx_queue[i];
15965 - if (rx_queue->rx_buff)
15966 + if (rx_queue->rx_skbuff)
15967 free_skb_rx_queue(rx_queue);
15970 @@ -2085,7 +2011,7 @@ void gfar_start(struct gfar_private *priv)
15972 gfar_ints_enable(priv);
15974 - netif_trans_update(priv->ndev); /* prevent tx timeout */
15975 + priv->ndev->trans_start = jiffies; /* prevent tx timeout */
15978 static void free_grp_irqs(struct gfar_priv_grp *grp)
15979 @@ -2116,8 +2042,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
15983 - enable_irq_wake(gfar_irq(grp, ER)->irq);
15985 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
15986 gfar_irq(grp, TX)->name, grp);
15988 @@ -2132,8 +2056,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
15989 gfar_irq(grp, RX)->irq);
15992 - enable_irq_wake(gfar_irq(grp, RX)->irq);
15995 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
15996 gfar_irq(grp, TX)->name, grp);
15997 @@ -2142,7 +2064,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
15998 gfar_irq(grp, TX)->irq);
16001 - enable_irq_wake(gfar_irq(grp, TX)->irq);
16005 @@ -2208,12 +2129,7 @@ int startup_gfar(struct net_device *ndev)
16006 /* Start Rx/Tx DMA and enable the interrupts */
16009 - /* force link state update after mac reset */
16010 - priv->oldlink = 0;
16011 - priv->oldspeed = 0;
16012 - priv->oldduplex = -1;
16014 - phy_start(ndev->phydev);
16015 + phy_start(priv->phydev);
16019 @@ -2242,6 +2158,8 @@ static int gfar_enet_open(struct net_device *dev)
16023 + device_set_wakeup_enable(&dev->dev, priv->wol_en);
16028 @@ -2283,7 +2201,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
16029 fcb->flags = flags;
16032 -static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
16033 +inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
16035 fcb->flags |= TXFCB_VLN;
16036 fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
16037 @@ -2333,10 +2251,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16038 struct txfcb *fcb = NULL;
16039 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
16041 - skb_frag_t *frag;
16043 int do_tstamp, do_csum, do_vlan;
16045 + unsigned long flags;
16046 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
16048 rq = skb->queue_mapping;
16049 @@ -2401,6 +2319,52 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16050 txbdp = txbdp_start = tx_queue->cur_tx;
16051 lstatus = be32_to_cpu(txbdp->lstatus);
16053 + /* Time stamp insertion requires one additional TxBD */
16054 + if (unlikely(do_tstamp))
16055 + txbdp_tstamp = txbdp = next_txbd(txbdp, base,
16056 + tx_queue->tx_ring_size);
16058 + if (nr_frags == 0) {
16059 + if (unlikely(do_tstamp)) {
16060 + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
16062 + lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16063 + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
16065 + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16068 + /* Place the fragment addresses and lengths into the TxBDs */
16069 + for (i = 0; i < nr_frags; i++) {
16070 + unsigned int frag_len;
16071 + /* Point at the next BD, wrapping as needed */
16072 + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
16074 + frag_len = skb_shinfo(skb)->frags[i].size;
16076 + lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
16077 + BD_LFLAG(TXBD_READY);
16079 + /* Handle the last BD specially */
16080 + if (i == nr_frags - 1)
16081 + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16083 + bufaddr = skb_frag_dma_map(priv->dev,
16084 + &skb_shinfo(skb)->frags[i],
16088 + if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
16089 + goto dma_map_err;
16091 + /* set the TxBD length and buffer pointer */
16092 + txbdp->bufPtr = cpu_to_be32(bufaddr);
16093 + txbdp->lstatus = cpu_to_be32(lstatus);
16096 + lstatus = be32_to_cpu(txbdp_start->lstatus);
16099 /* Add TxPAL between FCB and frame if required */
16100 if (unlikely(do_tstamp)) {
16101 skb_push(skb, GMAC_TXPAL_LEN);
16102 @@ -2435,6 +2399,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16104 gfar_tx_vlan(skb, fcb);
16106 + /* Setup tx hardware time stamping if requested */
16107 + if (unlikely(do_tstamp)) {
16108 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
16112 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
16114 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
16115 @@ -2442,47 +2412,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16117 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
16119 - /* Time stamp insertion requires one additional TxBD */
16120 - if (unlikely(do_tstamp))
16121 - txbdp_tstamp = txbdp = next_txbd(txbdp, base,
16122 - tx_queue->tx_ring_size);
16124 - if (likely(!nr_frags)) {
16125 - if (likely(!do_tstamp))
16126 - lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16128 - u32 lstatus_start = lstatus;
16130 - /* Place the fragment addresses and lengths into the TxBDs */
16131 - frag = &skb_shinfo(skb)->frags[0];
16132 - for (i = 0; i < nr_frags; i++, frag++) {
16133 - unsigned int size;
16135 - /* Point at the next BD, wrapping as needed */
16136 - txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
16138 - size = skb_frag_size(frag);
16140 - lstatus = be32_to_cpu(txbdp->lstatus) | size |
16141 - BD_LFLAG(TXBD_READY);
16143 - /* Handle the last BD specially */
16144 - if (i == nr_frags - 1)
16145 - lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16147 - bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
16148 - size, DMA_TO_DEVICE);
16149 - if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
16150 - goto dma_map_err;
16152 - /* set the TxBD length and buffer pointer */
16153 - txbdp->bufPtr = cpu_to_be32(bufaddr);
16154 - txbdp->lstatus = cpu_to_be32(lstatus);
16157 - lstatus = lstatus_start;
16160 /* If time stamping is requested one additional TxBD must be set up. The
16161 * first TxBD points to the FCB and must have a data length of
16162 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
16163 @@ -2493,25 +2422,31 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16165 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
16166 bufaddr += fcb_len;
16168 lstatus_ts |= BD_LFLAG(TXBD_READY) |
16169 (skb_headlen(skb) - fcb_len);
16171 - lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16173 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
16174 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
16175 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
16177 - /* Setup tx hardware time stamping */
16178 - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
16181 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
16184 netdev_tx_sent_queue(txq, bytes_sent);
16186 + /* We can work in parallel with gfar_clean_tx_ring(), except
16187 + * when modifying num_txbdfree. Note that we didn't grab the lock
16188 + * when we were reading the num_txbdfree and checking for available
16189 + * space, that's because outside of this function it can only grow,
16190 + * and once we've got needed space, it cannot suddenly disappear.
16192 + * The lock also protects us from gfar_error(), which can modify
16193 + * regs->tstat and thus retrigger the transfers, which is why we
16194 + * also must grab the lock before setting ready bit for the first
16195 + * to be transmitted BD.
16197 + spin_lock_irqsave(&tx_queue->txlock, flags);
16201 txbdp_start->lstatus = cpu_to_be32(lstatus);
16202 @@ -2528,15 +2463,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16204 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
16206 - /* We can work in parallel with gfar_clean_tx_ring(), except
16207 - * when modifying num_txbdfree. Note that we didn't grab the lock
16208 - * when we were reading the num_txbdfree and checking for available
16209 - * space, that's because outside of this function it can only grow.
16211 - spin_lock_bh(&tx_queue->txlock);
16212 /* reduce TxBD free count */
16213 tx_queue->num_txbdfree -= (nr_txbds);
16214 - spin_unlock_bh(&tx_queue->txlock);
16216 /* If the next BD still needs to be cleaned up, then the bds
16217 * are full. We need to tell the kernel to stop sending us stuff.
16218 @@ -2550,6 +2478,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16219 /* Tell the DMA to go go go */
16220 gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
16222 + /* Unlock priv */
16223 + spin_unlock_irqrestore(&tx_queue->txlock, flags);
16225 return NETDEV_TX_OK;
16228 @@ -2582,7 +2513,8 @@ static int gfar_close(struct net_device *dev)
16231 /* Disconnect from the PHY */
16232 - phy_disconnect(dev->phydev);
16233 + phy_disconnect(priv->phydev);
16234 + priv->phydev = NULL;
16236 gfar_free_irq(priv);
16238 @@ -2602,7 +2534,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
16239 struct gfar_private *priv = netdev_priv(dev);
16240 int frame_size = new_mtu + ETH_HLEN;
16242 - if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
16243 + if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
16244 netif_err(priv, drv, dev, "Invalid MTU setting\n");
16247 @@ -2656,6 +2588,15 @@ static void gfar_timeout(struct net_device *dev)
16248 schedule_work(&priv->reset_task);
16251 +static void gfar_align_skb(struct sk_buff *skb)
16253 + /* We need the data buffer to be aligned properly. We will reserve
16254 + * as many bytes as needed to align the data properly
16256 + skb_reserve(skb, RXBUF_ALIGNMENT -
16257 + (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
16260 /* Interrupt Handler for Transmit complete */
16261 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16263 @@ -2681,6 +2622,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16264 skb_dirtytx = tx_queue->skb_dirtytx;
16266 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
16267 + unsigned long flags;
16269 frags = skb_shinfo(skb)->nr_frags;
16271 @@ -2713,11 +2655,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16273 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
16274 struct skb_shared_hwtstamps shhwtstamps;
16275 - u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
16277 +			u64 *ns = (u64 *)(((unsigned long)skb->data + 0x10) & ~0x7);
16279 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
16280 - shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
16281 + shhwtstamps.hwtstamp = ns_to_ktime(*ns);
16282 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
16283 skb_tstamp_tx(skb, &shhwtstamps);
16284 gfar_clear_txbd_status(bdp);
16285 @@ -2745,9 +2686,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16286 TX_RING_MOD_MASK(tx_ring_size);
16289 - spin_lock(&tx_queue->txlock);
16290 + spin_lock_irqsave(&tx_queue->txlock, flags);
16291 tx_queue->num_txbdfree += nr_txbds;
16292 - spin_unlock(&tx_queue->txlock);
16293 + spin_unlock_irqrestore(&tx_queue->txlock, flags);
16296 /* If we freed a buffer, we can restart transmission, if necessary */
16297 @@ -2763,85 +2704,49 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16298 netdev_tx_completed_queue(txq, howmany, bytes_sent);
16301 -static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
16302 +static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
16304 - struct page *page;
16307 - page = dev_alloc_page();
16308 - if (unlikely(!page))
16311 - addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
16312 - if (unlikely(dma_mapping_error(rxq->dev, addr))) {
16313 - __free_page(page);
16317 + struct gfar_private *priv = netdev_priv(dev);
16318 + struct sk_buff *skb;
16321 - rxb->page = page;
16322 - rxb->page_offset = 0;
16323 + skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
16329 + gfar_align_skb(skb);
16331 -static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
16333 - struct gfar_private *priv = netdev_priv(rx_queue->ndev);
16334 - struct gfar_extra_stats *estats = &priv->extra_stats;
16336 - netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
16337 - atomic64_inc(&estats->rx_alloc_err);
16341 -static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
16343 +static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
16345 - struct rxbd8 *bdp;
16346 - struct gfar_rx_buff *rxb;
16349 - i = rx_queue->next_to_use;
16350 - bdp = &rx_queue->rx_bd_base[i];
16351 - rxb = &rx_queue->rx_buff[i];
16353 - while (alloc_cnt--) {
16354 - /* try reuse page */
16355 - if (unlikely(!rxb->page)) {
16356 - if (unlikely(!gfar_new_page(rx_queue, rxb))) {
16357 - gfar_rx_alloc_err(rx_queue);
16361 + struct gfar_private *priv = netdev_priv(dev);
16362 + struct sk_buff *skb;
16365 - /* Setup the new RxBD */
16366 - gfar_init_rxbdp(rx_queue, bdp,
16367 - rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
16368 + skb = gfar_alloc_skb(dev);
16372 - /* Update to the next pointer */
16376 - if (unlikely(++i == rx_queue->rx_ring_size)) {
16378 - bdp = rx_queue->rx_bd_base;
16379 - rxb = rx_queue->rx_buff;
16381 + addr = dma_map_single(priv->dev, skb->data,
16382 + priv->rx_buffer_size, DMA_FROM_DEVICE);
16383 + if (unlikely(dma_mapping_error(priv->dev, addr))) {
16384 + dev_kfree_skb_any(skb);
16388 - rx_queue->next_to_use = i;
16389 - rx_queue->next_to_alloc = i;
16394 -static void count_errors(u32 lstatus, struct net_device *ndev)
16395 +static inline void count_errors(unsigned short status, struct net_device *dev)
16397 - struct gfar_private *priv = netdev_priv(ndev);
16398 - struct net_device_stats *stats = &ndev->stats;
16399 + struct gfar_private *priv = netdev_priv(dev);
16400 + struct net_device_stats *stats = &dev->stats;
16401 struct gfar_extra_stats *estats = &priv->extra_stats;
16403 /* If the packet was truncated, none of the other errors matter */
16404 - if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
16405 + if (status & RXBD_TRUNCATED) {
16406 stats->rx_length_errors++;
16408 atomic64_inc(&estats->rx_trunc);
16409 @@ -2849,25 +2754,25 @@ static void count_errors(u32 lstatus, struct net_device *ndev)
16412 /* Count the errors, if there were any */
16413 - if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
16414 + if (status & (RXBD_LARGE | RXBD_SHORT)) {
16415 stats->rx_length_errors++;
16417 - if (lstatus & BD_LFLAG(RXBD_LARGE))
16418 + if (status & RXBD_LARGE)
16419 atomic64_inc(&estats->rx_large);
16421 atomic64_inc(&estats->rx_short);
16423 - if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
16424 + if (status & RXBD_NONOCTET) {
16425 stats->rx_frame_errors++;
16426 atomic64_inc(&estats->rx_nonoctet);
16428 - if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
16429 + if (status & RXBD_CRCERR) {
16430 atomic64_inc(&estats->rx_crcerr);
16431 stats->rx_crc_errors++;
16433 - if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
16434 + if (status & RXBD_OVERRUN) {
16435 atomic64_inc(&estats->rx_overrun);
16436 - stats->rx_over_errors++;
16437 + stats->rx_crc_errors++;
16441 @@ -2875,14 +2780,7 @@ irqreturn_t gfar_receive(int irq, void *grp_id)
16443 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
16444 unsigned long flags;
16445 - u32 imask, ievent;
16447 - ievent = gfar_read(&grp->regs->ievent);
16449 - if (unlikely(ievent & IEVENT_FGPI)) {
16450 - gfar_write(&grp->regs->ievent, IEVENT_FGPI);
16451 - return IRQ_HANDLED;
16455 if (likely(napi_schedule_prep(&grp->napi_rx))) {
16456 spin_lock_irqsave(&grp->grplock, flags);
16457 @@ -2925,101 +2823,6 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
16458 return IRQ_HANDLED;
16461 -static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
16462 - struct sk_buff *skb, bool first)
16464 - unsigned int size = lstatus & BD_LENGTH_MASK;
16465 - struct page *page = rxb->page;
16466 - bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
16468 - /* Remove the FCS from the packet length */
16470 - size -= ETH_FCS_LEN;
16472 - if (likely(first)) {
16473 - skb_put(skb, size);
16475 - /* the last fragments' length contains the full frame length */
16477 - size -= skb->len;
16479 - /* in case the last fragment consisted only of the FCS */
16481 - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
16482 - rxb->page_offset + RXBUF_ALIGNMENT,
16483 - size, GFAR_RXB_TRUESIZE);
16486 - /* try reuse page */
16487 - if (unlikely(page_count(page) != 1))
16490 - /* change offset to the other half */
16491 - rxb->page_offset ^= GFAR_RXB_TRUESIZE;
16493 - page_ref_inc(page);
16498 -static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
16499 - struct gfar_rx_buff *old_rxb)
16501 - struct gfar_rx_buff *new_rxb;
16502 - u16 nta = rxq->next_to_alloc;
16504 - new_rxb = &rxq->rx_buff[nta];
16506 - /* find next buf that can reuse a page */
16508 - rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
16510 - /* copy page reference */
16511 - *new_rxb = *old_rxb;
16513 - /* sync for use by the device */
16514 - dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
16515 - old_rxb->page_offset,
16516 - GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
16519 -static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
16520 - u32 lstatus, struct sk_buff *skb)
16522 - struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
16523 - struct page *page = rxb->page;
16524 - bool first = false;
16526 - if (likely(!skb)) {
16527 - void *buff_addr = page_address(page) + rxb->page_offset;
16529 - skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
16530 - if (unlikely(!skb)) {
16531 - gfar_rx_alloc_err(rx_queue);
16534 - skb_reserve(skb, RXBUF_ALIGNMENT);
16538 - dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
16539 - GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
16541 - if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
16542 - /* reuse the free half of the page */
16543 - gfar_reuse_rx_page(rx_queue, rxb);
16545 - /* page cannot be reused, unmap it */
16546 - dma_unmap_page(rx_queue->dev, rxb->dma,
16547 - PAGE_SIZE, DMA_FROM_DEVICE);
16550 - /* clear rxb content */
16551 - rxb->page = NULL;
16556 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
16558 /* If valid headers were found, and valid sums
16559 @@ -3034,9 +2837,10 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
16562 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
16563 -static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
16564 +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
16565 + int amount_pull, struct napi_struct *napi)
16567 - struct gfar_private *priv = netdev_priv(ndev);
16568 + struct gfar_private *priv = netdev_priv(dev);
16569 struct rxfcb *fcb = NULL;
16571 /* fcb is at the beginning if exists */
16572 @@ -3045,8 +2849,10 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
16573 /* Remove the FCB from the skb
16574 * Remove the padded bytes, if there are any
16576 - if (priv->uses_rxfcb)
16577 - skb_pull(skb, GMAC_FCB_LEN);
16578 + if (amount_pull) {
16579 + skb_record_rx_queue(skb, fcb->rq);
16580 + skb_pull(skb, amount_pull);
16583 /* Get receive timestamp from the skb */
16584 if (priv->hwts_rx_en) {
16585 @@ -3054,26 +2860,30 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
16586 u64 *ns = (u64 *) skb->data;
16588 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
16589 - shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
16590 + shhwtstamps->hwtstamp = ns_to_ktime(*ns);
16594 skb_pull(skb, priv->padding);
16596 - if (ndev->features & NETIF_F_RXCSUM)
16597 + if (dev->features & NETIF_F_RXCSUM)
16598 gfar_rx_checksum(skb, fcb);
16600 /* Tell the skb what kind of packet this is */
16601 - skb->protocol = eth_type_trans(skb, ndev);
16602 + skb->protocol = eth_type_trans(skb, dev);
16604 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
16605 * Even if vlan rx accel is disabled, on some chips
16606 * RXFCB_VLN is pseudo randomly set.
16608 - if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
16609 + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
16610 be16_to_cpu(fcb->flags) & RXFCB_VLN)
16611 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
16612 be16_to_cpu(fcb->vlctl));
16614 + /* Send the packet up the stack */
16615 + napi_gro_receive(napi, skb);
16619 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
16620 @@ -3082,89 +2892,91 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
16622 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
16624 - struct net_device *ndev = rx_queue->ndev;
16625 - struct gfar_private *priv = netdev_priv(ndev);
16626 - struct rxbd8 *bdp;
16627 - int i, howmany = 0;
16628 - struct sk_buff *skb = rx_queue->skb;
16629 - int cleaned_cnt = gfar_rxbd_unused(rx_queue);
16630 - unsigned int total_bytes = 0, total_pkts = 0;
16631 + struct net_device *dev = rx_queue->dev;
16632 + struct rxbd8 *bdp, *base;
16633 + struct sk_buff *skb;
16637 + struct gfar_private *priv = netdev_priv(dev);
16639 /* Get the first full descriptor */
16640 - i = rx_queue->next_to_clean;
16641 + bdp = rx_queue->cur_rx;
16642 + base = rx_queue->rx_bd_base;
16644 - while (rx_work_limit--) {
16646 + amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
16648 - if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
16649 - gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
16653 - bdp = &rx_queue->rx_bd_base[i];
16654 - lstatus = be32_to_cpu(bdp->lstatus);
16655 - if (lstatus & BD_LFLAG(RXBD_EMPTY))
16657 + while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
16658 + struct sk_buff *newskb;
16659 + dma_addr_t bufaddr;
16661 - /* order rx buffer descriptor reads */
16664 - /* fetch next to clean buffer from the ring */
16665 - skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
16666 - if (unlikely(!skb))
16671 + /* Add another skb for the future */
16672 + newskb = gfar_new_skb(dev, &bufaddr);
16674 - if (unlikely(++i == rx_queue->rx_ring_size))
16676 + skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
16678 - rx_queue->next_to_clean = i;
16680 - /* fetch next buffer if not the last in frame */
16681 - if (!(lstatus & BD_LFLAG(RXBD_LAST)))
16683 + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
16684 + priv->rx_buffer_size, DMA_FROM_DEVICE);
16686 + if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
16687 + be16_to_cpu(bdp->length) > priv->rx_buffer_size))
16688 + bdp->status = cpu_to_be16(RXBD_LARGE);
16690 + /* We drop the frame if we failed to allocate a new buffer */
16691 + if (unlikely(!newskb ||
16692 + !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
16693 + be16_to_cpu(bdp->status) & RXBD_ERR)) {
16694 + count_errors(be16_to_cpu(bdp->status), dev);
16696 + if (unlikely(!newskb)) {
16698 + bufaddr = be32_to_cpu(bdp->bufPtr);
16700 + dev_kfree_skb(skb);
16702 + /* Increment the number of packets */
16703 + rx_queue->stats.rx_packets++;
16706 + if (likely(skb)) {
16707 + pkt_len = be16_to_cpu(bdp->length) -
16709 + /* Remove the FCS from the packet length */
16710 + skb_put(skb, pkt_len);
16711 + rx_queue->stats.rx_bytes += pkt_len;
16712 + skb_record_rx_queue(skb, rx_queue->qindex);
16713 + gfar_process_frame(dev, skb, amount_pull,
16714 + &rx_queue->grp->napi_rx);
16716 - if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
16717 - count_errors(lstatus, ndev);
16719 + netif_warn(priv, rx_err, dev, "Missing skb!\n");
16720 + rx_queue->stats.rx_dropped++;
16721 + atomic64_inc(&priv->extra_stats.rx_skbmissing);
16724 - /* discard faulty buffer */
16725 - dev_kfree_skb(skb);
16727 - rx_queue->stats.rx_dropped++;
16731 - /* Increment the number of packets */
16733 - total_bytes += skb->len;
16734 + rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
16736 - skb_record_rx_queue(skb, rx_queue->qindex);
16737 + /* Setup the new bdp */
16738 + gfar_init_rxbdp(rx_queue, bdp, bufaddr);
16740 - gfar_process_frame(ndev, skb);
16741 + /* Update Last Free RxBD pointer for LFC */
16742 + if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
16743 + gfar_write(rx_queue->rfbptr, (u32)bdp);
16745 - /* Send the packet up the stack */
16746 - napi_gro_receive(&rx_queue->grp->napi_rx, skb);
16747 + /* Update to the next pointer */
16748 + bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
16751 + /* update to point at the next skb */
16752 + rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
16753 + RX_RING_MOD_MASK(rx_queue->rx_ring_size);
16756 - /* Store incomplete frames for completion */
16757 - rx_queue->skb = skb;
16759 - rx_queue->stats.rx_packets += total_pkts;
16760 - rx_queue->stats.rx_bytes += total_bytes;
16763 - gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
16765 - /* Update Last Free RxBD pointer for LFC */
16766 - if (unlikely(priv->tx_actual_en)) {
16767 - u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
16769 - gfar_write(rx_queue->rfbptr, bdp_dma);
16771 + /* Update the current rxbd pointer to be the next one */
16772 + rx_queue->cur_rx = bdp;
16776 @@ -3396,7 +3208,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
16777 static void adjust_link(struct net_device *dev)
16779 struct gfar_private *priv = netdev_priv(dev);
16780 - struct phy_device *phydev = dev->phydev;
16781 + struct phy_device *phydev = priv->phydev;
16783 if (unlikely(phydev->link != priv->oldlink ||
16784 (phydev->link && (phydev->duplex != priv->oldduplex ||
16785 @@ -3599,19 +3411,30 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
16786 if (events & IEVENT_CRL)
16787 dev->stats.tx_aborted_errors++;
16788 if (events & IEVENT_XFUN) {
16789 + unsigned long flags;
16791 netif_dbg(priv, tx_err, dev,
16792 "TX FIFO underrun, packet dropped\n");
16793 dev->stats.tx_dropped++;
16794 atomic64_inc(&priv->extra_stats.tx_underrun);
16796 - schedule_work(&priv->reset_task);
16797 + local_irq_save(flags);
16798 + lock_tx_qs(priv);
16800 + /* Reactivate the Tx Queues */
16801 + gfar_write(®s->tstat, gfargrp->tstat);
16803 + unlock_tx_qs(priv);
16804 + local_irq_restore(flags);
16806 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
16808 if (events & IEVENT_BSY) {
16809 - dev->stats.rx_over_errors++;
16810 + dev->stats.rx_errors++;
16811 atomic64_inc(&priv->extra_stats.rx_bsy);
16813 + gfar_receive(irq, grp_id);
16815 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
16816 gfar_read(®s->rstat));
16818 @@ -3637,8 +3460,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
16820 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
16822 - struct net_device *ndev = priv->ndev;
16823 - struct phy_device *phydev = ndev->phydev;
16824 + struct phy_device *phydev = priv->phydev;
16827 if (!phydev->duplex)
16828 @@ -3678,10 +3500,10 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
16829 static noinline void gfar_update_link_state(struct gfar_private *priv)
16831 struct gfar __iomem *regs = priv->gfargrp[0].regs;
16832 - struct net_device *ndev = priv->ndev;
16833 - struct phy_device *phydev = ndev->phydev;
16834 + struct phy_device *phydev = priv->phydev;
16835 struct gfar_priv_rx_q *rx_queue = NULL;
16837 + struct rxbd8 *bdp;
16839 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
16841 @@ -3738,11 +3560,15 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
16842 /* Turn last free buffer recording on */
16843 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
16844 for (i = 0; i < priv->num_rx_queues; i++) {
16847 rx_queue = priv->rx_queue[i];
16848 - bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
16849 - gfar_write(rx_queue->rfbptr, bdp_dma);
16850 + bdp = rx_queue->cur_rx;
16851 + /* skip to previous bd */
16852 + bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
16853 + rx_queue->rx_bd_base,
16854 + rx_queue->rx_ring_size);
16856 + if (rx_queue->rfbptr)
16857 + gfar_write(rx_queue->rfbptr, (u32)bdp);
16860 priv->tx_actual_en = 1;
16861 diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
16862 index 6e8a9c8..daa1d37 100644
16863 --- a/drivers/net/ethernet/freescale/gianfar.h
16864 +++ b/drivers/net/ethernet/freescale/gianfar.h
16865 @@ -71,6 +71,11 @@ struct ethtool_rx_list {
16866 /* Number of bytes to align the rx bufs to */
16867 #define RXBUF_ALIGNMENT 64
16869 +/* The number of bytes which composes a unit for the purpose of
16870 + * allocating data buffers. ie-for any given MTU, the data buffer
16871 + * will be the next highest multiple of 512 bytes. */
16872 +#define INCREMENTAL_BUFFER_SIZE 512
16874 #define PHY_INIT_TIMEOUT 100000
16876 #define DRV_NAME "gfar-enet"
16877 @@ -87,8 +92,6 @@ extern const char gfar_driver_version[];
16878 #define DEFAULT_TX_RING_SIZE 256
16879 #define DEFAULT_RX_RING_SIZE 256
16881 -#define GFAR_RX_BUFF_ALLOC 16
16883 #define GFAR_RX_MAX_RING_SIZE 256
16884 #define GFAR_TX_MAX_RING_SIZE 256
16886 @@ -100,15 +103,11 @@ extern const char gfar_driver_version[];
16887 #define DEFAULT_RX_LFC_THR 16
16888 #define DEFAULT_LFC_PTVVAL 4
16890 -/* prevent fragmenation by HW in DSA environments */
16891 -#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
16892 -#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
16893 - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
16894 -#define GFAR_RXB_TRUESIZE 2048
16896 +#define DEFAULT_RX_BUFFER_SIZE 1536
16897 #define TX_RING_MOD_MASK(size) (size-1)
16898 #define RX_RING_MOD_MASK(size) (size-1)
16899 -#define GFAR_JUMBO_FRAME_SIZE 9600
16900 +#define JUMBO_BUFFER_SIZE 9728
16901 +#define JUMBO_FRAME_SIZE 9600
16903 #define DEFAULT_FIFO_TX_THR 0x100
16904 #define DEFAULT_FIFO_TX_STARVE 0x40
16905 @@ -341,7 +340,6 @@ extern const char gfar_driver_version[];
16906 #define IEVENT_MAG 0x00000800
16907 #define IEVENT_GRSC 0x00000100
16908 #define IEVENT_RXF0 0x00000080
16909 -#define IEVENT_FGPI 0x00000010
16910 #define IEVENT_FIR 0x00000008
16911 #define IEVENT_FIQ 0x00000004
16912 #define IEVENT_DPE 0x00000002
16913 @@ -374,7 +372,6 @@ extern const char gfar_driver_version[];
16914 #define IMASK_MAG 0x00000800
16915 #define IMASK_GRSC 0x00000100
16916 #define IMASK_RXFEN0 0x00000080
16917 -#define IMASK_FGPI 0x00000010
16918 #define IMASK_FIR 0x00000008
16919 #define IMASK_FIQ 0x00000004
16920 #define IMASK_DPE 0x00000002
16921 @@ -543,9 +540,6 @@ extern const char gfar_driver_version[];
16923 #define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
16925 -#define GFAR_WOL_MAGIC 0x00000001
16926 -#define GFAR_WOL_FILER_UCAST 0x00000002
16931 @@ -646,7 +640,6 @@ struct rmon_mib
16934 struct gfar_extra_stats {
16935 - atomic64_t rx_alloc_err;
16936 atomic64_t rx_large;
16937 atomic64_t rx_short;
16938 atomic64_t rx_nonoctet;
16939 @@ -658,6 +651,7 @@ struct gfar_extra_stats {
16941 atomic64_t tx_babt;
16942 atomic64_t tx_underrun;
16943 + atomic64_t rx_skbmissing;
16944 atomic64_t tx_timeout;
16947 @@ -923,8 +917,6 @@ struct gfar {
16948 #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
16949 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
16950 #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
16951 -#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
16952 -#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000
16954 #if (MAXGROUPS == 2)
16955 #define DEFAULT_MAPPING 0xAA
16956 @@ -1020,42 +1012,34 @@ struct rx_q_stats {
16957 unsigned long rx_dropped;
16960 -struct gfar_rx_buff {
16962 - struct page *page;
16963 - unsigned int page_offset;
16967 * struct gfar_priv_rx_q - per rx queue structure
16968 - * @rx_buff: Array of buffer info metadata structs
16969 + * @rx_skbuff: skb pointers
16970 + * @skb_currx: currently use skb pointer
16971 * @rx_bd_base: First rx buffer descriptor
16972 - * @next_to_use: index of the next buffer to be alloc'd
16973 - * @next_to_clean: index of the next buffer to be cleaned
16974 + * @cur_rx: Next free rx ring entry
16975 * @qindex: index of this queue
16976 - * @ndev: back pointer to net_device
16977 + * @dev: back pointer to the dev structure
16978 * @rx_ring_size: Rx ring size
16979 * @rxcoalescing: enable/disable rx-coalescing
16980 * @rxic: receive interrupt coalescing vlaue
16983 struct gfar_priv_rx_q {
16984 - struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
16985 + struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
16986 + dma_addr_t rx_bd_dma_base;
16987 struct rxbd8 *rx_bd_base;
16988 - struct net_device *ndev;
16989 - struct device *dev;
16990 - u16 rx_ring_size;
16992 - struct gfar_priv_grp *grp;
16993 - u16 next_to_clean;
16995 - u16 next_to_alloc;
16996 - struct sk_buff *skb;
16997 + struct rxbd8 *cur_rx;
16998 + struct net_device *dev;
16999 + struct gfar_priv_grp *grp;
17000 struct rx_q_stats stats;
17001 - u32 __iomem *rfbptr;
17004 + unsigned int rx_ring_size;
17005 + /* RX Coalescing values */
17006 unsigned char rxcoalescing;
17007 unsigned long rxic;
17008 - dma_addr_t rx_bd_dma_base;
17009 + u32 __iomem *rfbptr;
17012 enum gfar_irqinfo_id {
17013 @@ -1125,6 +1109,7 @@ struct gfar_private {
17014 struct device *dev;
17015 struct net_device *ndev;
17016 enum gfar_errata errata;
17017 + unsigned int rx_buffer_size;
17021 @@ -1154,11 +1139,15 @@ struct gfar_private {
17022 phy_interface_t interface;
17023 struct device_node *phy_node;
17024 struct device_node *tbi_node;
17025 + struct phy_device *phydev;
17026 struct mii_bus *mii_bus;
17031 + /* Bitfield update lock */
17032 + spinlock_t bflock;
17034 uint32_t msg_enable;
17036 struct work_struct reset_task;
17037 @@ -1168,6 +1157,8 @@ struct gfar_private {
17041 + /* Wake-on-LAN enabled */
17043 /* Enable priorty based Tx scheduling in Hw */
17045 /* Flow control flags */
17046 @@ -1196,10 +1187,6 @@ struct gfar_private {
17047 u32 __iomem *hash_regs[16];
17050 - /* wake-on-lan settings */
17052 - u16 wol_supported;
17055 unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
17056 unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
17057 @@ -1308,28 +1295,6 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
17058 bdp->lstatus = cpu_to_be32(lstatus);
17061 -static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
17063 - if (rxq->next_to_clean > rxq->next_to_use)
17064 - return rxq->next_to_clean - rxq->next_to_use - 1;
17066 - return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
17069 -static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
17071 - struct rxbd8 *bdp;
17075 - i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
17076 - bdp = &rxq->rx_bd_base[i];
17077 - bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
17078 - bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
17083 irqreturn_t gfar_receive(int irq, void *dev_id);
17084 int startup_gfar(struct net_device *dev);
17085 void stop_gfar(struct net_device *dev);
17086 diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
17087 index 56588f2..fda12fb 100644
17088 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
17089 +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
17090 @@ -61,8 +61,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
17091 struct ethtool_drvinfo *drvinfo);
17093 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
17094 - /* extra stats */
17095 - "rx-allocation-errors",
17096 "rx-large-frame-errors",
17097 "rx-short-frame-errors",
17098 "rx-non-octet-errors",
17099 @@ -74,8 +72,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
17100 "ethernet-bus-error",
17101 "tx-babbling-errors",
17102 "tx-underrun-errors",
17103 + "rx-skb-missing-errors",
17104 "tx-timeout-errors",
17107 "tx-rx-65-127-frames",
17108 "tx-rx-128-255-frames",
17109 @@ -182,6 +180,42 @@ static void gfar_gdrvinfo(struct net_device *dev,
17110 sizeof(drvinfo->version));
17111 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
17112 strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
17113 + drvinfo->regdump_len = 0;
17114 + drvinfo->eedump_len = 0;
17118 +static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
17120 + struct gfar_private *priv = netdev_priv(dev);
17121 + struct phy_device *phydev = priv->phydev;
17123 + if (NULL == phydev)
17126 + return phy_ethtool_sset(phydev, cmd);
17130 +/* Return the current settings in the ethtool_cmd structure */
17131 +static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
17133 + struct gfar_private *priv = netdev_priv(dev);
17134 + struct phy_device *phydev = priv->phydev;
17135 + struct gfar_priv_rx_q *rx_queue = NULL;
17136 + struct gfar_priv_tx_q *tx_queue = NULL;
17138 + if (NULL == phydev)
17140 + tx_queue = priv->tx_queue[0];
17141 + rx_queue = priv->rx_queue[0];
17143 + /* etsec-1.7 and older versions have only one txic
17144 + * and rxic regs although they support multiple queues */
17145 + cmd->maxtxpkt = get_icft_value(tx_queue->txic);
17146 + cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
17148 + return phy_ethtool_gset(phydev, cmd);
17151 /* Return the length of the register structure */
17152 @@ -208,12 +242,10 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
17153 static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
17154 unsigned int usecs)
17156 - struct net_device *ndev = priv->ndev;
17157 - struct phy_device *phydev = ndev->phydev;
17158 unsigned int count;
17160 /* The timer is different, depending on the interface speed */
17161 - switch (phydev->speed) {
17162 + switch (priv->phydev->speed) {
17164 count = GFAR_GBIT_TIME;
17166 @@ -235,12 +267,10 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
17167 static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
17168 unsigned int ticks)
17170 - struct net_device *ndev = priv->ndev;
17171 - struct phy_device *phydev = ndev->phydev;
17172 unsigned int count;
17174 /* The timer is different, depending on the interface speed */
17175 - switch (phydev->speed) {
17176 + switch (priv->phydev->speed) {
17178 count = GFAR_GBIT_TIME;
17180 @@ -274,7 +304,7 @@ static int gfar_gcoalesce(struct net_device *dev,
17181 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
17182 return -EOPNOTSUPP;
17184 - if (!dev->phydev)
17185 + if (NULL == priv->phydev)
17188 rx_queue = priv->rx_queue[0];
17189 @@ -335,7 +365,7 @@ static int gfar_scoalesce(struct net_device *dev,
17190 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
17191 return -EOPNOTSUPP;
17193 - if (!dev->phydev)
17194 + if (NULL == priv->phydev)
17197 /* Check the bounds of the values */
17198 @@ -499,7 +529,7 @@ static int gfar_spauseparam(struct net_device *dev,
17199 struct ethtool_pauseparam *epause)
17201 struct gfar_private *priv = netdev_priv(dev);
17202 - struct phy_device *phydev = dev->phydev;
17203 + struct phy_device *phydev = priv->phydev;
17204 struct gfar __iomem *regs = priv->gfargrp[0].regs;
17205 u32 oldadv, newadv;
17207 @@ -612,49 +642,31 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
17209 struct gfar_private *priv = netdev_priv(dev);
17211 - wol->supported = 0;
17212 - wol->wolopts = 0;
17214 - if (priv->wol_supported & GFAR_WOL_MAGIC)
17215 - wol->supported |= WAKE_MAGIC;
17217 - if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
17218 - wol->supported |= WAKE_UCAST;
17220 - if (priv->wol_opts & GFAR_WOL_MAGIC)
17221 - wol->wolopts |= WAKE_MAGIC;
17223 - if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
17224 - wol->wolopts |= WAKE_UCAST;
17225 + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
17226 + wol->supported = WAKE_MAGIC;
17227 + wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
17229 + wol->supported = wol->wolopts = 0;
17233 static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
17235 struct gfar_private *priv = netdev_priv(dev);
17236 - u16 wol_opts = 0;
17238 + unsigned long flags;
17240 - if (!priv->wol_supported && wol->wolopts)
17241 + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
17242 + wol->wolopts != 0)
17245 - if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
17246 + if (wol->wolopts & ~WAKE_MAGIC)
17249 - if (wol->wolopts & WAKE_MAGIC) {
17250 - wol_opts |= GFAR_WOL_MAGIC;
17252 - if (wol->wolopts & WAKE_UCAST)
17253 - wol_opts |= GFAR_WOL_FILER_UCAST;
17256 - wol_opts &= priv->wol_supported;
17257 - priv->wol_opts = 0;
17259 - err = device_set_wakeup_enable(priv->dev, wol_opts);
17262 + device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
17264 - priv->wol_opts = wol_opts;
17265 + spin_lock_irqsave(&priv->bflock, flags);
17266 + priv->wol_en = !!device_may_wakeup(&dev->dev);
17267 + spin_unlock_irqrestore(&priv->bflock, flags);
17271 @@ -665,14 +677,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
17272 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
17274 if (ethflow & RXH_L2DA) {
17275 - fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
17276 + fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
17277 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
17278 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
17279 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
17280 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
17281 priv->cur_filer_idx = priv->cur_filer_idx - 1;
17283 - fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
17284 + fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
17285 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
17286 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
17287 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
17288 @@ -891,6 +903,27 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
17292 +static int gfar_comp_asc(const void *a, const void *b)
17294 + return memcmp(a, b, 4);
17297 +static int gfar_comp_desc(const void *a, const void *b)
17299 + return -memcmp(a, b, 4);
17302 +static void gfar_swap(void *a, void *b, int size)
17307 + swap(_a[0], _b[0]);
17308 + swap(_a[1], _b[1]);
17309 + swap(_a[2], _b[2]);
17310 + swap(_a[3], _b[3]);
17313 /* Write a mask to filer cache */
17314 static void gfar_set_mask(u32 mask, struct filer_table *tab)
17316 @@ -1240,6 +1273,310 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
17320 +/* Copy size filer entries */
17321 +static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
17322 + struct gfar_filer_entry src[0], s32 size)
17324 + while (size > 0) {
17326 + dst[size].ctrl = src[size].ctrl;
17327 + dst[size].prop = src[size].prop;
17331 +/* Delete the contents of the filer-table between start and end
17332 + * and collapse them
17334 +static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
17338 + if (end > MAX_FILER_CACHE_IDX || end < begin)
17342 + length = end - begin;
17345 + while (end < tab->index) {
17346 + tab->fe[begin].ctrl = tab->fe[end].ctrl;
17347 + tab->fe[begin++].prop = tab->fe[end++].prop;
17350 + /* Fill up with don't cares */
17351 + while (begin < tab->index) {
17352 + tab->fe[begin].ctrl = 0x60;
17353 + tab->fe[begin].prop = 0xFFFFFFFF;
17357 + tab->index -= length;
17361 +/* Make space on the wanted location */
17362 +static int gfar_expand_filer_entries(u32 begin, u32 length,
17363 + struct filer_table *tab)
17365 + if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
17366 + begin > MAX_FILER_CACHE_IDX)
17369 + gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
17370 + tab->index - length + 1);
17372 + tab->index += length;
17376 +static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
17378 + for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
17380 + if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
17381 + (RQFCR_AND | RQFCR_CLE))
17387 +static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
17389 + for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
17391 + if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
17398 +/* Uses hardwares clustering option to reduce
17399 + * the number of filer table entries
17401 +static void gfar_cluster_filer(struct filer_table *tab)
17403 + s32 i = -1, j, iend, jend;
17405 + while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
17407 + while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
17408 + /* The cluster entries self and the previous one
17409 + * (a mask) must be identical!
17411 + if (tab->fe[i].ctrl != tab->fe[j].ctrl)
17413 + if (tab->fe[i].prop != tab->fe[j].prop)
17415 + if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
17417 + if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
17419 + iend = gfar_get_next_cluster_end(i, tab);
17420 + jend = gfar_get_next_cluster_end(j, tab);
17421 + if (jend == -1 || iend == -1)
17424 + /* First we make some free space, where our cluster
17425 + * element should be. Then we copy it there and finally
17426 + * delete in from its old location.
17428 + if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
17432 + gfar_copy_filer_entries(&(tab->fe[iend + 1]),
17433 + &(tab->fe[jend + 1]), jend - j);
17435 + if (gfar_trim_filer_entries(jend - 1,
17436 + jend + (jend - j),
17440 + /* Mask out cluster bit */
17441 + tab->fe[iend].ctrl &= ~(RQFCR_CLE);
17446 +/* Swaps the masked bits of a1<>a2 and b1<>b2 */
17447 +static void gfar_swap_bits(struct gfar_filer_entry *a1,
17448 + struct gfar_filer_entry *a2,
17449 + struct gfar_filer_entry *b1,
17450 + struct gfar_filer_entry *b2, u32 mask)
17453 + temp[0] = a1->ctrl & mask;
17454 + temp[1] = a2->ctrl & mask;
17455 + temp[2] = b1->ctrl & mask;
17456 + temp[3] = b2->ctrl & mask;
17458 + a1->ctrl &= ~mask;
17459 + a2->ctrl &= ~mask;
17460 + b1->ctrl &= ~mask;
17461 + b2->ctrl &= ~mask;
17463 + a1->ctrl |= temp[1];
17464 + a2->ctrl |= temp[0];
17465 + b1->ctrl |= temp[3];
17466 + b2->ctrl |= temp[2];
17469 +/* Generate a list consisting of masks values with their start and
17470 + * end of validity and block as indicator for parts belonging
17471 + * together (glued by ANDs) in mask_table
17473 +static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
17474 + struct filer_table *tab)
17476 + u32 i, and_index = 0, block_index = 1;
17478 + for (i = 0; i < tab->index; i++) {
17480 + /* LSByte of control = 0 sets a mask */
17481 + if (!(tab->fe[i].ctrl & 0xF)) {
17482 + mask_table[and_index].mask = tab->fe[i].prop;
17483 + mask_table[and_index].start = i;
17484 + mask_table[and_index].block = block_index;
17485 + if (and_index >= 1)
17486 + mask_table[and_index - 1].end = i - 1;
17489 + /* cluster starts and ends will be separated because they should
17490 + * hold their position
17492 + if (tab->fe[i].ctrl & RQFCR_CLE)
17494 + /* A not set AND indicates the end of a depended block */
17495 + if (!(tab->fe[i].ctrl & RQFCR_AND))
17499 + mask_table[and_index - 1].end = i - 1;
17501 + return and_index;
17504 +/* Sorts the entries of mask_table by the values of the masks.
17505 + * Important: The 0xFF80 flags of the first and last entry of a
17506 + * block must hold their position (which queue, CLusterEnable, ReJEct,
17509 +static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
17510 + struct filer_table *temp_table, u32 and_index)
17512 + /* Pointer to compare function (_asc or _desc) */
17513 + int (*gfar_comp)(const void *, const void *);
17515 + u32 i, size = 0, start = 0, prev = 1;
17516 + u32 old_first, old_last, new_first, new_last;
17518 + gfar_comp = &gfar_comp_desc;
17520 + for (i = 0; i < and_index; i++) {
17521 + if (prev != mask_table[i].block) {
17522 + old_first = mask_table[start].start + 1;
17523 + old_last = mask_table[i - 1].end;
17524 + sort(mask_table + start, size,
17525 + sizeof(struct gfar_mask_entry),
17526 + gfar_comp, &gfar_swap);
17528 + /* Toggle order for every block. This makes the
17529 + * thing more efficient!
17531 + if (gfar_comp == gfar_comp_desc)
17532 + gfar_comp = &gfar_comp_asc;
17534 + gfar_comp = &gfar_comp_desc;
17536 + new_first = mask_table[start].start + 1;
17537 + new_last = mask_table[i - 1].end;
17539 + gfar_swap_bits(&temp_table->fe[new_first],
17540 + &temp_table->fe[old_first],
17541 + &temp_table->fe[new_last],
17542 + &temp_table->fe[old_last],
17543 + RQFCR_QUEUE | RQFCR_CLE |
17544 + RQFCR_RJE | RQFCR_AND);
17550 + prev = mask_table[i].block;
17554 +/* Reduces the number of masks needed in the filer table to save entries
17555 + * This is done by sorting the masks of a depended block. A depended block is
17556 + * identified by gluing ANDs or CLE. The sorting order toggles after every
17557 + * block. Of course entries in scope of a mask must change their location with
17560 +static int gfar_optimize_filer_masks(struct filer_table *tab)
17562 + struct filer_table *temp_table;
17563 + struct gfar_mask_entry *mask_table;
17565 + u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
17568 + /* We need a copy of the filer table because
17569 + * we want to change its order
17571 + temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
17572 + if (temp_table == NULL)
17575 + mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
17576 + sizeof(struct gfar_mask_entry), GFP_KERNEL);
17578 + if (mask_table == NULL) {
17583 + and_index = gfar_generate_mask_table(mask_table, tab);
17585 + gfar_sort_mask_table(mask_table, temp_table, and_index);
17587 + /* Now we can copy the data from our duplicated filer table to
17588 + * the real one in the order the mask table says
17590 + for (i = 0; i < and_index; i++) {
17591 + size = mask_table[i].end - mask_table[i].start + 1;
17592 + gfar_copy_filer_entries(&(tab->fe[j]),
17593 + &(temp_table->fe[mask_table[i].start]), size);
17597 + /* And finally we just have to check for duplicated masks and drop the
17600 + for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
17601 + if (tab->fe[i].ctrl == 0x80) {
17602 + previous_mask = i++;
17606 + for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
17607 + if (tab->fe[i].ctrl == 0x80) {
17608 + if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
17609 + /* Two identical ones found!
17610 + * So drop the second one!
17612 + gfar_trim_filer_entries(i, i, tab);
17614 + /* Not identical! */
17615 + previous_mask = i;
17619 + kfree(mask_table);
17620 +end: kfree(temp_table);
17624 /* Write the bit-pattern from software's buffer to hardware registers */
17625 static int gfar_write_filer_table(struct gfar_private *priv,
17626 struct filer_table *tab)
17627 @@ -1249,10 +1586,11 @@ static int gfar_write_filer_table(struct gfar_private *priv,
17630 /* Fill regular entries */
17631 - for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
17632 + for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
17634 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
17635 /* Fill the rest with fall-troughs */
17636 - for (; i < MAX_FILER_IDX; i++)
17637 + for (; i < MAX_FILER_IDX - 1; i++)
17638 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
17639 /* Last entry must be default accept
17640 * because that's what people expect
17641 @@ -1286,6 +1624,7 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
17643 struct ethtool_flow_spec_container *j;
17644 struct filer_table *tab;
17648 /* So index is set to zero, too! */
17649 @@ -1310,6 +1649,17 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
17655 + /* Optimizations to save entries */
17656 + gfar_cluster_filer(tab);
17657 + gfar_optimize_filer_masks(tab);
17659 + pr_debug("\tSummary:\n"
17660 + "\tData on hardware: %d\n"
17661 + "\tCompression rate: %d%%\n",
17662 + tab->index, 100 - (100 * tab->index) / i);
17664 /* Write everything to hardware */
17665 ret = gfar_write_filer_table(priv, tab);
17666 if (ret == -EBUSY) {
17667 @@ -1375,14 +1725,13 @@ static int gfar_add_cls(struct gfar_private *priv,
17671 - priv->rx_list.count++;
17672 ret = gfar_process_filer_changes(priv);
17675 + priv->rx_list.count++;
17679 - priv->rx_list.count--;
17680 list_del(&temp->list);
17683 @@ -1535,6 +1884,8 @@ static int gfar_get_ts_info(struct net_device *dev,
17686 const struct ethtool_ops gfar_ethtool_ops = {
17687 + .get_settings = gfar_gsettings,
17688 + .set_settings = gfar_ssettings,
17689 .get_drvinfo = gfar_gdrvinfo,
17690 .get_regs_len = gfar_reglen,
17691 .get_regs = gfar_get_regs,
17692 @@ -1557,6 +1908,4 @@ const struct ethtool_ops gfar_ethtool_ops = {
17693 .set_rxnfc = gfar_set_nfc,
17694 .get_rxnfc = gfar_get_nfc,
17695 .get_ts_info = gfar_get_ts_info,
17696 - .get_link_ksettings = phy_ethtool_get_link_ksettings,
17697 - .set_link_ksettings = phy_ethtool_set_link_ksettings,
17699 diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
17700 index 5779881..8e3cd77 100644
17701 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c
17702 +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
17703 @@ -422,6 +422,19 @@ static struct ptp_clock_info ptp_gianfar_caps = {
17704 .enable = ptp_gianfar_enable,
17707 +/* OF device tree */
17709 +static int get_of_u32(struct device_node *node, char *str, u32 *val)
17712 + const u32 *prop = of_get_property(node, str, &plen);
17714 + if (!prop || plen != sizeof(*prop))
17720 static int gianfar_ptp_probe(struct platform_device *dev)
17722 struct device_node *node = dev->dev.of_node;
17723 @@ -439,28 +452,22 @@ static int gianfar_ptp_probe(struct platform_device *dev)
17725 etsects->caps = ptp_gianfar_caps;
17727 - if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel))
17728 + if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
17729 etsects->cksel = DEFAULT_CKSEL;
17731 - if (of_property_read_u32(node,
17732 - "fsl,tclk-period", &etsects->tclk_period) ||
17733 - of_property_read_u32(node,
17734 - "fsl,tmr-prsc", &etsects->tmr_prsc) ||
17735 - of_property_read_u32(node,
17736 - "fsl,tmr-add", &etsects->tmr_add) ||
17737 - of_property_read_u32(node,
17738 - "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
17739 - of_property_read_u32(node,
17740 - "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
17741 - of_property_read_u32(node,
17742 - "fsl,max-adj", &etsects->caps.max_adj)) {
17743 + if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
17744 + get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
17745 + get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
17746 + get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
17747 + get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
17748 + get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
17749 pr_err("device tree node missing required elements\n");
17753 etsects->irq = platform_get_irq(dev, 0);
17755 - if (etsects->irq < 0) {
17756 + if (etsects->irq == NO_IRQ) {
17757 pr_err("irq not in device tree\n");
17760 @@ -550,7 +557,6 @@ static const struct of_device_id match_table[] = {
17761 { .compatible = "fsl,etsec-ptp" },
17764 -MODULE_DEVICE_TABLE(of, match_table);
17766 static struct platform_driver gianfar_ptp_driver = {
17768 diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
17769 index f76d332..4dd40e0 100644
17770 --- a/drivers/net/ethernet/freescale/ucc_geth.c
17771 +++ b/drivers/net/ethernet/freescale/ucc_geth.c
17772 @@ -40,10 +40,10 @@
17773 #include <asm/uaccess.h>
17774 #include <asm/irq.h>
17775 #include <asm/io.h>
17776 -#include <soc/fsl/qe/immap_qe.h>
17777 -#include <soc/fsl/qe/qe.h>
17778 -#include <soc/fsl/qe/ucc.h>
17779 -#include <soc/fsl/qe/ucc_fast.h>
17780 +#include <asm/immap_qe.h>
17781 +#include <asm/qe.h>
17782 +#include <asm/ucc.h>
17783 +#include <asm/ucc_fast.h>
17784 #include <asm/machdep.h>
17786 #include "ucc_geth.h"
17787 @@ -1384,8 +1384,6 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
17788 value = phy_read(tbiphy, ENET_TBI_MII_CR);
17789 value &= ~0x1000; /* Turn off autonegotiation */
17790 phy_write(tbiphy, ENET_TBI_MII_CR, value);
17792 - put_device(&tbiphy->mdio.dev);
17795 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
17796 @@ -1704,10 +1702,8 @@ static void uec_configure_serdes(struct net_device *dev)
17797 * everything for us? Resetting it takes the link down and requires
17798 * several seconds for it to come back.
17800 - if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
17801 - put_device(&tbiphy->mdio.dev);
17802 + if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
17806 /* Single clk mode, mii mode off(for serdes communication) */
17807 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
17808 @@ -1715,8 +1711,6 @@ static void uec_configure_serdes(struct net_device *dev)
17809 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
17811 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
17813 - put_device(&tbiphy->mdio.dev);
17816 /* Configure the PHY for dev.
17817 @@ -3756,7 +3750,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
17820 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
17821 - pr_err("invalid rx-clock property\n");
17822 + pr_err("invalid rx-clock propperty\n");
17825 ug_info->uf_info.rx_clock = *prop;
17826 @@ -3868,8 +3862,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
17827 dev = alloc_etherdev(sizeof(*ugeth));
17831 - goto err_deregister_fixed_link;
17832 + of_node_put(ug_info->tbi_node);
17833 + of_node_put(ug_info->phy_node);
17837 ugeth = netdev_priv(dev);
17838 @@ -3906,7 +3901,10 @@ static int ucc_geth_probe(struct platform_device* ofdev)
17839 if (netif_msg_probe(ugeth))
17840 pr_err("%s: Cannot register net device, aborting\n",
17842 - goto err_free_netdev;
17843 + free_netdev(dev);
17844 + of_node_put(ug_info->tbi_node);
17845 + of_node_put(ug_info->phy_node);
17849 mac_addr = of_get_mac_address(np);
17850 @@ -3919,29 +3917,16 @@ static int ucc_geth_probe(struct platform_device* ofdev)
17856 - free_netdev(dev);
17857 -err_deregister_fixed_link:
17858 - if (of_phy_is_fixed_link(np))
17859 - of_phy_deregister_fixed_link(np);
17860 - of_node_put(ug_info->tbi_node);
17861 - of_node_put(ug_info->phy_node);
17866 static int ucc_geth_remove(struct platform_device* ofdev)
17868 struct net_device *dev = platform_get_drvdata(ofdev);
17869 struct ucc_geth_private *ugeth = netdev_priv(dev);
17870 - struct device_node *np = ofdev->dev.of_node;
17872 unregister_netdev(dev);
17874 ucc_geth_memclean(ugeth);
17875 - if (of_phy_is_fixed_link(np))
17876 - of_phy_deregister_fixed_link(np);
17877 of_node_put(ugeth->ug_info->tbi_node);
17878 of_node_put(ugeth->ug_info->phy_node);
17880 diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
17881 index 5da19b4..75f3371 100644
17882 --- a/drivers/net/ethernet/freescale/ucc_geth.h
17883 +++ b/drivers/net/ethernet/freescale/ucc_geth.h
17884 @@ -22,11 +22,11 @@
17885 #include <linux/list.h>
17886 #include <linux/if_ether.h>
17888 -#include <soc/fsl/qe/immap_qe.h>
17889 -#include <soc/fsl/qe/qe.h>
17890 +#include <asm/immap_qe.h>
17891 +#include <asm/qe.h>
17893 -#include <soc/fsl/qe/ucc.h>
17894 -#include <soc/fsl/qe/ucc_fast.h>
17895 +#include <asm/ucc.h>
17896 +#include <asm/ucc_fast.h>
17898 #define DRV_DESC "QE UCC Gigabit Ethernet Controller"
17899 #define DRV_NAME "ucc_geth"
17900 diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
17901 index 812a968..cc83350 100644
17902 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
17903 +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
17904 @@ -105,20 +105,23 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
17905 #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
17908 -uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
17909 +uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
17911 struct ucc_geth_private *ugeth = netdev_priv(netdev);
17912 struct phy_device *phydev = ugeth->phydev;
17913 + struct ucc_geth_info *ug_info = ugeth->ug_info;
17918 - return phy_ethtool_ksettings_get(phydev, cmd);
17919 + ecmd->maxtxpkt = 1;
17920 + ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
17922 + return phy_ethtool_gset(phydev, ecmd);
17926 -uec_set_ksettings(struct net_device *netdev,
17927 - const struct ethtool_link_ksettings *cmd)
17928 +uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
17930 struct ucc_geth_private *ugeth = netdev_priv(netdev);
17931 struct phy_device *phydev = ugeth->phydev;
17932 @@ -126,7 +129,7 @@ uec_set_ksettings(struct net_device *netdev,
17936 - return phy_ethtool_ksettings_set(phydev, cmd);
17937 + return phy_ethtool_sset(phydev, ecmd);
17941 @@ -348,6 +351,8 @@ uec_get_drvinfo(struct net_device *netdev,
17942 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
17943 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
17944 strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
17945 + drvinfo->eedump_len = 0;
17946 + drvinfo->regdump_len = uec_get_regs_len(netdev);
17950 @@ -389,6 +394,8 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
17951 #endif /* CONFIG_PM */
17953 static const struct ethtool_ops uec_ethtool_ops = {
17954 + .get_settings = uec_get_settings,
17955 + .set_settings = uec_set_settings,
17956 .get_drvinfo = uec_get_drvinfo,
17957 .get_regs_len = uec_get_regs_len,
17958 .get_regs = uec_get_regs,
17959 @@ -406,8 +413,6 @@ static const struct ethtool_ops uec_ethtool_ops = {
17960 .get_wol = uec_get_wol,
17961 .set_wol = uec_set_wol,
17962 .get_ts_info = ethtool_op_get_ts_info,
17963 - .get_link_ksettings = uec_get_ksettings,
17964 - .set_link_ksettings = uec_set_ksettings,
17967 void uec_set_ethtool_ops(struct net_device *netdev)
17968 diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
17969 index e03b30c..7b8fe86 100644
17970 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c
17971 +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
17972 @@ -271,8 +271,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
17976 - priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
17977 - "little-endian");
17978 + if (of_get_property(pdev->dev.of_node,
17979 + "little-endian", NULL))
17980 + priv->is_little_endian = true;
17982 + priv->is_little_endian = false;
17984 ret = of_mdiobus_register(bus, np);