-diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
-index 2204c57..25e3425 100644
---- a/drivers/net/ethernet/freescale/Kconfig
-+++ b/drivers/net/ethernet/freescale/Kconfig
-@@ -7,10 +7,11 @@ config NET_VENDOR_FREESCALE
- default y
- depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
- M523x || M527x || M5272 || M528x || M520x || M532x || \
-- ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
-- ARCH_LAYERSCAPE
-+ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
- ---help---
-- If you have a network (Ethernet) card belonging to this class, say Y.
-+ If you have a network (Ethernet) card belonging to this class, say Y
-+ and read the Ethernet-HOWTO, available from
-+ <http://www.tldp.org/docs.html#howto>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
-@@ -22,8 +23,8 @@ if NET_VENDOR_FREESCALE
- config FEC
- tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
- depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
-- ARM || ARM64)
-- default y
-+ ARCH_MXC || SOC_IMX28)
-+ default ARCH_MXC || SOC_IMX28 if ARM
- select PHYLIB
- select PTP_1588_CLOCK
- ---help---
-@@ -54,7 +55,6 @@ config FEC_MPC52xx_MDIO
- If compiled as module, it will be called fec_mpc52xx_phy.
-
- source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
--source "drivers/net/ethernet/freescale/fman/Kconfig"
-
- config FSL_PQ_MDIO
- tristate "Freescale PQ MDIO"
-@@ -85,12 +85,12 @@ config UGETH_TX_ON_DEMAND
-
- config GIANFAR
- tristate "Gianfar Ethernet"
-+ depends on FSL_SOC
- select FSL_PQ_MDIO
- select PHYLIB
- select CRC32
- ---help---
- This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
-- and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
-- on the 8540.
-+ and MPC86xx family of chips, and the FEC on the 8540.
-
- endif # NET_VENDOR_FREESCALE
-diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
-index 7f022dd..71debd1 100644
---- a/drivers/net/ethernet/freescale/Makefile
-+++ b/drivers/net/ethernet/freescale/Makefile
-@@ -3,10 +3,7 @@
- #
-
- obj-$(CONFIG_FEC) += fec.o
--fec-objs :=fec_main.o fec_fixup.o fec_ptp.o
--CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
--CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
--
-+fec-objs :=fec_main.o fec_ptp.o
- obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
- ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
- obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
-@@ -20,5 +17,3 @@ gianfar_driver-objs := gianfar.o \
- gianfar_ethtool.o
- obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
- ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
--
--obj-$(CONFIG_FSL_FMAN) += fman/
-diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
-index 1d7b3cc..ecdc711 100644
---- a/drivers/net/ethernet/freescale/fec.h
-+++ b/drivers/net/ethernet/freescale/fec.h
-@@ -20,8 +20,8 @@
- #include <linux/timecounter.h>
-
- #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
-- defined(CONFIG_ARM64)
-+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
-+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
- /*
- * Just figures, Motorola would have to change the offsets for
- * registers in the same peripheral device on different models
-@@ -192,45 +192,28 @@
-
- /*
- * Define the buffer descriptor structure.
-- *
-- * Evidently, ARM SoCs have the FEC block generated in a
-- * little endian mode so adjust endianness accordingly.
- */
--#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
--#define fec32_to_cpu le32_to_cpu
--#define fec16_to_cpu le16_to_cpu
--#define cpu_to_fec32 cpu_to_le32
--#define cpu_to_fec16 cpu_to_le16
--#define __fec32 __le32
--#define __fec16 __le16
--
-+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
- struct bufdesc {
-- __fec16 cbd_datlen; /* Data length */
-- __fec16 cbd_sc; /* Control and status info */
-- __fec32 cbd_bufaddr; /* Buffer address */
-+ unsigned short cbd_datlen; /* Data length */
-+ unsigned short cbd_sc; /* Control and status info */
-+ unsigned long cbd_bufaddr; /* Buffer address */
- };
- #else
--#define fec32_to_cpu be32_to_cpu
--#define fec16_to_cpu be16_to_cpu
--#define cpu_to_fec32 cpu_to_be32
--#define cpu_to_fec16 cpu_to_be16
--#define __fec32 __be32
--#define __fec16 __be16
--
- struct bufdesc {
-- __fec16 cbd_sc; /* Control and status info */
-- __fec16 cbd_datlen; /* Data length */
-- __fec32 cbd_bufaddr; /* Buffer address */
-+ unsigned short cbd_sc; /* Control and status info */
-+ unsigned short cbd_datlen; /* Data length */
-+ unsigned long cbd_bufaddr; /* Buffer address */
- };
- #endif
-
- struct bufdesc_ex {
- struct bufdesc desc;
-- __fec32 cbd_esc;
-- __fec32 cbd_prot;
-- __fec32 cbd_bdu;
-- __fec32 ts;
-- __fec16 res0[4];
-+ unsigned long cbd_esc;
-+ unsigned long cbd_prot;
-+ unsigned long cbd_bdu;
-+ unsigned long ts;
-+ unsigned short res0[4];
- };
-
- /*
-@@ -294,7 +277,7 @@ struct bufdesc_ex {
-
-
- /* This device has up to three irqs on some platforms */
--#define FEC_IRQ_NUM 4
-+#define FEC_IRQ_NUM 3
-
- /* Maximum number of queues supported
- * ENET with AVB IP can support up to 3 independent tx queues and rx queues.
-@@ -312,6 +295,12 @@ struct bufdesc_ex {
- #define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
- (((X) == 2) ? \
- FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
-+#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
-+ (((X) == 2) ? \
-+ FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
-+#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
-+ (((X) == 2) ? \
-+ FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
-
- #define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
-
-@@ -379,7 +368,6 @@ struct bufdesc_ex {
- #define FEC_ENET_TS_TIMER ((uint)0x00008000)
-
- #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
--#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
- #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
-
- #define FEC_ENET_ETHEREN ((uint)0x00000002)
-@@ -448,32 +436,12 @@ struct bufdesc_ex {
- #define FEC_QUIRK_SINGLE_MDIO (1 << 11)
- /* Controller supports RACC register */
- #define FEC_QUIRK_HAS_RACC (1 << 12)
--/* Controller supports interrupt coalesc */
--#define FEC_QUIRK_HAS_COALESCE (1 << 13)
--/* Interrupt doesn't wake CPU from deep idle */
--#define FEC_QUIRK_ERR006687 (1 << 14)
- /*
- * i.MX6Q/DL ENET cannot wake up system in wait mode because ENET tx & rx
- * interrupt signal don't connect to GPC. So use pm qos to avoid cpu enter
- * to wait mode.
- */
--#define FEC_QUIRK_BUG_WAITMODE (1 << 15)
--
--/* PHY fixup flag define */
--#define FEC_QUIRK_AR8031_FIXUP (1 << 0)
--
--struct bufdesc_prop {
-- int qid;
-- /* Address of Rx and Tx buffers */
-- struct bufdesc *base;
-- struct bufdesc *last;
-- struct bufdesc *cur;
-- void __iomem *reg_desc_active;
-- dma_addr_t dma;
-- unsigned short ring_size;
-- unsigned char dsize;
-- unsigned char dsize_log2;
--};
-+#define FEC_QUIRK_BUG_WAITMODE (1 << 13)
-
- struct fec_enet_stop_mode {
- struct regmap *gpr;
-@@ -482,21 +450,32 @@ struct fec_enet_stop_mode {
- };
-
- struct fec_enet_priv_tx_q {
-- struct bufdesc_prop bd;
-+ int index;
- unsigned char *tx_bounce[TX_RING_SIZE];
- struct sk_buff *tx_skbuff[TX_RING_SIZE];
-
-+ dma_addr_t bd_dma;
-+ struct bufdesc *tx_bd_base;
-+ uint tx_ring_size;
-+
- unsigned short tx_stop_threshold;
- unsigned short tx_wake_threshold;
-
-+ struct bufdesc *cur_tx;
- struct bufdesc *dirty_tx;
- char *tso_hdrs;
- dma_addr_t tso_hdrs_dma;
- };
-
- struct fec_enet_priv_rx_q {
-- struct bufdesc_prop bd;
-+ int index;
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
-+
-+ dma_addr_t bd_dma;
-+ struct bufdesc *rx_bd_base;
-+ uint rx_ring_size;
-+
-+ struct bufdesc *cur_rx;
- };
-
- /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
-@@ -536,20 +515,22 @@ struct fec_enet_private {
- unsigned long work_ts;
- unsigned long work_mdio;
-
-+ unsigned short bufdesc_size;
-+
- struct platform_device *pdev;
-
- int dev_id;
-
- /* Phylib and MDIO interface */
- struct mii_bus *mii_bus;
-+ struct phy_device *phy_dev;
- int mii_timeout;
- int mii_bus_share;
-- bool active_in_suspend;
-+ bool miibus_up_failed;
- uint phy_speed;
- phy_interface_t phy_interface;
- struct device_node *phy_node;
- int link;
-- bool fixed_link;
- int full_duplex;
- int speed;
- struct completion mdio_done;
-@@ -559,7 +540,8 @@ struct fec_enet_private {
- int wol_flag;
- int wake_irq;
- u32 quirks;
-- u32 fixups;
-+ int phy_reset_gpio;
-+ int phy_reset_duration;
-
- struct napi_struct napi;
- int csum_flags;
-@@ -602,19 +584,14 @@ struct fec_enet_private {
- int pps_enable;
- unsigned int next_counter;
-
-- u64 ethtool_stats[0];
--
- struct fec_enet_stop_mode gpr;
- };
-
- void fec_ptp_init(struct platform_device *pdev);
--void fec_ptp_stop(struct platform_device *pdev);
- void fec_ptp_start_cyclecounter(struct net_device *ndev);
- int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
- int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
- uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
--void fec_enet_register_fixup(struct net_device *ndev);
--int of_fec_enet_parse_fixup(struct device_node *np);
-
- /****************************************************************************/
- #endif /* FEC_H */
-diff --git a/drivers/net/ethernet/freescale/fec_fixup.c b/drivers/net/ethernet/freescale/fec_fixup.c
-deleted file mode 100644
-index 5a8497c..0000000
---- a/drivers/net/ethernet/freescale/fec_fixup.c
-+++ /dev/null
-@@ -1,74 +0,0 @@
--/*
-- * Copyright 2017 NXP
-- *
-- * This program is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License
-- * as published by the Free Software Foundation; either version 2
-- * of the License, or (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- */
--
--#include <linux/netdevice.h>
--#include <linux/phy.h>
--#include "fec.h"
--
--#define PHY_ID_AR8031 0x004dd074
--
--static int ar8031_phy_fixup(struct phy_device *dev)
--{
-- u16 val;
--
-- /* Set RGMII IO voltage to 1.8V */
-- phy_write(dev, 0x1d, 0x1f);
-- phy_write(dev, 0x1e, 0x8);
--
-- /* Disable phy AR8031 SmartEEE function */
-- phy_write(dev, 0xd, 0x3);
-- phy_write(dev, 0xe, 0x805d);
-- phy_write(dev, 0xd, 0x4003);
-- val = phy_read(dev, 0xe);
-- val &= ~(0x1 << 8);
-- phy_write(dev, 0xe, val);
--
-- /* Introduce tx clock delay */
-- phy_write(dev, 0x1d, 0x5);
-- phy_write(dev, 0x1e, 0x100);
--
-- return 0;
--}
--
--void fec_enet_register_fixup(struct net_device *ndev)
--{
-- struct fec_enet_private *fep = netdev_priv(ndev);
-- static int registered = 0;
-- int err;
--
-- if (!IS_BUILTIN(CONFIG_PHYLIB))
-- return;
--
-- if (fep->fixups & FEC_QUIRK_AR8031_FIXUP) {
-- static int ar8031_registered = 0;
--
-- if (ar8031_registered)
-- return;
-- err = phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
-- ar8031_phy_fixup);
-- if (err)
-- netdev_info(ndev, "Cannot register PHY board fixup\n");
-- registered = 1;
-- }
--}
--
--int of_fec_enet_parse_fixup(struct device_node *np)
--{
-- int fixups = 0;
--
-- if (of_get_property(np, "fsl,ar8031-phy-fixup", NULL))
-- fixups |= FEC_QUIRK_AR8031_FIXUP;
--
-- return fixups;
--}
-diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
-index 41a31f2..15c06df 100644
---- a/drivers/net/ethernet/freescale/fec_main.c
-+++ b/drivers/net/ethernet/freescale/fec_main.c
-@@ -19,8 +19,6 @@
- * Copyright (c) 2004-2006 Macq Electronique SA.
- *
- * Copyright (C) 2010-2014 Freescale Semiconductor, Inc.
-- *
-- * Copyright 2017 NXP
- */
-
- #include <linux/module.h>
-@@ -48,9 +46,7 @@
- #include <linux/io.h>
- #include <linux/irq.h>
- #include <linux/clk.h>
--#include <linux/clk/clk-conf.h>
- #include <linux/platform_device.h>
--#include <linux/mdio.h>
- #include <linux/phy.h>
- #include <linux/fec.h>
- #include <linux/of.h>
-@@ -68,12 +64,12 @@
- #include <linux/regmap.h>
-
- #include <asm/cacheflush.h>
--#include <soc/imx/cpuidle.h>
-
- #include "fec.h"
-
- static void set_multicast_list(struct net_device *ndev);
- static void fec_enet_itr_coal_init(struct net_device *ndev);
-+static void fec_reset_phy(struct platform_device *pdev);
-
- #define DRIVER_NAME "fec"
-
-@@ -87,7 +83,6 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {1, 1, 1, 1, 2, 2, 2, 2};
- #define FEC_ENET_RAEM_V 0x8
- #define FEC_ENET_RAFL_V 0x8
- #define FEC_ENET_OPD_V 0xFFF0
--#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
-
- static struct platform_device_id fec_devtype[] = {
- {
-@@ -96,10 +91,10 @@ static struct platform_device_id fec_devtype[] = {
- .driver_data = 0,
- }, {
- .name = "imx25-fec",
-- .driver_data = FEC_QUIRK_USE_GASKET,
-+ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
- }, {
- .name = "imx27-fec",
-- .driver_data = 0,
-+ .driver_data = FEC_QUIRK_HAS_RACC,
- }, {
- .name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
-@@ -119,20 +114,12 @@ static struct platform_device_id fec_devtype[] = {
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
-- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
-+ FEC_QUIRK_HAS_RACC,
- }, {
- .name = "imx6ul-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
-- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
-- }, {
-- .name = "imx8qm-fec",
-- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
-- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
-- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
-- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
-- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
-+ FEC_QUIRK_HAS_VLAN,
- }, {
- /* sentinel */
- }
-@@ -147,7 +134,6 @@ enum imx_fec_type {
- MVF600_FEC,
- IMX6SX_FEC,
- IMX6UL_FEC,
-- IMX8QM_FEC,
- };
-
- static const struct of_device_id fec_dt_ids[] = {
-@@ -158,7 +144,6 @@ static const struct of_device_id fec_dt_ids[] = {
- { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
- { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
- { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
-- { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
- { /* sentinel */ }
- };
- MODULE_DEVICE_TABLE(of, fec_dt_ids);
-@@ -196,7 +181,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
- /* FEC receive acceleration */
- #define FEC_RACC_IPDIS (1 << 1)
- #define FEC_RACC_PRODIS (1 << 2)
--#define FEC_RACC_SHIFT16 BIT(7)
- #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
-
- /*
-@@ -205,8 +189,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
- * account when setting it.
- */
- #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
-- defined(CONFIG_ARM64)
-+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
- #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
- #else
- #define OPT_FRAME_SIZE 0
-@@ -244,38 +227,86 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
-
- #define IS_TSO_HEADER(txq, addr) \
- ((addr >= txq->tso_hdrs_dma) && \
-- (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
-+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
-
- static int mii_cnt;
-
--static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
-- struct bufdesc_prop *bd)
--{
-- return (bdp >= bd->last) ? bd->base
-- : (struct bufdesc *)(((void *)bdp) + bd->dsize);
--}
-+static inline
-+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
-+ struct fec_enet_private *fep,
-+ int queue_id)
-+{
-+ struct bufdesc *new_bd = bdp + 1;
-+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
-+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
-+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
-+ struct bufdesc_ex *ex_base;
-+ struct bufdesc *base;
-+ int ring_size;
-+
-+ if (bdp >= txq->tx_bd_base) {
-+ base = txq->tx_bd_base;
-+ ring_size = txq->tx_ring_size;
-+ ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
-+ } else {
-+ base = rxq->rx_bd_base;
-+ ring_size = rxq->rx_ring_size;
-+ ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
-+ }
-
--static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
-- struct bufdesc_prop *bd)
--{
-- return (bdp <= bd->base) ? bd->last
-- : (struct bufdesc *)(((void *)bdp) - bd->dsize);
-+ if (fep->bufdesc_ex)
-+ return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
-+ ex_base : ex_new_bd);
-+ else
-+ return (new_bd >= (base + ring_size)) ?
-+ base : new_bd;
-+}
-+
-+static inline
-+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
-+ struct fec_enet_private *fep,
-+ int queue_id)
-+{
-+ struct bufdesc *new_bd = bdp - 1;
-+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
-+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
-+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
-+ struct bufdesc_ex *ex_base;
-+ struct bufdesc *base;
-+ int ring_size;
-+
-+ if (bdp >= txq->tx_bd_base) {
-+ base = txq->tx_bd_base;
-+ ring_size = txq->tx_ring_size;
-+ ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
-+ } else {
-+ base = rxq->rx_bd_base;
-+ ring_size = rxq->rx_ring_size;
-+ ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
-+ }
-+
-+ if (fep->bufdesc_ex)
-+ return (struct bufdesc *)((ex_new_bd < ex_base) ?
-+ (ex_new_bd + ring_size) : ex_new_bd);
-+ else
-+ return (new_bd < base) ? (new_bd + ring_size) : new_bd;
- }
-
--static int fec_enet_get_bd_index(struct bufdesc *bdp,
-- struct bufdesc_prop *bd)
-+static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
-+ struct fec_enet_private *fep)
- {
-- return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
-+ return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
- }
-
--static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
-+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
-+ struct fec_enet_priv_tx_q *txq)
- {
- int entries;
-
-- entries = (((const char *)txq->dirty_tx -
-- (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
-+ entries = ((const char *)txq->dirty_tx -
-+ (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
-
-- return entries >= 0 ? entries : entries + txq->bd.ring_size;
-+ return entries >= 0 ? entries : entries + txq->tx_ring_size;
- }
-
- static void swap_buffer(void *bufaddr, int len)
-@@ -308,20 +339,18 @@ static void fec_dump(struct net_device *ndev)
- pr_info("Nr SC addr len SKB\n");
-
- txq = fep->tx_queue[0];
-- bdp = txq->bd.base;
-+ bdp = txq->tx_bd_base;
-
- do {
-- pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
-+ pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
- index,
-- bdp == txq->bd.cur ? 'S' : ' ',
-+ bdp == txq->cur_tx ? 'S' : ' ',
- bdp == txq->dirty_tx ? 'H' : ' ',
-- fec16_to_cpu(bdp->cbd_sc),
-- fec32_to_cpu(bdp->cbd_bufaddr),
-- fec16_to_cpu(bdp->cbd_datlen),
-+ bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
- txq->tx_skbuff[index]);
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, 0);
- index++;
-- } while (bdp != txq->bd.base);
-+ } while (bdp != txq->tx_bd_base);
- }
-
- static inline bool is_ipv4_pkt(struct sk_buff *skb)
-@@ -352,9 +381,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
- struct net_device *ndev)
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
-- struct bufdesc *bdp = txq->bd.cur;
-+ struct bufdesc *bdp = txq->cur_tx;
- struct bufdesc_ex *ebdp;
- int nr_frags = skb_shinfo(skb)->nr_frags;
-+ unsigned short queue = skb_get_queue_mapping(skb);
- int frag, frag_len;
- unsigned short status;
- unsigned int estatus = 0;
-@@ -366,10 +396,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
-
- for (frag = 0; frag < nr_frags; frag++) {
- this_frag = &skb_shinfo(skb)->frags[frag];
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
- ebdp = (struct bufdesc_ex *)bdp;
-
-- status = fec16_to_cpu(bdp->cbd_sc);
-+ status = bdp->cbd_sc;
- status &= ~BD_ENET_TX_STATS;
- status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- frag_len = skb_shinfo(skb)->frags[frag].size;
-@@ -387,16 +417,16 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
-
- if (fep->bufdesc_ex) {
- if (fep->quirks & FEC_QUIRK_HAS_AVB)
-- estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
-+ estatus |= FEC_TX_BD_FTYPE(queue);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
- ebdp->cbd_bdu = 0;
-- ebdp->cbd_esc = cpu_to_fec32(estatus);
-+ ebdp->cbd_esc = estatus;
- }
-
- bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
-
-- index = fec_enet_get_bd_index(bdp, &txq->bd);
-+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
- if (((unsigned long) bufaddr) & fep->tx_align ||
- fep->quirks & FEC_QUIRK_SWAP_FRAME) {
- memcpy(txq->tx_bounce[index], bufaddr, frag_len);
-@@ -409,27 +439,24 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
- addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, addr)) {
-+ dev_kfree_skb_any(skb);
- if (net_ratelimit())
- netdev_err(ndev, "Tx DMA memory map failed\n");
- goto dma_mapping_error;
- }
-
-- bdp->cbd_bufaddr = cpu_to_fec32(addr);
-- bdp->cbd_datlen = cpu_to_fec16(frag_len);
-- /* Make sure the updates to rest of the descriptor are
-- * performed before transferring ownership.
-- */
-- wmb();
-- bdp->cbd_sc = cpu_to_fec16(status);
-+ bdp->cbd_bufaddr = addr;
-+ bdp->cbd_datlen = frag_len;
-+ bdp->cbd_sc = status;
- }
-
- return bdp;
- dma_mapping_error:
-- bdp = txq->bd.cur;
-+ bdp = txq->cur_tx;
- for (i = 0; i < frag; i++) {
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-- dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
-- fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
-+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-+ bdp->cbd_datlen, DMA_TO_DEVICE);
- }
- return ERR_PTR(-ENOMEM);
- }
-@@ -444,11 +471,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
- dma_addr_t addr;
- unsigned short status;
- unsigned short buflen;
-+ unsigned short queue;
- unsigned int estatus = 0;
- unsigned int index;
- int entries_free;
-
-- entries_free = fec_enet_get_free_txdesc_num(txq);
-+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
- if (entries_free < MAX_SKB_FRAGS + 1) {
- dev_kfree_skb_any(skb);
- if (net_ratelimit())
-@@ -463,16 +491,17 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
- }
-
- /* Fill in a Tx ring entry */
-- bdp = txq->bd.cur;
-+ bdp = txq->cur_tx;
- last_bdp = bdp;
-- status = fec16_to_cpu(bdp->cbd_sc);
-+ status = bdp->cbd_sc;
- status &= ~BD_ENET_TX_STATS;
-
- /* Set buffer length and buffer pointer */
- bufaddr = skb->data;
- buflen = skb_headlen(skb);
-
-- index = fec_enet_get_bd_index(bdp, &txq->bd);
-+ queue = skb_get_queue_mapping(skb);
-+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
- if (((unsigned long) bufaddr) & fep->tx_align ||
- fep->quirks & FEC_QUIRK_SWAP_FRAME) {
- memcpy(txq->tx_bounce[index], skb->data, buflen);
-@@ -493,12 +522,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
-
- if (nr_frags) {
- last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
-- if (IS_ERR(last_bdp)) {
-- dma_unmap_single(&fep->pdev->dev, addr,
-- buflen, DMA_TO_DEVICE);
-- dev_kfree_skb_any(skb);
-+ if (IS_ERR(last_bdp))
- return NETDEV_TX_OK;
-- }
- } else {
- status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
- if (fep->bufdesc_ex) {
-@@ -508,8 +533,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
- estatus |= BD_ENET_TX_TS;
- }
- }
-- bdp->cbd_bufaddr = cpu_to_fec32(addr);
-- bdp->cbd_datlen = cpu_to_fec16(buflen);
-
- if (fep->bufdesc_ex) {
-
-@@ -520,43 +543,41 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
- if (fep->quirks & FEC_QUIRK_HAS_AVB)
-- estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
-+ estatus |= FEC_TX_BD_FTYPE(queue);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
-
- ebdp->cbd_bdu = 0;
-- ebdp->cbd_esc = cpu_to_fec32(estatus);
-+ ebdp->cbd_esc = estatus;
- }
-
-- index = fec_enet_get_bd_index(last_bdp, &txq->bd);
-+ index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
- /* Save skb pointer */
- txq->tx_skbuff[index] = skb;
-
-- /* Make sure the updates to rest of the descriptor are performed before
-- * transferring ownership.
-- */
-- wmb();
-+ bdp->cbd_datlen = buflen;
-+ bdp->cbd_bufaddr = addr;
-
- /* Send it on its way. Tell FEC it's ready, interrupt when done,
- * it's the last BD of the frame, and to put the CRC on the end.
- */
- status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
-- bdp->cbd_sc = cpu_to_fec16(status);
-+ bdp->cbd_sc = status;
-
- /* If this was the last BD in the ring, start at the beginning again. */
-- bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
-+ bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
-
- skb_tx_timestamp(skb);
-
- /* Make sure the update to bdp and tx_skbuff are performed before
-- * txq->bd.cur.
-+ * cur_tx.
- */
- wmb();
-- txq->bd.cur = bdp;
-+ txq->cur_tx = bdp;
-
- /* Trigger transmission start */
-- writel(0, txq->bd.reg_desc_active);
-+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
-
- return 0;
- }
-@@ -569,11 +590,12 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
-+ unsigned short queue = skb_get_queue_mapping(skb);
- unsigned short status;
- unsigned int estatus = 0;
- dma_addr_t addr;
-
-- status = fec16_to_cpu(bdp->cbd_sc);
-+ status = bdp->cbd_sc;
- status &= ~BD_ENET_TX_STATS;
-
- status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-@@ -595,16 +617,16 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
- return NETDEV_TX_BUSY;
- }
-
-- bdp->cbd_datlen = cpu_to_fec16(size);
-- bdp->cbd_bufaddr = cpu_to_fec32(addr);
-+ bdp->cbd_datlen = size;
-+ bdp->cbd_bufaddr = addr;
-
- if (fep->bufdesc_ex) {
- if (fep->quirks & FEC_QUIRK_HAS_AVB)
-- estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
-+ estatus |= FEC_TX_BD_FTYPE(queue);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
- ebdp->cbd_bdu = 0;
-- ebdp->cbd_esc = cpu_to_fec32(estatus);
-+ ebdp->cbd_esc = estatus;
- }
-
- /* Handle the last BD specially */
-@@ -613,10 +635,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
- if (is_last) {
- status |= BD_ENET_TX_INTR;
- if (fep->bufdesc_ex)
-- ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
-+ ebdp->cbd_esc |= BD_ENET_TX_INT;
- }
-
-- bdp->cbd_sc = cpu_to_fec16(status);
-+ bdp->cbd_sc = status;
-
- return 0;
- }
-@@ -629,12 +651,13 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
- struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
-+ unsigned short queue = skb_get_queue_mapping(skb);
- void *bufaddr;
- unsigned long dmabuf;
- unsigned short status;
- unsigned int estatus = 0;
-
-- status = fec16_to_cpu(bdp->cbd_sc);
-+ status = bdp->cbd_sc;
- status &= ~BD_ENET_TX_STATS;
- status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-
-@@ -658,19 +681,19 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
- }
- }
-
-- bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
-- bdp->cbd_datlen = cpu_to_fec16(hdr_len);
-+ bdp->cbd_bufaddr = dmabuf;
-+ bdp->cbd_datlen = hdr_len;
-
- if (fep->bufdesc_ex) {
- if (fep->quirks & FEC_QUIRK_HAS_AVB)
-- estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
-+ estatus |= FEC_TX_BD_FTYPE(queue);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
- ebdp->cbd_bdu = 0;
-- ebdp->cbd_esc = cpu_to_fec32(estatus);
-+ ebdp->cbd_esc = estatus;
- }
-
-- bdp->cbd_sc = cpu_to_fec16(status);
-+ bdp->cbd_sc = status;
-
- return 0;
- }
-@@ -682,12 +705,13 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
- struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- int total_len, data_left;
-- struct bufdesc *bdp = txq->bd.cur;
-+ struct bufdesc *bdp = txq->cur_tx;
-+ unsigned short queue = skb_get_queue_mapping(skb);
- struct tso_t tso;
- unsigned int index = 0;
- int ret;
-
-- if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
-+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
- dev_kfree_skb_any(skb);
- if (net_ratelimit())
- netdev_err(ndev, "NOT enough BD for TSO!\n");
-@@ -707,7 +731,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
- while (total_len > 0) {
- char *hdr;
-
-- index = fec_enet_get_bd_index(bdp, &txq->bd);
-+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
- data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
- total_len -= data_left;
-
-@@ -722,8 +746,9 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
- int size;
-
- size = min_t(int, tso.size, data_left);
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-- index = fec_enet_get_bd_index(bdp, &txq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
-+ index = fec_enet_get_bd_index(txq->tx_bd_base,
-+ bdp, fep);
- ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
- bdp, index,
- tso.data, size,
-@@ -736,22 +761,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
- tso_build_data(skb, &tso, size);
- }
-
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
- }
-
- /* Save skb pointer */
- txq->tx_skbuff[index] = skb;
-
- skb_tx_timestamp(skb);
-- txq->bd.cur = bdp;
-+ txq->cur_tx = bdp;
-
- /* Trigger transmission start */
- if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
-- !readl(txq->bd.reg_desc_active) ||
-- !readl(txq->bd.reg_desc_active) ||
-- !readl(txq->bd.reg_desc_active) ||
-- !readl(txq->bd.reg_desc_active))
-- writel(0, txq->bd.reg_desc_active);
-+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
-+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
-
- return 0;
-
-@@ -781,7 +806,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
- if (ret)
- return ret;
-
-- entries_free = fec_enet_get_free_txdesc_num(txq);
-+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
- if (entries_free <= txq->tx_stop_threshold)
- netif_tx_stop_queue(nq);
-
-@@ -802,45 +827,45 @@ static void fec_enet_bd_init(struct net_device *dev)
- for (q = 0; q < fep->num_rx_queues; q++) {
- /* Initialize the receive buffer descriptors. */
- rxq = fep->rx_queue[q];
-- bdp = rxq->bd.base;
-+ bdp = rxq->rx_bd_base;
-
-- for (i = 0; i < rxq->bd.ring_size; i++) {
-+ for (i = 0; i < rxq->rx_ring_size; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- if (bdp->cbd_bufaddr)
-- bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
-+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
- else
-- bdp->cbd_sc = cpu_to_fec16(0);
-- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
-+ bdp->cbd_sc = 0;
-+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
- }
-
- /* Set the last buffer to wrap */
-- bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
-- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
-+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
-+ bdp->cbd_sc |= BD_SC_WRAP;
-
-- rxq->bd.cur = rxq->bd.base;
-+ rxq->cur_rx = rxq->rx_bd_base;
- }
-
- for (q = 0; q < fep->num_tx_queues; q++) {
- /* ...and the same for transmit */
- txq = fep->tx_queue[q];
-- bdp = txq->bd.base;
-- txq->bd.cur = bdp;
-+ bdp = txq->tx_bd_base;
-+ txq->cur_tx = bdp;
-
-- for (i = 0; i < txq->bd.ring_size; i++) {
-+ for (i = 0; i < txq->tx_ring_size; i++) {
- /* Initialize the BD for every fragment in the page. */
-- bdp->cbd_sc = cpu_to_fec16(0);
-+ bdp->cbd_sc = 0;
- if (txq->tx_skbuff[i]) {
- dev_kfree_skb_any(txq->tx_skbuff[i]);
- txq->tx_skbuff[i] = NULL;
- }
-- bdp->cbd_bufaddr = cpu_to_fec32(0);
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-+ bdp->cbd_bufaddr = 0;
-+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
- }
-
- /* Set the last buffer to wrap */
-- bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
-- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
-+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
-+ bdp->cbd_sc |= BD_SC_WRAP;
- txq->dirty_tx = bdp;
- }
- }
-@@ -851,7 +876,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
- int i;
-
- for (i = 0; i < fep->num_rx_queues; i++)
-- writel(0, fep->rx_queue[i]->bd.reg_desc_active);
-+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
- }
-
- static void fec_enet_enable_ring(struct net_device *ndev)
-@@ -863,7 +888,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
-
- for (i = 0; i < fep->num_rx_queues; i++) {
- rxq = fep->rx_queue[i];
-- writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
-+ writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
-
- /* enable DMA1/2 */
-@@ -874,7 +899,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
-
- for (i = 0; i < fep->num_tx_queues; i++) {
- txq = fep->tx_queue[i];
-- writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
-+ writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
-
- /* enable DMA1/2 */
- if (i)
-@@ -892,7 +917,7 @@ static void fec_enet_reset_skb(struct net_device *ndev)
- for (i = 0; i < fep->num_tx_queues; i++) {
- txq = fep->tx_queue[i];
-
-- for (j = 0; j < txq->bd.ring_size; j++) {
-+ for (j = 0; j < txq->tx_ring_size; j++) {
- if (txq->tx_skbuff[j]) {
- dev_kfree_skb_any(txq->tx_skbuff[j]);
- txq->tx_skbuff[j] = NULL;
-@@ -930,11 +955,11 @@ fec_restart(struct net_device *ndev)
- * enet-mac reset will reset mac address registers too,
- * so need to reconfigure it.
- */
-- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-- writel((__force u32)cpu_to_be32(temp_mac[0]),
-- fep->hwp + FEC_ADDR_LOW);
-- writel((__force u32)cpu_to_be32(temp_mac[1]),
-- fep->hwp + FEC_ADDR_HIGH);
-+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
-+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-+ }
-
- /* Clear any outstanding interrupt. */
- writel(0xffffffff, fep->hwp + FEC_IEVENT);
-@@ -961,16 +986,13 @@ fec_restart(struct net_device *ndev)
-
- #if !defined(CONFIG_M5272)
- if (fep->quirks & FEC_QUIRK_HAS_RACC) {
-+ /* set RX checksum */
- val = readl(fep->hwp + FEC_RACC);
-- /* align IP header */
-- val |= FEC_RACC_SHIFT16;
- if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
-- /* set RX checksum */
- val |= FEC_RACC_OPTIONS;
- else
- val &= ~FEC_RACC_OPTIONS;
- writel(val, fep->hwp + FEC_RACC);
-- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
- }
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
- #endif
-@@ -995,10 +1017,10 @@ fec_restart(struct net_device *ndev)
- rcntl &= ~(1 << 8);
-
- /* 1G, 100M or 10M */
-- if (ndev->phydev) {
-- if (ndev->phydev->speed == SPEED_1000)
-+ if (fep->phy_dev) {
-+ if (fep->phy_dev->speed == SPEED_1000)
- ecntl |= (1 << 5);
-- else if (ndev->phydev->speed == SPEED_100)
-+ else if (fep->phy_dev->speed == SPEED_100)
- rcntl &= ~(1 << 9);
- else
- rcntl |= (1 << 9);
-@@ -1019,7 +1041,7 @@ fec_restart(struct net_device *ndev)
- */
- cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
- ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
-- if (ndev->phydev && ndev->phydev->speed == SPEED_10)
-+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
- cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
- writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
-
-@@ -1033,7 +1055,7 @@ fec_restart(struct net_device *ndev)
- /* enable pause frame*/
- if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
- ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
-- ndev->phydev && ndev->phydev->pause)) {
-+ fep->phy_dev && fep->phy_dev->pause)) {
- rcntl |= FEC_ENET_FCE;
-
- /* set FIFO threshold parameter to reduce overrun */
-@@ -1213,12 +1235,13 @@ static void
- fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
- {
- struct fec_enet_private *fep;
-- struct bufdesc *bdp;
-+ struct bufdesc *bdp, *bdp_t;
- unsigned short status;
- struct sk_buff *skb;
- struct fec_enet_priv_tx_q *txq;
- struct netdev_queue *nq;
- int index = 0;
-+ int i, bdnum;
- int entries_free;
-
- fep = netdev_priv(ndev);
-@@ -1231,27 +1254,37 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
- bdp = txq->dirty_tx;
-
- /* get next bdp of dirty_tx */
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-
-- while (bdp != READ_ONCE(txq->bd.cur)) {
-- /* Order the load of bd.cur and cbd_sc */
-+ while (bdp != READ_ONCE(txq->cur_tx)) {
-+ /* Order the load of cur_tx and cbd_sc */
- rmb();
-- status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
-+ status = READ_ONCE(bdp->cbd_sc);
- if (status & BD_ENET_TX_READY)
- break;
-
-- index = fec_enet_get_bd_index(bdp, &txq->bd);
--
-+ bdp_t = bdp;
-+ bdnum = 1;
-+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
- skb = txq->tx_skbuff[index];
-+ while (!skb) {
-+ bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
-+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
-+ skb = txq->tx_skbuff[index];
-+ bdnum++;
-+ }
-+ if ((status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
-+ break;
-+
-+ for (i = 0; i < bdnum; i++) {
-+ if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-+ bdp->cbd_datlen, DMA_TO_DEVICE);
-+ bdp->cbd_bufaddr = 0;
-+ if (i < bdnum - 1)
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-+ }
- txq->tx_skbuff[index] = NULL;
-- if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-- dma_unmap_single(&fep->pdev->dev,
-- fec32_to_cpu(bdp->cbd_bufaddr),
-- fec16_to_cpu(bdp->cbd_datlen),
-- DMA_TO_DEVICE);
-- bdp->cbd_bufaddr = cpu_to_fec32(0);
-- if (!skb)
-- goto skb_done;
-
- /* Check for errors. */
- if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
-@@ -1278,7 +1311,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
- struct skb_shared_hwtstamps shhwtstamps;
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
-- fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-+ fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
- skb_tstamp_tx(skb, &shhwtstamps);
- }
-
-@@ -1290,7 +1323,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
-
- /* Free the sk buffer associated with this last transmit */
- dev_kfree_skb_any(skb);
--skb_done:
-+
- /* Make sure the update to bdp and tx_skbuff are performed
- * before dirty_tx
- */
-@@ -1298,21 +1331,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
- txq->dirty_tx = bdp;
-
- /* Update pointer to next buffer descriptor to be transmitted */
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-
- /* Since we have freed up a buffer, the ring is no longer full
- */
- if (netif_queue_stopped(ndev)) {
-- entries_free = fec_enet_get_free_txdesc_num(txq);
-+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
- if (entries_free >= txq->tx_wake_threshold)
- netif_tx_wake_queue(nq);
- }
- }
-
- /* ERR006538: Keep the transmitter going */
-- if (bdp != txq->bd.cur &&
-- readl(txq->bd.reg_desc_active) == 0)
-- writel(0, txq->bd.reg_desc_active);
-+ if (bdp != txq->cur_tx &&
-+ readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
-+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
- }
-
- static void
-@@ -1338,8 +1371,10 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
- if (off)
- skb_reserve(skb, fep->rx_align + 1 - off);
-
-- bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
-- if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
-+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
-+ FEC_ENET_RX_FRSIZE - fep->rx_align,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
- if (net_ratelimit())
- netdev_err(ndev, "Rx DMA memory map failed\n");
- return -ENOMEM;
-@@ -1361,8 +1396,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- if (!new_skb)
- return false;
-
-- dma_sync_single_for_cpu(&fep->pdev->dev,
-- fec32_to_cpu(bdp->cbd_bufaddr),
-+ dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- if (!swap)
-@@ -1374,7 +1408,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- return true;
- }
-
--/* During a receive, the bd_rx.cur points to the current incoming buffer.
-+/* During a receive, the cur_rx points to the current incoming buffer.
- * When we update through the ring, if the next incoming buffer has
- * not been given to the system, we just set the empty indicator,
- * effectively tossing the packet.
-@@ -1407,9 +1441,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
- /* First, grab all of the stats for the incoming packet.
- * These get messed up if we get called due to a busy condition.
- */
-- bdp = rxq->bd.cur;
-+ bdp = rxq->cur_rx;
-
-- while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
-+ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
-
- if (pkt_received >= budget)
- break;
-@@ -1445,10 +1479,10 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
-
- /* Process the incoming frame. */
- ndev->stats.rx_packets++;
-- pkt_len = fec16_to_cpu(bdp->cbd_datlen);
-+ pkt_len = bdp->cbd_datlen;
- ndev->stats.rx_bytes += pkt_len;
-
-- index = fec_enet_get_bd_index(bdp, &rxq->bd);
-+ index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
- skb = rxq->rx_skbuff[index];
-
- /* The packet length includes FCS, but we don't want to
-@@ -1463,8 +1497,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
- ndev->stats.rx_dropped++;
- goto rx_processing_done;
- }
-- dma_unmap_single(&fep->pdev->dev,
-- fec32_to_cpu(bdp->cbd_bufaddr),
-+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- }
-@@ -1472,15 +1505,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
- prefetch(skb->data - NET_IP_ALIGN);
- skb_put(skb, pkt_len - 4);
- data = skb->data;
--
- if (!is_copybreak && need_swap)
- swap_buffer(data, pkt_len);
-
--#if !defined(CONFIG_M5272)
-- if (fep->quirks & FEC_QUIRK_HAS_RACC)
-- data = skb_pull_inline(skb, 2);
--#endif
--
- /* Extract the enhanced buffer descriptor */
- ebdp = NULL;
- if (fep->bufdesc_ex)
-@@ -1489,8 +1516,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
- /* If this is a VLAN packet remove the VLAN Tag */
- vlan_packet_rcvd = false;
- if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-- fep->bufdesc_ex &&
-- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
-+ fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
- /* Push and remove the vlan tag */
- struct vlan_hdr *vlan_header =
- (struct vlan_hdr *) (data + ETH_HLEN);
-@@ -1506,12 +1532,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
-
- /* Get receive timestamp from the skb */
- if (fep->hwts_rx_en && fep->bufdesc_ex)
-- fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
-+ fec_enet_hwtstamp(fep, ebdp->ts,
- skb_hwtstamps(skb));
-
- if (fep->bufdesc_ex &&
- (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
-- if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
-+ if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
- /* don't check it */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
-@@ -1528,8 +1554,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
- napi_gro_receive(&fep->napi, skb);
-
- if (is_copybreak) {
-- dma_sync_single_for_device(&fep->pdev->dev,
-- fec32_to_cpu(bdp->cbd_bufaddr),
-+ dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- } else {
-@@ -1543,30 +1568,26 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
-
- /* Mark the buffer empty */
- status |= BD_ENET_RX_EMPTY;
-+ bdp->cbd_sc = status;
-
- if (fep->bufdesc_ex) {
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
-- ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
-+ ebdp->cbd_esc = BD_ENET_RX_INT;
- ebdp->cbd_prot = 0;
- ebdp->cbd_bdu = 0;
- }
-- /* Make sure the updates to rest of the descriptor are
-- * performed before transferring ownership.
-- */
-- wmb();
-- bdp->cbd_sc = cpu_to_fec16(status);
-
- /* Update BD pointer to next entry */
-- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
-
- /* Doing this here will keep the FEC running while we process
- * incoming frames. On a heavily loaded network, we should be
- * able to keep up at the expense of system resources.
- */
-- writel(0, rxq->bd.reg_desc_active);
-+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
- }
-- rxq->bd.cur = bdp;
-+ rxq->cur_rx = bdp;
- return pkt_received;
- }
-
-@@ -1578,15 +1599,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-- int ret;
--
-- ret = fec_enet_rx_queue(ndev,
-+ clear_bit(queue_id, &fep->work_rx);
-+ pkt_received += fec_enet_rx_queue(ndev,
- budget - pkt_received, queue_id);
--
-- if (ret < budget - pkt_received)
-- clear_bit(queue_id, &fep->work_rx);
--
-- pkt_received += ret;
- }
- return pkt_received;
- }
-@@ -1631,7 +1646,7 @@ fec_enet_interrupt(int irq, void *dev_id)
-
- if (napi_schedule_prep(&fep->napi)) {
- /* Disable the NAPI interrupts */
-- writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
-+ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
- __napi_schedule(&fep->napi);
- }
- }
-@@ -1742,7 +1757,7 @@ static void fec_get_mac(struct net_device *ndev)
- static void fec_enet_adjust_link(struct net_device *ndev)
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
-- struct phy_device *phy_dev = ndev->phydev;
-+ struct phy_device *phy_dev = fep->phy_dev;
- int status_change = 0;
-
- /* Prevent a state halted on mii error */
-@@ -1802,16 +1817,10 @@ static void fec_enet_adjust_link(struct net_device *ndev)
- static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
- {
- struct fec_enet_private *fep = bus->priv;
-- struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
-- int ret = 0;
--
-- ret = pm_runtime_get_sync(dev);
-- if (ret < 0)
-- return ret;
-
- fep->mii_timeout = 0;
-- reinit_completion(&fep->mdio_done);
-+ init_completion(&fep->mdio_done);
-
- /* start a read op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
-@@ -1824,35 +1833,21 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
- if (time_left == 0) {
- fep->mii_timeout = 1;
- netdev_err(fep->netdev, "MDIO read timeout\n");
-- ret = -ETIMEDOUT;
-- goto out;
-+ return -ETIMEDOUT;
- }
-
-- ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
--
--out:
-- pm_runtime_mark_last_busy(dev);
-- pm_runtime_put_autosuspend(dev);
--
-- return ret;
-+ /* return value */
-+ return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
- }
-
- static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
- {
- struct fec_enet_private *fep = bus->priv;
-- struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
-- int ret;
--
-- ret = pm_runtime_get_sync(dev);
-- if (ret < 0)
-- return ret;
-- else
-- ret = 0;
-
- fep->mii_timeout = 0;
-- reinit_completion(&fep->mdio_done);
-+ init_completion(&fep->mdio_done);
-
- /* start a write op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
-@@ -1866,13 +1861,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- if (time_left == 0) {
- fep->mii_timeout = 1;
- netdev_err(fep->netdev, "MDIO write timeout\n");
-- ret = -ETIMEDOUT;
-+ return -ETIMEDOUT;
- }
-
-- pm_runtime_mark_last_busy(dev);
-- pm_runtime_put_autosuspend(dev);
--
-- return ret;
-+ return 0;
- }
-
- static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
-@@ -1881,10 +1873,18 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
- int ret;
-
- if (enable) {
-+ ret = clk_prepare_enable(fep->clk_ahb);
-+ if (ret)
-+ return ret;
-+ ret = clk_prepare_enable(fep->clk_ipg);
-+ if (ret)
-+ goto failed_clk_ipg;
- if (fep->clk_enet_out) {
- ret = clk_prepare_enable(fep->clk_enet_out);
- if (ret)
-- return ret;
-+ goto failed_clk_enet_out;
-+
-+ fec_reset_phy(fep->pdev);
- }
- if (fep->clk_ptp) {
- mutex_lock(&fep->ptp_clk_mutex);
-@@ -1903,6 +1903,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
- goto failed_clk_ref;
- }
- } else {
-+ clk_disable_unprepare(fep->clk_ahb);
-+ clk_disable_unprepare(fep->clk_ipg);
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
- if (fep->clk_ptp) {
-@@ -1923,27 +1925,23 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
- failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
-+failed_clk_enet_out:
-+ clk_disable_unprepare(fep->clk_ipg);
-+failed_clk_ipg:
-+ clk_disable_unprepare(fep->clk_ahb);
-
- return ret;
- }
-
--static int fec_restore_mii_bus(struct net_device *ndev)
-+static void fec_restore_mii_bus(struct net_device *ndev)
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
-- int ret;
--
-- ret = pm_runtime_get_sync(&fep->pdev->dev);
-- if (ret < 0)
-- return ret;
-
-+ fec_enet_clk_enable(ndev, true);
- writel(0xffc00000, fep->hwp + FEC_IEVENT);
- writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
- writel(FEC_ENET_ETHEREN, fep->hwp + FEC_ECNTRL);
--
-- pm_runtime_mark_last_busy(&fep->pdev->dev);
-- pm_runtime_put_autosuspend(&fep->pdev->dev);
-- return 0;
- }
-
- static int fec_enet_mii_probe(struct net_device *ndev)
-@@ -1955,6 +1953,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
- int phy_id;
- int dev_id = fep->dev_id;
-
-+ fep->phy_dev = NULL;
-+
- if (fep->phy_node) {
- phy_dev = of_phy_connect(ndev, fep->phy_node,
- &fec_enet_adjust_link, 0,
-@@ -1964,7 +1964,11 @@ static int fec_enet_mii_probe(struct net_device *ndev)
- } else {
- /* check for attached phy */
- for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
-- if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
-+ if ((fep->mii_bus->phy_mask & (1 << phy_id)))
-+ continue;
-+ if (fep->mii_bus->mdio_map[phy_id] == NULL)
-+ continue;
-+ if (fep->mii_bus->mdio_map[phy_id]->addr == 0)
- continue;
- if (dev_id--)
- continue;
-@@ -2002,10 +2006,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
-
- phy_dev->advertising = phy_dev->supported;
-
-+ fep->phy_dev = phy_dev;
- fep->link = 0;
- fep->full_duplex = 0;
-
-- phy_attached_info(phy_dev);
-+ netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
-+ fep->phy_dev->drv->name, NULL,
-+ fep->phy_dev->irq);
-
- return 0;
- }
-@@ -2017,7 +2024,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct device_node *node;
-- int err = -ENXIO;
-+ int err = -ENXIO, i;
- u32 mii_speed, holdtime;
-
- /*
-@@ -2036,7 +2043,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
- * mdio interface in board design, and need to be configured by
- * fec0 mii_bus.
- */
-- if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
-+ if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
- /* fec1 uses fec0 mii_bus */
- if (mii_cnt && fec0_mii_bus) {
- fep->mii_bus = fec0_mii_bus;
-@@ -2100,29 +2107,38 @@ static int fec_enet_mii_init(struct platform_device *pdev)
- fep->mii_bus->priv = fep;
- fep->mii_bus->parent = &pdev->dev;
-
-+/* fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
-+ if (!fep->mii_bus->irq) {
-+ err = -ENOMEM;
-+ goto err_out_free_mdiobus;
-+ }
-+*/
-+ for (i = 0; i < PHY_MAX_ADDR; i++)
-+ fep->mii_bus->irq[i] = PHY_POLL;
-+
- node = of_get_child_by_name(pdev->dev.of_node, "mdio");
- if (node) {
- err = of_mdiobus_register(fep->mii_bus, node);
- of_node_put(node);
-- } else if (fep->phy_node && !fep->fixed_link) {
-- err = -EPROBE_DEFER;
- } else {
- err = mdiobus_register(fep->mii_bus);
- }
-
- if (err)
-- goto err_out_free_mdiobus;
-+ goto err_out_free_mdio_irq;
-
- mii_cnt++;
-
- /* save fec0 mii_bus */
-- if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) {
-+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
- fec0_mii_bus = fep->mii_bus;
- fec_mii_bus_share = &fep->mii_bus_share;
- }
-
- return 0;
-
-+err_out_free_mdio_irq:
-+ kfree(fep->mii_bus->irq);
- err_out_free_mdiobus:
- mdiobus_free(fep->mii_bus);
- err_out:
-@@ -2133,10 +2149,35 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
- {
- if (--mii_cnt == 0) {
- mdiobus_unregister(fep->mii_bus);
-+ kfree(fep->mii_bus->irq);
- mdiobus_free(fep->mii_bus);
- }
- }
-
-+static int fec_enet_get_settings(struct net_device *ndev,
-+ struct ethtool_cmd *cmd)
-+{
-+ struct fec_enet_private *fep = netdev_priv(ndev);
-+ struct phy_device *phydev = fep->phy_dev;
-+
-+ if (!phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_gset(phydev, cmd);
-+}
-+
-+static int fec_enet_set_settings(struct net_device *ndev,
-+ struct ethtool_cmd *cmd)
-+{
-+ struct fec_enet_private *fep = netdev_priv(ndev);
-+ struct phy_device *phydev = fep->phy_dev;
-+
-+ if (!phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_sset(phydev, cmd);
-+}
-+
- static void fec_enet_get_drvinfo(struct net_device *ndev,
- struct ethtool_drvinfo *info)
- {
-@@ -2163,8 +2204,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
-
- /* List of registers that can be safety be read to dump them with ethtool */
- #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
-- defined(CONFIG_ARM64)
-+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
-+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
- static u32 fec_enet_register_offset[] = {
- FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
- FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
-@@ -2270,7 +2311,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
-
-- if (!ndev->phydev)
-+ if (!fep->phy_dev)
- return -ENODEV;
-
- if (pause->tx_pause != pause->rx_pause) {
-@@ -2286,17 +2327,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
- fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
-
- if (pause->rx_pause || pause->autoneg) {
-- ndev->phydev->supported |= ADVERTISED_Pause;
-- ndev->phydev->advertising |= ADVERTISED_Pause;
-+ fep->phy_dev->supported |= ADVERTISED_Pause;
-+ fep->phy_dev->advertising |= ADVERTISED_Pause;
- } else {
-- ndev->phydev->supported &= ~ADVERTISED_Pause;
-- ndev->phydev->advertising &= ~ADVERTISED_Pause;
-+ fep->phy_dev->supported &= ~ADVERTISED_Pause;
-+ fep->phy_dev->advertising &= ~ADVERTISED_Pause;
- }
-
- if (pause->autoneg) {
- if (netif_running(ndev))
- fec_stop(ndev);
-- phy_start_aneg(ndev->phydev);
-+ phy_start_aneg(fep->phy_dev);
- }
- if (netif_running(ndev)) {
- napi_disable(&fep->napi);
-@@ -2376,26 +2417,14 @@ static const struct fec_stat {
- { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
- };
-
--#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
--
--static void fec_enet_update_ethtool_stats(struct net_device *dev)
-+static void fec_enet_get_ethtool_stats(struct net_device *dev,
-+ struct ethtool_stats *stats, u64 *data)
- {
- struct fec_enet_private *fep = netdev_priv(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
-- fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
--}
--
--static void fec_enet_get_ethtool_stats(struct net_device *dev,
-- struct ethtool_stats *stats, u64 *data)
--{
-- struct fec_enet_private *fep = netdev_priv(dev);
--
-- if (netif_running(dev))
-- fec_enet_update_ethtool_stats(dev);
--
-- memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
-+ data[i] = readl(fep->hwp + fec_stats[i].offset);
- }
-
- static void fec_enet_get_strings(struct net_device *netdev,
-@@ -2420,17 +2449,12 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
- return -EOPNOTSUPP;
- }
- }
--
--#else /* !defined(CONFIG_M5272) */
--#define FEC_STATS_SIZE 0
--static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
--{
--}
- #endif /* !defined(CONFIG_M5272) */
-
- static int fec_enet_nway_reset(struct net_device *dev)
- {
-- struct phy_device *phydev = dev->phydev;
-+ struct fec_enet_private *fep = netdev_priv(dev);
-+ struct phy_device *phydev = fep->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-@@ -2455,6 +2479,9 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
- struct fec_enet_private *fep = netdev_priv(ndev);
- int rx_itr, tx_itr;
-
-+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
-+ return;
-+
- /* Must be greater than zero to avoid unpredictable behavior */
- if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
- !fep->tx_time_itr || !fep->tx_pkts_itr)
-@@ -2477,12 +2504,10 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
-
- writel(tx_itr, fep->hwp + FEC_TXIC0);
- writel(rx_itr, fep->hwp + FEC_RXIC0);
-- if (fep->quirks & FEC_QUIRK_HAS_AVB) {
-- writel(tx_itr, fep->hwp + FEC_TXIC1);
-- writel(rx_itr, fep->hwp + FEC_RXIC1);
-- writel(tx_itr, fep->hwp + FEC_TXIC2);
-- writel(rx_itr, fep->hwp + FEC_RXIC2);
-- }
-+ writel(tx_itr, fep->hwp + FEC_TXIC1);
-+ writel(rx_itr, fep->hwp + FEC_RXIC1);
-+ writel(tx_itr, fep->hwp + FEC_TXIC2);
-+ writel(rx_itr, fep->hwp + FEC_RXIC2);
- }
-
- static int
-@@ -2490,7 +2515,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
-
-- if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
-+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
- return -EOPNOTSUPP;
-
- ec->rx_coalesce_usecs = fep->rx_time_itr;
-@@ -2508,28 +2533,28 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
- struct fec_enet_private *fep = netdev_priv(ndev);
- unsigned int cycle;
-
-- if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
-+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
- return -EOPNOTSUPP;
-
- if (ec->rx_max_coalesced_frames > 255) {
-- pr_err("Rx coalesced frames exceed hardware limitation\n");
-+ pr_err("Rx coalesced frames exceed hardware limiation");
- return -EINVAL;
- }
-
- if (ec->tx_max_coalesced_frames > 255) {
-- pr_err("Tx coalesced frame exceed hardware limitation\n");
-+ pr_err("Tx coalesced frame exceed hardware limiation");
- return -EINVAL;
- }
-
- cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
- if (cycle > 0xFFFF) {
-- pr_err("Rx coalesced usec exceed hardware limitation\n");
-+ pr_err("Rx coalesed usec exceeed hardware limiation");
- return -EINVAL;
- }
-
- cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
- if (cycle > 0xFFFF) {
-- pr_err("Rx coalesced usec exceed hardware limitation\n");
-+ pr_err("Rx coalesed usec exceeed hardware limiation");
- return -EINVAL;
- }
-
-@@ -2629,6 +2654,8 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
- }
-
- static const struct ethtool_ops fec_enet_ethtool_ops = {
-+ .get_settings = fec_enet_get_settings,
-+ .set_settings = fec_enet_set_settings,
- .get_drvinfo = fec_enet_get_drvinfo,
- .get_regs_len = fec_enet_get_regs_len,
- .get_regs = fec_enet_get_regs,
-@@ -2648,14 +2675,12 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
- .set_tunable = fec_enet_set_tunable,
- .get_wol = fec_enet_get_wol,
- .set_wol = fec_enet_set_wol,
-- .get_link_ksettings = phy_ethtool_get_link_ksettings,
-- .set_link_ksettings = phy_ethtool_set_link_ksettings,
- };
-
- static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
-- struct phy_device *phydev = ndev->phydev;
-+ struct phy_device *phydev = fep->phy_dev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-@@ -2685,25 +2710,25 @@ static void fec_enet_free_buffers(struct net_device *ndev)
-
- for (q = 0; q < fep->num_rx_queues; q++) {
- rxq = fep->rx_queue[q];
-- bdp = rxq->bd.base;
-- for (i = 0; i < rxq->bd.ring_size; i++) {
-+ bdp = rxq->rx_bd_base;
-+ for (i = 0; i < rxq->rx_ring_size; i++) {
- skb = rxq->rx_skbuff[i];
- rxq->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev,
-- fec32_to_cpu(bdp->cbd_bufaddr),
-+ bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- }
-- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
- }
- }
-
- for (q = 0; q < fep->num_tx_queues; q++) {
- txq = fep->tx_queue[q];
-- bdp = txq->bd.base;
-- for (i = 0; i < txq->bd.ring_size; i++) {
-+ bdp = txq->tx_bd_base;
-+ for (i = 0; i < txq->tx_ring_size; i++) {
- kfree(txq->tx_bounce[i]);
- txq->tx_bounce[i] = NULL;
- skb = txq->tx_skbuff[i];
-@@ -2722,8 +2747,8 @@ static void fec_enet_free_queue(struct net_device *ndev)
- for (i = 0; i < fep->num_tx_queues; i++)
- if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
- txq = fep->tx_queue[i];
-- dma_free_coherent(&fep->pdev->dev,
-- txq->bd.ring_size * TSO_HEADER_SIZE,
-+ dma_free_coherent(NULL,
-+ txq->tx_ring_size * TSO_HEADER_SIZE,
- txq->tso_hdrs,
- txq->tso_hdrs_dma);
- }
-@@ -2749,15 +2774,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
- }
-
- fep->tx_queue[i] = txq;
-- txq->bd.ring_size = TX_RING_SIZE;
-- fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
-+ txq->tx_ring_size = TX_RING_SIZE;
-+ fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
-
- txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
- txq->tx_wake_threshold =
-- (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
-+ (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
-
-- txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
-- txq->bd.ring_size * TSO_HEADER_SIZE,
-+ txq->tso_hdrs = dma_alloc_coherent(NULL,
-+ txq->tx_ring_size * TSO_HEADER_SIZE,
- &txq->tso_hdrs_dma,
- GFP_KERNEL);
- if (!txq->tso_hdrs) {
-@@ -2774,8 +2799,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
- goto alloc_failed;
- }
-
-- fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
-- fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
-+ fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
-+ fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
- }
- return ret;
-
-@@ -2794,8 +2819,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
- struct fec_enet_priv_rx_q *rxq;
-
- rxq = fep->rx_queue[queue];
-- bdp = rxq->bd.base;
-- for (i = 0; i < rxq->bd.ring_size; i++) {
-+ bdp = rxq->rx_bd_base;
-+ for (i = 0; i < rxq->rx_ring_size; i++) {
- skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (!skb)
- goto err_alloc;
-@@ -2806,19 +2831,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
- }
-
- rxq->rx_skbuff[i] = skb;
-- bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
-+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
-
- if (fep->bufdesc_ex) {
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-- ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
-+ ebdp->cbd_esc = BD_ENET_RX_INT;
- }
-
-- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
- }
-
- /* Set the last buffer to wrap. */
-- bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
-- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
-+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-+ bdp->cbd_sc |= BD_SC_WRAP;
- return 0;
-
- err_alloc:
-@@ -2835,26 +2860,26 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
- struct fec_enet_priv_tx_q *txq;
-
- txq = fep->tx_queue[queue];
-- bdp = txq->bd.base;
-- for (i = 0; i < txq->bd.ring_size; i++) {
-+ bdp = txq->tx_bd_base;
-+ for (i = 0; i < txq->tx_ring_size; i++) {
- txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
- if (!txq->tx_bounce[i])
- goto err_alloc;
-
-- bdp->cbd_sc = cpu_to_fec16(0);
-- bdp->cbd_bufaddr = cpu_to_fec32(0);
-+ bdp->cbd_sc = 0;
-+ bdp->cbd_bufaddr = 0;
-
- if (fep->bufdesc_ex) {
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-- ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
-+ ebdp->cbd_esc = BD_ENET_TX_INT;
- }
-
-- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
- }
-
- /* Set the last buffer to wrap. */
-- bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
-- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
-+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-+ bdp->cbd_sc |= BD_SC_WRAP;
-
- return 0;
-
-@@ -2903,14 +2928,10 @@ fec_enet_open(struct net_device *ndev)
- platform_get_device_id(fep->pdev);
- int ret;
-
-- ret = pm_runtime_get_sync(&fep->pdev->dev);
-- if (ret < 0)
-- return ret;
--
- pinctrl_pm_select_default_state(&fep->pdev->dev);
- ret = fec_enet_clk_enable(ndev, true);
- if (ret)
-- goto clk_enable;
-+ return ret;
-
- /* I should reset the ring buffers here, but I don't yet know
- * a simple way to do that.
-@@ -2928,13 +2949,11 @@ fec_enet_open(struct net_device *ndev)
- if (ret)
- goto err_enet_mii_probe;
-
-- if (fep->quirks & FEC_QUIRK_ERR006687)
-- imx6q_cpuidle_fec_irqs_used();
--
- napi_enable(&fep->napi);
-- phy_start(ndev->phydev);
-+ phy_start(fep->phy_dev);
- netif_tx_start_all_queues(ndev);
-
-+ pm_runtime_get_sync(ndev->dev.parent);
- if ((id_entry->driver_data & FEC_QUIRK_BUG_WAITMODE) &&
- !fec_enet_irq_workaround(fep))
- pm_qos_add_request(&fep->pm_qos_req,
-@@ -2947,16 +2966,14 @@ fec_enet_open(struct net_device *ndev)
-
- device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
- FEC_WOL_FLAG_ENABLE);
-+ fep->miibus_up_failed = false;
-
- return 0;
-
- err_enet_mii_probe:
- fec_enet_free_buffers(ndev);
- err_enet_alloc:
-- fec_enet_clk_enable(ndev, false);
--clk_enable:
-- pm_runtime_mark_last_busy(&fep->pdev->dev);
-- pm_runtime_put_autosuspend(&fep->pdev->dev);
-+ fep->miibus_up_failed = true;
- if (!fep->mii_bus_share)
- pinctrl_pm_select_sleep_state(&fep->pdev->dev);
- return ret;
-@@ -2967,7 +2984,7 @@ fec_enet_close(struct net_device *ndev)
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
-
-- phy_stop(ndev->phydev);
-+ phy_stop(fep->phy_dev);
-
- if (netif_device_present(ndev)) {
- napi_disable(&fep->napi);
-@@ -2975,21 +2992,13 @@ fec_enet_close(struct net_device *ndev)
- fec_stop(ndev);
- }
-
-- phy_disconnect(ndev->phydev);
-- ndev->phydev = NULL;
--
-- if (fep->quirks & FEC_QUIRK_ERR006687)
-- imx6q_cpuidle_fec_irqs_unused();
--
-- fec_enet_update_ethtool_stats(ndev);
-+ phy_disconnect(fep->phy_dev);
-+ fep->phy_dev = NULL;
-
- fec_enet_clk_enable(ndev, false);
- pm_qos_remove_request(&fep->pm_qos_req);
-- if (!fep->mii_bus_share)
-- pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-- pm_runtime_mark_last_busy(&fep->pdev->dev);
-- pm_runtime_put_autosuspend(&fep->pdev->dev);
--
-+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-+ pm_runtime_put_sync_suspend(ndev->dev.parent);
- fec_enet_free_buffers(ndev);
-
- return 0;
-@@ -3005,7 +3014,7 @@ fec_enet_close(struct net_device *ndev)
- * this kind of feature?).
- */
-
--#define FEC_HASH_BITS 6 /* #bits in hash */
-+#define HASH_BITS 6 /* #bits in hash */
- #define CRC32_POLY 0xEDB88320
-
- static void set_multicast_list(struct net_device *ndev)
-@@ -3014,7 +3023,6 @@ static void set_multicast_list(struct net_device *ndev)
- struct netdev_hw_addr *ha;
- unsigned int i, bit, data, crc, tmp;
- unsigned char hash;
-- unsigned int hash_high, hash_low;
-
- if (ndev->flags & IFF_PROMISC) {
- tmp = readl(fep->hwp + FEC_R_CNTRL);
-@@ -3037,10 +3045,10 @@ static void set_multicast_list(struct net_device *ndev)
- return;
- }
-
-- /* Add the addresses in hash register
-+ /* Clear filter and add the addresses in hash register
- */
-- hash_high = 0;
-- hash_low = 0;
-+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-
- netdev_for_each_mc_addr(ha, ndev) {
- /* calculate crc32 value of mac address */
-@@ -3054,20 +3062,21 @@ static void set_multicast_list(struct net_device *ndev)
- }
- }
-
-- /* only upper 6 bits (FEC_HASH_BITS) are used
-+ /* only upper 6 bits (HASH_BITS) are used
- * which point to specific bit in he hash registers
- */
-- hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
-+ hash = (crc >> (32 - HASH_BITS)) & 0x3f;
-
- if (hash > 31) {
-- hash_high |= 1 << (hash - 32);
-+ tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-+ tmp |= 1 << (hash - 32);
-+ writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- } else {
-- hash_low |= 1 << hash;
-+ tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-+ tmp |= 1 << hash;
-+ writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- }
- }
--
-- writel_relaxed(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-- writel_relaxed(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- }
-
- /* Set a MAC change in hardware. */
-@@ -3122,6 +3131,7 @@ static void fec_poll_controller(struct net_device *dev)
- }
- #endif
-
-+#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
- static inline void fec_enet_set_netdev_features(struct net_device *netdev,
- netdev_features_t features)
- {
-@@ -3145,7 +3155,7 @@ static int fec_set_features(struct net_device *netdev,
- struct fec_enet_private *fep = netdev_priv(netdev);
- netdev_features_t changed = features ^ netdev->features;
-
-- if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
-+ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
- napi_disable(&fep->napi);
- netif_tx_lock_bh(netdev);
- fec_stop(netdev);
-@@ -3209,14 +3219,6 @@ static const struct net_device_ops fec_netdev_ops = {
- .ndo_set_features = fec_set_features,
- };
-
--static const unsigned short offset_des_active_rxq[] = {
-- FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
--};
--
--static const unsigned short offset_des_active_txq[] = {
-- FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
--};
--
- /*
- * XXX: We need to clean up on failure exits here.
- *
-@@ -3224,16 +3226,14 @@ static const unsigned short offset_des_active_txq[] = {
- static int fec_enet_init(struct net_device *ndev)
- {
- struct fec_enet_private *fep = netdev_priv(ndev);
-+ struct fec_enet_priv_tx_q *txq;
-+ struct fec_enet_priv_rx_q *rxq;
- struct bufdesc *cbd_base;
- dma_addr_t bd_dma;
- int bd_size;
- unsigned int i;
-- unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
-- sizeof(struct bufdesc);
-- unsigned dsize_log2 = __fls(dsize);
-
-- WARN_ON(dsize != (1 << dsize_log2));
--#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
-+#if defined(CONFIG_ARM)
- fep->rx_align = 0xf;
- fep->tx_align = 0xf;
- #else
-@@ -3243,11 +3243,16 @@ static int fec_enet_init(struct net_device *ndev)
-
- fec_enet_alloc_queue(ndev);
-
-- bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
-+ if (fep->bufdesc_ex)
-+ fep->bufdesc_size = sizeof(struct bufdesc_ex);
-+ else
-+ fep->bufdesc_size = sizeof(struct bufdesc);
-+ bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
-+ fep->bufdesc_size;
-
- /* Allocate memory for buffer descriptors. */
-- cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
-- GFP_KERNEL);
-+ cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
-+ GFP_KERNEL);
- if (!cbd_base) {
- return -ENOMEM;
- }
-@@ -3261,35 +3266,33 @@ static int fec_enet_init(struct net_device *ndev)
-
- /* Set receive and transmit descriptor base. */
- for (i = 0; i < fep->num_rx_queues; i++) {
-- struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
-- unsigned size = dsize * rxq->bd.ring_size;
--
-- rxq->bd.qid = i;
-- rxq->bd.base = cbd_base;
-- rxq->bd.cur = cbd_base;
-- rxq->bd.dma = bd_dma;
-- rxq->bd.dsize = dsize;
-- rxq->bd.dsize_log2 = dsize_log2;
-- rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
-- bd_dma += size;
-- cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
-- rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
-+ rxq = fep->rx_queue[i];
-+ rxq->index = i;
-+ rxq->rx_bd_base = (struct bufdesc *)cbd_base;
-+ rxq->bd_dma = bd_dma;
-+ if (fep->bufdesc_ex) {
-+ bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
-+ cbd_base = (struct bufdesc *)
-+ (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
-+ } else {
-+ bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
-+ cbd_base += rxq->rx_ring_size;
-+ }
- }
-
- for (i = 0; i < fep->num_tx_queues; i++) {
-- struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
-- unsigned size = dsize * txq->bd.ring_size;
--
-- txq->bd.qid = i;
-- txq->bd.base = cbd_base;
-- txq->bd.cur = cbd_base;
-- txq->bd.dma = bd_dma;
-- txq->bd.dsize = dsize;
-- txq->bd.dsize_log2 = dsize_log2;
-- txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
-- bd_dma += size;
-- cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
-- txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
-+ txq = fep->tx_queue[i];
-+ txq->index = i;
-+ txq->tx_bd_base = (struct bufdesc *)cbd_base;
-+ txq->bd_dma = bd_dma;
-+ if (fep->bufdesc_ex) {
-+ bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
-+ cbd_base = (struct bufdesc *)
-+ (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
-+ } else {
-+ bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
-+ cbd_base += txq->tx_ring_size;
-+ }
- }
-
-
-@@ -3323,60 +3326,62 @@ static int fec_enet_init(struct net_device *ndev)
-
- fec_restart(ndev);
-
-- fec_enet_update_ethtool_stats(ndev);
--
- return 0;
- }
-
- #ifdef CONFIG_OF
--static int fec_reset_phy(struct platform_device *pdev)
-+static void fec_reset_phy(struct platform_device *pdev)
-+{
-+ struct net_device *ndev = platform_get_drvdata(pdev);
-+ struct fec_enet_private *fep = netdev_priv(ndev);
-+
-+ if (!gpio_is_valid(fep->phy_reset_gpio))
-+ return;
-+
-+ gpio_set_value_cansleep(fep->phy_reset_gpio, 0);
-+ msleep(fep->phy_reset_duration);
-+ gpio_set_value_cansleep(fep->phy_reset_gpio, 1);
-+}
-+
-+static int fec_get_reset_gpio(struct platform_device *pdev)
- {
- int err, phy_reset;
-- bool active_high = false;
- int msec = 1;
- struct device_node *np = pdev->dev.of_node;
--
-- if (!np)
-- return 0;
--
-- err = of_property_read_u32(np, "phy-reset-duration", &msec);
-- /* A sane reset duration should not be longer than 1s */
-- if (!err && msec > 1000)
-- msec = 1;
-+ struct net_device *ndev = platform_get_drvdata(pdev);
-+ struct fec_enet_private *fep = netdev_priv(ndev);
-
- phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
-- if (phy_reset == -EPROBE_DEFER)
-+ if (!gpio_is_valid(phy_reset))
- return phy_reset;
-- else if (!gpio_is_valid(phy_reset))
-- return 0;
--
-- active_high = of_property_read_bool(np, "phy-reset-active-high");
-
- err = devm_gpio_request_one(&pdev->dev, phy_reset,
-- active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
-- "phy-reset");
-+ GPIOF_OUT_INIT_LOW, "phy-reset");
- if (err) {
- dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return err;
- }
--
-- if (msec > 20)
-- msleep(msec);
-- else
-- usleep_range(msec * 1000, msec * 1000 + 1000);
--
-- gpio_set_value_cansleep(phy_reset, !active_high);
--
-- return 0;
-+
-+ of_property_read_u32(np, "phy-reset-duration", &msec);
-+ /* A sane reset duration should not be longer than 1s */
-+ if (msec > 1000)
-+ msec = 1;
-+ fep->phy_reset_duration = msec;
-+
-+ return phy_reset;
- }
- #else /* CONFIG_OF */
--static int fec_reset_phy(struct platform_device *pdev)
-+static void fec_reset_phy(struct platform_device *pdev)
- {
- /*
- * In case of platform probe, the reset has been done
- * by machine code.
- */
-- return 0;
-+}
-+
-+static inline int fec_get_reset_gpio(struct platform_device *pdev)
-+{
-+ return -EINVAL;
- }
- #endif /* CONFIG_OF */
-
-@@ -3384,6 +3389,7 @@ static void
- fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
- {
- struct device_node *np = pdev->dev.of_node;
-+ int err;
-
- *num_tx = *num_rx = 1;
-
-@@ -3391,9 +3397,13 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
- return;
-
- /* parse the num of tx and rx queues */
-- of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
-+ err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
-+ if (err)
-+ *num_tx = 1;
-
-- of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
-+ err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
-+ if (err)
-+ *num_rx = 1;
-
- if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
- dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
-@@ -3460,13 +3470,11 @@ fec_probe(struct platform_device *pdev)
- int num_tx_qs;
- int num_rx_qs;
-
-- of_dma_configure(&pdev->dev, np);
--
- fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
-
- /* Init network device */
-- ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
-- FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
-+ ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
-+ num_tx_qs, num_rx_qs);
- if (!ndev)
- return -ENOMEM;
-
-@@ -3505,13 +3513,14 @@ fec_probe(struct platform_device *pdev)
-
- platform_set_drvdata(pdev, ndev);
-
-- if ((of_machine_is_compatible("fsl,imx6q") ||
-- of_machine_is_compatible("fsl,imx6dl")) &&
-- !of_property_read_bool(np, "fsl,err006687-workaround-present"))
-- fep->quirks |= FEC_QUIRK_ERR006687;
--
- fec_enet_of_parse_stop_mode(pdev);
-
-+ ret = fec_get_reset_gpio(pdev);
-+ if (ret == -EPROBE_DEFER)
-+ goto gpio_defer;
-+ fep->phy_reset_gpio = ret;
-+
-+
- if (of_get_property(np, "fsl,magic-packet", NULL))
- fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
-
-@@ -3524,7 +3533,6 @@ fec_probe(struct platform_device *pdev)
- goto failed_phy;
- }
- phy_node = of_node_get(np);
-- fep->fixed_link = true;
- }
- fep->phy_node = phy_node;
-
-@@ -3539,10 +3547,6 @@ fec_probe(struct platform_device *pdev)
- fep->phy_interface = ret;
- }
-
--#if !defined(CONFIG_ARM64)
-- request_bus_freq(BUS_FREQ_HIGH);
--#endif
--
- fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(fep->clk_ipg)) {
- ret = PTR_ERR(fep->clk_ipg);
-@@ -3577,39 +3581,24 @@ fec_probe(struct platform_device *pdev)
- fep->bufdesc_ex = false;
- }
-
-+ pm_runtime_enable(&pdev->dev);
- ret = fec_enet_clk_enable(ndev, true);
- if (ret)
- goto failed_clk;
-
-- ret = clk_prepare_enable(fep->clk_ipg);
-- if (ret)
-- goto failed_clk_ipg;
-- ret = clk_prepare_enable(fep->clk_ahb);
-- if (ret)
-- goto failed_clk_ahb;
--
- fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
- if (!IS_ERR(fep->reg_phy)) {
- ret = regulator_enable(fep->reg_phy);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable phy regulator: %d\n", ret);
-- clk_disable_unprepare(fep->clk_ipg);
- goto failed_regulator;
- }
- } else {
- fep->reg_phy = NULL;
- }
-
-- pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
-- pm_runtime_use_autosuspend(&pdev->dev);
-- pm_runtime_get_noresume(&pdev->dev);
-- pm_runtime_set_active(&pdev->dev);
-- pm_runtime_enable(&pdev->dev);
--
-- ret = fec_reset_phy(pdev);
-- if (ret)
-- goto failed_reset;
-+ fec_reset_phy(pdev);
-
- if (fep->bufdesc_ex)
- fec_ptp_init(pdev);
-@@ -3641,15 +3630,9 @@ fec_probe(struct platform_device *pdev)
- fep->wake_irq = fep->irq[0];
-
- init_completion(&fep->mdio_done);
--
-- /* board only enable one mii bus in default */
-- if (!of_get_property(np, "fsl,mii-exclusive", NULL))
-- fep->quirks |= FEC_QUIRK_SINGLE_MDIO;
- ret = fec_enet_mii_init(pdev);
-- if (ret) {
-- dev_id = 0;
-+ if (ret)
- goto failed_mii_init;
-- }
-
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(ndev);
-@@ -3660,11 +3643,6 @@ fec_probe(struct platform_device *pdev)
- if (ret)
- goto failed_register;
-
-- if (!fep->fixed_link) {
-- fep->fixups = of_fec_enet_parse_fixup(np);
-- fec_enet_register_fixup(ndev);
-- }
--
- device_init_wakeup(&ndev->dev, fep->wol_flag &
- FEC_WOL_HAS_MAGIC_PACKET);
-
-@@ -3673,10 +3651,6 @@ fec_probe(struct platform_device *pdev)
-
- fep->rx_copybreak = COPYBREAK_DEFAULT;
- INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
--
-- pm_runtime_mark_last_busy(&pdev->dev);
-- pm_runtime_put_autosuspend(&pdev->dev);
--
- return 0;
-
- failed_register:
-@@ -3684,22 +3658,14 @@ fec_probe(struct platform_device *pdev)
- failed_mii_init:
- failed_irq:
- failed_init:
-- fec_ptp_stop(pdev);
- if (fep->reg_phy)
- regulator_disable(fep->reg_phy);
--failed_reset:
-- pm_runtime_put(&pdev->dev);
-- pm_runtime_disable(&pdev->dev);
- failed_regulator:
--failed_clk_ahb:
-- clk_disable_unprepare(fep->clk_ipg);
--failed_clk_ipg:
- fec_enet_clk_enable(ndev, false);
- failed_clk:
-- if (of_phy_is_fixed_link(np))
-- of_phy_deregister_fixed_link(np);
- failed_phy:
- of_node_put(phy_node);
-+gpio_defer:
- failed_ioremap:
- free_netdev(ndev);
-
-@@ -3711,16 +3677,15 @@ fec_drv_remove(struct platform_device *pdev)
- {
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct fec_enet_private *fep = netdev_priv(ndev);
-- struct device_node *np = pdev->dev.of_node;
-
-+ cancel_delayed_work_sync(&fep->time_keep);
- cancel_work_sync(&fep->tx_timeout_work);
-- fec_ptp_stop(pdev);
- unregister_netdev(ndev);
- fec_enet_mii_remove(fep);
- if (fep->reg_phy)
- regulator_disable(fep->reg_phy);
-- if (of_phy_is_fixed_link(np))
-- of_phy_deregister_fixed_link(np);
-+ if (fep->ptp_clock)
-+ ptp_clock_unregister(fep->ptp_clock);
- of_node_put(fep->phy_node);
- free_netdev(ndev);
-
-@@ -3731,13 +3696,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
- {
- struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep = netdev_priv(ndev);
-- int ret = 0;
-
- rtnl_lock();
- if (netif_running(ndev)) {
- if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
- fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
-- phy_stop(ndev->phydev);
-+ phy_stop(fep->phy_dev);
- napi_disable(&fep->napi);
- netif_tx_lock_bh(ndev);
- netif_device_detach(ndev);
-@@ -3751,12 +3715,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
- enable_irq_wake(fep->wake_irq);
- }
- fec_enet_clk_enable(ndev, false);
-- fep->active_in_suspend = !pm_runtime_status_suspended(dev);
-- if (fep->active_in_suspend)
-- ret = pm_runtime_force_suspend(dev);
-- if (ret < 0)
-- return ret;
-- } else if (fep->mii_bus_share && !ndev->phydev) {
-+ } else if (fep->mii_bus_share && fep->miibus_up_failed && !fep->phy_dev) {
-+ fec_enet_clk_enable(ndev, false);
- pinctrl_pm_select_sleep_state(&fep->pdev->dev);
- }
- rtnl_unlock();
-@@ -3777,7 +3737,7 @@ static int __maybe_unused fec_resume(struct device *dev)
- {
- struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep = netdev_priv(ndev);
-- int ret = 0;
-+ int ret;
- int val;
-
- if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
-@@ -3788,8 +3748,6 @@ static int __maybe_unused fec_resume(struct device *dev)
-
- rtnl_lock();
- if (netif_running(ndev)) {
-- if (fep->active_in_suspend)
-- pm_runtime_force_resume(dev);
- ret = fec_enet_clk_enable(ndev, true);
- if (ret) {
- rtnl_unlock();
-@@ -3812,15 +3770,16 @@ static int __maybe_unused fec_resume(struct device *dev)
- netif_device_attach(ndev);
- netif_tx_unlock_bh(ndev);
- napi_enable(&fep->napi);
-- phy_start(ndev->phydev);
-- } else if (fep->mii_bus_share && !ndev->phydev) {
-+ phy_start(fep->phy_dev);
-+ } else if (fep->mii_bus_share && !fep->phy_dev) {
- pinctrl_pm_select_default_state(&fep->pdev->dev);
-+ fep->miibus_up_failed = true;
- /* And then recovery mii bus */
-- ret = fec_restore_mii_bus(ndev);
-+ fec_restore_mii_bus(ndev);
- }
- rtnl_unlock();
-
-- return ret;
-+ return 0;
-
- failed_clk:
- if (fep->reg_phy)
-@@ -3828,46 +3787,21 @@ static int __maybe_unused fec_resume(struct device *dev)
- return ret;
- }
-
--static int __maybe_unused fec_runtime_suspend(struct device *dev)
-+static int fec_runtime_suspend(struct device *dev)
- {
-- struct net_device *ndev = dev_get_drvdata(dev);
-- struct fec_enet_private *fep = netdev_priv(ndev);
--
-- clk_disable_unprepare(fep->clk_ahb);
-- clk_disable_unprepare(fep->clk_ipg);
--#if !defined(CONFIG_ARM64)
- release_bus_freq(BUS_FREQ_HIGH);
--#endif
--
- return 0;
- }
-
--static int __maybe_unused fec_runtime_resume(struct device *dev)
-+static int fec_runtime_resume(struct device *dev)
- {
-- struct net_device *ndev = dev_get_drvdata(dev);
-- struct fec_enet_private *fep = netdev_priv(ndev);
-- int ret;
--
--#if !defined(CONFIG_ARM64)
- request_bus_freq(BUS_FREQ_HIGH);
--#endif
-- ret = clk_prepare_enable(fep->clk_ahb);
-- if (ret)
-- return ret;
-- ret = clk_prepare_enable(fep->clk_ipg);
-- if (ret)
-- goto failed_clk_ipg;
--
- return 0;
--
--failed_clk_ipg:
-- clk_disable_unprepare(fep->clk_ahb);
-- return ret;
- }
-
- static const struct dev_pm_ops fec_pm_ops = {
-- SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
- SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
-+ SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
- };
-
- static struct platform_driver fec_driver = {
-diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
-index 446ae9d..afe7f39 100644
---- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
-+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
-@@ -66,6 +66,7 @@ struct mpc52xx_fec_priv {
- /* MDIO link details */
- unsigned int mdio_speed;
- struct device_node *phy_node;
-+ struct phy_device *phydev;
- enum phy_state link;
- int seven_wire_mode;
- };
-@@ -164,7 +165,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
- static void mpc52xx_fec_adjust_link(struct net_device *dev)
- {
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-- struct phy_device *phydev = dev->phydev;
-+ struct phy_device *phydev = priv->phydev;
- int new_state = 0;
-
- if (phydev->link != PHY_DOWN) {
-@@ -214,17 +215,16 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
- static int mpc52xx_fec_open(struct net_device *dev)
- {
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-- struct phy_device *phydev = NULL;
- int err = -EBUSY;
-
- if (priv->phy_node) {
-- phydev = of_phy_connect(priv->ndev, priv->phy_node,
-- mpc52xx_fec_adjust_link, 0, 0);
-- if (!phydev) {
-+ priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
-+ mpc52xx_fec_adjust_link, 0, 0);
-+ if (!priv->phydev) {
- dev_err(&dev->dev, "of_phy_connect failed\n");
- return -ENODEV;
- }
-- phy_start(phydev);
-+ phy_start(priv->phydev);
- }
-
- if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
-@@ -268,9 +268,10 @@ static int mpc52xx_fec_open(struct net_device *dev)
- free_ctrl_irq:
- free_irq(dev->irq, dev);
- free_phy:
-- if (phydev) {
-- phy_stop(phydev);
-- phy_disconnect(phydev);
-+ if (priv->phydev) {
-+ phy_stop(priv->phydev);
-+ phy_disconnect(priv->phydev);
-+ priv->phydev = NULL;
- }
-
- return err;
-@@ -279,7 +280,6 @@ static int mpc52xx_fec_open(struct net_device *dev)
- static int mpc52xx_fec_close(struct net_device *dev)
- {
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-- struct phy_device *phydev = dev->phydev;
-
- netif_stop_queue(dev);
-
-@@ -291,10 +291,11 @@ static int mpc52xx_fec_close(struct net_device *dev)
- free_irq(priv->r_irq, dev);
- free_irq(priv->t_irq, dev);
-
-- if (phydev) {
-+ if (priv->phydev) {
- /* power down phy */
-- phy_stop(phydev);
-- phy_disconnect(phydev);
-+ phy_stop(priv->phydev);
-+ phy_disconnect(priv->phydev);
-+ priv->phydev = NULL;
- }
-
- return 0;
-@@ -762,6 +763,26 @@ static void mpc52xx_fec_reset(struct net_device *dev)
-
- /* ethtool interface */
-
-+static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-+{
-+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-+
-+ if (!priv->phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_gset(priv->phydev, cmd);
-+}
-+
-+static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-+{
-+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-+
-+ if (!priv->phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_sset(priv->phydev, cmd);
-+}
-+
- static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
- {
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-@@ -775,23 +796,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
- }
-
- static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
-+ .get_settings = mpc52xx_fec_get_settings,
-+ .set_settings = mpc52xx_fec_set_settings,
- .get_link = ethtool_op_get_link,
- .get_msglevel = mpc52xx_fec_get_msglevel,
- .set_msglevel = mpc52xx_fec_set_msglevel,
- .get_ts_info = ethtool_op_get_ts_info,
-- .get_link_ksettings = phy_ethtool_get_link_ksettings,
-- .set_link_ksettings = phy_ethtool_set_link_ksettings,
- };
-
-
- static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- {
-- struct phy_device *phydev = dev->phydev;
-+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
-- if (!phydev)
-+ if (!priv->phydev)
- return -ENOTSUPP;
-
-- return phy_mii_ioctl(phydev, rq, cmd);
-+ return phy_mii_ioctl(priv->phydev, rq, cmd);
- }
-
- static const struct net_device_ops mpc52xx_fec_netdev_ops = {
-@@ -1063,23 +1084,27 @@ static struct platform_driver mpc52xx_fec_driver = {
- /* Module */
- /* ======================================================================== */
-
--static struct platform_driver * const drivers[] = {
--#ifdef CONFIG_FEC_MPC52xx_MDIO
-- &mpc52xx_fec_mdio_driver,
--#endif
-- &mpc52xx_fec_driver,
--};
--
- static int __init
- mpc52xx_fec_init(void)
- {
-- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-+#ifdef CONFIG_FEC_MPC52xx_MDIO
-+ int ret;
-+ ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
-+ if (ret) {
-+ pr_err("failed to register mdio driver\n");
-+ return ret;
-+ }
-+#endif
-+ return platform_driver_register(&mpc52xx_fec_driver);
- }
-
- static void __exit
- mpc52xx_fec_exit(void)
- {
-- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-+ platform_driver_unregister(&mpc52xx_fec_driver);
-+#ifdef CONFIG_FEC_MPC52xx_MDIO
-+ platform_driver_unregister(&mpc52xx_fec_mdio_driver);
-+#endif
- }
-
-
-diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
-index b5497e3..1e647be 100644
---- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
-+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
-@@ -22,6 +22,7 @@
-
- struct mpc52xx_fec_mdio_priv {
- struct mpc52xx_fec __iomem *regs;
-+ int mdio_irqs[PHY_MAX_ADDR];
- };
-
- static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
-@@ -82,6 +83,9 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
- bus->read = mpc52xx_fec_mdio_read;
- bus->write = mpc52xx_fec_mdio_write;
-
-+ /* setup irqs */
-+ bus->irq = priv->mdio_irqs;
-+
- /* setup registers */
- err = of_address_to_resource(np, 0, &res);
- if (err)
-diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
-index f9e7446..7a8386a 100644
---- a/drivers/net/ethernet/freescale/fec_ptp.c
-+++ b/drivers/net/ethernet/freescale/fec_ptp.c
-@@ -112,8 +112,9 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
- unsigned long flags;
- u32 val, tempval;
- int inc;
-- struct timespec64 ts;
-+ struct timespec ts;
- u64 ns;
-+ u32 remainder;
- val = 0;
-
- if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
-@@ -162,7 +163,8 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
- tempval = readl(fep->hwp + FEC_ATIME);
- /* Convert the ptp local counter to 1588 timestamp */
- ns = timecounter_cyc2time(&fep->tc, tempval);
-- ts = ns_to_timespec64(ns);
-+ ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
-+ ts.tv_nsec = remainder;
-
- /* The tempval is less than 3 seconds, and so val is less than
- * 4 seconds. No overflow for 32bit calculation.
-@@ -596,16 +598,6 @@ void fec_ptp_init(struct platform_device *pdev)
- schedule_delayed_work(&fep->time_keep, HZ);
- }
-
--void fec_ptp_stop(struct platform_device *pdev)
--{
-- struct net_device *ndev = platform_get_drvdata(pdev);
-- struct fec_enet_private *fep = netdev_priv(ndev);
--
-- cancel_delayed_work_sync(&fep->time_keep);
-- if (fep->ptp_clock)
-- ptp_clock_unregister(fep->ptp_clock);
--}
--
- /**
- * fec_ptp_check_pps_event
- * @fep: the fec_enet_private structure handle
-diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
-deleted file mode 100644
-index 79b7c84..0000000
---- a/drivers/net/ethernet/freescale/fman/Kconfig
-+++ /dev/null
-@@ -1,9 +0,0 @@
--config FSL_FMAN
-- tristate "FMan support"
-- depends on FSL_SOC || COMPILE_TEST
-- select GENERIC_ALLOCATOR
-- select PHYLIB
-- default n
-- help
-- Freescale Data-Path Acceleration Architecture Frame Manager
-- (FMan) support
-diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
-deleted file mode 100644
-index 6049177..0000000
---- a/drivers/net/ethernet/freescale/fman/Makefile
-+++ /dev/null
-@@ -1,9 +0,0 @@
--subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
--
--obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
--obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
--obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
--
--fsl_fman-objs := fman_muram.o fman.o fman_sp.o
--fsl_fman_port-objs := fman_port.o
--fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
-diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
-deleted file mode 100644
-index dafd9e1..0000000
---- a/drivers/net/ethernet/freescale/fman/fman.c
-+++ /dev/null
-@@ -1,2967 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
--#include "fman.h"
--#include "fman_muram.h"
--
--#include <linux/fsl/guts.h>
--#include <linux/slab.h>
--#include <linux/delay.h>
--#include <linux/module.h>
--#include <linux/of_platform.h>
--#include <linux/clk.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/interrupt.h>
--#include <linux/libfdt_env.h>
--
--/* General defines */
--#define FMAN_LIODN_TBL 64 /* size of LIODN table */
--#define MAX_NUM_OF_MACS 10
--#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
--#define BASE_RX_PORTID 0x08
--#define BASE_TX_PORTID 0x28
--
--/* Modules registers offsets */
--#define BMI_OFFSET 0x00080000
--#define QMI_OFFSET 0x00080400
--#define DMA_OFFSET 0x000C2000
--#define FPM_OFFSET 0x000C3000
--#define IMEM_OFFSET 0x000C4000
--#define CGP_OFFSET 0x000DB000
--
--/* Exceptions bit map */
--#define EX_DMA_BUS_ERROR 0x80000000
--#define EX_DMA_READ_ECC 0x40000000
--#define EX_DMA_SYSTEM_WRITE_ECC 0x20000000
--#define EX_DMA_FM_WRITE_ECC 0x10000000
--#define EX_FPM_STALL_ON_TASKS 0x08000000
--#define EX_FPM_SINGLE_ECC 0x04000000
--#define EX_FPM_DOUBLE_ECC 0x02000000
--#define EX_QMI_SINGLE_ECC 0x01000000
--#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
--#define EX_QMI_DOUBLE_ECC 0x00400000
--#define EX_BMI_LIST_RAM_ECC 0x00200000
--#define EX_BMI_STORAGE_PROFILE_ECC 0x00100000
--#define EX_BMI_STATISTICS_RAM_ECC 0x00080000
--#define EX_IRAM_ECC 0x00040000
--#define EX_MURAM_ECC 0x00020000
--#define EX_BMI_DISPATCH_RAM_ECC 0x00010000
--#define EX_DMA_SINGLE_PORT_ECC 0x00008000
--
--/* DMA defines */
--/* masks */
--#define DMA_MODE_BER 0x00200000
--#define DMA_MODE_ECC 0x00000020
--#define DMA_MODE_SECURE_PROT 0x00000800
--#define DMA_MODE_AXI_DBG_MASK 0x0F000000
--
--#define DMA_TRANSFER_PORTID_MASK 0xFF000000
--#define DMA_TRANSFER_TNUM_MASK 0x00FF0000
--#define DMA_TRANSFER_LIODN_MASK 0x00000FFF
--
--#define DMA_STATUS_BUS_ERR 0x08000000
--#define DMA_STATUS_READ_ECC 0x04000000
--#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000
--#define DMA_STATUS_FM_WRITE_ECC 0x01000000
--#define DMA_STATUS_FM_SPDAT_ECC 0x00080000
--
--#define DMA_MODE_CACHE_OR_SHIFT 30
--#define DMA_MODE_AXI_DBG_SHIFT 24
--#define DMA_MODE_CEN_SHIFT 13
--#define DMA_MODE_CEN_MASK 0x00000007
--#define DMA_MODE_DBG_SHIFT 7
--#define DMA_MODE_AID_MODE_SHIFT 4
--
--#define DMA_THRESH_COMMQ_SHIFT 24
--#define DMA_THRESH_READ_INT_BUF_SHIFT 16
--#define DMA_THRESH_READ_INT_BUF_MASK 0x0000003f
--#define DMA_THRESH_WRITE_INT_BUF_MASK 0x0000003f
--
--#define DMA_TRANSFER_PORTID_SHIFT 24
--#define DMA_TRANSFER_TNUM_SHIFT 16
--
--#define DMA_CAM_SIZEOF_ENTRY 0x40
--#define DMA_CAM_UNITS 8
--
--#define DMA_LIODN_SHIFT 16
--#define DMA_LIODN_BASE_MASK 0x00000FFF
--
--/* FPM defines */
--#define FPM_EV_MASK_DOUBLE_ECC 0x80000000
--#define FPM_EV_MASK_STALL 0x40000000
--#define FPM_EV_MASK_SINGLE_ECC 0x20000000
--#define FPM_EV_MASK_RELEASE_FM 0x00010000
--#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000
--#define FPM_EV_MASK_STALL_EN 0x00004000
--#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000
--#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008
--#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004
--
--#define FPM_RAM_MURAM_ECC 0x00008000
--#define FPM_RAM_IRAM_ECC 0x00004000
--#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000
--#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000
--#define FPM_RAM_IRAM_ECC_EN 0x40000000
--#define FPM_RAM_RAMS_ECC_EN 0x80000000
--#define FPM_RAM_RAMS_ECC_EN_SRC_SEL 0x08000000
--
--#define FPM_REV1_MAJOR_MASK 0x0000FF00
--#define FPM_REV1_MINOR_MASK 0x000000FF
--
--#define FPM_DISP_LIMIT_SHIFT 24
--
--#define FPM_PRT_FM_CTL1 0x00000001
--#define FPM_PRT_FM_CTL2 0x00000002
--#define FPM_PORT_FM_CTL_PORTID_SHIFT 24
--#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16
--
--#define FPM_THR1_PRS_SHIFT 24
--#define FPM_THR1_KG_SHIFT 16
--#define FPM_THR1_PLCR_SHIFT 8
--#define FPM_THR1_BMI_SHIFT 0
--
--#define FPM_THR2_QMI_ENQ_SHIFT 24
--#define FPM_THR2_QMI_DEQ_SHIFT 0
--#define FPM_THR2_FM_CTL1_SHIFT 16
--#define FPM_THR2_FM_CTL2_SHIFT 8
--
--#define FPM_EV_MASK_CAT_ERR_SHIFT 1
--#define FPM_EV_MASK_DMA_ERR_SHIFT 0
--
--#define FPM_REV1_MAJOR_SHIFT 8
--
--#define FPM_RSTC_FM_RESET 0x80000000
--#define FPM_RSTC_MAC0_RESET 0x40000000
--#define FPM_RSTC_MAC1_RESET 0x20000000
--#define FPM_RSTC_MAC2_RESET 0x10000000
--#define FPM_RSTC_MAC3_RESET 0x08000000
--#define FPM_RSTC_MAC8_RESET 0x04000000
--#define FPM_RSTC_MAC4_RESET 0x02000000
--#define FPM_RSTC_MAC5_RESET 0x01000000
--#define FPM_RSTC_MAC6_RESET 0x00800000
--#define FPM_RSTC_MAC7_RESET 0x00400000
--#define FPM_RSTC_MAC9_RESET 0x00200000
--
--#define FPM_TS_INT_SHIFT 16
--#define FPM_TS_CTL_EN 0x80000000
--
--/* BMI defines */
--#define BMI_INIT_START 0x80000000
--#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC 0x80000000
--#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000
--#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000
--#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000
--#define BMI_NUM_OF_TASKS_MASK 0x3F000000
--#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000
--#define BMI_NUM_OF_DMAS_MASK 0x00000F00
--#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F
--#define BMI_FIFO_SIZE_MASK 0x000003FF
--#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000
--#define BMI_CFG2_DMAS_MASK 0x0000003F
--#define BMI_CFG2_TASKS_MASK 0x0000003F
--
--#define BMI_CFG2_TASKS_SHIFT 16
--#define BMI_CFG2_DMAS_SHIFT 0
--#define BMI_CFG1_FIFO_SIZE_SHIFT 16
--#define BMI_NUM_OF_TASKS_SHIFT 24
--#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16
--#define BMI_NUM_OF_DMAS_SHIFT 8
--#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0
--
--#define BMI_FIFO_ALIGN 0x100
--
--#define BMI_EXTRA_FIFO_SIZE_SHIFT 16
--
--/* QMI defines */
--#define QMI_CFG_ENQ_EN 0x80000000
--#define QMI_CFG_DEQ_EN 0x40000000
--#define QMI_CFG_EN_COUNTERS 0x10000000
--#define QMI_CFG_DEQ_MASK 0x0000003F
--#define QMI_CFG_ENQ_MASK 0x00003F00
--#define QMI_CFG_ENQ_SHIFT 8
--
--#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000
--#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000
--#define QMI_INTR_EN_SINGLE_ECC 0x80000000
--
--#define QMI_GS_HALT_NOT_BUSY 0x00000002
--
--/* IRAM defines */
--#define IRAM_IADD_AIE 0x80000000
--#define IRAM_READY 0x80000000
--
--/* Default values */
--#define DEFAULT_CATASTROPHIC_ERR 0
--#define DEFAULT_DMA_ERR 0
--#define DEFAULT_AID_MODE FMAN_DMA_AID_OUT_TNUM
--#define DEFAULT_DMA_COMM_Q_LOW 0x2A
--#define DEFAULT_DMA_COMM_Q_HIGH 0x3F
--#define DEFAULT_CACHE_OVERRIDE 0
--#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES 64
--#define DEFAULT_DMA_DBG_CNT_MODE 0
--#define DEFAULT_DMA_SOS_EMERGENCY 0
--#define DEFAULT_DMA_WATCHDOG 0
--#define DEFAULT_DISP_LIMIT 0
--#define DEFAULT_PRS_DISP_TH 16
--#define DEFAULT_PLCR_DISP_TH 16
--#define DEFAULT_KG_DISP_TH 16
--#define DEFAULT_BMI_DISP_TH 16
--#define DEFAULT_QMI_ENQ_DISP_TH 16
--#define DEFAULT_QMI_DEQ_DISP_TH 16
--#define DEFAULT_FM_CTL1_DISP_TH 16
--#define DEFAULT_FM_CTL2_DISP_TH 16
--
--#define DFLT_AXI_DBG_NUM_OF_BEATS 1
--
--#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \
-- ((dma_thresh_max_buf + 1) / 2)
--#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \
-- ((dma_thresh_max_buf + 1) * 3 / 4)
--#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \
-- ((dma_thresh_max_buf + 1) / 2)
--#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\
-- ((dma_thresh_max_buf + 1) * 3 / 4)
--
--#define DMA_COMM_Q_LOW_FMAN_V3 0x2A
--#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq) \
-- ((dma_thresh_max_commq + 1) / 2)
--#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq) \
-- ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 : \
-- DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))
--
--#define DMA_COMM_Q_HIGH_FMAN_V3 0x3f
--#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq) \
-- ((dma_thresh_max_commq + 1) * 3 / 4)
--#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq) \
-- ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 : \
-- DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))
--
--#define TOTAL_NUM_OF_TASKS_FMAN_V3L 59
--#define TOTAL_NUM_OF_TASKS_FMAN_V3H 124
--#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks) \
-- ((major == 6) ? ((minor == 1 || minor == 4) ? \
-- TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) : \
-- bmi_max_num_of_tasks)
--
--#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 64
--#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2 32
--#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major) \
-- (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 : \
-- DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)
--
--#define FM_TIMESTAMP_1_USEC_BIT 8
--
--/* Defines used for enabling/disabling FMan interrupts */
--#define ERR_INTR_EN_DMA 0x00010000
--#define ERR_INTR_EN_FPM 0x80000000
--#define ERR_INTR_EN_BMI 0x00800000
--#define ERR_INTR_EN_QMI 0x00400000
--#define ERR_INTR_EN_MURAM 0x00040000
--#define ERR_INTR_EN_MAC0 0x00004000
--#define ERR_INTR_EN_MAC1 0x00002000
--#define ERR_INTR_EN_MAC2 0x00001000
--#define ERR_INTR_EN_MAC3 0x00000800
--#define ERR_INTR_EN_MAC4 0x00000400
--#define ERR_INTR_EN_MAC5 0x00000200
--#define ERR_INTR_EN_MAC6 0x00000100
--#define ERR_INTR_EN_MAC7 0x00000080
--#define ERR_INTR_EN_MAC8 0x00008000
--#define ERR_INTR_EN_MAC9 0x00000040
--
--#define INTR_EN_QMI 0x40000000
--#define INTR_EN_MAC0 0x00080000
--#define INTR_EN_MAC1 0x00040000
--#define INTR_EN_MAC2 0x00020000
--#define INTR_EN_MAC3 0x00010000
--#define INTR_EN_MAC4 0x00000040
--#define INTR_EN_MAC5 0x00000020
--#define INTR_EN_MAC6 0x00000008
--#define INTR_EN_MAC7 0x00000002
--#define INTR_EN_MAC8 0x00200000
--#define INTR_EN_MAC9 0x00100000
--#define INTR_EN_REV0 0x00008000
--#define INTR_EN_REV1 0x00004000
--#define INTR_EN_REV2 0x00002000
--#define INTR_EN_REV3 0x00001000
--#define INTR_EN_TMR 0x01000000
--
--enum fman_dma_aid_mode {
-- FMAN_DMA_AID_OUT_PORT_ID = 0, /* 4 LSB of PORT_ID */
-- FMAN_DMA_AID_OUT_TNUM /* 4 LSB of TNUM */
--};
--
--struct fman_iram_regs {
-- u32 iadd; /* FM IRAM instruction address register */
-- u32 idata; /* FM IRAM instruction data register */
-- u32 itcfg; /* FM IRAM timing config register */
-- u32 iready; /* FM IRAM ready register */
--};
--
--struct fman_fpm_regs {
-- u32 fmfp_tnc; /* FPM TNUM Control 0x00 */
-- u32 fmfp_prc; /* FPM Port_ID FmCtl Association 0x04 */
-- u32 fmfp_brkc; /* FPM Breakpoint Control 0x08 */
-- u32 fmfp_mxd; /* FPM Flush Control 0x0c */
-- u32 fmfp_dist1; /* FPM Dispatch Thresholds1 0x10 */
-- u32 fmfp_dist2; /* FPM Dispatch Thresholds2 0x14 */
-- u32 fm_epi; /* FM Error Pending Interrupts 0x18 */
-- u32 fm_rie; /* FM Error Interrupt Enable 0x1c */
-- u32 fmfp_fcev[4]; /* FPM FMan-Controller Event 1-4 0x20-0x2f */
-- u32 res0030[4]; /* res 0x30 - 0x3f */
-- u32 fmfp_cee[4]; /* PM FMan-Controller Event 1-4 0x40-0x4f */
-- u32 res0050[4]; /* res 0x50-0x5f */
-- u32 fmfp_tsc1; /* FPM TimeStamp Control1 0x60 */
-- u32 fmfp_tsc2; /* FPM TimeStamp Control2 0x64 */
-- u32 fmfp_tsp; /* FPM Time Stamp 0x68 */
-- u32 fmfp_tsf; /* FPM Time Stamp Fraction 0x6c */
-- u32 fm_rcr; /* FM Rams Control 0x70 */
-- u32 fmfp_extc; /* FPM External Requests Control 0x74 */
-- u32 fmfp_ext1; /* FPM External Requests Config1 0x78 */
-- u32 fmfp_ext2; /* FPM External Requests Config2 0x7c */
-- u32 fmfp_drd[16]; /* FPM Data_Ram Data 0-15 0x80 - 0xbf */
-- u32 fmfp_dra; /* FPM Data Ram Access 0xc0 */
-- u32 fm_ip_rev_1; /* FM IP Block Revision 1 0xc4 */
-- u32 fm_ip_rev_2; /* FM IP Block Revision 2 0xc8 */
-- u32 fm_rstc; /* FM Reset Command 0xcc */
-- u32 fm_cld; /* FM Classifier Debug 0xd0 */
-- u32 fm_npi; /* FM Normal Pending Interrupts 0xd4 */
-- u32 fmfp_exte; /* FPM External Requests Enable 0xd8 */
-- u32 fmfp_ee; /* FPM Event&Mask 0xdc */
-- u32 fmfp_cev[4]; /* FPM CPU Event 1-4 0xe0-0xef */
-- u32 res00f0[4]; /* res 0xf0-0xff */
-- u32 fmfp_ps[50]; /* FPM Port Status 0x100-0x1c7 */
-- u32 res01c8[14]; /* res 0x1c8-0x1ff */
-- u32 fmfp_clfabc; /* FPM CLFABC 0x200 */
-- u32 fmfp_clfcc; /* FPM CLFCC 0x204 */
-- u32 fmfp_clfaval; /* FPM CLFAVAL 0x208 */
-- u32 fmfp_clfbval; /* FPM CLFBVAL 0x20c */
-- u32 fmfp_clfcval; /* FPM CLFCVAL 0x210 */
-- u32 fmfp_clfamsk; /* FPM CLFAMSK 0x214 */
-- u32 fmfp_clfbmsk; /* FPM CLFBMSK 0x218 */
-- u32 fmfp_clfcmsk; /* FPM CLFCMSK 0x21c */
-- u32 fmfp_clfamc; /* FPM CLFAMC 0x220 */
-- u32 fmfp_clfbmc; /* FPM CLFBMC 0x224 */
-- u32 fmfp_clfcmc; /* FPM CLFCMC 0x228 */
-- u32 fmfp_decceh; /* FPM DECCEH 0x22c */
-- u32 res0230[116]; /* res 0x230 - 0x3ff */
-- u32 fmfp_ts[128]; /* 0x400: FPM Task Status 0x400 - 0x5ff */
-- u32 res0600[0x400 - 384];
--};
--
--struct fman_bmi_regs {
-- u32 fmbm_init; /* BMI Initialization 0x00 */
-- u32 fmbm_cfg1; /* BMI Configuration 1 0x04 */
-- u32 fmbm_cfg2; /* BMI Configuration 2 0x08 */
-- u32 res000c[5]; /* 0x0c - 0x1f */
-- u32 fmbm_ievr; /* Interrupt Event Register 0x20 */
-- u32 fmbm_ier; /* Interrupt Enable Register 0x24 */
-- u32 fmbm_ifr; /* Interrupt Force Register 0x28 */
-- u32 res002c[5]; /* 0x2c - 0x3f */
-- u32 fmbm_arb[8]; /* BMI Arbitration 0x40 - 0x5f */
-- u32 res0060[12]; /* 0x60 - 0x8f */
-- u32 fmbm_dtc[3]; /* Debug Trap Counter 0x90 - 0x9b */
-- u32 res009c; /* 0x9c */
-- u32 fmbm_dcv[3][4]; /* Debug Compare val 0xa0-0xcf */
-- u32 fmbm_dcm[3][4]; /* Debug Compare Mask 0xd0-0xff */
-- u32 fmbm_gde; /* BMI Global Debug Enable 0x100 */
-- u32 fmbm_pp[63]; /* BMI Port Parameters 0x104 - 0x1ff */
-- u32 res0200; /* 0x200 */
-- u32 fmbm_pfs[63]; /* BMI Port FIFO Size 0x204 - 0x2ff */
-- u32 res0300; /* 0x300 */
-- u32 fmbm_spliodn[63]; /* Port Partition ID 0x304 - 0x3ff */
--};
--
--struct fman_qmi_regs {
-- u32 fmqm_gc; /* General Configuration Register 0x00 */
-- u32 res0004; /* 0x04 */
-- u32 fmqm_eie; /* Error Interrupt Event Register 0x08 */
-- u32 fmqm_eien; /* Error Interrupt Enable Register 0x0c */
-- u32 fmqm_eif; /* Error Interrupt Force Register 0x10 */
-- u32 fmqm_ie; /* Interrupt Event Register 0x14 */
-- u32 fmqm_ien; /* Interrupt Enable Register 0x18 */
-- u32 fmqm_if; /* Interrupt Force Register 0x1c */
-- u32 fmqm_gs; /* Global Status Register 0x20 */
-- u32 fmqm_ts; /* Task Status Register 0x24 */
-- u32 fmqm_etfc; /* Enqueue Total Frame Counter 0x28 */
-- u32 fmqm_dtfc; /* Dequeue Total Frame Counter 0x2c */
-- u32 fmqm_dc0; /* Dequeue Counter 0 0x30 */
-- u32 fmqm_dc1; /* Dequeue Counter 1 0x34 */
-- u32 fmqm_dc2; /* Dequeue Counter 2 0x38 */
-- u32 fmqm_dc3; /* Dequeue Counter 3 0x3c */
-- u32 fmqm_dfdc; /* Dequeue FQID from Default Counter 0x40 */
-- u32 fmqm_dfcc; /* Dequeue FQID from Context Counter 0x44 */
-- u32 fmqm_dffc; /* Dequeue FQID from FD Counter 0x48 */
-- u32 fmqm_dcc; /* Dequeue Confirm Counter 0x4c */
-- u32 res0050[7]; /* 0x50 - 0x6b */
-- u32 fmqm_tapc; /* Tnum Aging Period Control 0x6c */
-- u32 fmqm_dmcvc; /* Dequeue MAC Command Valid Counter 0x70 */
-- u32 fmqm_difdcc; /* Dequeue Invalid FD Command Counter 0x74 */
-- u32 fmqm_da1v; /* Dequeue A1 Valid Counter 0x78 */
-- u32 res007c; /* 0x7c */
-- u32 fmqm_dtc; /* 0x80 Debug Trap Counter 0x80 */
-- u32 fmqm_efddd; /* 0x84 Enqueue Frame desc Dynamic dbg 0x84 */
-- u32 res0088[2]; /* 0x88 - 0x8f */
-- struct {
-- u32 fmqm_dtcfg1; /* 0x90 dbg trap cfg 1 Register 0x00 */
-- u32 fmqm_dtval1; /* Debug Trap Value 1 Register 0x04 */
-- u32 fmqm_dtm1; /* Debug Trap Mask 1 Register 0x08 */
-- u32 fmqm_dtc1; /* Debug Trap Counter 1 Register 0x0c */
-- u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */
-- u32 fmqm_dtval2; /* Debug Trap Value 2 Register 0x14 */
-- u32 fmqm_dtm2; /* Debug Trap Mask 2 Register 0x18 */
-- u32 res001c; /* 0x1c */
-- } dbg_traps[3]; /* 0x90 - 0xef */
-- u8 res00f0[0x400 - 0xf0]; /* 0xf0 - 0x3ff */
--};
--
--struct fman_dma_regs {
-- u32 fmdmsr; /* FM DMA status register 0x00 */
-- u32 fmdmmr; /* FM DMA mode register 0x04 */
-- u32 fmdmtr; /* FM DMA bus threshold register 0x08 */
-- u32 fmdmhy; /* FM DMA bus hysteresis register 0x0c */
-- u32 fmdmsetr; /* FM DMA SOS emergency Threshold Register 0x10 */
-- u32 fmdmtah; /* FM DMA transfer bus address high reg 0x14 */
-- u32 fmdmtal; /* FM DMA transfer bus address low reg 0x18 */
-- u32 fmdmtcid; /* FM DMA transfer bus communication ID reg 0x1c */
-- u32 fmdmra; /* FM DMA bus internal ram address register 0x20 */
-- u32 fmdmrd; /* FM DMA bus internal ram data register 0x24 */
-- u32 fmdmwcr; /* FM DMA CAM watchdog counter value 0x28 */
-- u32 fmdmebcr; /* FM DMA CAM base in MURAM register 0x2c */
-- u32 fmdmccqdr; /* FM DMA CAM and CMD Queue Debug reg 0x30 */
-- u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */
-- u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */
-- u32 fmdmcqvr3; /* FM DMA CMD Queue Value register #3 0x3c */
-- u32 fmdmcqvr4; /* FM DMA CMD Queue Value register #4 0x40 */
-- u32 fmdmcqvr5; /* FM DMA CMD Queue Value register #5 0x44 */
-- u32 fmdmsefrc; /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
-- u32 fmdmsqfrc; /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
-- u32 fmdmssrc; /* FM DMA Semaphore SYNC Reject Counter 0x50 */
-- u32 fmdmdcr; /* FM DMA Debug Counter 0x54 */
-- u32 fmdmemsr; /* FM DMA Emergency Smoother Register 0x58 */
-- u32 res005c; /* 0x5c */
-- u32 fmdmplr[FMAN_LIODN_TBL / 2]; /* DMA LIODN regs 0x60-0xdf */
-- u32 res00e0[0x400 - 56];
--};
--
--/* Structure that holds current FMan state.
-- * Used for saving run time information.
-- */
--struct fman_state_struct {
-- u8 fm_id;
-- u16 fm_clk_freq;
-- struct fman_rev_info rev_info;
-- bool enabled_time_stamp;
-- u8 count1_micro_bit;
-- u8 total_num_of_tasks;
-- u8 accumulated_num_of_tasks;
-- u32 accumulated_fifo_size;
-- u8 accumulated_num_of_open_dmas;
-- u8 accumulated_num_of_deq_tnums;
-- u32 exceptions;
-- u32 extra_fifo_pool_size;
-- u8 extra_tasks_pool_size;
-- u8 extra_open_dmas_pool_size;
-- u16 port_mfl[MAX_NUM_OF_MACS];
-- u16 mac_mfl[MAX_NUM_OF_MACS];
--
-- /* SOC specific */
-- u32 fm_iram_size;
-- /* DMA */
-- u32 dma_thresh_max_commq;
-- u32 dma_thresh_max_buf;
-- u32 max_num_of_open_dmas;
-- /* QMI */
-- u32 qmi_max_num_of_tnums;
-- u32 qmi_def_tnums_thresh;
-- /* BMI */
-- u32 bmi_max_num_of_tasks;
-- u32 bmi_max_fifo_size;
-- /* General */
-- u32 fm_port_num_of_cg;
-- u32 num_of_rx_ports;
-- u32 total_fifo_size;
--
-- u32 qman_channel_base;
-- u32 num_of_qman_channels;
--
-- struct resource *res;
--};
--
--/* Structure that holds FMan initial configuration */
--struct fman_cfg {
-- u8 disp_limit_tsh;
-- u8 prs_disp_tsh;
-- u8 plcr_disp_tsh;
-- u8 kg_disp_tsh;
-- u8 bmi_disp_tsh;
-- u8 qmi_enq_disp_tsh;
-- u8 qmi_deq_disp_tsh;
-- u8 fm_ctl1_disp_tsh;
-- u8 fm_ctl2_disp_tsh;
-- int dma_cache_override;
-- enum fman_dma_aid_mode dma_aid_mode;
-- u32 dma_axi_dbg_num_of_beats;
-- u32 dma_cam_num_of_entries;
-- u32 dma_watchdog;
-- u8 dma_comm_qtsh_asrt_emer;
-- u32 dma_write_buf_tsh_asrt_emer;
-- u32 dma_read_buf_tsh_asrt_emer;
-- u8 dma_comm_qtsh_clr_emer;
-- u32 dma_write_buf_tsh_clr_emer;
-- u32 dma_read_buf_tsh_clr_emer;
-- u32 dma_sos_emergency;
-- int dma_dbg_cnt_mode;
-- int catastrophic_err;
-- int dma_err;
-- u32 exceptions;
-- u16 clk_freq;
-- u32 cam_base_addr;
-- u32 fifo_base_addr;
-- u32 total_fifo_size;
-- u32 total_num_of_tasks;
-- u32 qmi_def_tnums_thresh;
--};
--
--/* Structure that holds information received from device tree */
--struct fman_dts_params {
-- void __iomem *base_addr; /* FMan virtual address */
-- struct resource *res; /* FMan memory resource */
-- u8 id; /* FMan ID */
--
-- int err_irq; /* FMan Error IRQ */
--
-- u16 clk_freq; /* FMan clock freq (In Mhz) */
--
-- u32 qman_channel_base; /* QMan channels base */
-- u32 num_of_qman_channels; /* Number of QMan channels */
--
-- struct resource muram_res; /* MURAM resource */
--};
--
--/** fman_exceptions_cb
-- * fman - Pointer to FMan
-- * exception - The exception.
-- *
-- * Exceptions user callback routine, will be called upon an exception
-- * passing the exception identification.
-- *
-- * Return: irq status
-- */
--typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
-- enum fman_exceptions exception);
--
--/** fman_bus_error_cb
-- * fman - Pointer to FMan
-- * port_id - Port id
-- * addr - Address that caused the error
-- * tnum - Owner of error
-- * liodn - Logical IO device number
-- *
-- * Bus error user callback routine, will be called upon bus error,
-- * passing parameters describing the errors and the owner.
-- *
-- * Return: IRQ status
-- */
--typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
-- u64 addr, u8 tnum, u16 liodn);
--
--struct fman {
-- struct device *dev;
-- void __iomem *base_addr;
-- struct fman_intr_src intr_mng[FMAN_EV_CNT];
--
-- struct fman_fpm_regs __iomem *fpm_regs;
-- struct fman_bmi_regs __iomem *bmi_regs;
-- struct fman_qmi_regs __iomem *qmi_regs;
-- struct fman_dma_regs __iomem *dma_regs;
-- fman_exceptions_cb *exception_cb;
-- fman_bus_error_cb *bus_error_cb;
-- /* Spinlock for FMan use */
-- spinlock_t spinlock;
-- struct fman_state_struct *state;
--
-- struct fman_cfg *cfg;
-- struct muram_info *muram;
-- /* cam section in muram */
-- unsigned long cam_offset;
-- size_t cam_size;
-- /* Fifo in MURAM */
-- unsigned long fifo_offset;
-- size_t fifo_size;
--
-- u32 liodn_base[64];
-- u32 liodn_offset[64];
--
-- struct fman_dts_params dts_params;
--};
--
--static irqreturn_t fman_exceptions(struct fman *fman,
-- enum fman_exceptions exception)
--{
-- dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
-- __func__, fman->state->fm_id, exception);
--
-- return IRQ_HANDLED;
--}
--
--static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
-- u64 __maybe_unused addr,
-- u8 __maybe_unused tnum,
-- u16 __maybe_unused liodn)
--{
-- dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
-- __func__, fman->state->fm_id, port_id);
--
-- return IRQ_HANDLED;
--}
--
--static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
--{
-- if (fman->intr_mng[id].isr_cb) {
-- fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
--
-- return IRQ_HANDLED;
-- }
--
-- return IRQ_NONE;
--}
--
--static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
--{
-- u8 sw_port_id = 0;
--
-- if (hw_port_id >= BASE_TX_PORTID)
-- sw_port_id = hw_port_id - BASE_TX_PORTID;
-- else if (hw_port_id >= BASE_RX_PORTID)
-- sw_port_id = hw_port_id - BASE_RX_PORTID;
-- else
-- sw_port_id = 0;
--
-- return sw_port_id;
--}
--
--static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
-- u8 port_id)
--{
-- u32 tmp = 0;
--
-- tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
--
-- tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
--
-- /* order restoration */
-- if (port_id % 2)
-- tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
-- else
-- tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
--
-- iowrite32be(tmp, &fpm_rg->fmfp_prc);
--}
--
--static void set_port_liodn(struct fman *fman, u8 port_id,
-- u32 liodn_base, u32 liodn_ofst)
--{
-- u32 tmp;
--
-- /* set LIODN base for this port */
-- tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
-- if (port_id % 2) {
-- tmp &= ~DMA_LIODN_BASE_MASK;
-- tmp |= liodn_base;
-- } else {
-- tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
-- tmp |= liodn_base << DMA_LIODN_SHIFT;
-- }
-- iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
-- iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
--}
--
--static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
--{
-- u32 tmp;
--
-- tmp = ioread32be(&fpm_rg->fm_rcr);
-- if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
-- iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
-- else
-- iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
-- FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
--}
--
--static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
--{
-- u32 tmp;
--
-- tmp = ioread32be(&fpm_rg->fm_rcr);
-- if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
-- iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
-- else
-- iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
-- &fpm_rg->fm_rcr);
--}
--
--static void fman_defconfig(struct fman_cfg *cfg)
--{
-- memset(cfg, 0, sizeof(struct fman_cfg));
--
-- cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
-- cfg->dma_err = DEFAULT_DMA_ERR;
-- cfg->dma_aid_mode = DEFAULT_AID_MODE;
-- cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
-- cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
-- cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
-- cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
-- cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
-- cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
-- cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
-- cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
-- cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
-- cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
-- cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
-- cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
-- cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
-- cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
-- cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
-- cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
--}
--
--static int dma_init(struct fman *fman)
--{
-- struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
-- struct fman_cfg *cfg = fman->cfg;
-- u32 tmp_reg;
--
-- /* Init DMA Registers */
--
-- /* clear status reg events */
-- tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
-- DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
-- iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);
--
-- /* configure mode register */
-- tmp_reg = 0;
-- tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
-- if (cfg->exceptions & EX_DMA_BUS_ERROR)
-- tmp_reg |= DMA_MODE_BER;
-- if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
-- (cfg->exceptions & EX_DMA_READ_ECC) |
-- (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
-- tmp_reg |= DMA_MODE_ECC;
-- if (cfg->dma_axi_dbg_num_of_beats)
-- tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
-- ((cfg->dma_axi_dbg_num_of_beats - 1)
-- << DMA_MODE_AXI_DBG_SHIFT));
--
-- tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
-- DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
-- tmp_reg |= DMA_MODE_SECURE_PROT;
-- tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
-- tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
--
-- iowrite32be(tmp_reg, &dma_rg->fmdmmr);
--
-- /* configure thresholds register */
-- tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
-- DMA_THRESH_COMMQ_SHIFT);
-- tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
-- DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
-- tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
-- DMA_THRESH_WRITE_INT_BUF_MASK;
--
-- iowrite32be(tmp_reg, &dma_rg->fmdmtr);
--
-- /* configure hysteresis register */
-- tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
-- DMA_THRESH_COMMQ_SHIFT);
-- tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
-- DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
-- tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
-- DMA_THRESH_WRITE_INT_BUF_MASK;
--
-- iowrite32be(tmp_reg, &dma_rg->fmdmhy);
--
-- /* configure emergency threshold */
-- iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
--
-- /* configure Watchdog */
-- iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
--
-- iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
--
-- /* Allocate MURAM for CAM */
-- fman->cam_size =
-- (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
-- fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
-- if (IS_ERR_VALUE(fman->cam_offset)) {
-- dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
-- __func__);
-- return -ENOMEM;
-- }
--
-- if (fman->state->rev_info.major == 2) {
-- u32 __iomem *cam_base_addr;
--
-- fman_muram_free_mem(fman->muram, fman->cam_offset,
-- fman->cam_size);
--
-- fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
-- fman->cam_offset = fman_muram_alloc(fman->muram,
-- fman->cam_size);
-- if (IS_ERR_VALUE(fman->cam_offset)) {
-- dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
-- __func__);
-- return -ENOMEM;
-- }
--
-- if (fman->cfg->dma_cam_num_of_entries % 8 ||
-- fman->cfg->dma_cam_num_of_entries > 32) {
-- dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
-- __func__);
-- return -EINVAL;
-- }
--
-- cam_base_addr = (u32 __iomem *)
-- fman_muram_offset_to_vbase(fman->muram,
-- fman->cam_offset);
-- iowrite32be(~((1 <<
-- (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
-- cam_base_addr);
-- }
--
-- fman->cfg->cam_base_addr = fman->cam_offset;
--
-- return 0;
--}
--
--static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
--{
-- u32 tmp_reg;
-- int i;
--
-- /* Init FPM Registers */
--
-- tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
-- iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
--
-- tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
-- ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
-- ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
-- ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
-- iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
--
-- tmp_reg =
-- (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
-- ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
-- ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
-- ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
-- iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
--
-- /* define exceptions and error behavior */
-- tmp_reg = 0;
-- /* Clear events */
-- tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
-- FPM_EV_MASK_SINGLE_ECC);
-- /* enable interrupts */
-- if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
-- tmp_reg |= FPM_EV_MASK_STALL_EN;
-- if (cfg->exceptions & EX_FPM_SINGLE_ECC)
-- tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
-- if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
-- tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
-- tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
-- tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
-- /* FMan is not halted upon external halt activation */
-- tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
-- /* Man is not halted upon Unrecoverable ECC error behavior */
-- tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
-- iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
--
-- /* clear all fmCtls event registers */
-- for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
-- iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
--
-- /* RAM ECC - enable and clear events */
-- /* first we need to clear all parser memory,
-- * as it is uninitialized and may cause ECC errors
-- */
-- /* event bits */
-- tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
--
-- iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
--
-- tmp_reg = 0;
-- if (cfg->exceptions & EX_IRAM_ECC) {
-- tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
-- enable_rams_ecc(fpm_rg);
-- }
-- if (cfg->exceptions & EX_MURAM_ECC) {
-- tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
-- enable_rams_ecc(fpm_rg);
-- }
-- iowrite32be(tmp_reg, &fpm_rg->fm_rie);
--}
--
--static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
-- struct fman_cfg *cfg)
--{
-- u32 tmp_reg;
--
-- /* Init BMI Registers */
--
-- /* define common resources */
-- tmp_reg = cfg->fifo_base_addr;
-- tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
--
-- tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
-- BMI_CFG1_FIFO_SIZE_SHIFT);
-- iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
--
-- tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
-- BMI_CFG2_TASKS_SHIFT;
-- /* num of DMA's will be dynamically updated when each port is set */
-- iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
--
-- /* define unmaskable exceptions, enable and clear events */
-- tmp_reg = 0;
-- iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
-- BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
-- BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
-- BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);
--
-- if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
-- tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
-- if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
-- tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
-- if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
-- tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
-- if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
-- tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
-- iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
--}
--
--static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
-- struct fman_cfg *cfg)
--{
-- u32 tmp_reg;
--
-- /* Init QMI Registers */
--
-- /* Clear error interrupt events */
--
-- iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
-- &qmi_rg->fmqm_eie);
-- tmp_reg = 0;
-- if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
-- tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
-- if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
-- tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
-- /* enable events */
-- iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
--
-- tmp_reg = 0;
-- /* Clear interrupt events */
-- iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
-- if (cfg->exceptions & EX_QMI_SINGLE_ECC)
-- tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
-- /* enable events */
-- iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
--}
--
--static int enable(struct fman *fman, struct fman_cfg *cfg)
--{
-- u32 cfg_reg = 0;
--
-- /* Enable all modules */
--
-- /* clear&enable global counters - calculate reg and save for later,
-- * because it's the same reg for QMI enable
-- */
-- cfg_reg = QMI_CFG_EN_COUNTERS;
--
-- /* Set enqueue and dequeue thresholds */
-- cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
--
-- iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
-- iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
-- &fman->qmi_regs->fmqm_gc);
--
-- return 0;
--}
--
--static int set_exception(struct fman *fman,
-- enum fman_exceptions exception, bool enable)
--{
-- u32 tmp;
--
-- switch (exception) {
-- case FMAN_EX_DMA_BUS_ERROR:
-- tmp = ioread32be(&fman->dma_regs->fmdmmr);
-- if (enable)
-- tmp |= DMA_MODE_BER;
-- else
-- tmp &= ~DMA_MODE_BER;
-- /* disable bus error */
-- iowrite32be(tmp, &fman->dma_regs->fmdmmr);
-- break;
-- case FMAN_EX_DMA_READ_ECC:
-- case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
-- case FMAN_EX_DMA_FM_WRITE_ECC:
-- tmp = ioread32be(&fman->dma_regs->fmdmmr);
-- if (enable)
-- tmp |= DMA_MODE_ECC;
-- else
-- tmp &= ~DMA_MODE_ECC;
-- iowrite32be(tmp, &fman->dma_regs->fmdmmr);
-- break;
-- case FMAN_EX_FPM_STALL_ON_TASKS:
-- tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
-- if (enable)
-- tmp |= FPM_EV_MASK_STALL_EN;
-- else
-- tmp &= ~FPM_EV_MASK_STALL_EN;
-- iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
-- break;
-- case FMAN_EX_FPM_SINGLE_ECC:
-- tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
-- if (enable)
-- tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
-- else
-- tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
-- iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
-- break;
-- case FMAN_EX_FPM_DOUBLE_ECC:
-- tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
-- if (enable)
-- tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
-- else
-- tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
-- iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
-- break;
-- case FMAN_EX_QMI_SINGLE_ECC:
-- tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
-- if (enable)
-- tmp |= QMI_INTR_EN_SINGLE_ECC;
-- else
-- tmp &= ~QMI_INTR_EN_SINGLE_ECC;
-- iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
-- break;
-- case FMAN_EX_QMI_DOUBLE_ECC:
-- tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
-- if (enable)
-- tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
-- else
-- tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
-- iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
-- break;
-- case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
-- tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
-- if (enable)
-- tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
-- else
-- tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
-- iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
-- break;
-- case FMAN_EX_BMI_LIST_RAM_ECC:
-- tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
-- if (enable)
-- tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
-- else
-- tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
-- iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
-- break;
-- case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
-- tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
-- if (enable)
-- tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
-- else
-- tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
-- iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
-- break;
-- case FMAN_EX_BMI_STATISTICS_RAM_ECC:
-- tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
-- if (enable)
-- tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
-- else
-- tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
-- iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
-- break;
-- case FMAN_EX_BMI_DISPATCH_RAM_ECC:
-- tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
-- if (enable)
-- tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
-- else
-- tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
-- iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
-- break;
-- case FMAN_EX_IRAM_ECC:
-- tmp = ioread32be(&fman->fpm_regs->fm_rie);
-- if (enable) {
-- /* enable ECC if not enabled */
-- enable_rams_ecc(fman->fpm_regs);
-- /* enable ECC interrupts */
-- tmp |= FPM_IRAM_ECC_ERR_EX_EN;
-- } else {
-- /* ECC mechanism may be disabled,
-- * depending on driver status
-- */
-- disable_rams_ecc(fman->fpm_regs);
-- tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
-- }
-- iowrite32be(tmp, &fman->fpm_regs->fm_rie);
-- break;
-- case FMAN_EX_MURAM_ECC:
-- tmp = ioread32be(&fman->fpm_regs->fm_rie);
-- if (enable) {
-- /* enable ECC if not enabled */
-- enable_rams_ecc(fman->fpm_regs);
-- /* enable ECC interrupts */
-- tmp |= FPM_MURAM_ECC_ERR_EX_EN;
-- } else {
-- /* ECC mechanism may be disabled,
-- * depending on driver status
-- */
-- disable_rams_ecc(fman->fpm_regs);
-- tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
-- }
-- iowrite32be(tmp, &fman->fpm_regs->fm_rie);
-- break;
-- default:
-- return -EINVAL;
-- }
-- return 0;
--}
--
--static void resume(struct fman_fpm_regs __iomem *fpm_rg)
--{
-- u32 tmp;
--
-- tmp = ioread32be(&fpm_rg->fmfp_ee);
-- /* clear tmp_reg event bits in order not to clear standing events */
-- tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
-- FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
-- tmp |= FPM_EV_MASK_RELEASE_FM;
--
-- iowrite32be(tmp, &fpm_rg->fmfp_ee);
--}
--
--static int fill_soc_specific_params(struct fman_state_struct *state)
--{
-- u8 minor = state->rev_info.minor;
-- /* P4080 - Major 2
-- * P2041/P3041/P5020/P5040 - Major 3
-- * Tx/Bx - Major 6
-- */
-- switch (state->rev_info.major) {
-- case 3:
-- state->bmi_max_fifo_size = 160 * 1024;
-- state->fm_iram_size = 64 * 1024;
-- state->dma_thresh_max_commq = 31;
-- state->dma_thresh_max_buf = 127;
-- state->qmi_max_num_of_tnums = 64;
-- state->qmi_def_tnums_thresh = 48;
-- state->bmi_max_num_of_tasks = 128;
-- state->max_num_of_open_dmas = 32;
-- state->fm_port_num_of_cg = 256;
-- state->num_of_rx_ports = 6;
-- state->total_fifo_size = 122 * 1024;
-- break;
--
-- case 2:
-- state->bmi_max_fifo_size = 160 * 1024;
-- state->fm_iram_size = 64 * 1024;
-- state->dma_thresh_max_commq = 31;
-- state->dma_thresh_max_buf = 127;
-- state->qmi_max_num_of_tnums = 64;
-- state->qmi_def_tnums_thresh = 48;
-- state->bmi_max_num_of_tasks = 128;
-- state->max_num_of_open_dmas = 32;
-- state->fm_port_num_of_cg = 256;
-- state->num_of_rx_ports = 5;
-- state->total_fifo_size = 100 * 1024;
-- break;
--
-- case 6:
-- state->dma_thresh_max_commq = 83;
-- state->dma_thresh_max_buf = 127;
-- state->qmi_max_num_of_tnums = 64;
-- state->qmi_def_tnums_thresh = 32;
-- state->fm_port_num_of_cg = 256;
--
-- /* FManV3L */
-- if (minor == 1 || minor == 4) {
-- state->bmi_max_fifo_size = 192 * 1024;
-- state->bmi_max_num_of_tasks = 64;
-- state->max_num_of_open_dmas = 32;
-- state->num_of_rx_ports = 5;
-- if (minor == 1)
-- state->fm_iram_size = 32 * 1024;
-- else
-- state->fm_iram_size = 64 * 1024;
-- state->total_fifo_size = 156 * 1024;
-- }
-- /* FManV3H */
-- else if (minor == 0 || minor == 2 || minor == 3) {
-- state->bmi_max_fifo_size = 384 * 1024;
-- state->fm_iram_size = 64 * 1024;
-- state->bmi_max_num_of_tasks = 128;
-- state->max_num_of_open_dmas = 84;
-- state->num_of_rx_ports = 8;
-- state->total_fifo_size = 295 * 1024;
-- } else {
-- pr_err("Unsupported FManv3 version\n");
-- return -EINVAL;
-- }
--
-- break;
-- default:
-- pr_err("Unsupported FMan version\n");
-- return -EINVAL;
-- }
--
-- return 0;
--}
--
--static bool is_init_done(struct fman_cfg *cfg)
--{
-- /* Checks if FMan driver parameters were initialized */
-- if (!cfg)
-- return true;
--
-- return false;
--}
--
--static void free_init_resources(struct fman *fman)
--{
-- if (fman->cam_offset)
-- fman_muram_free_mem(fman->muram, fman->cam_offset,
-- fman->cam_size);
-- if (fman->fifo_offset)
-- fman_muram_free_mem(fman->muram, fman->fifo_offset,
-- fman->fifo_size);
--}
--
--static irqreturn_t bmi_err_event(struct fman *fman)
--{
-- u32 event, mask, force;
-- struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
-- irqreturn_t ret = IRQ_NONE;
--
-- event = ioread32be(&bmi_rg->fmbm_ievr);
-- mask = ioread32be(&bmi_rg->fmbm_ier);
-- event &= mask;
-- /* clear the forced events */
-- force = ioread32be(&bmi_rg->fmbm_ifr);
-- if (force & event)
-- iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
-- /* clear the acknowledged events */
-- iowrite32be(event, &bmi_rg->fmbm_ievr);
--
-- if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
-- if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
-- if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
-- if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
--
-- return ret;
--}
--
--static irqreturn_t qmi_err_event(struct fman *fman)
--{
-- u32 event, mask, force;
-- struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
-- irqreturn_t ret = IRQ_NONE;
--
-- event = ioread32be(&qmi_rg->fmqm_eie);
-- mask = ioread32be(&qmi_rg->fmqm_eien);
-- event &= mask;
--
-- /* clear the forced events */
-- force = ioread32be(&qmi_rg->fmqm_eif);
-- if (force & event)
-- iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
-- /* clear the acknowledged events */
-- iowrite32be(event, &qmi_rg->fmqm_eie);
--
-- if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
-- if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
-- ret = fman->exception_cb(fman,
-- FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
--
-- return ret;
--}
--
--static irqreturn_t dma_err_event(struct fman *fman)
--{
-- u32 status, mask, com_id;
-- u8 tnum, port_id, relative_port_id;
-- u16 liodn;
-- struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
-- irqreturn_t ret = IRQ_NONE;
--
-- status = ioread32be(&dma_rg->fmdmsr);
-- mask = ioread32be(&dma_rg->fmdmmr);
--
-- /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
-- if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
-- status &= ~DMA_STATUS_BUS_ERR;
--
-- /* clear relevant bits if mask has no DMA_MODE_ECC */
-- if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
-- status &= ~(DMA_STATUS_FM_SPDAT_ECC |
-- DMA_STATUS_READ_ECC |
-- DMA_STATUS_SYSTEM_WRITE_ECC |
-- DMA_STATUS_FM_WRITE_ECC);
--
-- /* clear set events */
-- iowrite32be(status, &dma_rg->fmdmsr);
--
-- if (status & DMA_STATUS_BUS_ERR) {
-- u64 addr;
--
-- addr = (u64)ioread32be(&dma_rg->fmdmtal);
-- addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);
--
-- com_id = ioread32be(&dma_rg->fmdmtcid);
-- port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
-- DMA_TRANSFER_PORTID_SHIFT));
-- relative_port_id =
-- hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
-- tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
-- DMA_TRANSFER_TNUM_SHIFT);
-- liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
-- ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
-- liodn);
-- }
-- if (status & DMA_STATUS_FM_SPDAT_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
-- if (status & DMA_STATUS_READ_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
-- if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
-- if (status & DMA_STATUS_FM_WRITE_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
--
-- return ret;
--}
--
--static irqreturn_t fpm_err_event(struct fman *fman)
--{
-- u32 event;
-- struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
-- irqreturn_t ret = IRQ_NONE;
--
-- event = ioread32be(&fpm_rg->fmfp_ee);
-- /* clear the all occurred events */
-- iowrite32be(event, &fpm_rg->fmfp_ee);
--
-- if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
-- (event & FPM_EV_MASK_DOUBLE_ECC_EN))
-- ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
-- if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
-- ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
-- if ((event & FPM_EV_MASK_SINGLE_ECC) &&
-- (event & FPM_EV_MASK_SINGLE_ECC_EN))
-- ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
--
-- return ret;
--}
--
--static irqreturn_t muram_err_intr(struct fman *fman)
--{
-- u32 event, mask;
-- struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
-- irqreturn_t ret = IRQ_NONE;
--
-- event = ioread32be(&fpm_rg->fm_rcr);
-- mask = ioread32be(&fpm_rg->fm_rie);
--
-- /* clear MURAM event bit (do not clear IRAM event) */
-- iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
--
-- if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
-- ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
--
-- return ret;
--}
--
--static irqreturn_t qmi_event(struct fman *fman)
--{
-- u32 event, mask, force;
-- struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
-- irqreturn_t ret = IRQ_NONE;
--
-- event = ioread32be(&qmi_rg->fmqm_ie);
-- mask = ioread32be(&qmi_rg->fmqm_ien);
-- event &= mask;
-- /* clear the forced events */
-- force = ioread32be(&qmi_rg->fmqm_if);
-- if (force & event)
-- iowrite32be(force & ~event, &qmi_rg->fmqm_if);
-- /* clear the acknowledged events */
-- iowrite32be(event, &qmi_rg->fmqm_ie);
--
-- if (event & QMI_INTR_EN_SINGLE_ECC)
-- ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
--
-- return ret;
--}
--
--static void enable_time_stamp(struct fman *fman)
--{
-- struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
-- u16 fm_clk_freq = fman->state->fm_clk_freq;
-- u32 tmp, intgr, ts_freq;
-- u64 frac;
--
-- ts_freq = (u32)(1 << fman->state->count1_micro_bit);
-- /* configure timestamp so that bit 8 will count 1 microsecond
-- * Find effective count rate at TIMESTAMP least significant bits:
-- * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
-- * Find frequency ratio between effective count rate and the clock:
-- * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
-- * 256/600 = 0.4266666...
-- */
--
-- intgr = ts_freq / fm_clk_freq;
-- /* we multiply by 2^16 to keep the fraction of the division
-- * we do not div back, since we write this value as a fraction
-- * see spec
-- */
--
-- frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
-- /* we check remainder of the division in order to round up if not int */
-- if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
-- frac++;
--
-- tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
-- iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
--
-- /* enable timestamp with original clock */
-- iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
-- fman->state->enabled_time_stamp = true;
--}
--
--static int clear_iram(struct fman *fman)
--{
-- struct fman_iram_regs __iomem *iram;
-- int i, count;
--
-- iram = fman->base_addr + IMEM_OFFSET;
--
-- /* Enable the auto-increment */
-- iowrite32be(IRAM_IADD_AIE, &iram->iadd);
-- count = 100;
-- do {
-- udelay(1);
-- } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
-- if (count == 0)
-- return -EBUSY;
--
-- for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
-- iowrite32be(0xffffffff, &iram->idata);
--
-- iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
-- count = 100;
-- do {
-- udelay(1);
-- } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
-- if (count == 0)
-- return -EBUSY;
--
-- return 0;
--}
--
--static u32 get_exception_flag(enum fman_exceptions exception)
--{
-- u32 bit_mask;
--
-- switch (exception) {
-- case FMAN_EX_DMA_BUS_ERROR:
-- bit_mask = EX_DMA_BUS_ERROR;
-- break;
-- case FMAN_EX_DMA_SINGLE_PORT_ECC:
-- bit_mask = EX_DMA_SINGLE_PORT_ECC;
-- break;
-- case FMAN_EX_DMA_READ_ECC:
-- bit_mask = EX_DMA_READ_ECC;
-- break;
-- case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
-- bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
-- break;
-- case FMAN_EX_DMA_FM_WRITE_ECC:
-- bit_mask = EX_DMA_FM_WRITE_ECC;
-- break;
-- case FMAN_EX_FPM_STALL_ON_TASKS:
-- bit_mask = EX_FPM_STALL_ON_TASKS;
-- break;
-- case FMAN_EX_FPM_SINGLE_ECC:
-- bit_mask = EX_FPM_SINGLE_ECC;
-- break;
-- case FMAN_EX_FPM_DOUBLE_ECC:
-- bit_mask = EX_FPM_DOUBLE_ECC;
-- break;
-- case FMAN_EX_QMI_SINGLE_ECC:
-- bit_mask = EX_QMI_SINGLE_ECC;
-- break;
-- case FMAN_EX_QMI_DOUBLE_ECC:
-- bit_mask = EX_QMI_DOUBLE_ECC;
-- break;
-- case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
-- bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
-- break;
-- case FMAN_EX_BMI_LIST_RAM_ECC:
-- bit_mask = EX_BMI_LIST_RAM_ECC;
-- break;
-- case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
-- bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
-- break;
-- case FMAN_EX_BMI_STATISTICS_RAM_ECC:
-- bit_mask = EX_BMI_STATISTICS_RAM_ECC;
-- break;
-- case FMAN_EX_BMI_DISPATCH_RAM_ECC:
-- bit_mask = EX_BMI_DISPATCH_RAM_ECC;
-- break;
-- case FMAN_EX_MURAM_ECC:
-- bit_mask = EX_MURAM_ECC;
-- break;
-- default:
-- bit_mask = 0;
-- break;
-- }
--
-- return bit_mask;
--}
--
--static int get_module_event(enum fman_event_modules module, u8 mod_id,
-- enum fman_intr_type intr_type)
--{
-- int event;
--
-- switch (module) {
-- case FMAN_MOD_MAC:
-- if (intr_type == FMAN_INTR_TYPE_ERR)
-- event = FMAN_EV_ERR_MAC0 + mod_id;
-- else
-- event = FMAN_EV_MAC0 + mod_id;
-- break;
-- case FMAN_MOD_FMAN_CTRL:
-- if (intr_type == FMAN_INTR_TYPE_ERR)
-- event = FMAN_EV_CNT;
-- else
-- event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
-- break;
-- case FMAN_MOD_DUMMY_LAST:
-- event = FMAN_EV_CNT;
-- break;
-- default:
-- event = FMAN_EV_CNT;
-- break;
-- }
--
-- return event;
--}
--
--static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
-- u32 *extra_size_of_fifo)
--{
-- struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
-- u32 fifo = *size_of_fifo;
-- u32 extra_fifo = *extra_size_of_fifo;
-- u32 tmp;
--
-- /* if this is the first time a port requires extra_fifo_pool_size,
-- * the total extra_fifo_pool_size must be initialized to 1 buffer per
-- * port
-- */
-- if (extra_fifo && !fman->state->extra_fifo_pool_size)
-- fman->state->extra_fifo_pool_size =
-- fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;
--
-- fman->state->extra_fifo_pool_size =
-- max(fman->state->extra_fifo_pool_size, extra_fifo);
--
-- /* check that there are enough uncommitted fifo size */
-- if ((fman->state->accumulated_fifo_size + fifo) >
-- (fman->state->total_fifo_size -
-- fman->state->extra_fifo_pool_size)) {
-- dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
-- __func__);
-- return -EAGAIN;
-- }
--
-- /* Read, modify and write to HW */
-- tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
-- ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
-- BMI_EXTRA_FIFO_SIZE_SHIFT);
-- iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
--
-- /* update accumulated */
-- fman->state->accumulated_fifo_size += fifo;
--
-- return 0;
--}
--
--static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
-- u8 *num_of_extra_tasks)
--{
-- struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
-- u8 tasks = *num_of_tasks;
-- u8 extra_tasks = *num_of_extra_tasks;
-- u32 tmp;
--
-- if (extra_tasks)
-- fman->state->extra_tasks_pool_size =
-- max(fman->state->extra_tasks_pool_size, extra_tasks);
--
-- /* check that there are enough uncommitted tasks */
-- if ((fman->state->accumulated_num_of_tasks + tasks) >
-- (fman->state->total_num_of_tasks -
-- fman->state->extra_tasks_pool_size)) {
-- dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
-- __func__, fman->state->fm_id);
-- return -EAGAIN;
-- }
-- /* update accumulated */
-- fman->state->accumulated_num_of_tasks += tasks;
--
-- /* Write to HW */
-- tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
-- ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
-- tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
-- (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
-- iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
--
-- return 0;
--}
--
--static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
-- u8 *num_of_open_dmas,
-- u8 *num_of_extra_open_dmas)
--{
-- struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
-- u8 open_dmas = *num_of_open_dmas;
-- u8 extra_open_dmas = *num_of_extra_open_dmas;
-- u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
-- u32 tmp;
--
-- if (!open_dmas) {
-- /* Configuration according to values in the HW.
-- * read the current number of open Dma's
-- */
-- tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
-- current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
-- BMI_EXTRA_NUM_OF_DMAS_SHIFT);
--
-- tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
-- current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
-- BMI_NUM_OF_DMAS_SHIFT) + 1);
--
-- /* This is the first configuration and user did not
-- * specify value (!open_dmas), reset values will be used
-- * and we just save these values for resource management
-- */
-- fman->state->extra_open_dmas_pool_size =
-- (u8)max(fman->state->extra_open_dmas_pool_size,
-- current_extra_val);
-- fman->state->accumulated_num_of_open_dmas += current_val;
-- *num_of_open_dmas = current_val;
-- *num_of_extra_open_dmas = current_extra_val;
-- return 0;
-- }
--
-- if (extra_open_dmas > current_extra_val)
-- fman->state->extra_open_dmas_pool_size =
-- (u8)max(fman->state->extra_open_dmas_pool_size,
-- extra_open_dmas);
--
-- if ((fman->state->rev_info.major < 6) &&
-- (fman->state->accumulated_num_of_open_dmas - current_val +
-- open_dmas > fman->state->max_num_of_open_dmas)) {
-- dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
-- __func__, fman->state->fm_id);
-- return -EAGAIN;
-- } else if ((fman->state->rev_info.major >= 6) &&
-- !((fman->state->rev_info.major == 6) &&
-- (fman->state->rev_info.minor == 0)) &&
-- (fman->state->accumulated_num_of_open_dmas -
-- current_val + open_dmas >
-- fman->state->dma_thresh_max_commq + 1)) {
-- dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
-- __func__, fman->state->fm_id,
-- fman->state->dma_thresh_max_commq + 1);
-- return -EAGAIN;
-- }
--
-- WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
-- /* update acummulated */
-- fman->state->accumulated_num_of_open_dmas -= current_val;
-- fman->state->accumulated_num_of_open_dmas += open_dmas;
--
-- if (fman->state->rev_info.major < 6)
-- total_num_dmas =
-- (u8)(fman->state->accumulated_num_of_open_dmas +
-- fman->state->extra_open_dmas_pool_size);
--
-- /* calculate reg */
-- tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
-- ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
-- tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
-- (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
-- iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
--
-- /* update total num of DMA's with committed number of open DMAS,
-- * and max uncommitted pool.
-- */
-- if (total_num_dmas) {
-- tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
-- tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
-- iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
-- }
--
-- return 0;
--}
--
--static int fman_config(struct fman *fman)
--{
-- void __iomem *base_addr;
-- int err;
--
-- base_addr = fman->dts_params.base_addr;
--
-- fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
-- if (!fman->state)
-- goto err_fm_state;
--
-- /* Allocate the FM driver's parameters structure */
-- fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
-- if (!fman->cfg)
-- goto err_fm_drv;
--
-- /* Initialize MURAM block */
-- fman->muram =
-- fman_muram_init(fman->dts_params.muram_res.start,
-- resource_size(&fman->dts_params.muram_res));
-- if (!fman->muram)
-- goto err_fm_soc_specific;
--
-- /* Initialize FM parameters which will be kept by the driver */
-- fman->state->fm_id = fman->dts_params.id;
-- fman->state->fm_clk_freq = fman->dts_params.clk_freq;
-- fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
-- fman->state->num_of_qman_channels =
-- fman->dts_params.num_of_qman_channels;
-- fman->state->res = fman->dts_params.res;
-- fman->exception_cb = fman_exceptions;
-- fman->bus_error_cb = fman_bus_error;
-- fman->fpm_regs = base_addr + FPM_OFFSET;
-- fman->bmi_regs = base_addr + BMI_OFFSET;
-- fman->qmi_regs = base_addr + QMI_OFFSET;
-- fman->dma_regs = base_addr + DMA_OFFSET;
-- fman->base_addr = base_addr;
--
-- spin_lock_init(&fman->spinlock);
-- fman_defconfig(fman->cfg);
--
-- fman->state->extra_fifo_pool_size = 0;
-- fman->state->exceptions = (EX_DMA_BUS_ERROR |
-- EX_DMA_READ_ECC |
-- EX_DMA_SYSTEM_WRITE_ECC |
-- EX_DMA_FM_WRITE_ECC |
-- EX_FPM_STALL_ON_TASKS |
-- EX_FPM_SINGLE_ECC |
-- EX_FPM_DOUBLE_ECC |
-- EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
-- EX_BMI_LIST_RAM_ECC |
-- EX_BMI_STORAGE_PROFILE_ECC |
-- EX_BMI_STATISTICS_RAM_ECC |
-- EX_MURAM_ECC |
-- EX_BMI_DISPATCH_RAM_ECC |
-- EX_QMI_DOUBLE_ECC |
-- EX_QMI_SINGLE_ECC);
--
-- /* Read FMan revision for future use*/
-- fman_get_revision(fman, &fman->state->rev_info);
--
-- err = fill_soc_specific_params(fman->state);
-- if (err)
-- goto err_fm_soc_specific;
--
-- /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
-- if (fman->state->rev_info.major >= 6)
-- fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
--
-- fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
--
-- fman->state->total_num_of_tasks =
-- (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
-- fman->state->rev_info.minor,
-- fman->state->bmi_max_num_of_tasks);
--
-- if (fman->state->rev_info.major < 6) {
-- fman->cfg->dma_comm_qtsh_clr_emer =
-- (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
-- fman->state->dma_thresh_max_commq);
--
-- fman->cfg->dma_comm_qtsh_asrt_emer =
-- (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
-- fman->state->dma_thresh_max_commq);
--
-- fman->cfg->dma_cam_num_of_entries =
-- DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);
--
-- fman->cfg->dma_read_buf_tsh_clr_emer =
-- DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
--
-- fman->cfg->dma_read_buf_tsh_asrt_emer =
-- DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
--
-- fman->cfg->dma_write_buf_tsh_clr_emer =
-- DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
--
-- fman->cfg->dma_write_buf_tsh_asrt_emer =
-- DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
--
-- fman->cfg->dma_axi_dbg_num_of_beats =
-- DFLT_AXI_DBG_NUM_OF_BEATS;
-- }
--
-- return 0;
--
--err_fm_soc_specific:
-- kfree(fman->cfg);
--err_fm_drv:
-- kfree(fman->state);
--err_fm_state:
-- kfree(fman);
-- return -EINVAL;
--}
--
--static int fman_reset(struct fman *fman)
--{
-- u32 count;
-- int err = 0;
--
-- if (fman->state->rev_info.major < 6) {
-- iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
-- /* Wait for reset completion */
-- count = 100;
-- do {
-- udelay(1);
-- } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
-- FPM_RSTC_FM_RESET) && --count);
-- if (count == 0)
-- err = -EBUSY;
--
-- goto _return;
-- } else {
-- struct device_node *guts_node;
-- struct ccsr_guts __iomem *guts_regs;
-- u32 devdisr2, reg;
--
-- /* Errata A007273 */
-- guts_node =
-- of_find_compatible_node(NULL, NULL,
-- "fsl,qoriq-device-config-2.0");
-- if (!guts_node) {
-- dev_err(fman->dev, "%s: Couldn't find guts node\n",
-- __func__);
-- goto guts_node;
-- }
--
-- guts_regs = of_iomap(guts_node, 0);
-- if (!guts_regs) {
-- dev_err(fman->dev, "%s: Couldn't map %s regs\n",
-- __func__, guts_node->full_name);
-- goto guts_regs;
-- }
--#define FMAN1_ALL_MACS_MASK 0xFCC00000
--#define FMAN2_ALL_MACS_MASK 0x000FCC00
-- /* Read current state */
-- devdisr2 = ioread32be(&guts_regs->devdisr2);
-- if (fman->dts_params.id == 0)
-- reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
-- else
-- reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
--
-- /* Enable all MACs */
-- iowrite32be(reg, &guts_regs->devdisr2);
--
-- /* Perform FMan reset */
-- iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
--
-- /* Wait for reset completion */
-- count = 100;
-- do {
-- udelay(1);
-- } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
-- FPM_RSTC_FM_RESET) && --count);
-- if (count == 0) {
-- iounmap(guts_regs);
-- of_node_put(guts_node);
-- err = -EBUSY;
-- goto _return;
-- }
--
-- /* Restore devdisr2 value */
-- iowrite32be(devdisr2, &guts_regs->devdisr2);
--
-- iounmap(guts_regs);
-- of_node_put(guts_node);
--
-- goto _return;
--
--guts_regs:
-- of_node_put(guts_node);
--guts_node:
-- dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
-- __func__);
-- }
--_return:
-- return err;
--}
--
--static int fman_init(struct fman *fman)
--{
-- struct fman_cfg *cfg = NULL;
-- int err = 0, i, count;
--
-- if (is_init_done(fman->cfg))
-- return -EINVAL;
--
-- fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
--
-- cfg = fman->cfg;
--
-- /* clear revision-dependent non existing exception */
-- if (fman->state->rev_info.major < 6)
-- fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC;
--
-- if (fman->state->rev_info.major >= 6)
-- fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC;
--
-- /* clear CPG */
-- memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
-- fman->state->fm_port_num_of_cg);
--
-- /* Save LIODN info before FMan reset
-- * Skipping non-existent port 0 (i = 1)
-- */
-- for (i = 1; i < FMAN_LIODN_TBL; i++) {
-- u32 liodn_base;
--
-- fman->liodn_offset[i] =
-- ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
-- liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
-- if (i % 2) {
-- /* FMDM_PLR LSB holds LIODN base for odd ports */
-- liodn_base &= DMA_LIODN_BASE_MASK;
-- } else {
-- /* FMDM_PLR MSB holds LIODN base for even ports */
-- liodn_base >>= DMA_LIODN_SHIFT;
-- liodn_base &= DMA_LIODN_BASE_MASK;
-- }
-- fman->liodn_base[i] = liodn_base;
-- }
--
-- err = fman_reset(fman);
-- if (err)
-- return err;
--
-- if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
-- resume(fman->fpm_regs);
-- /* Wait until QMI is not in halt not busy state */
-- count = 100;
-- do {
-- udelay(1);
-- } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
-- QMI_GS_HALT_NOT_BUSY) && --count);
-- if (count == 0)
-- dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
-- __func__);
-- }
--
-- if (clear_iram(fman) != 0)
-- return -EINVAL;
--
-- cfg->exceptions = fman->state->exceptions;
--
-- /* Init DMA Registers */
--
-- err = dma_init(fman);
-- if (err != 0) {
-- free_init_resources(fman);
-- return err;
-- }
--
-- /* Init FPM Registers */
-- fpm_init(fman->fpm_regs, fman->cfg);
--
-- /* define common resources */
-- /* allocate MURAM for FIFO according to total size */
-- fman->fifo_offset = fman_muram_alloc(fman->muram,
-- fman->state->total_fifo_size);
-- if (IS_ERR_VALUE(fman->fifo_offset)) {
-- free_init_resources(fman);
-- dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
-- __func__);
-- return -ENOMEM;
-- }
--
-- cfg->fifo_base_addr = fman->fifo_offset;
-- cfg->total_fifo_size = fman->state->total_fifo_size;
-- cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
-- cfg->clk_freq = fman->state->fm_clk_freq;
--
-- /* Init BMI Registers */
-- bmi_init(fman->bmi_regs, fman->cfg);
--
-- /* Init QMI Registers */
-- qmi_init(fman->qmi_regs, fman->cfg);
--
-- err = enable(fman, cfg);
-- if (err != 0)
-- return err;
--
-- enable_time_stamp(fman);
--
-- kfree(fman->cfg);
-- fman->cfg = NULL;
--
-- return 0;
--}
--
--static int fman_set_exception(struct fman *fman,
-- enum fman_exceptions exception, bool enable)
--{
-- u32 bit_mask = 0;
--
-- if (!is_init_done(fman->cfg))
-- return -EINVAL;
--
-- bit_mask = get_exception_flag(exception);
-- if (bit_mask) {
-- if (enable)
-- fman->state->exceptions |= bit_mask;
-- else
-- fman->state->exceptions &= ~bit_mask;
-- } else {
-- dev_err(fman->dev, "%s: Undefined exception (%d)\n",
-- __func__, exception);
-- return -EINVAL;
-- }
--
-- return set_exception(fman, exception, enable);
--}
--
--/**
-- * fman_register_intr
-- * @fman: A Pointer to FMan device
-- * @mod: Calling module
-- * @mod_id: Module id (if more than 1 exists, '0' if not)
-- * @intr_type: Interrupt type (error/normal) selection.
-- * @f_isr: The interrupt service routine.
-- * @h_src_arg: Argument to be passed to f_isr.
-- *
-- * Used to register an event handler to be processed by FMan
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--void fman_register_intr(struct fman *fman, enum fman_event_modules module,
-- u8 mod_id, enum fman_intr_type intr_type,
-- void (*isr_cb)(void *src_arg), void *src_arg)
--{
-- int event = 0;
--
-- event = get_module_event(module, mod_id, intr_type);
-- WARN_ON(event >= FMAN_EV_CNT);
--
-- /* register in local FM structure */
-- fman->intr_mng[event].isr_cb = isr_cb;
-- fman->intr_mng[event].src_handle = src_arg;
--}
--EXPORT_SYMBOL(fman_register_intr);
--
--/**
-- * fman_unregister_intr
-- * @fman: A Pointer to FMan device
-- * @mod: Calling module
-- * @mod_id: Module id (if more than 1 exists, '0' if not)
-- * @intr_type: Interrupt type (error/normal) selection.
-- *
-- * Used to unregister an event handler to be processed by FMan
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
-- u8 mod_id, enum fman_intr_type intr_type)
--{
-- int event = 0;
--
-- event = get_module_event(module, mod_id, intr_type);
-- WARN_ON(event >= FMAN_EV_CNT);
--
-- fman->intr_mng[event].isr_cb = NULL;
-- fman->intr_mng[event].src_handle = NULL;
--}
--EXPORT_SYMBOL(fman_unregister_intr);
--
--/**
-- * fman_set_port_params
-- * @fman: A Pointer to FMan device
-- * @port_params: Port parameters
-- *
-- * Used by FMan Port to pass parameters to the FMan
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_set_port_params(struct fman *fman,
-- struct fman_port_init_params *port_params)
--{
-- int err;
-- unsigned long flags;
-- u8 port_id = port_params->port_id, mac_id;
--
-- spin_lock_irqsave(&fman->spinlock, flags);
--
-- err = set_num_of_tasks(fman, port_params->port_id,
-- &port_params->num_of_tasks,
-- &port_params->num_of_extra_tasks);
-- if (err)
-- goto return_err;
--
-- /* TX Ports */
-- if (port_params->port_type != FMAN_PORT_TYPE_RX) {
-- u32 enq_th, deq_th, reg;
--
-- /* update qmi ENQ/DEQ threshold */
-- fman->state->accumulated_num_of_deq_tnums +=
-- port_params->deq_pipeline_depth;
-- enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
-- QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
-- /* if enq_th is too big, we reduce it to the max value
-- * that is still 0
-- */
-- if (enq_th >= (fman->state->qmi_max_num_of_tnums -
-- fman->state->accumulated_num_of_deq_tnums)) {
-- enq_th =
-- fman->state->qmi_max_num_of_tnums -
-- fman->state->accumulated_num_of_deq_tnums - 1;
--
-- reg = ioread32be(&fman->qmi_regs->fmqm_gc);
-- reg &= ~QMI_CFG_ENQ_MASK;
-- reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
-- iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
-- }
--
-- deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
-- QMI_CFG_DEQ_MASK;
-- /* if deq_th is too small, we enlarge it to the min
-- * value that is still 0.
-- * depTh may not be larger than 63
-- * (fman->state->qmi_max_num_of_tnums-1).
-- */
-- if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
-- (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
-- deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
-- reg = ioread32be(&fman->qmi_regs->fmqm_gc);
-- reg &= ~QMI_CFG_DEQ_MASK;
-- reg |= deq_th;
-- iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
-- }
-- }
--
-- err = set_size_of_fifo(fman, port_params->port_id,
-- &port_params->size_of_fifo,
-- &port_params->extra_size_of_fifo);
-- if (err)
-- goto return_err;
--
-- err = set_num_of_open_dmas(fman, port_params->port_id,
-- &port_params->num_of_open_dmas,
-- &port_params->num_of_extra_open_dmas);
-- if (err)
-- goto return_err;
--
-- set_port_liodn(fman, port_id, fman->liodn_base[port_id],
-- fman->liodn_offset[port_id]);
--
-- if (fman->state->rev_info.major < 6)
-- set_port_order_restoration(fman->fpm_regs, port_id);
--
-- mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
--
-- if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
-- fman->state->port_mfl[mac_id] = port_params->max_frame_length;
-- } else {
-- dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
-- __func__, port_id, mac_id);
-- err = -EINVAL;
-- goto return_err;
-- }
--
-- spin_unlock_irqrestore(&fman->spinlock, flags);
--
-- return 0;
--
--return_err:
-- spin_unlock_irqrestore(&fman->spinlock, flags);
-- return err;
--}
--EXPORT_SYMBOL(fman_set_port_params);
--
--/**
-- * fman_reset_mac
-- * @fman: A Pointer to FMan device
-- * @mac_id: MAC id to be reset
-- *
-- * Reset a specific MAC
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_reset_mac(struct fman *fman, u8 mac_id)
--{
-- struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
-- u32 msk, timeout = 100;
--
-- if (fman->state->rev_info.major >= 6) {
-- dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n",
-- __func__);
-- return -EINVAL;
-- }
--
-- /* Get the relevant bit mask */
-- switch (mac_id) {
-- case 0:
-- msk = FPM_RSTC_MAC0_RESET;
-- break;
-- case 1:
-- msk = FPM_RSTC_MAC1_RESET;
-- break;
-- case 2:
-- msk = FPM_RSTC_MAC2_RESET;
-- break;
-- case 3:
-- msk = FPM_RSTC_MAC3_RESET;
-- break;
-- case 4:
-- msk = FPM_RSTC_MAC4_RESET;
-- break;
-- case 5:
-- msk = FPM_RSTC_MAC5_RESET;
-- break;
-- case 6:
-- msk = FPM_RSTC_MAC6_RESET;
-- break;
-- case 7:
-- msk = FPM_RSTC_MAC7_RESET;
-- break;
-- case 8:
-- msk = FPM_RSTC_MAC8_RESET;
-- break;
-- case 9:
-- msk = FPM_RSTC_MAC9_RESET;
-- break;
-- default:
-- dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
-- __func__, mac_id);
-- return -EINVAL;
-- }
--
-- /* reset */
-- iowrite32be(msk, &fpm_rg->fm_rstc);
-- while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
-- udelay(10);
--
-- if (!timeout)
-- return -EIO;
--
-- return 0;
--}
--EXPORT_SYMBOL(fman_reset_mac);
--
--/**
-- * fman_set_mac_max_frame
-- * @fman: A Pointer to FMan device
-- * @mac_id: MAC id
-- * @mfl: Maximum frame length
-- *
-- * Set maximum frame length of specific MAC in FMan driver
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
--{
-- /* if port is already initialized, check that MaxFrameLength is smaller
-- * or equal to the port's max
-- */
-- if ((!fman->state->port_mfl[mac_id]) ||
-- (mfl <= fman->state->port_mfl[mac_id])) {
-- fman->state->mac_mfl[mac_id] = mfl;
-- } else {
-- dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
-- __func__);
-- return -EINVAL;
-- }
-- return 0;
--}
--EXPORT_SYMBOL(fman_set_mac_max_frame);
--
--/**
-- * fman_get_clock_freq
-- * @fman: A Pointer to FMan device
-- *
-- * Get FMan clock frequency
-- *
-- * Return: FMan clock frequency
-- */
--u16 fman_get_clock_freq(struct fman *fman)
--{
-- return fman->state->fm_clk_freq;
--}
--
--/**
-- * fman_get_bmi_max_fifo_size
-- * @fman: A Pointer to FMan device
-- *
-- * Get FMan maximum FIFO size
-- *
-- * Return: FMan Maximum FIFO size
-- */
--u32 fman_get_bmi_max_fifo_size(struct fman *fman)
--{
-- return fman->state->bmi_max_fifo_size;
--}
--EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
--
--/**
-- * fman_get_revision
-- * @fman - Pointer to the FMan module
-- * @rev_info - A structure of revision information parameters.
-- *
-- * Returns the FM revision
-- *
-- * Allowed only following fman_init().
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
--{
-- u32 tmp;
--
-- tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
-- rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
-- FPM_REV1_MAJOR_SHIFT);
-- rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
--}
--EXPORT_SYMBOL(fman_get_revision);
--
--/**
-- * fman_get_qman_channel_id
-- * @fman: A Pointer to FMan device
-- * @port_id: Port id
-- *
-- * Get QMan channel ID associated to the Port id
-- *
-- * Return: QMan channel ID
-- */
--u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
--{
-- int i;
--
-- if (fman->state->rev_info.major >= 6) {
-- u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
-- 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
-- for (i = 0; i < fman->state->num_of_qman_channels; i++) {
-- if (port_ids[i] == port_id)
-- break;
-- }
-- } else {
-- u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
-- 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
-- for (i = 0; i < fman->state->num_of_qman_channels; i++) {
-- if (port_ids[i] == port_id)
-- break;
-- }
-- }
--
-- if (i == fman->state->num_of_qman_channels)
-- return 0;
--
-- return fman->state->qman_channel_base + i;
--}
--EXPORT_SYMBOL(fman_get_qman_channel_id);
--
--/**
-- * fman_get_mem_region
-- * @fman: A Pointer to FMan device
-- *
-- * Get FMan memory region
-- *
-- * Return: A structure with FMan memory region information
-- */
--struct resource *fman_get_mem_region(struct fman *fman)
--{
-- return fman->state->res;
--}
--EXPORT_SYMBOL(fman_get_mem_region);
--
--/* Bootargs defines */
--/* Extra headroom for RX buffers - Default, min and max */
--#define FSL_FM_RX_EXTRA_HEADROOM 64
--#define FSL_FM_RX_EXTRA_HEADROOM_MIN 16
--#define FSL_FM_RX_EXTRA_HEADROOM_MAX 384
--
--/* Maximum frame length */
--#define FSL_FM_MAX_FRAME_SIZE 1522
--#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE 9600
--#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE 64
--
--/* Extra headroom for Rx buffers.
-- * FMan is instructed to allocate, on the Rx path, this amount of
-- * space at the beginning of a data buffer, beside the DPA private
-- * data area and the IC fields.
-- * Does not impact Tx buffer layout.
-- * Configurable from bootargs. 64 by default, it's needed on
-- * particular forwarding scenarios that add extra headers to the
-- * forwarded frame.
-- */
--static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
--module_param(fsl_fm_rx_extra_headroom, int, 0);
--MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
--
--/* Max frame size, across all interfaces.
-- * Configurable from bootargs, to avoid allocating oversized (socket)
-- * buffers when not using jumbo frames.
-- * Must be large enough to accommodate the network MTU, but small enough
-- * to avoid wasting skb memory.
-- *
-- * Could be overridden once, at boot-time, via the
-- * fm_set_max_frm() callback.
-- */
--static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
--module_param(fsl_fm_max_frm, int, 0);
--MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
--
--/**
-- * fman_get_max_frm
-- *
-- * Return: Max frame length configured in the FM driver
-- */
--u16 fman_get_max_frm(void)
--{
-- static bool fm_check_mfl;
--
-- if (!fm_check_mfl) {
-- if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
-- fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
-- pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
-- fsl_fm_max_frm,
-- FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
-- FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
-- FSL_FM_MAX_FRAME_SIZE);
-- fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
-- }
-- fm_check_mfl = true;
-- }
--
-- return fsl_fm_max_frm;
--}
--EXPORT_SYMBOL(fman_get_max_frm);
--
--/**
-- * fman_get_rx_extra_headroom
-- *
-- * Return: Extra headroom size configured in the FM driver
-- */
--int fman_get_rx_extra_headroom(void)
--{
-- static bool fm_check_rx_extra_headroom;
--
-- if (!fm_check_rx_extra_headroom) {
-- if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
-- fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
-- pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
-- fsl_fm_rx_extra_headroom,
-- FSL_FM_RX_EXTRA_HEADROOM_MIN,
-- FSL_FM_RX_EXTRA_HEADROOM_MAX,
-- FSL_FM_RX_EXTRA_HEADROOM);
-- fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
-- }
--
-- fm_check_rx_extra_headroom = true;
-- fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
-- }
--
-- return fsl_fm_rx_extra_headroom;
--}
--EXPORT_SYMBOL(fman_get_rx_extra_headroom);
--
--/**
-- * fman_bind
-- * @dev: FMan OF device pointer
-- *
-- * Bind to a specific FMan device.
-- *
-- * Allowed only after the port was created.
-- *
-- * Return: A pointer to the FMan device
-- */
--struct fman *fman_bind(struct device *fm_dev)
--{
-- return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
--}
--EXPORT_SYMBOL(fman_bind);
--
--static irqreturn_t fman_err_irq(int irq, void *handle)
--{
-- struct fman *fman = (struct fman *)handle;
-- u32 pending;
-- struct fman_fpm_regs __iomem *fpm_rg;
-- irqreturn_t single_ret, ret = IRQ_NONE;
--
-- if (!is_init_done(fman->cfg))
-- return IRQ_NONE;
--
-- fpm_rg = fman->fpm_regs;
--
-- /* error interrupts */
-- pending = ioread32be(&fpm_rg->fm_epi);
-- if (!pending)
-- return IRQ_NONE;
--
-- if (pending & ERR_INTR_EN_BMI) {
-- single_ret = bmi_err_event(fman);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_QMI) {
-- single_ret = qmi_err_event(fman);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_FPM) {
-- single_ret = fpm_err_event(fman);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_DMA) {
-- single_ret = dma_err_event(fman);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MURAM) {
-- single_ret = muram_err_intr(fman);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
--
-- /* MAC error interrupts */
-- if (pending & ERR_INTR_EN_MAC0) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC1) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC2) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC3) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC4) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC5) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC6) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC7) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC8) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & ERR_INTR_EN_MAC9) {
-- single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
--
-- return ret;
--}
--
--static irqreturn_t fman_irq(int irq, void *handle)
--{
-- struct fman *fman = (struct fman *)handle;
-- u32 pending;
-- struct fman_fpm_regs __iomem *fpm_rg;
-- irqreturn_t single_ret, ret = IRQ_NONE;
--
-- if (!is_init_done(fman->cfg))
-- return IRQ_NONE;
--
-- fpm_rg = fman->fpm_regs;
--
-- /* normal interrupts */
-- pending = ioread32be(&fpm_rg->fm_npi);
-- if (!pending)
-- return IRQ_NONE;
--
-- if (pending & INTR_EN_QMI) {
-- single_ret = qmi_event(fman);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
--
-- /* MAC interrupts */
-- if (pending & INTR_EN_MAC0) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC1) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC2) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC3) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC4) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC5) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC6) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC7) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC8) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
-- if (pending & INTR_EN_MAC9) {
-- single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
-- if (single_ret == IRQ_HANDLED)
-- ret = IRQ_HANDLED;
-- }
--
-- return ret;
--}
--
--static const struct of_device_id fman_muram_match[] = {
-- {
-- .compatible = "fsl,fman-muram"},
-- {}
--};
--MODULE_DEVICE_TABLE(of, fman_muram_match);
--
--static struct fman *read_dts_node(struct platform_device *of_dev)
--{
-- struct fman *fman;
-- struct device_node *fm_node, *muram_node;
-- struct resource *res;
-- u32 val, range[2];
-- int err, irq;
-- struct clk *clk;
-- u32 clk_rate;
-- phys_addr_t phys_base_addr;
-- resource_size_t mem_size;
--
-- fman = kzalloc(sizeof(*fman), GFP_KERNEL);
-- if (!fman)
-- return NULL;
--
-- fm_node = of_node_get(of_dev->dev.of_node);
--
-- err = of_property_read_u32(fm_node, "cell-index", &val);
-- if (err) {
-- dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
-- __func__, fm_node->full_name);
-- goto fman_node_put;
-- }
-- fman->dts_params.id = (u8)val;
--
-- /* Get the FM interrupt */
-- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
-- if (!res) {
-- dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
-- __func__);
-- goto fman_node_put;
-- }
-- irq = res->start;
--
-- /* Get the FM error interrupt */
-- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
-- if (!res) {
-- dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
-- __func__);
-- goto fman_node_put;
-- }
-- fman->dts_params.err_irq = res->start;
--
-- /* Get the FM address */
-- res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
-- if (!res) {
-- dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
-- __func__);
-- goto fman_node_put;
-- }
--
-- phys_base_addr = res->start;
-- mem_size = resource_size(res);
--
-- clk = of_clk_get(fm_node, 0);
-- if (IS_ERR(clk)) {
-- dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
-- __func__, fman->dts_params.id);
-- goto fman_node_put;
-- }
--
-- clk_rate = clk_get_rate(clk);
-- if (!clk_rate) {
-- dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
-- __func__, fman->dts_params.id);
-- goto fman_node_put;
-- }
-- /* Rounding to MHz */
-- fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
--
-- err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
-- &range[0], 2);
-- if (err) {
-- dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
-- __func__, fm_node->full_name);
-- goto fman_node_put;
-- }
-- fman->dts_params.qman_channel_base = range[0];
-- fman->dts_params.num_of_qman_channels = range[1];
--
-- /* Get the MURAM base address and size */
-- muram_node = of_find_matching_node(fm_node, fman_muram_match);
-- if (!muram_node) {
-- dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
-- __func__);
-- goto fman_node_put;
-- }
--
-- err = of_address_to_resource(muram_node, 0,
-- &fman->dts_params.muram_res);
-- if (err) {
-- of_node_put(muram_node);
-- dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
-- __func__, err);
-- goto fman_node_put;
-- }
--
-- of_node_put(muram_node);
-- of_node_put(fm_node);
--
-- err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
-- if (err < 0) {
-- dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
-- __func__, irq, err);
-- goto fman_free;
-- }
--
-- if (fman->dts_params.err_irq != 0) {
-- err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
-- fman_err_irq, IRQF_SHARED,
-- "fman-err", fman);
-- if (err < 0) {
-- dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
-- __func__, fman->dts_params.err_irq, err);
-- goto fman_free;
-- }
-- }
--
-- fman->dts_params.res =
-- devm_request_mem_region(&of_dev->dev, phys_base_addr,
-- mem_size, "fman");
-- if (!fman->dts_params.res) {
-- dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
-- __func__);
-- goto fman_free;
-- }
--
-- fman->dts_params.base_addr =
-- devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
-- if (!fman->dts_params.base_addr) {
-- dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
-- goto fman_free;
-- }
--
-- fman->dev = &of_dev->dev;
--
-- return fman;
--
--fman_node_put:
-- of_node_put(fm_node);
--fman_free:
-- kfree(fman);
-- return NULL;
--}
--
--static int fman_probe(struct platform_device *of_dev)
--{
-- struct fman *fman;
-- struct device *dev;
-- int err;
--
-- dev = &of_dev->dev;
--
-- fman = read_dts_node(of_dev);
-- if (!fman)
-- return -EIO;
--
-- err = fman_config(fman);
-- if (err) {
-- dev_err(dev, "%s: FMan config failed\n", __func__);
-- return -EINVAL;
-- }
--
-- if (fman_init(fman) != 0) {
-- dev_err(dev, "%s: FMan init failed\n", __func__);
-- return -EINVAL;
-- }
--
-- if (fman->dts_params.err_irq == 0) {
-- fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
-- fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
-- fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
-- fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
-- fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
-- fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
-- fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
-- fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
-- fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
-- fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
-- fman_set_exception(fman,
-- FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
-- fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
-- fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
-- false);
-- fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
-- fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
-- }
--
-- dev_set_drvdata(dev, fman);
--
-- dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
--
-- return 0;
--}
--
--static const struct of_device_id fman_match[] = {
-- {
-- .compatible = "fsl,fman"},
-- {}
--};
--
--MODULE_DEVICE_TABLE(of, fman_match);
--
--static struct platform_driver fman_driver = {
-- .driver = {
-- .name = "fsl-fman",
-- .of_match_table = fman_match,
-- },
-- .probe = fman_probe,
--};
--
--static int __init fman_load(void)
--{
-- int err;
--
-- pr_debug("FSL DPAA FMan driver\n");
--
-- err = platform_driver_register(&fman_driver);
-- if (err < 0)
-- pr_err("Error, platform_driver_register() = %d\n", err);
--
-- return err;
--}
--module_init(fman_load);
--
--static void __exit fman_unload(void)
--{
-- platform_driver_unregister(&fman_driver);
--}
--module_exit(fman_unload);
--
--MODULE_LICENSE("Dual BSD/GPL");
--MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
-diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
-deleted file mode 100644
-index 57aae8d..0000000
---- a/drivers/net/ethernet/freescale/fman/fman.h
-+++ /dev/null
-@@ -1,325 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#ifndef __FM_H
--#define __FM_H
--
--#include <linux/io.h>
--
--/* FM Frame descriptor macros */
--/* Frame queue Context Override */
--#define FM_FD_CMD_FCO 0x80000000
--#define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */
--#define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */
--
--/* TX-Port: Unsupported Format */
--#define FM_FD_ERR_UNSUPPORTED_FORMAT 0x04000000
--/* TX Port: Length Error */
--#define FM_FD_ERR_LENGTH 0x02000000
--#define FM_FD_ERR_DMA 0x01000000 /* DMA Data error */
--
--/* IPR frame (not error) */
--#define FM_FD_IPR 0x00000001
--/* IPR non-consistent-sp */
--#define FM_FD_ERR_IPR_NCSP (0x00100000 | FM_FD_IPR)
--/* IPR error */
--#define FM_FD_ERR_IPR (0x00200000 | FM_FD_IPR)
--/* IPR timeout */
--#define FM_FD_ERR_IPR_TO (0x00300000 | FM_FD_IPR)
--/* TX Port: Length Error */
--#define FM_FD_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
--
--/* Rx FIFO overflow, FCS error, code error, running disparity error
-- * (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
-- * PHY error control character detected.
-- */
--#define FM_FD_ERR_PHYSICAL 0x00080000
--/* Frame too long OR Frame size exceeds max_length_frame */
--#define FM_FD_ERR_SIZE 0x00040000
--/* classification discard */
--#define FM_FD_ERR_CLS_DISCARD 0x00020000
--/* Extract Out of Frame */
--#define FM_FD_ERR_EXTRACTION 0x00008000
--/* No Scheme Selected */
--#define FM_FD_ERR_NO_SCHEME 0x00004000
--/* Keysize Overflow */
--#define FM_FD_ERR_KEYSIZE_OVERFLOW 0x00002000
--/* Frame color is red */
--#define FM_FD_ERR_COLOR_RED 0x00000800
--/* Frame color is yellow */
--#define FM_FD_ERR_COLOR_YELLOW 0x00000400
--/* Parser Time out Exceed */
--#define FM_FD_ERR_PRS_TIMEOUT 0x00000080
--/* Invalid Soft Parser instruction */
--#define FM_FD_ERR_PRS_ILL_INSTRUCT 0x00000040
--/* Header error was identified during parsing */
--#define FM_FD_ERR_PRS_HDR_ERR 0x00000020
--/* Frame parsed beyind 256 first bytes */
--#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008
--
--/* non Frame-Manager error */
--#define FM_FD_RX_STATUS_ERR_NON_FM 0x00400000
--
--/* FMan driver defines */
--#define FMAN_BMI_FIFO_UNITS 0x100
--#define OFFSET_UNITS 16
--
--/* BMan defines */
--#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
--#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
--
--struct fman; /* FMan data */
--
--/* Enum for defining port types */
--enum fman_port_type {
-- FMAN_PORT_TYPE_TX = 0, /* TX Port */
-- FMAN_PORT_TYPE_RX, /* RX Port */
--};
--
--struct fman_rev_info {
-- u8 major; /* Major revision */
-- u8 minor; /* Minor revision */
--};
--
--enum fman_exceptions {
-- FMAN_EX_DMA_BUS_ERROR = 0, /* DMA bus error. */
-- FMAN_EX_DMA_READ_ECC, /* Read Buffer ECC error */
-- FMAN_EX_DMA_SYSTEM_WRITE_ECC, /* Write Buffer ECC err on sys side */
-- FMAN_EX_DMA_FM_WRITE_ECC, /* Write Buffer ECC error on FM side */
-- FMAN_EX_DMA_SINGLE_PORT_ECC, /* Single Port ECC error on FM side */
-- FMAN_EX_FPM_STALL_ON_TASKS, /* Stall of tasks on FPM */
-- FMAN_EX_FPM_SINGLE_ECC, /* Single ECC on FPM. */
-- FMAN_EX_FPM_DOUBLE_ECC, /* Double ECC error on FPM ram access */
-- FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. */
-- FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */
-- FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */
-- FMAN_EX_BMI_LIST_RAM_ECC, /* Linked List RAM ECC error */
-- FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */
-- FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */
-- FMAN_EX_BMI_DISPATCH_RAM_ECC, /* Dispatch RAM ECC Error Enable */
-- FMAN_EX_IRAM_ECC, /* Double bit ECC occurred on IRAM */
-- FMAN_EX_MURAM_ECC /* Double bit ECC occurred on MURAM */
--};
--
--/* Parse results memory layout */
--struct fman_prs_result {
-- u8 lpid; /* Logical port id */
-- u8 shimr; /* Shim header result */
-- u16 l2r; /* Layer 2 result */
-- u16 l3r; /* Layer 3 result */
-- u8 l4r; /* Layer 4 result */
-- u8 cplan; /* Classification plan id */
-- u16 nxthdr; /* Next Header */
-- u16 cksum; /* Running-sum */
-- /* Flags&fragment-offset field of the last IP-header */
-- u16 flags_frag_off;
-- /* Routing type field of a IPV6 routing extension header */
-- u8 route_type;
-- /* Routing Extension Header Present; last bit is IP valid */
-- u8 rhp_ip_valid;
-- u8 shim_off[2]; /* Shim offset */
-- u8 ip_pid_off; /* IP PID (last IP-proto) offset */
-- u8 eth_off; /* ETH offset */
-- u8 llc_snap_off; /* LLC_SNAP offset */
-- u8 vlan_off[2]; /* VLAN offset */
-- u8 etype_off; /* ETYPE offset */
-- u8 pppoe_off; /* PPP offset */
-- u8 mpls_off[2]; /* MPLS offset */
-- u8 ip_off[2]; /* IP offset */
-- u8 gre_off; /* GRE offset */
-- u8 l4_off; /* Layer 4 offset */
-- u8 nxthdr_off; /* Parser end point */
--};
--
--/* A structure for defining buffer prefix area content. */
--struct fman_buffer_prefix_content {
-- /* Number of bytes to be left at the beginning of the external
-- * buffer; Note that the private-area will start from the base
-- * of the buffer address.
-- */
-- u16 priv_data_size;
-- /* true to pass the parse result to/from the FM;
-- * User may use FM_PORT_GetBufferPrsResult() in
-- * order to get the parser-result from a buffer.
-- */
-- bool pass_prs_result;
-- /* true to pass the timeStamp to/from the FM User */
-- bool pass_time_stamp;
-- /* true to pass the KG hash result to/from the FM User may
-- * use FM_PORT_GetBufferHashResult() in order to get the
-- * parser-result from a buffer.
-- */
-- bool pass_hash_result;
-- /* Add all other Internal-Context information: AD,
-- * hash-result, key, etc.
-- */
-- u16 data_align;
--};
--
--/* A structure of information about each of the external
-- * buffer pools used by a port or storage-profile.
-- */
--struct fman_ext_pool_params {
-- u8 id; /* External buffer pool id */
-- u16 size; /* External buffer pool buffer size */
--};
--
--/* A structure for informing the driver about the external
-- * buffer pools allocated in the BM and used by a port or a
-- * storage-profile.
-- */
--struct fman_ext_pools {
-- u8 num_of_pools_used; /* Number of pools use by this port */
-- struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM];
-- /* Parameters for each port */
--};
--
--/* A structure for defining BM pool depletion criteria */
--struct fman_buf_pool_depletion {
-- /* select mode in which pause frames will be sent after a
-- * number of pools (all together!) are depleted
-- */
-- bool pools_grp_mode_enable;
-- /* the number of depleted pools that will invoke pause
-- * frames transmission.
-- */
-- u8 num_of_pools;
-- /* For each pool, true if it should be considered for
-- * depletion (Note - this pool must be used by this port!).
-- */
-- bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
-- /* select mode in which pause frames will be sent
-- * after a single-pool is depleted;
-- */
-- bool single_pool_mode_enable;
-- /* For each pool, true if it should be considered
-- * for depletion (Note - this pool must be used by this port!)
-- */
-- bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
--};
--
--/* Enum for inter-module interrupts registration */
--enum fman_event_modules {
-- FMAN_MOD_MAC = 0, /* MAC event */
-- FMAN_MOD_FMAN_CTRL, /* FMAN Controller */
-- FMAN_MOD_DUMMY_LAST
--};
--
--/* Enum for interrupts types */
--enum fman_intr_type {
-- FMAN_INTR_TYPE_ERR,
-- FMAN_INTR_TYPE_NORMAL
--};
--
--/* Enum for inter-module interrupts registration */
--enum fman_inter_module_event {
-- FMAN_EV_ERR_MAC0 = 0, /* MAC 0 error event */
-- FMAN_EV_ERR_MAC1, /* MAC 1 error event */
-- FMAN_EV_ERR_MAC2, /* MAC 2 error event */
-- FMAN_EV_ERR_MAC3, /* MAC 3 error event */
-- FMAN_EV_ERR_MAC4, /* MAC 4 error event */
-- FMAN_EV_ERR_MAC5, /* MAC 5 error event */
-- FMAN_EV_ERR_MAC6, /* MAC 6 error event */
-- FMAN_EV_ERR_MAC7, /* MAC 7 error event */
-- FMAN_EV_ERR_MAC8, /* MAC 8 error event */
-- FMAN_EV_ERR_MAC9, /* MAC 9 error event */
-- FMAN_EV_MAC0, /* MAC 0 event (Magic packet detection) */
-- FMAN_EV_MAC1, /* MAC 1 event (Magic packet detection) */
-- FMAN_EV_MAC2, /* MAC 2 (Magic packet detection) */
-- FMAN_EV_MAC3, /* MAC 3 (Magic packet detection) */
-- FMAN_EV_MAC4, /* MAC 4 (Magic packet detection) */
-- FMAN_EV_MAC5, /* MAC 5 (Magic packet detection) */
-- FMAN_EV_MAC6, /* MAC 6 (Magic packet detection) */
-- FMAN_EV_MAC7, /* MAC 7 (Magic packet detection) */
-- FMAN_EV_MAC8, /* MAC 8 event (Magic packet detection) */
-- FMAN_EV_MAC9, /* MAC 9 event (Magic packet detection) */
-- FMAN_EV_FMAN_CTRL_0, /* Fman controller event 0 */
-- FMAN_EV_FMAN_CTRL_1, /* Fman controller event 1 */
-- FMAN_EV_FMAN_CTRL_2, /* Fman controller event 2 */
-- FMAN_EV_FMAN_CTRL_3, /* Fman controller event 3 */
-- FMAN_EV_CNT
--};
--
--struct fman_intr_src {
-- void (*isr_cb)(void *src_arg);
-- void *src_handle;
--};
--
--/* Structure for port-FM communication during fman_port_init. */
--struct fman_port_init_params {
-- u8 port_id; /* port Id */
-- enum fman_port_type port_type; /* Port type */
-- u16 port_speed; /* Port speed */
-- u16 liodn_offset; /* Port's requested resource */
-- u8 num_of_tasks; /* Port's requested resource */
-- u8 num_of_extra_tasks; /* Port's requested resource */
-- u8 num_of_open_dmas; /* Port's requested resource */
-- u8 num_of_extra_open_dmas; /* Port's requested resource */
-- u32 size_of_fifo; /* Port's requested resource */
-- u32 extra_size_of_fifo; /* Port's requested resource */
-- u8 deq_pipeline_depth; /* Port's requested resource */
-- u16 max_frame_length; /* Port's max frame length. */
-- u16 liodn_base;
-- /* LIODN base for this port, to be used together with LIODN offset. */
--};
--
--void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
--
--void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
-- u8 mod_id, enum fman_intr_type intr_type,
-- void (*f_isr)(void *h_src_arg), void *h_src_arg);
--
--void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
-- u8 mod_id, enum fman_intr_type intr_type);
--
--int fman_set_port_params(struct fman *fman,
-- struct fman_port_init_params *port_params);
--
--int fman_reset_mac(struct fman *fman, u8 mac_id);
--
--u16 fman_get_clock_freq(struct fman *fman);
--
--u32 fman_get_bmi_max_fifo_size(struct fman *fman);
--
--int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
--
--u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
--
--struct resource *fman_get_mem_region(struct fman *fman);
--
--u16 fman_get_max_frm(void);
--
--int fman_get_rx_extra_headroom(void);
--
--struct fman *fman_bind(struct device *dev);
--
--#endif /* __FM_H */
-diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
-deleted file mode 100644
-index c88918c..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
-+++ /dev/null
-@@ -1,1451 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
--#include "fman_dtsec.h"
--#include "fman.h"
--
--#include <linux/slab.h>
--#include <linux/bitrev.h>
--#include <linux/io.h>
--#include <linux/delay.h>
--#include <linux/phy.h>
--#include <linux/crc32.h>
--#include <linux/of_mdio.h>
--#include <linux/mii.h>
--
--/* TBI register addresses */
--#define MII_TBICON 0x11
--
--/* TBICON register bit fields */
--#define TBICON_SOFT_RESET 0x8000 /* Soft reset */
--#define TBICON_DISABLE_RX_DIS 0x2000 /* Disable receive disparity */
--#define TBICON_DISABLE_TX_DIS 0x1000 /* Disable transmit disparity */
--#define TBICON_AN_SENSE 0x0100 /* Auto-negotiation sense enable */
--#define TBICON_CLK_SELECT 0x0020 /* Clock select */
--#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
--
--#define TBIANA_SGMII 0x4001
--#define TBIANA_1000X 0x01a0
--
--/* Interrupt Mask Register (IMASK) */
--#define DTSEC_IMASK_BREN 0x80000000
--#define DTSEC_IMASK_RXCEN 0x40000000
--#define DTSEC_IMASK_MSROEN 0x04000000
--#define DTSEC_IMASK_GTSCEN 0x02000000
--#define DTSEC_IMASK_BTEN 0x01000000
--#define DTSEC_IMASK_TXCEN 0x00800000
--#define DTSEC_IMASK_TXEEN 0x00400000
--#define DTSEC_IMASK_LCEN 0x00040000
--#define DTSEC_IMASK_CRLEN 0x00020000
--#define DTSEC_IMASK_XFUNEN 0x00010000
--#define DTSEC_IMASK_ABRTEN 0x00008000
--#define DTSEC_IMASK_IFERREN 0x00004000
--#define DTSEC_IMASK_MAGEN 0x00000800
--#define DTSEC_IMASK_MMRDEN 0x00000400
--#define DTSEC_IMASK_MMWREN 0x00000200
--#define DTSEC_IMASK_GRSCEN 0x00000100
--#define DTSEC_IMASK_TDPEEN 0x00000002
--#define DTSEC_IMASK_RDPEEN 0x00000001
--
--#define DTSEC_EVENTS_MASK \
-- ((u32)(DTSEC_IMASK_BREN | \
-- DTSEC_IMASK_RXCEN | \
-- DTSEC_IMASK_BTEN | \
-- DTSEC_IMASK_TXCEN | \
-- DTSEC_IMASK_TXEEN | \
-- DTSEC_IMASK_ABRTEN | \
-- DTSEC_IMASK_LCEN | \
-- DTSEC_IMASK_CRLEN | \
-- DTSEC_IMASK_XFUNEN | \
-- DTSEC_IMASK_IFERREN | \
-- DTSEC_IMASK_MAGEN | \
-- DTSEC_IMASK_TDPEEN | \
-- DTSEC_IMASK_RDPEEN))
--
--/* dtsec timestamp event bits */
--#define TMR_PEMASK_TSREEN 0x00010000
--#define TMR_PEVENT_TSRE 0x00010000
--
--/* Group address bit indication */
--#define MAC_GROUP_ADDRESS 0x0000010000000000ULL
--
--/* Defaults */
--#define DEFAULT_HALFDUP_RETRANSMIT 0xf
--#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
--#define DEFAULT_TX_PAUSE_TIME 0xf000
--#define DEFAULT_RX_PREPEND 0
--#define DEFAULT_PREAMBLE_LEN 7
--#define DEFAULT_TX_PAUSE_TIME_EXTD 0
--#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
--#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
--#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
--#define DEFAULT_BACK_TO_BACK_IPG 0x60
--#define DEFAULT_MAXIMUM_FRAME 0x600
--
--/* register related defines (bits, field offsets..) */
--#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000
--
--#define DTSEC_ECNTRL_GMIIM 0x00000040
--#define DTSEC_ECNTRL_TBIM 0x00000020
--#define DTSEC_ECNTRL_SGMIIM 0x00000002
--#define DTSEC_ECNTRL_RPM 0x00000010
--#define DTSEC_ECNTRL_R100M 0x00000008
--#define DTSEC_ECNTRL_QSGMIIM 0x00000001
--
--#define DTSEC_TCTRL_GTS 0x00000020
--
--#define RCTRL_PAL_MASK 0x001f0000
--#define RCTRL_PAL_SHIFT 16
--#define RCTRL_GHTX 0x00000400
--#define RCTRL_GRS 0x00000020
--#define RCTRL_MPROM 0x00000008
--#define RCTRL_RSF 0x00000004
--#define RCTRL_UPROM 0x00000001
--
--#define MACCFG1_SOFT_RESET 0x80000000
--#define MACCFG1_RX_FLOW 0x00000020
--#define MACCFG1_TX_FLOW 0x00000010
--#define MACCFG1_TX_EN 0x00000001
--#define MACCFG1_RX_EN 0x00000004
--
--#define MACCFG2_NIBBLE_MODE 0x00000100
--#define MACCFG2_BYTE_MODE 0x00000200
--#define MACCFG2_PAD_CRC_EN 0x00000004
--#define MACCFG2_FULL_DUPLEX 0x00000001
--#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
--#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
--
--#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
--#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
--#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8
--
--#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
--#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
--#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
--#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F
--
--#define HAFDUP_EXCESS_DEFER 0x00010000
--#define HAFDUP_COLLISION_WINDOW 0x000003ff
--#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
--#define HAFDUP_RETRANSMISSION_MAX 0x0000f000
--
--#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */
--
--#define PTV_PTE_MASK 0xffff0000
--#define PTV_PT_MASK 0x0000ffff
--#define PTV_PTE_SHIFT 16
--
--#define MAX_PACKET_ALIGNMENT 31
--#define MAX_INTER_PACKET_GAP 0x7f
--#define MAX_RETRANSMISSION 0x0f
--#define MAX_COLLISION_WINDOW 0x03ff
--
--/* Hash table size (32 bits*8 regs) */
--#define DTSEC_HASH_TABLE_SIZE 256
--/* Extended Hash table size (32 bits*16 regs) */
--#define EXTENDED_HASH_TABLE_SIZE 512
--
--/* dTSEC Memory Map registers */
--struct dtsec_regs {
-- /* dTSEC General Control and Status Registers */
-- u32 tsec_id; /* 0x000 ETSEC_ID register */
-- u32 tsec_id2; /* 0x004 ETSEC_ID2 register */
-- u32 ievent; /* 0x008 Interrupt event register */
-- u32 imask; /* 0x00C Interrupt mask register */
-- u32 reserved0010[1];
-- u32 ecntrl; /* 0x014 E control register */
-- u32 ptv; /* 0x018 Pause time value register */
-- u32 tbipa; /* 0x01C TBI PHY address register */
-- u32 tmr_ctrl; /* 0x020 Time-stamp Control register */
-- u32 tmr_pevent; /* 0x024 Time-stamp event register */
-- u32 tmr_pemask; /* 0x028 Timer event mask register */
-- u32 reserved002c[5];
-- u32 tctrl; /* 0x040 Transmit control register */
-- u32 reserved0044[3];
-- u32 rctrl; /* 0x050 Receive control register */
-- u32 reserved0054[11];
-- u32 igaddr[8]; /* 0x080-0x09C Individual/group address */
-- u32 gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */
-- u32 reserved00c0[16];
-- u32 maccfg1; /* 0x100 MAC configuration #1 */
-- u32 maccfg2; /* 0x104 MAC configuration #2 */
-- u32 ipgifg; /* 0x108 IPG/IFG */
-- u32 hafdup; /* 0x10C Half-duplex */
-- u32 maxfrm; /* 0x110 Maximum frame */
-- u32 reserved0114[10];
-- u32 ifstat; /* 0x13C Interface status */
-- u32 macstnaddr1; /* 0x140 Station Address,part 1 */
-- u32 macstnaddr2; /* 0x144 Station Address,part 2 */
-- struct {
-- u32 exact_match1; /* octets 1-4 */
-- u32 exact_match2; /* octets 5-6 */
-- } macaddr[15]; /* 0x148-0x1BC mac exact match addresses 1-15 */
-- u32 reserved01c0[16];
-- u32 tr64; /* 0x200 Tx and Rx 64 byte frame counter */
-- u32 tr127; /* 0x204 Tx and Rx 65 to 127 byte frame counter */
-- u32 tr255; /* 0x208 Tx and Rx 128 to 255 byte frame counter */
-- u32 tr511; /* 0x20C Tx and Rx 256 to 511 byte frame counter */
-- u32 tr1k; /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
-- u32 trmax; /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
-- u32 trmgv;
-- /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
-- u32 rbyt; /* 0x21C receive byte counter */
-- u32 rpkt; /* 0x220 receive packet counter */
-- u32 rfcs; /* 0x224 receive FCS error counter */
-- u32 rmca; /* 0x228 RMCA Rx multicast packet counter */
-- u32 rbca; /* 0x22C Rx broadcast packet counter */
-- u32 rxcf; /* 0x230 Rx control frame packet counter */
-- u32 rxpf; /* 0x234 Rx pause frame packet counter */
-- u32 rxuo; /* 0x238 Rx unknown OP code counter */
-- u32 raln; /* 0x23C Rx alignment error counter */
-- u32 rflr; /* 0x240 Rx frame length error counter */
-- u32 rcde; /* 0x244 Rx code error counter */
-- u32 rcse; /* 0x248 Rx carrier sense error counter */
-- u32 rund; /* 0x24C Rx undersize packet counter */
-- u32 rovr; /* 0x250 Rx oversize packet counter */
-- u32 rfrg; /* 0x254 Rx fragments counter */
-- u32 rjbr; /* 0x258 Rx jabber counter */
-- u32 rdrp; /* 0x25C Rx drop */
-- u32 tbyt; /* 0x260 Tx byte counter */
-- u32 tpkt; /* 0x264 Tx packet counter */
-- u32 tmca; /* 0x268 Tx multicast packet counter */
-- u32 tbca; /* 0x26C Tx broadcast packet counter */
-- u32 txpf; /* 0x270 Tx pause control frame counter */
-- u32 tdfr; /* 0x274 Tx deferral packet counter */
-- u32 tedf; /* 0x278 Tx excessive deferral packet counter */
-- u32 tscl; /* 0x27C Tx single collision packet counter */
-- u32 tmcl; /* 0x280 Tx multiple collision packet counter */
-- u32 tlcl; /* 0x284 Tx late collision packet counter */
-- u32 txcl; /* 0x288 Tx excessive collision packet counter */
-- u32 tncl; /* 0x28C Tx total collision counter */
-- u32 reserved0290[1];
-- u32 tdrp; /* 0x294 Tx drop frame counter */
-- u32 tjbr; /* 0x298 Tx jabber frame counter */
-- u32 tfcs; /* 0x29C Tx FCS error counter */
-- u32 txcf; /* 0x2A0 Tx control frame counter */
-- u32 tovr; /* 0x2A4 Tx oversize frame counter */
-- u32 tund; /* 0x2A8 Tx undersize frame counter */
-- u32 tfrg; /* 0x2AC Tx fragments frame counter */
-- u32 car1; /* 0x2B0 carry register one register* */
-- u32 car2; /* 0x2B4 carry register two register* */
-- u32 cam1; /* 0x2B8 carry register one mask register */
-- u32 cam2; /* 0x2BC carry register two mask register */
-- u32 reserved02c0[848];
--};
--
--/* struct dtsec_cfg - dTSEC configuration
-- * Transmit half-duplex flow control, under software control for 10/100-Mbps
-- * half-duplex media. If set, back pressure is applied to media by raising
-- * carrier.
-- * halfdup_retransmit:
-- * Number of retransmission attempts following a collision.
-- * If this is exceeded dTSEC aborts transmission due to excessive collisions.
-- * The standard specifies the attempt limit to be 15.
-- * halfdup_coll_window:
-- * The number of bytes of the frame during which collisions may occur.
-- * The default value of 55 corresponds to the frame byte at the end of the
-- * standard 512-bit slot time window. If collisions are detected after this
-- * byte, the late collision event is asserted and transmission of current
-- * frame is aborted.
-- * tx_pad_crc:
-- * Pad and append CRC. If set, the MAC pads all ransmitted short frames and
-- * appends a CRC to every frame regardless of padding requirement.
-- * tx_pause_time:
-- * Transmit pause time value. This pause value is used as part of the pause
-- * frame to be sent when a transmit pause frame is initiated.
-- * If set to 0 this disables transmission of pause frames.
-- * preamble_len:
-- * Length, in bytes, of the preamble field preceding each Ethernet
-- * start-of-frame delimiter byte. The default value of 0x7 should be used in
-- * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
-- * rx_prepend:
-- * Packet alignment padding length. The specified number of bytes (1-31)
-- * of zero padding are inserted before the start of each received frame.
-- * For Ethernet, where optional preamble extraction is enabled, the padding
-- * appears before the preamble, otherwise the padding precedes the
-- * layer 2 header.
-- *
-- * This structure contains basic dTSEC configuration and must be passed to
-- * init() function. A default set of configuration values can be
-- * obtained by calling set_dflts().
-- */
--struct dtsec_cfg {
-- u16 halfdup_retransmit;
-- u16 halfdup_coll_window;
-- bool tx_pad_crc;
-- u16 tx_pause_time;
-- bool ptp_tsu_en;
-- bool ptp_exception_en;
-- u32 preamble_len;
-- u32 rx_prepend;
-- u16 tx_pause_time_extd;
-- u16 maximum_frame;
-- u32 non_back_to_back_ipg1;
-- u32 non_back_to_back_ipg2;
-- u32 min_ifg_enforcement;
-- u32 back_to_back_ipg;
--};
--
--struct fman_mac {
-- /* pointer to dTSEC memory mapped registers */
-- struct dtsec_regs __iomem *regs;
-- /* MAC address of device */
-- u64 addr;
-- /* Ethernet physical interface */
-- phy_interface_t phy_if;
-- u16 max_speed;
-- void *dev_id; /* device cookie used by the exception cbs */
-- fman_mac_exception_cb *exception_cb;
-- fman_mac_exception_cb *event_cb;
-- /* Number of individual addresses in registers for this station */
-- u8 num_of_ind_addr_in_regs;
-- /* pointer to driver's global address hash table */
-- struct eth_hash_t *multicast_addr_hash;
-- /* pointer to driver's individual address hash table */
-- struct eth_hash_t *unicast_addr_hash;
-- u8 mac_id;
-- u32 exceptions;
-- bool ptp_tsu_enabled;
-- bool en_tsu_err_exeption;
-- struct dtsec_cfg *dtsec_drv_param;
-- void *fm;
-- struct fman_rev_info fm_rev_info;
-- bool basex_if;
-- struct phy_device *tbiphy;
--};
--
--static void set_dflts(struct dtsec_cfg *cfg)
--{
-- cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
-- cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
-- cfg->tx_pad_crc = true;
-- cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
-- /* PHY address 0 is reserved (DPAA RM) */
-- cfg->rx_prepend = DEFAULT_RX_PREPEND;
-- cfg->ptp_tsu_en = true;
-- cfg->ptp_exception_en = true;
-- cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
-- cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
-- cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
-- cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
-- cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
-- cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
-- cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
--}
--
--static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
-- phy_interface_t iface, u16 iface_speed, u8 *macaddr,
-- u32 exception_mask, u8 tbi_addr)
--{
-- bool is_rgmii, is_sgmii, is_qsgmii;
-- int i;
-- u32 tmp;
--
-- /* Soft reset */
-- iowrite32be(MACCFG1_SOFT_RESET, ®s->maccfg1);
-- iowrite32be(0, ®s->maccfg1);
--
-- /* dtsec_id2 */
-- tmp = ioread32be(®s->tsec_id2);
--
-- /* check RGMII support */
-- if (iface == PHY_INTERFACE_MODE_RGMII ||
-- iface == PHY_INTERFACE_MODE_RMII)
-- if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
-- return -EINVAL;
--
-- if (iface == PHY_INTERFACE_MODE_SGMII ||
-- iface == PHY_INTERFACE_MODE_MII)
-- if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
-- return -EINVAL;
--
-- is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
-- is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
-- is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
--
-- tmp = 0;
-- if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
-- tmp |= DTSEC_ECNTRL_GMIIM;
-- if (is_sgmii)
-- tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
-- if (is_qsgmii)
-- tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
-- DTSEC_ECNTRL_QSGMIIM);
-- if (is_rgmii)
-- tmp |= DTSEC_ECNTRL_RPM;
-- if (iface_speed == SPEED_100)
-- tmp |= DTSEC_ECNTRL_R100M;
--
-- iowrite32be(tmp, ®s->ecntrl);
--
-- tmp = 0;
--
-- if (cfg->tx_pause_time)
-- tmp |= cfg->tx_pause_time;
-- if (cfg->tx_pause_time_extd)
-- tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
-- iowrite32be(tmp, ®s->ptv);
--
-- tmp = 0;
-- tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
-- /* Accept short frames */
-- tmp |= RCTRL_RSF;
--
-- iowrite32be(tmp, ®s->rctrl);
--
-- /* Assign a Phy Address to the TBI (TBIPA).
-- * Done also in cases where TBI is not selected to avoid conflict with
-- * the external PHY's Physical address
-- */
-- iowrite32be(tbi_addr, ®s->tbipa);
--
-- iowrite32be(0, ®s->tmr_ctrl);
--
-- if (cfg->ptp_tsu_en) {
-- tmp = 0;
-- tmp |= TMR_PEVENT_TSRE;
-- iowrite32be(tmp, ®s->tmr_pevent);
--
-- if (cfg->ptp_exception_en) {
-- tmp = 0;
-- tmp |= TMR_PEMASK_TSREEN;
-- iowrite32be(tmp, ®s->tmr_pemask);
-- }
-- }
--
-- tmp = 0;
-- tmp |= MACCFG1_RX_FLOW;
-- tmp |= MACCFG1_TX_FLOW;
-- iowrite32be(tmp, ®s->maccfg1);
--
-- tmp = 0;
--
-- if (iface_speed < SPEED_1000)
-- tmp |= MACCFG2_NIBBLE_MODE;
-- else if (iface_speed == SPEED_1000)
-- tmp |= MACCFG2_BYTE_MODE;
--
-- tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
-- MACCFG2_PREAMBLE_LENGTH_MASK;
-- if (cfg->tx_pad_crc)
-- tmp |= MACCFG2_PAD_CRC_EN;
-- /* Full Duplex */
-- tmp |= MACCFG2_FULL_DUPLEX;
-- iowrite32be(tmp, ®s->maccfg2);
--
-- tmp = (((cfg->non_back_to_back_ipg1 <<
-- IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
-- & IPGIFG_NON_BACK_TO_BACK_IPG_1)
-- | ((cfg->non_back_to_back_ipg2 <<
-- IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
-- & IPGIFG_NON_BACK_TO_BACK_IPG_2)
-- | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
-- & IPGIFG_MIN_IFG_ENFORCEMENT)
-- | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
-- iowrite32be(tmp, ®s->ipgifg);
--
-- tmp = 0;
-- tmp |= HAFDUP_EXCESS_DEFER;
-- tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
-- & HAFDUP_RETRANSMISSION_MAX);
-- tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
--
-- iowrite32be(tmp, ®s->hafdup);
--
-- /* Initialize Maximum frame length */
-- iowrite32be(cfg->maximum_frame, ®s->maxfrm);
--
-- iowrite32be(0xffffffff, ®s->cam1);
-- iowrite32be(0xffffffff, ®s->cam2);
--
-- iowrite32be(exception_mask, ®s->imask);
--
-- iowrite32be(0xffffffff, ®s->ievent);
--
-- tmp = (u32)((macaddr[5] << 24) |
-- (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
-- iowrite32be(tmp, ®s->macstnaddr1);
--
-- tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
-- iowrite32be(tmp, ®s->macstnaddr2);
--
-- /* HASH */
-- for (i = 0; i < NUM_OF_HASH_REGS; i++) {
-- /* Initialize IADDRx */
-- iowrite32be(0, ®s->igaddr[i]);
-- /* Initialize GADDRx */
-- iowrite32be(0, ®s->gaddr[i]);
-- }
--
-- return 0;
--}
--
--static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
--{
-- u32 tmp;
--
-- tmp = (u32)((adr[5] << 24) |
-- (adr[4] << 16) | (adr[3] << 8) | adr[2]);
-- iowrite32be(tmp, ®s->macstnaddr1);
--
-- tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
-- iowrite32be(tmp, ®s->macstnaddr2);
--}
--
--static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
-- bool enable)
--{
-- int reg_idx = (bucket >> 5) & 0xf;
-- int bit_idx = bucket & 0x1f;
-- u32 bit_mask = 0x80000000 >> bit_idx;
-- u32 __iomem *reg;
--
-- if (reg_idx > 7)
-- reg = ®s->gaddr[reg_idx - 8];
-- else
-- reg = ®s->igaddr[reg_idx];
--
-- if (enable)
-- iowrite32be(ioread32be(reg) | bit_mask, reg);
-- else
-- iowrite32be(ioread32be(reg) & (~bit_mask), reg);
--}
--
--static int check_init_parameters(struct fman_mac *dtsec)
--{
-- if (dtsec->max_speed >= SPEED_10000) {
-- pr_err("1G MAC driver supports 1G or lower speeds\n");
-- return -EINVAL;
-- }
-- if (dtsec->addr == 0) {
-- pr_err("Ethernet MAC Must have a valid MAC Address\n");
-- return -EINVAL;
-- }
-- if ((dtsec->dtsec_drv_param)->rx_prepend >
-- MAX_PACKET_ALIGNMENT) {
-- pr_err("packetAlignmentPadding can't be > than %d\n",
-- MAX_PACKET_ALIGNMENT);
-- return -EINVAL;
-- }
-- if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
-- MAX_INTER_PACKET_GAP) ||
-- ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
-- MAX_INTER_PACKET_GAP) ||
-- ((dtsec->dtsec_drv_param)->back_to_back_ipg >
-- MAX_INTER_PACKET_GAP)) {
-- pr_err("Inter packet gap can't be greater than %d\n",
-- MAX_INTER_PACKET_GAP);
-- return -EINVAL;
-- }
-- if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
-- MAX_RETRANSMISSION) {
-- pr_err("maxRetransmission can't be greater than %d\n",
-- MAX_RETRANSMISSION);
-- return -EINVAL;
-- }
-- if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
-- MAX_COLLISION_WINDOW) {
-- pr_err("collisionWindow can't be greater than %d\n",
-- MAX_COLLISION_WINDOW);
-- return -EINVAL;
-- /* If Auto negotiation process is disabled, need to set up the PHY
-- * using the MII Management Interface
-- */
-- }
-- if (!dtsec->exception_cb) {
-- pr_err("uninitialized exception_cb\n");
-- return -EINVAL;
-- }
-- if (!dtsec->event_cb) {
-- pr_err("uninitialized event_cb\n");
-- return -EINVAL;
-- }
--
-- return 0;
--}
--
--static int get_exception_flag(enum fman_mac_exceptions exception)
--{
-- u32 bit_mask;
--
-- switch (exception) {
-- case FM_MAC_EX_1G_BAB_RX:
-- bit_mask = DTSEC_IMASK_BREN;
-- break;
-- case FM_MAC_EX_1G_RX_CTL:
-- bit_mask = DTSEC_IMASK_RXCEN;
-- break;
-- case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
-- bit_mask = DTSEC_IMASK_GTSCEN;
-- break;
-- case FM_MAC_EX_1G_BAB_TX:
-- bit_mask = DTSEC_IMASK_BTEN;
-- break;
-- case FM_MAC_EX_1G_TX_CTL:
-- bit_mask = DTSEC_IMASK_TXCEN;
-- break;
-- case FM_MAC_EX_1G_TX_ERR:
-- bit_mask = DTSEC_IMASK_TXEEN;
-- break;
-- case FM_MAC_EX_1G_LATE_COL:
-- bit_mask = DTSEC_IMASK_LCEN;
-- break;
-- case FM_MAC_EX_1G_COL_RET_LMT:
-- bit_mask = DTSEC_IMASK_CRLEN;
-- break;
-- case FM_MAC_EX_1G_TX_FIFO_UNDRN:
-- bit_mask = DTSEC_IMASK_XFUNEN;
-- break;
-- case FM_MAC_EX_1G_MAG_PCKT:
-- bit_mask = DTSEC_IMASK_MAGEN;
-- break;
-- case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
-- bit_mask = DTSEC_IMASK_MMRDEN;
-- break;
-- case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
-- bit_mask = DTSEC_IMASK_MMWREN;
-- break;
-- case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
-- bit_mask = DTSEC_IMASK_GRSCEN;
-- break;
-- case FM_MAC_EX_1G_DATA_ERR:
-- bit_mask = DTSEC_IMASK_TDPEEN;
-- break;
-- case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
-- bit_mask = DTSEC_IMASK_MSROEN;
-- break;
-- default:
-- bit_mask = 0;
-- break;
-- }
--
-- return bit_mask;
--}
--
--static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
--{
-- /* Checks if dTSEC driver parameters were initialized */
-- if (!dtsec_drv_params)
-- return true;
--
-- return false;
--}
--
--static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
--
-- if (is_init_done(dtsec->dtsec_drv_param))
-- return 0;
--
-- return (u16)ioread32be(®s->maxfrm);
--}
--
--static void dtsec_isr(void *handle)
--{
-- struct fman_mac *dtsec = (struct fman_mac *)handle;
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 event;
--
-- /* do not handle MDIO events */
-- event = ioread32be(®s->ievent) &
-- (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
--
-- event &= ioread32be(®s->imask);
--
-- iowrite32be(event, ®s->ievent);
--
-- if (event & DTSEC_IMASK_BREN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
-- if (event & DTSEC_IMASK_RXCEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
-- if (event & DTSEC_IMASK_GTSCEN)
-- dtsec->exception_cb(dtsec->dev_id,
-- FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
-- if (event & DTSEC_IMASK_BTEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
-- if (event & DTSEC_IMASK_TXCEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
-- if (event & DTSEC_IMASK_TXEEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
-- if (event & DTSEC_IMASK_LCEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
-- if (event & DTSEC_IMASK_CRLEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
-- if (event & DTSEC_IMASK_XFUNEN) {
-- /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
-- if (dtsec->fm_rev_info.major == 2) {
-- u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
-- /* a. Write 0x00E0_0C00 to DTSEC_ID
-- * This is a read only register
-- * b. Read and save the value of TPKT
-- */
-- tpkt1 = ioread32be(®s->tpkt);
--
-- /* c. Read the register at dTSEC address offset 0x32C */
-- tmp_reg1 = ioread32be(®s->reserved02c0[27]);
--
-- /* d. Compare bits [9:15] to bits [25:31] of the
-- * register at address offset 0x32C.
-- */
-- if ((tmp_reg1 & 0x007F0000) !=
-- (tmp_reg1 & 0x0000007F)) {
-- /* If they are not equal, save the value of
-- * this register and wait for at least
-- * MAXFRM*16 ns
-- */
-- usleep_range((u32)(min
-- (dtsec_get_max_frame_length(dtsec) *
-- 16 / 1000, 1)), (u32)
-- (min(dtsec_get_max_frame_length
-- (dtsec) * 16 / 1000, 1) + 1));
-- }
--
-- /* e. Read and save TPKT again and read the register
-- * at dTSEC address offset 0x32C again
-- */
-- tpkt2 = ioread32be(®s->tpkt);
-- tmp_reg2 = ioread32be(®s->reserved02c0[27]);
--
-- /* f. Compare the value of TPKT saved in step b to
-- * value read in step e. Also compare bits [9:15] of
-- * the register at offset 0x32C saved in step d to the
-- * value of bits [9:15] saved in step e. If the two
-- * registers values are unchanged, then the transmit
-- * portion of the dTSEC controller is locked up and
-- * the user should proceed to the recover sequence.
-- */
-- if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
-- (tmp_reg2 & 0x007F0000))) {
-- /* recover sequence */
--
-- /* a.Write a 1 to RCTRL[GRS] */
--
-- iowrite32be(ioread32be(®s->rctrl) |
-- RCTRL_GRS, ®s->rctrl);
--
-- /* b.Wait until IEVENT[GRSC]=1, or at least
-- * 100 us has elapsed.
-- */
-- for (i = 0; i < 100; i++) {
-- if (ioread32be(®s->ievent) &
-- DTSEC_IMASK_GRSCEN)
-- break;
-- udelay(1);
-- }
-- if (ioread32be(®s->ievent) &
-- DTSEC_IMASK_GRSCEN)
-- iowrite32be(DTSEC_IMASK_GRSCEN,
-- ®s->ievent);
-- else
-- pr_debug("Rx lockup due to Tx lockup\n");
--
-- /* c.Write a 1 to bit n of FM_RSTC
-- * (offset 0x0CC of FPM)
-- */
-- fman_reset_mac(dtsec->fm, dtsec->mac_id);
--
-- /* d.Wait 4 Tx clocks (32 ns) */
-- udelay(1);
--
-- /* e.Write a 0 to bit n of FM_RSTC. */
-- /* cleared by FMAN
-- */
-- }
-- }
--
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
-- }
-- if (event & DTSEC_IMASK_MAGEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
-- if (event & DTSEC_IMASK_GRSCEN)
-- dtsec->exception_cb(dtsec->dev_id,
-- FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
-- if (event & DTSEC_IMASK_TDPEEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
-- if (event & DTSEC_IMASK_RDPEEN)
-- dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
--
-- /* masked interrupts */
-- WARN_ON(event & DTSEC_IMASK_ABRTEN);
-- WARN_ON(event & DTSEC_IMASK_IFERREN);
--}
--
--static void dtsec_1588_isr(void *handle)
--{
-- struct fman_mac *dtsec = (struct fman_mac *)handle;
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 event;
--
-- if (dtsec->ptp_tsu_enabled) {
-- event = ioread32be(®s->tmr_pevent);
-- event &= ioread32be(®s->tmr_pemask);
--
-- if (event) {
-- iowrite32be(event, ®s->tmr_pevent);
-- WARN_ON(event & TMR_PEVENT_TSRE);
-- dtsec->exception_cb(dtsec->dev_id,
-- FM_MAC_EX_1G_1588_TS_RX_ERR);
-- }
-- }
--}
--
--static void free_init_resources(struct fman_mac *dtsec)
--{
-- fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
-- FMAN_INTR_TYPE_ERR);
-- fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
-- FMAN_INTR_TYPE_NORMAL);
--
-- /* release the driver's group hash table */
-- free_hash_table(dtsec->multicast_addr_hash);
-- dtsec->multicast_addr_hash = NULL;
--
-- /* release the driver's individual hash table */
-- free_hash_table(dtsec->unicast_addr_hash);
-- dtsec->unicast_addr_hash = NULL;
--}
--
--int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
--{
-- if (is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- dtsec->dtsec_drv_param->maximum_frame = new_val;
--
-- return 0;
--}
--
--int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
--{
-- if (is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- dtsec->dtsec_drv_param->tx_pad_crc = new_val;
--
-- return 0;
--}
--
--int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 tmp;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- /* Enable */
-- tmp = ioread32be(®s->maccfg1);
-- if (mode & COMM_MODE_RX)
-- tmp |= MACCFG1_RX_EN;
-- if (mode & COMM_MODE_TX)
-- tmp |= MACCFG1_TX_EN;
--
-- iowrite32be(tmp, ®s->maccfg1);
--
-- /* Graceful start - clear the graceful receive stop bit */
-- if (mode & COMM_MODE_TX)
-- iowrite32be(ioread32be(®s->tctrl) & ~DTSEC_TCTRL_GTS,
-- ®s->tctrl);
-- if (mode & COMM_MODE_RX)
-- iowrite32be(ioread32be(®s->rctrl) & ~RCTRL_GRS,
-- ®s->rctrl);
--
-- return 0;
--}
--
--int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 tmp;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- /* Gracefull stop - Assert the graceful transmit stop bit */
-- if (mode & COMM_MODE_RX) {
-- tmp = ioread32be(®s->rctrl) | RCTRL_GRS;
-- iowrite32be(tmp, ®s->rctrl);
--
-- if (dtsec->fm_rev_info.major == 2)
-- usleep_range(100, 200);
-- else
-- udelay(10);
-- }
--
-- if (mode & COMM_MODE_TX) {
-- if (dtsec->fm_rev_info.major == 2)
-- pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
-- else
-- pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
-- }
--
-- tmp = ioread32be(®s->maccfg1);
-- if (mode & COMM_MODE_RX)
-- tmp &= ~MACCFG1_RX_EN;
-- if (mode & COMM_MODE_TX)
-- tmp &= ~MACCFG1_TX_EN;
--
-- iowrite32be(tmp, ®s->maccfg1);
--
-- return 0;
--}
--
--int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
-- u8 __maybe_unused priority,
-- u16 pause_time, u16 __maybe_unused thresh_time)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 ptv = 0;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- if (pause_time) {
-- /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
-- if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
-- pr_warn("pause-time: %d illegal.Should be > 320\n",
-- pause_time);
-- return -EINVAL;
-- }
--
-- ptv = ioread32be(®s->ptv);
-- ptv &= PTV_PTE_MASK;
-- ptv |= pause_time & PTV_PT_MASK;
-- iowrite32be(ptv, ®s->ptv);
--
-- /* trigger the transmission of a flow-control pause frame */
-- iowrite32be(ioread32be(®s->maccfg1) | MACCFG1_TX_FLOW,
-- ®s->maccfg1);
-- } else
-- iowrite32be(ioread32be(®s->maccfg1) & ~MACCFG1_TX_FLOW,
-- ®s->maccfg1);
--
-- return 0;
--}
--
--int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 tmp;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->maccfg1);
-- if (en)
-- tmp |= MACCFG1_RX_FLOW;
-- else
-- tmp &= ~MACCFG1_RX_FLOW;
-- iowrite32be(tmp, ®s->maccfg1);
--
-- return 0;
--}
--
--int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
--{
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- /* Initialize MAC Station Address registers (1 & 2)
-- * Station address have to be swapped (big endian to little endian
-- */
-- dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
-- set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
--
-- return 0;
--}
--
--int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- struct eth_hash_entry *hash_entry;
-- u64 addr;
-- s32 bucket;
-- u32 crc = 0xFFFFFFFF;
-- bool mcast, ghtx;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- addr = ENET_ADDR_TO_UINT64(*eth_addr);
--
-- ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false);
-- mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
--
-- /* Cannot handle unicast mac addr when GHTX is on */
-- if (ghtx && !mcast) {
-- pr_err("Could not compute hash bucket\n");
-- return -EINVAL;
-- }
-- crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
-- crc = bitrev32(crc);
--
-- /* considering the 9 highest order bits in crc H[8:0]:
-- *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
-- *and H[5:1] (next 5 bits) identify the hash bit
-- *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register
-- *and H[4:0] (next 5 bits) identify the hash bit.
-- *
-- *In bucket index output the low 5 bits identify the hash register
-- *bit, while the higher 4 bits identify the hash register
-- */
--
-- if (ghtx) {
-- bucket = (s32)((crc >> 23) & 0x1ff);
-- } else {
-- bucket = (s32)((crc >> 24) & 0xff);
-- /* if !ghtx and mcast the bit must be set in gaddr instead of
-- *igaddr.
-- */
-- if (mcast)
-- bucket += 0x100;
-- }
--
-- set_bucket(dtsec->regs, bucket, true);
--
-- /* Create element to be added to the driver hash table */
-- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
-- if (!hash_entry)
-- return -ENOMEM;
-- hash_entry->addr = addr;
-- INIT_LIST_HEAD(&hash_entry->node);
--
-- if (addr & MAC_GROUP_ADDRESS)
-- /* Group Address */
-- list_add_tail(&hash_entry->node,
-- &dtsec->multicast_addr_hash->lsts[bucket]);
-- else
-- list_add_tail(&hash_entry->node,
-- &dtsec->unicast_addr_hash->lsts[bucket]);
--
-- return 0;
--}
--
--int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- struct list_head *pos;
-- struct eth_hash_entry *hash_entry = NULL;
-- u64 addr;
-- s32 bucket;
-- u32 crc = 0xFFFFFFFF;
-- bool mcast, ghtx;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- addr = ENET_ADDR_TO_UINT64(*eth_addr);
--
-- ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false);
-- mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
--
-- /* Cannot handle unicast mac addr when GHTX is on */
-- if (ghtx && !mcast) {
-- pr_err("Could not compute hash bucket\n");
-- return -EINVAL;
-- }
-- crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
-- crc = bitrev32(crc);
--
-- if (ghtx) {
-- bucket = (s32)((crc >> 23) & 0x1ff);
-- } else {
-- bucket = (s32)((crc >> 24) & 0xff);
-- /* if !ghtx and mcast the bit must be set
-- * in gaddr instead of igaddr.
-- */
-- if (mcast)
-- bucket += 0x100;
-- }
--
-- if (addr & MAC_GROUP_ADDRESS) {
-- /* Group Address */
-- list_for_each(pos,
-- &dtsec->multicast_addr_hash->lsts[bucket]) {
-- hash_entry = ETH_HASH_ENTRY_OBJ(pos);
-- if (hash_entry->addr == addr) {
-- list_del_init(&hash_entry->node);
-- kfree(hash_entry);
-- break;
-- }
-- }
-- if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
-- set_bucket(dtsec->regs, bucket, false);
-- } else {
-- /* Individual Address */
-- list_for_each(pos,
-- &dtsec->unicast_addr_hash->lsts[bucket]) {
-- hash_entry = ETH_HASH_ENTRY_OBJ(pos);
-- if (hash_entry->addr == addr) {
-- list_del_init(&hash_entry->node);
-- kfree(hash_entry);
-- break;
-- }
-- }
-- if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
-- set_bucket(dtsec->regs, bucket, false);
-- }
--
-- /* address does not exist */
-- WARN_ON(!hash_entry);
--
-- return 0;
--}
--
--int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 tmp;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- /* Set unicast promiscuous */
-- tmp = ioread32be(®s->rctrl);
-- if (new_val)
-- tmp |= RCTRL_UPROM;
-- else
-- tmp &= ~RCTRL_UPROM;
--
-- iowrite32be(tmp, ®s->rctrl);
--
-- /* Set multicast promiscuous */
-- tmp = ioread32be(®s->rctrl);
-- if (new_val)
-- tmp |= RCTRL_MPROM;
-- else
-- tmp &= ~RCTRL_MPROM;
--
-- iowrite32be(tmp, ®s->rctrl);
--
-- return 0;
--}
--
--int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 tmp;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->maccfg2);
--
-- /* Full Duplex */
-- tmp |= MACCFG2_FULL_DUPLEX;
--
-- tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
-- if (speed < SPEED_1000)
-- tmp |= MACCFG2_NIBBLE_MODE;
-- else if (speed == SPEED_1000)
-- tmp |= MACCFG2_BYTE_MODE;
-- iowrite32be(tmp, ®s->maccfg2);
--
-- tmp = ioread32be(®s->ecntrl);
-- if (speed == SPEED_100)
-- tmp |= DTSEC_ECNTRL_R100M;
-- else
-- tmp &= ~DTSEC_ECNTRL_R100M;
-- iowrite32be(tmp, ®s->ecntrl);
--
-- return 0;
--}
--
--int dtsec_restart_autoneg(struct fman_mac *dtsec)
--{
-- u16 tmp_reg16;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
--
-- tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
-- tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
-- BMCR_FULLDPLX | BMCR_SPEED1000);
--
-- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
--
-- return 0;
--}
--
--int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- *mac_version = ioread32be(®s->tsec_id);
--
-- return 0;
--}
--
--int dtsec_set_exception(struct fman_mac *dtsec,
-- enum fman_mac_exceptions exception, bool enable)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- u32 bit_mask = 0;
--
-- if (!is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
-- bit_mask = get_exception_flag(exception);
-- if (bit_mask) {
-- if (enable)
-- dtsec->exceptions |= bit_mask;
-- else
-- dtsec->exceptions &= ~bit_mask;
-- } else {
-- pr_err("Undefined exception\n");
-- return -EINVAL;
-- }
-- if (enable)
-- iowrite32be(ioread32be(®s->imask) | bit_mask,
-- ®s->imask);
-- else
-- iowrite32be(ioread32be(®s->imask) & ~bit_mask,
-- ®s->imask);
-- } else {
-- if (!dtsec->ptp_tsu_enabled) {
-- pr_err("Exception valid for 1588 only\n");
-- return -EINVAL;
-- }
-- switch (exception) {
-- case FM_MAC_EX_1G_1588_TS_RX_ERR:
-- if (enable) {
-- dtsec->en_tsu_err_exeption = true;
-- iowrite32be(ioread32be(®s->tmr_pemask) |
-- TMR_PEMASK_TSREEN,
-- ®s->tmr_pemask);
-- } else {
-- dtsec->en_tsu_err_exeption = false;
-- iowrite32be(ioread32be(®s->tmr_pemask) &
-- ~TMR_PEMASK_TSREEN,
-- ®s->tmr_pemask);
-- }
-- break;
-- default:
-- pr_err("Undefined exception\n");
-- return -EINVAL;
-- }
-- }
--
-- return 0;
--}
--
--int dtsec_init(struct fman_mac *dtsec)
--{
-- struct dtsec_regs __iomem *regs = dtsec->regs;
-- struct dtsec_cfg *dtsec_drv_param;
-- int err;
-- u16 max_frm_ln;
-- enet_addr_t eth_addr;
--
-- if (is_init_done(dtsec->dtsec_drv_param))
-- return -EINVAL;
--
-- if (DEFAULT_RESET_ON_INIT &&
-- (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
-- pr_err("Can't reset MAC!\n");
-- return -EINVAL;
-- }
--
-- err = check_init_parameters(dtsec);
-- if (err)
-- return err;
--
-- dtsec_drv_param = dtsec->dtsec_drv_param;
--
-- MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
--
-- err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
-- dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
-- dtsec->tbiphy->mdio.addr);
-- if (err) {
-- free_init_resources(dtsec);
-- pr_err("DTSEC version doesn't support this i/f mode\n");
-- return err;
-- }
--
-- if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
-- u16 tmp_reg16;
--
-- /* Configure the TBI PHY Control Register */
-- tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
-- phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
--
-- tmp_reg16 = TBICON_CLK_SELECT;
-- phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
--
-- tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
-- BMCR_FULLDPLX | BMCR_SPEED1000);
-- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
--
-- if (dtsec->basex_if)
-- tmp_reg16 = TBIANA_1000X;
-- else
-- tmp_reg16 = TBIANA_SGMII;
-- phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
--
-- tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
-- BMCR_FULLDPLX | BMCR_SPEED1000);
--
-- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
-- }
--
-- /* Max Frame Length */
-- max_frm_ln = (u16)ioread32be(®s->maxfrm);
-- err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
-- if (err) {
-- pr_err("Setting max frame length failed\n");
-- free_init_resources(dtsec);
-- return -EINVAL;
-- }
--
-- dtsec->multicast_addr_hash =
-- alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
-- if (!dtsec->multicast_addr_hash) {
-- free_init_resources(dtsec);
-- pr_err("MC hash table is failed\n");
-- return -ENOMEM;
-- }
--
-- dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
-- if (!dtsec->unicast_addr_hash) {
-- free_init_resources(dtsec);
-- pr_err("UC hash table is failed\n");
-- return -ENOMEM;
-- }
--
-- /* register err intr handler for dtsec to FPM (err) */
-- fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
-- FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
-- /* register 1588 intr handler for TMR to FPM (normal) */
-- fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
-- FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
--
-- kfree(dtsec_drv_param);
-- dtsec->dtsec_drv_param = NULL;
--
-- return 0;
--}
--
--int dtsec_free(struct fman_mac *dtsec)
--{
-- free_init_resources(dtsec);
--
-- kfree(dtsec->dtsec_drv_param);
-- dtsec->dtsec_drv_param = NULL;
-- kfree(dtsec);
--
-- return 0;
--}
--
--struct fman_mac *dtsec_config(struct fman_mac_params *params)
--{
-- struct fman_mac *dtsec;
-- struct dtsec_cfg *dtsec_drv_param;
-- void __iomem *base_addr;
--
-- base_addr = params->base_addr;
--
-- /* allocate memory for the UCC GETH data structure. */
-- dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
-- if (!dtsec)
-- return NULL;
--
-- /* allocate memory for the d_tsec driver parameters data structure. */
-- dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
-- if (!dtsec_drv_param)
-- goto err_dtsec;
--
-- /* Plant parameter structure pointer */
-- dtsec->dtsec_drv_param = dtsec_drv_param;
--
-- set_dflts(dtsec_drv_param);
--
-- dtsec->regs = base_addr;
-- dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
-- dtsec->max_speed = params->max_speed;
-- dtsec->phy_if = params->phy_if;
-- dtsec->mac_id = params->mac_id;
-- dtsec->exceptions = (DTSEC_IMASK_BREN |
-- DTSEC_IMASK_RXCEN |
-- DTSEC_IMASK_BTEN |
-- DTSEC_IMASK_TXCEN |
-- DTSEC_IMASK_TXEEN |
-- DTSEC_IMASK_ABRTEN |
-- DTSEC_IMASK_LCEN |
-- DTSEC_IMASK_CRLEN |
-- DTSEC_IMASK_XFUNEN |
-- DTSEC_IMASK_IFERREN |
-- DTSEC_IMASK_MAGEN |
-- DTSEC_IMASK_TDPEEN |
-- DTSEC_IMASK_RDPEEN);
-- dtsec->exception_cb = params->exception_cb;
-- dtsec->event_cb = params->event_cb;
-- dtsec->dev_id = params->dev_id;
-- dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
-- dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
--
-- dtsec->fm = params->fm;
-- dtsec->basex_if = params->basex_if;
--
-- if (!params->internal_phy_node) {
-- pr_err("TBI PHY node is not available\n");
-- goto err_dtsec_drv_param;
-- }
--
-- dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
-- if (!dtsec->tbiphy) {
-- pr_err("of_phy_find_device (TBI PHY) failed\n");
-- goto err_dtsec_drv_param;
-- }
--
-- put_device(&dtsec->tbiphy->mdio.dev);
--
-- /* Save FMan revision */
-- fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
--
-- return dtsec;
--
--err_dtsec_drv_param:
-- kfree(dtsec_drv_param);
--err_dtsec:
-- kfree(dtsec);
-- return NULL;
--}
-diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
-deleted file mode 100644
-index c4467c0..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
-+++ /dev/null
-@@ -1,59 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#ifndef __DTSEC_H
--#define __DTSEC_H
--
--#include "fman_mac.h"
--
--struct fman_mac *dtsec_config(struct fman_mac_params *params);
--int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
--int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
--int dtsec_adjust_link(struct fman_mac *dtsec,
-- u16 speed);
--int dtsec_restart_autoneg(struct fman_mac *dtsec);
--int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
--int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
--int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
--int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
--int dtsec_init(struct fman_mac *dtsec);
--int dtsec_free(struct fman_mac *dtsec);
--int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
--int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
-- u16 pause_time, u16 thresh_time);
--int dtsec_set_exception(struct fman_mac *dtsec,
-- enum fman_mac_exceptions exception, bool enable);
--int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
--int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
--int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
--
--#endif /* __DTSEC_H */
-diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
-deleted file mode 100644
-index dd6d052..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_mac.h
-+++ /dev/null
-@@ -1,274 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--/* FM MAC ... */
--#ifndef __FM_MAC_H
--#define __FM_MAC_H
--
--#include "fman.h"
--
--#include <linux/slab.h>
--#include <linux/phy.h>
--#include <linux/if_ether.h>
--
--struct fman_mac;
--
--/* Ethernet Address */
--typedef u8 enet_addr_t[ETH_ALEN];
--
--#define ENET_ADDR_TO_UINT64(_enet_addr) \
-- (u64)(((u64)(_enet_addr)[0] << 40) | \
-- ((u64)(_enet_addr)[1] << 32) | \
-- ((u64)(_enet_addr)[2] << 24) | \
-- ((u64)(_enet_addr)[3] << 16) | \
-- ((u64)(_enet_addr)[4] << 8) | \
-- ((u64)(_enet_addr)[5]))
--
--#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
-- do { \
-- int i; \
-- for (i = 0; i < ETH_ALEN; i++) \
-- (_enet_addr)[i] = \
-- (u8)((_addr64) >> ((5 - i) * 8)); \
-- } while (0)
--
--/* defaults */
--#define DEFAULT_RESET_ON_INIT false
--
--/* PFC defines */
--#define FSL_FM_PAUSE_TIME_ENABLE 0xf000
--#define FSL_FM_PAUSE_TIME_DISABLE 0
--#define FSL_FM_PAUSE_THRESH_DEFAULT 0
--
--#define FM_MAC_NO_PFC 0xff
--
--/* HASH defines */
--#define ETH_HASH_ENTRY_OBJ(ptr) \
-- hlist_entry_safe(ptr, struct eth_hash_entry, node)
--
--/* Enumeration (bit flags) of communication modes (Transmit,
-- * receive or both).
-- */
--enum comm_mode {
-- COMM_MODE_NONE = 0, /* No transmit/receive communication */
-- COMM_MODE_RX = 1, /* Only receive communication */
-- COMM_MODE_TX = 2, /* Only transmit communication */
-- COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
--};
--
--/* FM MAC Exceptions */
--enum fman_mac_exceptions {
-- FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
-- /* 10GEC MDIO scan event interrupt */
-- , FM_MAC_EX_10G_MDIO_CMD_CMPL
-- /* 10GEC MDIO command completion interrupt */
-- , FM_MAC_EX_10G_REM_FAULT
-- /* 10GEC, mEMAC Remote fault interrupt */
-- , FM_MAC_EX_10G_LOC_FAULT
-- /* 10GEC, mEMAC Local fault interrupt */
-- , FM_MAC_EX_10G_TX_ECC_ER
-- /* 10GEC, mEMAC Transmit frame ECC error interrupt */
-- , FM_MAC_EX_10G_TX_FIFO_UNFL
-- /* 10GEC, mEMAC Transmit FIFO underflow interrupt */
-- , FM_MAC_EX_10G_TX_FIFO_OVFL
-- /* 10GEC, mEMAC Transmit FIFO overflow interrupt */
-- , FM_MAC_EX_10G_TX_ER
-- /* 10GEC Transmit frame error interrupt */
-- , FM_MAC_EX_10G_RX_FIFO_OVFL
-- /* 10GEC, mEMAC Receive FIFO overflow interrupt */
-- , FM_MAC_EX_10G_RX_ECC_ER
-- /* 10GEC, mEMAC Receive frame ECC error interrupt */
-- , FM_MAC_EX_10G_RX_JAB_FRM
-- /* 10GEC Receive jabber frame interrupt */
-- , FM_MAC_EX_10G_RX_OVRSZ_FRM
-- /* 10GEC Receive oversized frame interrupt */
-- , FM_MAC_EX_10G_RX_RUNT_FRM
-- /* 10GEC Receive runt frame interrupt */
-- , FM_MAC_EX_10G_RX_FRAG_FRM
-- /* 10GEC Receive fragment frame interrupt */
-- , FM_MAC_EX_10G_RX_LEN_ER
-- /* 10GEC Receive payload length error interrupt */
-- , FM_MAC_EX_10G_RX_CRC_ER
-- /* 10GEC Receive CRC error interrupt */
-- , FM_MAC_EX_10G_RX_ALIGN_ER
-- /* 10GEC Receive alignment error interrupt */
-- , FM_MAC_EX_1G_BAB_RX
-- /* dTSEC Babbling receive error */
-- , FM_MAC_EX_1G_RX_CTL
-- /* dTSEC Receive control (pause frame) interrupt */
-- , FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET
-- /* dTSEC Graceful transmit stop complete */
-- , FM_MAC_EX_1G_BAB_TX
-- /* dTSEC Babbling transmit error */
-- , FM_MAC_EX_1G_TX_CTL
-- /* dTSEC Transmit control (pause frame) interrupt */
-- , FM_MAC_EX_1G_TX_ERR
-- /* dTSEC Transmit error */
-- , FM_MAC_EX_1G_LATE_COL
-- /* dTSEC Late collision */
-- , FM_MAC_EX_1G_COL_RET_LMT
-- /* dTSEC Collision retry limit */
-- , FM_MAC_EX_1G_TX_FIFO_UNDRN
-- /* dTSEC Transmit FIFO underrun */
-- , FM_MAC_EX_1G_MAG_PCKT
-- /* dTSEC Magic Packet detection */
-- , FM_MAC_EX_1G_MII_MNG_RD_COMPLET
-- /* dTSEC MII management read completion */
-- , FM_MAC_EX_1G_MII_MNG_WR_COMPLET
-- /* dTSEC MII management write completion */
-- , FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET
-- /* dTSEC Graceful receive stop complete */
-- , FM_MAC_EX_1G_DATA_ERR
-- /* dTSEC Internal data error on transmit */
-- , FM_MAC_1G_RX_DATA_ERR
-- /* dTSEC Internal data error on receive */
-- , FM_MAC_EX_1G_1588_TS_RX_ERR
-- /* dTSEC Time-Stamp Receive Error */
-- , FM_MAC_EX_1G_RX_MIB_CNT_OVFL
-- /* dTSEC MIB counter overflow */
-- , FM_MAC_EX_TS_FIFO_ECC_ERR
-- /* mEMAC Time-stamp FIFO ECC error interrupt;
-- * not supported on T4240/B4860 rev1 chips
-- */
-- , FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT
-- /* mEMAC Magic Packet Indication Interrupt */
--};
--
--struct eth_hash_entry {
-- u64 addr; /* Ethernet Address */
-- struct list_head node;
--};
--
--typedef void (fman_mac_exception_cb)(void *dev_id,
-- enum fman_mac_exceptions exceptions);
--
--/* FMan MAC config input */
--struct fman_mac_params {
-- /* Base of memory mapped FM MAC registers */
-- void __iomem *base_addr;
-- /* MAC address of device; First octet is sent first */
-- enet_addr_t addr;
-- /* MAC ID; numbering of dTSEC and 1G-mEMAC:
-- * 0 - FM_MAX_NUM_OF_1G_MACS;
-- * numbering of 10G-MAC (TGEC) and 10G-mEMAC:
-- * 0 - FM_MAX_NUM_OF_10G_MACS
-- */
-- u8 mac_id;
-- /* PHY interface */
-- phy_interface_t phy_if;
-- /* Note that the speed should indicate the maximum rate that
-- * this MAC should support rather than the actual speed;
-- */
-- u16 max_speed;
-- /* A handle to the FM object this port related to */
-- void *fm;
-- void *dev_id; /* device cookie used by the exception cbs */
-- fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
-- fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
-- /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
-- * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
-- * to interface between MAC and phy/backplane, SGMII phy can still
-- * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
-- */
-- bool basex_if;
-- /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
-- struct device_node *internal_phy_node;
--};
--
--struct eth_hash_t {
-- u16 size;
-- struct list_head *lsts;
--};
--
--static inline struct eth_hash_entry
--*dequeue_addr_from_hash_entry(struct list_head *addr_lst)
--{
-- struct eth_hash_entry *hash_entry = NULL;
--
-- if (!list_empty(addr_lst)) {
-- hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next);
-- list_del_init(&hash_entry->node);
-- }
-- return hash_entry;
--}
--
--static inline void free_hash_table(struct eth_hash_t *hash)
--{
-- struct eth_hash_entry *hash_entry;
-- int i = 0;
--
-- if (hash) {
-- if (hash->lsts) {
-- for (i = 0; i < hash->size; i++) {
-- hash_entry =
-- dequeue_addr_from_hash_entry(&hash->lsts[i]);
-- while (hash_entry) {
-- kfree(hash_entry);
-- hash_entry =
-- dequeue_addr_from_hash_entry(&hash->
-- lsts[i]);
-- }
-- }
--
-- kfree(hash->lsts);
-- }
--
-- kfree(hash);
-- }
--}
--
--static inline struct eth_hash_t *alloc_hash_table(u16 size)
--{
-- u32 i;
-- struct eth_hash_t *hash;
--
-- /* Allocate address hash table */
-- hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL);
-- if (!hash)
-- return NULL;
--
-- hash->size = size;
--
-- hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head),
-- GFP_KERNEL);
-- if (!hash->lsts) {
-- kfree(hash);
-- return NULL;
-- }
--
-- for (i = 0; i < hash->size; i++)
-- INIT_LIST_HEAD(&hash->lsts[i]);
--
-- return hash;
--}
--
--#endif /* __FM_MAC_H */
-diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
-deleted file mode 100644
-index 71a5ded..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_memac.c
-+++ /dev/null
-@@ -1,1177 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
--#include "fman_memac.h"
--#include "fman.h"
--
--#include <linux/slab.h>
--#include <linux/io.h>
--#include <linux/phy.h>
--#include <linux/of_mdio.h>
--
--/* PCS registers */
--#define MDIO_SGMII_CR 0x00
--#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
--#define MDIO_SGMII_LINK_TMR_L 0x12
--#define MDIO_SGMII_LINK_TMR_H 0x13
--#define MDIO_SGMII_IF_MODE 0x14
--
--/* SGMII Control defines */
--#define SGMII_CR_AN_EN 0x1000
--#define SGMII_CR_RESTART_AN 0x0200
--#define SGMII_CR_FD 0x0100
--#define SGMII_CR_SPEED_SEL1_1G 0x0040
--#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
-- SGMII_CR_SPEED_SEL1_1G)
--
--/* SGMII Device Ability for SGMII defines */
--#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
--#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
--
--/* Link timer define */
--#define LINK_TMR_L 0xa120
--#define LINK_TMR_H 0x0007
--#define LINK_TMR_L_BASEX 0xaf08
--#define LINK_TMR_H_BASEX 0x002f
--
--/* SGMII IF Mode defines */
--#define IF_MODE_USE_SGMII_AN 0x0002
--#define IF_MODE_SGMII_EN 0x0001
--#define IF_MODE_SGMII_SPEED_100M 0x0004
--#define IF_MODE_SGMII_SPEED_1G 0x0008
--#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
--
--/* Num of additional exact match MAC adr regs */
--#define MEMAC_NUM_OF_PADDRS 7
--
--/* Control and Configuration Register (COMMAND_CONFIG) */
--#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
--#define CMD_CFG_TX_LOWP_ENA 0x00800000 /* 08 Tx Low Power Idle Enable */
--#define CMD_CFG_PFC_MODE 0x00080000 /* 12 Enable PFC */
--#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 Payload length check disable */
--#define CMD_CFG_SW_RESET 0x00001000 /* 19 S/W Reset, self clearing bit */
--#define CMD_CFG_TX_PAD_EN 0x00000800 /* 20 Enable Tx padding of frames */
--#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 Ignore Pause frame quanta */
--#define CMD_CFG_CRC_FWD 0x00000040 /* 25 Terminate/frwd CRC of frames */
--#define CMD_CFG_PAD_EN 0x00000020 /* 26 Frame padding removal */
--#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 Promiscuous operation enable */
--#define CMD_CFG_RX_EN 0x00000002 /* 30 MAC receive path enable */
--#define CMD_CFG_TX_EN 0x00000001 /* 31 MAC transmit path enable */
--
--/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
--#define TX_FIFO_SECTIONS_TX_EMPTY_MASK 0xFFFF0000
--#define TX_FIFO_SECTIONS_TX_AVAIL_MASK 0x0000FFFF
--#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G 0x00400000
--#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G 0x00100000
--#define TX_FIFO_SECTIONS_TX_AVAIL_10G 0x00000019
--#define TX_FIFO_SECTIONS_TX_AVAIL_1G 0x00000020
--#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G 0x00000060
--
--#define GET_TX_EMPTY_DEFAULT_VALUE(_val) \
--do { \
-- _val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK; \
-- ((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ? \
-- (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\
-- (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\
--} while (0)
--
--/* Interface Mode Register (IF_MODE) */
--
--#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
--#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
--#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
--#define IF_MODE_RGMII 0x00000004
--#define IF_MODE_RGMII_AUTO 0x00008000
--#define IF_MODE_RGMII_1000 0x00004000 /* 10 - 1000Mbps RGMII */
--#define IF_MODE_RGMII_100 0x00000000 /* 00 - 100Mbps RGMII */
--#define IF_MODE_RGMII_10 0x00002000 /* 01 - 10Mbps RGMII */
--#define IF_MODE_RGMII_SP_MASK 0x00006000 /* Setsp mask bits */
--#define IF_MODE_RGMII_FD 0x00001000 /* Full duplex RGMII */
--#define IF_MODE_HD 0x00000040 /* Half duplex operation */
--
--/* Hash table Control Register (HASHTABLE_CTRL) */
--#define HASH_CTRL_MCAST_EN 0x00000100
--/* 26-31 Hash table address code */
--#define HASH_CTRL_ADDR_MASK 0x0000003F
--/* MAC mcast indication */
--#define GROUP_ADDRESS 0x0000010000000000LL
--#define HASH_TABLE_SIZE 64 /* Hash tbl size */
--
--/* Interrupt Mask Register (IMASK) */
--#define MEMAC_IMASK_MGI 0x40000000 /* 1 Magic pkt detect indication */
--#define MEMAC_IMASK_TSECC_ER 0x20000000 /* 2 Timestamp FIFO ECC error evnt */
--#define MEMAC_IMASK_TECC_ER 0x02000000 /* 6 Transmit frame ECC error evnt */
--#define MEMAC_IMASK_RECC_ER 0x01000000 /* 7 Receive frame ECC error evnt */
--
--#define MEMAC_ALL_ERRS_IMASK \
-- ((u32)(MEMAC_IMASK_TSECC_ER | \
-- MEMAC_IMASK_TECC_ER | \
-- MEMAC_IMASK_RECC_ER | \
-- MEMAC_IMASK_MGI))
--
--#define MEMAC_IEVNT_PCS 0x80000000 /* PCS (XG). Link sync (G) */
--#define MEMAC_IEVNT_AN 0x40000000 /* Auto-negotiation */
--#define MEMAC_IEVNT_LT 0x20000000 /* Link Training/New page */
--#define MEMAC_IEVNT_MGI 0x00004000 /* Magic pkt detection */
--#define MEMAC_IEVNT_TS_ECC_ER 0x00002000 /* Timestamp FIFO ECC error*/
--#define MEMAC_IEVNT_RX_FIFO_OVFL 0x00001000 /* Rx FIFO overflow */
--#define MEMAC_IEVNT_TX_FIFO_UNFL 0x00000800 /* Tx FIFO underflow */
--#define MEMAC_IEVNT_TX_FIFO_OVFL 0x00000400 /* Tx FIFO overflow */
--#define MEMAC_IEVNT_TX_ECC_ER 0x00000200 /* Tx frame ECC error */
--#define MEMAC_IEVNT_RX_ECC_ER 0x00000100 /* Rx frame ECC error */
--#define MEMAC_IEVNT_LI_FAULT 0x00000080 /* Link Interruption flt */
--#define MEMAC_IEVNT_RX_EMPTY 0x00000040 /* Rx FIFO empty */
--#define MEMAC_IEVNT_TX_EMPTY 0x00000020 /* Tx FIFO empty */
--#define MEMAC_IEVNT_RX_LOWP 0x00000010 /* Low Power Idle */
--#define MEMAC_IEVNT_PHY_LOS 0x00000004 /* Phy loss of signal */
--#define MEMAC_IEVNT_REM_FAULT 0x00000002 /* Remote fault (XGMII) */
--#define MEMAC_IEVNT_LOC_FAULT 0x00000001 /* Local fault (XGMII) */
--
--#define DEFAULT_PAUSE_QUANTA 0xf000
--#define DEFAULT_FRAME_LENGTH 0x600
--#define DEFAULT_TX_IPG_LENGTH 12
--
--#define CLXY_PAUSE_QUANTA_CLX_PQNT 0x0000FFFF
--#define CLXY_PAUSE_QUANTA_CLY_PQNT 0xFFFF0000
--#define CLXY_PAUSE_THRESH_CLX_QTH 0x0000FFFF
--#define CLXY_PAUSE_THRESH_CLY_QTH 0xFFFF0000
--
--struct mac_addr {
-- /* Lower 32 bits of 48-bit MAC address */
-- u32 mac_addr_l;
-- /* Upper 16 bits of 48-bit MAC address */
-- u32 mac_addr_u;
--};
--
--/* memory map */
--struct memac_regs {
-- u32 res0000[2]; /* General Control and Status */
-- u32 command_config; /* 0x008 Ctrl and cfg */
-- struct mac_addr mac_addr0; /* 0x00C-0x010 MAC_ADDR_0...1 */
-- u32 maxfrm; /* 0x014 Max frame length */
-- u32 res0018[1];
-- u32 rx_fifo_sections; /* Receive FIFO configuration reg */
-- u32 tx_fifo_sections; /* Transmit FIFO configuration reg */
-- u32 res0024[2];
-- u32 hashtable_ctrl; /* 0x02C Hash table control */
-- u32 res0030[4];
-- u32 ievent; /* 0x040 Interrupt event */
-- u32 tx_ipg_length; /* 0x044 Transmitter inter-packet-gap */
-- u32 res0048;
-- u32 imask; /* 0x04C Interrupt mask */
-- u32 res0050;
-- u32 pause_quanta[4]; /* 0x054 Pause quanta */
-- u32 pause_thresh[4]; /* 0x064 Pause quanta threshold */
-- u32 rx_pause_status; /* 0x074 Receive pause status */
-- u32 res0078[2];
-- struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */
-- u32 lpwake_timer; /* 0x0B8 Low Power Wakeup Timer */
-- u32 sleep_timer; /* 0x0BC Transmit EEE Low Power Timer */
-- u32 res00c0[8];
-- u32 statn_config; /* 0x0E0 Statistics configuration */
-- u32 res00e4[7];
-- /* Rx Statistics Counter */
-- u32 reoct_l;
-- u32 reoct_u;
-- u32 roct_l;
-- u32 roct_u;
-- u32 raln_l;
-- u32 raln_u;
-- u32 rxpf_l;
-- u32 rxpf_u;
-- u32 rfrm_l;
-- u32 rfrm_u;
-- u32 rfcs_l;
-- u32 rfcs_u;
-- u32 rvlan_l;
-- u32 rvlan_u;
-- u32 rerr_l;
-- u32 rerr_u;
-- u32 ruca_l;
-- u32 ruca_u;
-- u32 rmca_l;
-- u32 rmca_u;
-- u32 rbca_l;
-- u32 rbca_u;
-- u32 rdrp_l;
-- u32 rdrp_u;
-- u32 rpkt_l;
-- u32 rpkt_u;
-- u32 rund_l;
-- u32 rund_u;
-- u32 r64_l;
-- u32 r64_u;
-- u32 r127_l;
-- u32 r127_u;
-- u32 r255_l;
-- u32 r255_u;
-- u32 r511_l;
-- u32 r511_u;
-- u32 r1023_l;
-- u32 r1023_u;
-- u32 r1518_l;
-- u32 r1518_u;
-- u32 r1519x_l;
-- u32 r1519x_u;
-- u32 rovr_l;
-- u32 rovr_u;
-- u32 rjbr_l;
-- u32 rjbr_u;
-- u32 rfrg_l;
-- u32 rfrg_u;
-- u32 rcnp_l;
-- u32 rcnp_u;
-- u32 rdrntp_l;
-- u32 rdrntp_u;
-- u32 res01d0[12];
-- /* Tx Statistics Counter */
-- u32 teoct_l;
-- u32 teoct_u;
-- u32 toct_l;
-- u32 toct_u;
-- u32 res0210[2];
-- u32 txpf_l;
-- u32 txpf_u;
-- u32 tfrm_l;
-- u32 tfrm_u;
-- u32 tfcs_l;
-- u32 tfcs_u;
-- u32 tvlan_l;
-- u32 tvlan_u;
-- u32 terr_l;
-- u32 terr_u;
-- u32 tuca_l;
-- u32 tuca_u;
-- u32 tmca_l;
-- u32 tmca_u;
-- u32 tbca_l;
-- u32 tbca_u;
-- u32 res0258[2];
-- u32 tpkt_l;
-- u32 tpkt_u;
-- u32 tund_l;
-- u32 tund_u;
-- u32 t64_l;
-- u32 t64_u;
-- u32 t127_l;
-- u32 t127_u;
-- u32 t255_l;
-- u32 t255_u;
-- u32 t511_l;
-- u32 t511_u;
-- u32 t1023_l;
-- u32 t1023_u;
-- u32 t1518_l;
-- u32 t1518_u;
-- u32 t1519x_l;
-- u32 t1519x_u;
-- u32 res02a8[6];
-- u32 tcnp_l;
-- u32 tcnp_u;
-- u32 res02c8[14];
-- /* Line Interface Control */
-- u32 if_mode; /* 0x300 Interface Mode Control */
-- u32 if_status; /* 0x304 Interface Status */
-- u32 res0308[14];
-- /* HiGig/2 */
-- u32 hg_config; /* 0x340 Control and cfg */
-- u32 res0344[3];
-- u32 hg_pause_quanta; /* 0x350 Pause quanta */
-- u32 res0354[3];
-- u32 hg_pause_thresh; /* 0x360 Pause quanta threshold */
-- u32 res0364[3];
-- u32 hgrx_pause_status; /* 0x370 Receive pause status */
-- u32 hg_fifos_status; /* 0x374 fifos status */
-- u32 rhm; /* 0x378 rx messages counter */
-- u32 thm; /* 0x37C tx messages counter */
--};
--
--struct memac_cfg {
-- bool reset_on_init;
-- bool pause_ignore;
-- bool promiscuous_mode_enable;
-- struct fixed_phy_status *fixed_link;
-- u16 max_frame_length;
-- u16 pause_quanta;
-- u32 tx_ipg_length;
--};
--
--struct fman_mac {
-- /* Pointer to MAC memory mapped registers */
-- struct memac_regs __iomem *regs;
-- /* MAC address of device */
-- u64 addr;
-- /* Ethernet physical interface */
-- phy_interface_t phy_if;
-- u16 max_speed;
-- void *dev_id; /* device cookie used by the exception cbs */
-- fman_mac_exception_cb *exception_cb;
-- fman_mac_exception_cb *event_cb;
-- /* Pointer to driver's global address hash table */
-- struct eth_hash_t *multicast_addr_hash;
-- /* Pointer to driver's individual address hash table */
-- struct eth_hash_t *unicast_addr_hash;
-- u8 mac_id;
-- u32 exceptions;
-- struct memac_cfg *memac_drv_param;
-- void *fm;
-- struct fman_rev_info fm_rev_info;
-- bool basex_if;
-- struct phy_device *pcsphy;
--};
--
--static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
-- u8 paddr_num)
--{
-- u32 tmp0, tmp1;
--
-- tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
-- tmp1 = (u32)(adr[4] | adr[5] << 8);
--
-- if (paddr_num == 0) {
-- iowrite32be(tmp0, ®s->mac_addr0.mac_addr_l);
-- iowrite32be(tmp1, ®s->mac_addr0.mac_addr_u);
-- } else {
-- iowrite32be(tmp0, ®s->mac_addr[paddr_num - 1].mac_addr_l);
-- iowrite32be(tmp1, ®s->mac_addr[paddr_num - 1].mac_addr_u);
-- }
--}
--
--static int reset(struct memac_regs __iomem *regs)
--{
-- u32 tmp;
-- int count;
--
-- tmp = ioread32be(®s->command_config);
--
-- tmp |= CMD_CFG_SW_RESET;
--
-- iowrite32be(tmp, ®s->command_config);
--
-- count = 100;
-- do {
-- udelay(1);
-- } while ((ioread32be(®s->command_config) & CMD_CFG_SW_RESET) &&
-- --count);
--
-- if (count == 0)
-- return -EBUSY;
--
-- return 0;
--}
--
--static void set_exception(struct memac_regs __iomem *regs, u32 val,
-- bool enable)
--{
-- u32 tmp;
--
-- tmp = ioread32be(®s->imask);
-- if (enable)
-- tmp |= val;
-- else
-- tmp &= ~val;
--
-- iowrite32be(tmp, ®s->imask);
--}
--
--static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
-- phy_interface_t phy_if, u16 speed, bool slow_10g_if,
-- u32 exceptions)
--{
-- u32 tmp;
--
-- /* Config */
-- tmp = 0;
-- if (cfg->promiscuous_mode_enable)
-- tmp |= CMD_CFG_PROMIS_EN;
-- if (cfg->pause_ignore)
-- tmp |= CMD_CFG_PAUSE_IGNORE;
--
-- /* Payload length check disable */
-- tmp |= CMD_CFG_NO_LEN_CHK;
-- /* Enable padding of frames in transmit direction */
-- tmp |= CMD_CFG_TX_PAD_EN;
--
-- tmp |= CMD_CFG_CRC_FWD;
--
-- iowrite32be(tmp, ®s->command_config);
--
-- /* Max Frame Length */
-- iowrite32be((u32)cfg->max_frame_length, ®s->maxfrm);
--
-- /* Pause Time */
-- iowrite32be((u32)cfg->pause_quanta, ®s->pause_quanta[0]);
-- iowrite32be((u32)0, ®s->pause_thresh[0]);
--
-- /* IF_MODE */
-- tmp = 0;
-- switch (phy_if) {
-- case PHY_INTERFACE_MODE_XGMII:
-- tmp |= IF_MODE_XGMII;
-- break;
-- default:
-- tmp |= IF_MODE_GMII;
-- if (phy_if == PHY_INTERFACE_MODE_RGMII)
-- tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
-- }
-- iowrite32be(tmp, ®s->if_mode);
--
-- /* TX_FIFO_SECTIONS */
-- tmp = 0;
-- if (phy_if == PHY_INTERFACE_MODE_XGMII) {
-- if (slow_10g_if) {
-- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
-- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
-- } else {
-- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
-- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
-- }
-- } else {
-- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
-- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
-- }
-- iowrite32be(tmp, ®s->tx_fifo_sections);
--
-- /* clear all pending events and set-up interrupts */
-- iowrite32be(0xffffffff, ®s->ievent);
-- set_exception(regs, exceptions, true);
--
-- return 0;
--}
--
--static void set_dflts(struct memac_cfg *cfg)
--{
-- cfg->reset_on_init = false;
-- cfg->promiscuous_mode_enable = false;
-- cfg->pause_ignore = false;
-- cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
-- cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
-- cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
--}
--
--static u32 get_mac_addr_hash_code(u64 eth_addr)
--{
-- u64 mask1, mask2;
-- u32 xor_val = 0;
-- u8 i, j;
--
-- for (i = 0; i < 6; i++) {
-- mask1 = eth_addr & (u64)0x01;
-- eth_addr >>= 1;
--
-- for (j = 0; j < 7; j++) {
-- mask2 = eth_addr & (u64)0x01;
-- mask1 ^= mask2;
-- eth_addr >>= 1;
-- }
--
-- xor_val |= (mask1 << (5 - i));
-- }
--
-- return xor_val;
--}
--
--static void setup_sgmii_internal_phy(struct fman_mac *memac,
-- struct fixed_phy_status *fixed_link)
--{
-- u16 tmp_reg16;
--
-- if (WARN_ON(!memac->pcsphy))
-- return;
--
-- /* SGMII mode */
-- tmp_reg16 = IF_MODE_SGMII_EN;
-- if (!fixed_link)
-- /* AN enable */
-- tmp_reg16 |= IF_MODE_USE_SGMII_AN;
-- else {
-- switch (fixed_link->speed) {
-- case 10:
-- /* For 10M: IF_MODE[SPEED_10M] = 0 */
-- break;
-- case 100:
-- tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
-- break;
-- case 1000: /* fallthrough */
-- default:
-- tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
-- break;
-- }
-- if (!fixed_link->duplex)
-- tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
-- }
-- phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
--
-- /* Device ability according to SGMII specification */
-- tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
-- phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
--
-- /* Adjust link timer for SGMII -
-- * According to Cisco SGMII specification the timer should be 1.6 ms.
-- * The link_timer register is configured in units of the clock.
-- * - When running as 1G SGMII, Serdes clock is 125 MHz, so
-- * unit = 1 / (125*10^6 Hz) = 8 ns.
-- * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
-- * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
-- * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
-- * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
-- * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
-- * we always set up here a value of 2.5 SGMII.
-- */
-- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
-- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
--
-- if (!fixed_link)
-- /* Restart AN */
-- tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
-- else
-- /* AN disabled */
-- tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
-- phy_write(memac->pcsphy, 0x0, tmp_reg16);
--}
--
--static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
--{
-- u16 tmp_reg16;
--
-- /* AN Device capability */
-- tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
-- phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
--
-- /* Adjust link timer for SGMII -
-- * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
-- * The link_timer register is configured in units of the clock.
-- * - When running as 1G SGMII, Serdes clock is 125 MHz, so
-- * unit = 1 / (125*10^6 Hz) = 8 ns.
-- * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
-- * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
-- * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
-- * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
-- * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
-- * we always set up here a value of 2.5 SGMII.
-- */
-- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
-- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
--
-- /* Restart AN */
-- tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
-- phy_write(memac->pcsphy, 0x0, tmp_reg16);
--}
--
--static int check_init_parameters(struct fman_mac *memac)
--{
-- if (memac->addr == 0) {
-- pr_err("Ethernet MAC must have a valid MAC address\n");
-- return -EINVAL;
-- }
-- if (!memac->exception_cb) {
-- pr_err("Uninitialized exception handler\n");
-- return -EINVAL;
-- }
-- if (!memac->event_cb) {
-- pr_warn("Uninitialize event handler\n");
-- return -EINVAL;
-- }
--
-- return 0;
--}
--
--static int get_exception_flag(enum fman_mac_exceptions exception)
--{
-- u32 bit_mask;
--
-- switch (exception) {
-- case FM_MAC_EX_10G_TX_ECC_ER:
-- bit_mask = MEMAC_IMASK_TECC_ER;
-- break;
-- case FM_MAC_EX_10G_RX_ECC_ER:
-- bit_mask = MEMAC_IMASK_RECC_ER;
-- break;
-- case FM_MAC_EX_TS_FIFO_ECC_ERR:
-- bit_mask = MEMAC_IMASK_TSECC_ER;
-- break;
-- case FM_MAC_EX_MAGIC_PACKET_INDICATION:
-- bit_mask = MEMAC_IMASK_MGI;
-- break;
-- default:
-- bit_mask = 0;
-- break;
-- }
--
-- return bit_mask;
--}
--
--static void memac_err_exception(void *handle)
--{
-- struct fman_mac *memac = (struct fman_mac *)handle;
-- struct memac_regs __iomem *regs = memac->regs;
-- u32 event, imask;
--
-- event = ioread32be(®s->ievent);
-- imask = ioread32be(®s->imask);
--
-- /* Imask include both error and notification/event bits.
-- * Leaving only error bits enabled by imask.
-- * The imask error bits are shifted by 16 bits offset from
-- * their corresponding location in the ievent - hence the >> 16
-- */
-- event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
--
-- iowrite32be(event, ®s->ievent);
--
-- if (event & MEMAC_IEVNT_TS_ECC_ER)
-- memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR);
-- if (event & MEMAC_IEVNT_TX_ECC_ER)
-- memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
-- if (event & MEMAC_IEVNT_RX_ECC_ER)
-- memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
--}
--
--static void memac_exception(void *handle)
--{
-- struct fman_mac *memac = (struct fman_mac *)handle;
-- struct memac_regs __iomem *regs = memac->regs;
-- u32 event, imask;
--
-- event = ioread32be(®s->ievent);
-- imask = ioread32be(®s->imask);
--
-- /* Imask include both error and notification/event bits.
-- * Leaving only error bits enabled by imask.
-- * The imask error bits are shifted by 16 bits offset from
-- * their corresponding location in the ievent - hence the >> 16
-- */
-- event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
--
-- iowrite32be(event, ®s->ievent);
--
-- if (event & MEMAC_IEVNT_MGI)
-- memac->exception_cb(memac->dev_id,
-- FM_MAC_EX_MAGIC_PACKET_INDICATION);
--}
--
--static void free_init_resources(struct fman_mac *memac)
--{
-- fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
-- FMAN_INTR_TYPE_ERR);
--
-- fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
-- FMAN_INTR_TYPE_NORMAL);
--
-- /* release the driver's group hash table */
-- free_hash_table(memac->multicast_addr_hash);
-- memac->multicast_addr_hash = NULL;
--
-- /* release the driver's individual hash table */
-- free_hash_table(memac->unicast_addr_hash);
-- memac->unicast_addr_hash = NULL;
--}
--
--static bool is_init_done(struct memac_cfg *memac_drv_params)
--{
-- /* Checks if mEMAC driver parameters were initialized */
-- if (!memac_drv_params)
-- return true;
--
-- return false;
--}
--
--int memac_enable(struct fman_mac *memac, enum comm_mode mode)
--{
-- struct memac_regs __iomem *regs = memac->regs;
-- u32 tmp;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->command_config);
-- if (mode & COMM_MODE_RX)
-- tmp |= CMD_CFG_RX_EN;
-- if (mode & COMM_MODE_TX)
-- tmp |= CMD_CFG_TX_EN;
--
-- iowrite32be(tmp, ®s->command_config);
--
-- return 0;
--}
--
--int memac_disable(struct fman_mac *memac, enum comm_mode mode)
--{
-- struct memac_regs __iomem *regs = memac->regs;
-- u32 tmp;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->command_config);
-- if (mode & COMM_MODE_RX)
-- tmp &= ~CMD_CFG_RX_EN;
-- if (mode & COMM_MODE_TX)
-- tmp &= ~CMD_CFG_TX_EN;
--
-- iowrite32be(tmp, ®s->command_config);
--
-- return 0;
--}
--
--int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
--{
-- struct memac_regs __iomem *regs = memac->regs;
-- u32 tmp;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->command_config);
-- if (new_val)
-- tmp |= CMD_CFG_PROMIS_EN;
-- else
-- tmp &= ~CMD_CFG_PROMIS_EN;
--
-- iowrite32be(tmp, ®s->command_config);
--
-- return 0;
--}
--
--int memac_adjust_link(struct fman_mac *memac, u16 speed)
--{
-- struct memac_regs __iomem *regs = memac->regs;
-- u32 tmp;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->if_mode);
--
-- /* Set full duplex */
-- tmp &= ~IF_MODE_HD;
--
-- if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
-- /* Configure RGMII in manual mode */
-- tmp &= ~IF_MODE_RGMII_AUTO;
-- tmp &= ~IF_MODE_RGMII_SP_MASK;
-- /* Full duplex */
-- tmp |= IF_MODE_RGMII_FD;
--
-- switch (speed) {
-- case SPEED_1000:
-- tmp |= IF_MODE_RGMII_1000;
-- break;
-- case SPEED_100:
-- tmp |= IF_MODE_RGMII_100;
-- break;
-- case SPEED_10:
-- tmp |= IF_MODE_RGMII_10;
-- break;
-- default:
-- break;
-- }
-- }
--
-- iowrite32be(tmp, ®s->if_mode);
--
-- return 0;
--}
--
--int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
--{
-- if (is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- memac->memac_drv_param->max_frame_length = new_val;
--
-- return 0;
--}
--
--int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
--{
-- if (is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- memac->memac_drv_param->reset_on_init = enable;
--
-- return 0;
--}
--
--int memac_cfg_fixed_link(struct fman_mac *memac,
-- struct fixed_phy_status *fixed_link)
--{
-- if (is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- memac->memac_drv_param->fixed_link = fixed_link;
--
-- return 0;
--}
--
--int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
-- u16 pause_time, u16 thresh_time)
--{
-- struct memac_regs __iomem *regs = memac->regs;
-- u32 tmp;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->tx_fifo_sections);
--
-- GET_TX_EMPTY_DEFAULT_VALUE(tmp);
-- iowrite32be(tmp, ®s->tx_fifo_sections);
--
-- tmp = ioread32be(®s->command_config);
-- tmp &= ~CMD_CFG_PFC_MODE;
-- priority = 0;
--
-- iowrite32be(tmp, ®s->command_config);
--
-- tmp = ioread32be(®s->pause_quanta[priority / 2]);
-- if (priority % 2)
-- tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
-- else
-- tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
-- tmp |= ((u32)pause_time << (16 * (priority % 2)));
-- iowrite32be(tmp, ®s->pause_quanta[priority / 2]);
--
-- tmp = ioread32be(®s->pause_thresh[priority / 2]);
-- if (priority % 2)
-- tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
-- else
-- tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
-- tmp |= ((u32)thresh_time << (16 * (priority % 2)));
-- iowrite32be(tmp, ®s->pause_thresh[priority / 2]);
--
-- return 0;
--}
--
--int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
--{
-- struct memac_regs __iomem *regs = memac->regs;
-- u32 tmp;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->command_config);
-- if (en)
-- tmp &= ~CMD_CFG_PAUSE_IGNORE;
-- else
-- tmp |= CMD_CFG_PAUSE_IGNORE;
--
-- iowrite32be(tmp, ®s->command_config);
--
-- return 0;
--}
--
--int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
--{
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
--
-- return 0;
--}
--
--int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
--{
-- struct memac_regs __iomem *regs = memac->regs;
-- struct eth_hash_entry *hash_entry;
-- u32 hash;
-- u64 addr;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- addr = ENET_ADDR_TO_UINT64(*eth_addr);
--
-- if (!(addr & GROUP_ADDRESS)) {
-- /* Unicast addresses not supported in hash */
-- pr_err("Unicast Address\n");
-- return -EINVAL;
-- }
-- hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
--
-- /* Create element to be added to the driver hash table */
-- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
-- if (!hash_entry)
-- return -ENOMEM;
-- hash_entry->addr = addr;
-- INIT_LIST_HEAD(&hash_entry->node);
--
-- list_add_tail(&hash_entry->node,
-- &memac->multicast_addr_hash->lsts[hash]);
-- iowrite32be(hash | HASH_CTRL_MCAST_EN, ®s->hashtable_ctrl);
--
-- return 0;
--}
--
--int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
--{
-- struct memac_regs __iomem *regs = memac->regs;
-- struct eth_hash_entry *hash_entry = NULL;
-- struct list_head *pos;
-- u32 hash;
-- u64 addr;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- addr = ENET_ADDR_TO_UINT64(*eth_addr);
--
-- hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
--
-- list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
-- hash_entry = ETH_HASH_ENTRY_OBJ(pos);
-- if (hash_entry->addr == addr) {
-- list_del_init(&hash_entry->node);
-- kfree(hash_entry);
-- break;
-- }
-- }
-- if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
-- iowrite32be(hash & ~HASH_CTRL_MCAST_EN, ®s->hashtable_ctrl);
--
-- return 0;
--}
--
--int memac_set_exception(struct fman_mac *memac,
-- enum fman_mac_exceptions exception, bool enable)
--{
-- u32 bit_mask = 0;
--
-- if (!is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- bit_mask = get_exception_flag(exception);
-- if (bit_mask) {
-- if (enable)
-- memac->exceptions |= bit_mask;
-- else
-- memac->exceptions &= ~bit_mask;
-- } else {
-- pr_err("Undefined exception\n");
-- return -EINVAL;
-- }
-- set_exception(memac->regs, bit_mask, enable);
--
-- return 0;
--}
--
--int memac_init(struct fman_mac *memac)
--{
-- struct memac_cfg *memac_drv_param;
-- u8 i;
-- enet_addr_t eth_addr;
-- bool slow_10g_if = false;
-- struct fixed_phy_status *fixed_link;
-- int err;
-- u32 reg32 = 0;
--
-- if (is_init_done(memac->memac_drv_param))
-- return -EINVAL;
--
-- err = check_init_parameters(memac);
-- if (err)
-- return err;
--
-- memac_drv_param = memac->memac_drv_param;
--
-- if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
-- slow_10g_if = true;
--
-- /* First, reset the MAC if desired. */
-- if (memac_drv_param->reset_on_init) {
-- err = reset(memac->regs);
-- if (err) {
-- pr_err("mEMAC reset failed\n");
-- return err;
-- }
-- }
--
-- /* MAC Address */
-- MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
-- add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
--
-- fixed_link = memac_drv_param->fixed_link;
--
-- init(memac->regs, memac->memac_drv_param, memac->phy_if,
-- memac->max_speed, slow_10g_if, memac->exceptions);
--
-- /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
-- * Exists only in FMan 6.0 and 6.3.
-- */
-- if ((memac->fm_rev_info.major == 6) &&
-- ((memac->fm_rev_info.minor == 0) ||
-- (memac->fm_rev_info.minor == 3))) {
-- /* MAC strips CRC from received frames - this workaround
-- * should decrease the likelihood of bug appearance
-- */
-- reg32 = ioread32be(&memac->regs->command_config);
-- reg32 &= ~CMD_CFG_CRC_FWD;
-- iowrite32be(reg32, &memac->regs->command_config);
-- }
--
-- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
-- /* Configure internal SGMII PHY */
-- if (memac->basex_if)
-- setup_sgmii_internal_phy_base_x(memac);
-- else
-- setup_sgmii_internal_phy(memac, fixed_link);
-- } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
-- /* Configure 4 internal SGMII PHYs */
-- for (i = 0; i < 4; i++) {
-- u8 qsmgii_phy_addr, phy_addr;
-- /* QSGMII PHY address occupies 3 upper bits of 5-bit
-- * phy_address; the lower 2 bits are used to extend
-- * register address space and access each one of 4
-- * ports inside QSGMII.
-- */
-- phy_addr = memac->pcsphy->mdio.addr;
-- qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
-- memac->pcsphy->mdio.addr = qsmgii_phy_addr;
-- if (memac->basex_if)
-- setup_sgmii_internal_phy_base_x(memac);
-- else
-- setup_sgmii_internal_phy(memac, fixed_link);
--
-- memac->pcsphy->mdio.addr = phy_addr;
-- }
-- }
--
-- /* Max Frame Length */
-- err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
-- memac_drv_param->max_frame_length);
-- if (err) {
-- pr_err("settings Mac max frame length is FAILED\n");
-- return err;
-- }
--
-- memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
-- if (!memac->multicast_addr_hash) {
-- free_init_resources(memac);
-- pr_err("allocation hash table is FAILED\n");
-- return -ENOMEM;
-- }
--
-- memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
-- if (!memac->unicast_addr_hash) {
-- free_init_resources(memac);
-- pr_err("allocation hash table is FAILED\n");
-- return -ENOMEM;
-- }
--
-- fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
-- FMAN_INTR_TYPE_ERR, memac_err_exception, memac);
--
-- fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
-- FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
--
-- kfree(memac_drv_param);
-- memac->memac_drv_param = NULL;
--
-- return 0;
--}
--
--int memac_free(struct fman_mac *memac)
--{
-- free_init_resources(memac);
--
-- if (memac->pcsphy)
-- put_device(&memac->pcsphy->mdio.dev);
--
-- kfree(memac->memac_drv_param);
-- kfree(memac);
--
-- return 0;
--}
--
--struct fman_mac *memac_config(struct fman_mac_params *params)
--{
-- struct fman_mac *memac;
-- struct memac_cfg *memac_drv_param;
-- void __iomem *base_addr;
--
-- base_addr = params->base_addr;
-- /* allocate memory for the m_emac data structure */
-- memac = kzalloc(sizeof(*memac), GFP_KERNEL);
-- if (!memac)
-- return NULL;
--
-- /* allocate memory for the m_emac driver parameters data structure */
-- memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL);
-- if (!memac_drv_param) {
-- memac_free(memac);
-- return NULL;
-- }
--
-- /* Plant parameter structure pointer */
-- memac->memac_drv_param = memac_drv_param;
--
-- set_dflts(memac_drv_param);
--
-- memac->addr = ENET_ADDR_TO_UINT64(params->addr);
--
-- memac->regs = base_addr;
-- memac->max_speed = params->max_speed;
-- memac->phy_if = params->phy_if;
-- memac->mac_id = params->mac_id;
-- memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
-- MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
-- memac->exception_cb = params->exception_cb;
-- memac->event_cb = params->event_cb;
-- memac->dev_id = params->dev_id;
-- memac->fm = params->fm;
-- memac->basex_if = params->basex_if;
--
-- /* Save FMan revision */
-- fman_get_revision(memac->fm, &memac->fm_rev_info);
--
-- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
-- memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
-- if (!params->internal_phy_node) {
-- pr_err("PCS PHY node is not available\n");
-- memac_free(memac);
-- return NULL;
-- }
--
-- memac->pcsphy = of_phy_find_device(params->internal_phy_node);
-- if (!memac->pcsphy) {
-- pr_err("of_phy_find_device (PCS PHY) failed\n");
-- memac_free(memac);
-- return NULL;
-- }
-- }
--
-- return memac;
--}
-diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
-deleted file mode 100644
-index 173d8e0..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_memac.h
-+++ /dev/null
-@@ -1,60 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#ifndef __MEMAC_H
--#define __MEMAC_H
--
--#include "fman_mac.h"
--
--#include <linux/netdevice.h>
--
--struct fman_mac *memac_config(struct fman_mac_params *params);
--int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
--int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
--int memac_adjust_link(struct fman_mac *memac, u16 speed);
--int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
--int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
--int memac_cfg_fixed_link(struct fman_mac *memac,
-- struct fixed_phy_status *fixed_link);
--int memac_enable(struct fman_mac *memac, enum comm_mode mode);
--int memac_disable(struct fman_mac *memac, enum comm_mode mode);
--int memac_init(struct fman_mac *memac);
--int memac_free(struct fman_mac *memac);
--int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
--int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
-- u16 pause_time, u16 thresh_time);
--int memac_set_exception(struct fman_mac *memac,
-- enum fman_mac_exceptions exception, bool enable);
--int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
--int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
--
--#endif /* __MEMAC_H */
-diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
-deleted file mode 100644
-index 5ec94d2..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_muram.c
-+++ /dev/null
-@@ -1,159 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#include "fman_muram.h"
--
--#include <linux/io.h>
--#include <linux/slab.h>
--#include <linux/genalloc.h>
--
--struct muram_info {
-- struct gen_pool *pool;
-- void __iomem *vbase;
-- size_t size;
-- phys_addr_t pbase;
--};
--
--static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram,
-- unsigned long vaddr)
--{
-- return vaddr - (unsigned long)muram->vbase;
--}
--
--/**
-- * fman_muram_init
-- * @base: Pointer to base of memory mapped FM-MURAM.
-- * @size: Size of the FM-MURAM partition.
-- *
-- * Creates partition in the MURAM.
-- * The routine returns a pointer to the MURAM partition.
-- * This pointer must be passed as to all other FM-MURAM function calls.
-- * No actual initialization or configuration of FM_MURAM hardware is done by
-- * this routine.
-- *
-- * Return: pointer to FM-MURAM object, or NULL for Failure.
-- */
--struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
--{
-- struct muram_info *muram;
-- void __iomem *vaddr;
-- int ret;
--
-- muram = kzalloc(sizeof(*muram), GFP_KERNEL);
-- if (!muram)
-- return NULL;
--
-- muram->pool = gen_pool_create(ilog2(64), -1);
-- if (!muram->pool) {
-- pr_err("%s(): MURAM pool create failed\n", __func__);
-- goto muram_free;
-- }
--
-- vaddr = ioremap(base, size);
-- if (!vaddr) {
-- pr_err("%s(): MURAM ioremap failed\n", __func__);
-- goto pool_destroy;
-- }
--
-- ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
-- base, size, -1);
-- if (ret < 0) {
-- pr_err("%s(): MURAM pool add failed\n", __func__);
-- iounmap(vaddr);
-- goto pool_destroy;
-- }
--
-- memset_io(vaddr, 0, (int)size);
--
-- muram->vbase = vaddr;
-- muram->pbase = base;
-- return muram;
--
--pool_destroy:
-- gen_pool_destroy(muram->pool);
--muram_free:
-- kfree(muram);
-- return NULL;
--}
--
--/**
-- * fman_muram_offset_to_vbase
-- * @muram: FM-MURAM module pointer.
-- * @offset: the offset of the memory block
-- *
-- * Gives the address of the memory region from specific offset
-- *
-- * Return: The address of the memory block
-- */
--unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
-- unsigned long offset)
--{
-- return offset + (unsigned long)muram->vbase;
--}
--
--/**
-- * fman_muram_alloc
-- * @muram: FM-MURAM module pointer.
-- * @size: Size of the memory to be allocated.
-- *
-- * Allocate some memory from FM-MURAM partition.
-- *
-- * Return: address of the allocated memory; NULL otherwise.
-- */
--unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
--{
-- unsigned long vaddr;
--
-- vaddr = gen_pool_alloc(muram->pool, size);
-- if (!vaddr)
-- return -ENOMEM;
--
-- memset_io((void __iomem *)vaddr, 0, size);
--
-- return fman_muram_vbase_to_offset(muram, vaddr);
--}
--
--/**
-- * fman_muram_free_mem
-- * muram: FM-MURAM module pointer.
-- * offset: offset of the memory region to be freed.
-- * size: size of the memory to be freed.
-- *
-- * Free an allocated memory from FM-MURAM partition.
-- */
--void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
-- size_t size)
--{
-- unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
--
-- gen_pool_free(muram->pool, addr, size);
--}
-diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
-deleted file mode 100644
-index 453bf84..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_muram.h
-+++ /dev/null
-@@ -1,52 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--#ifndef __FM_MURAM_EXT
--#define __FM_MURAM_EXT
--
--#include <linux/types.h>
--
--#define FM_MURAM_INVALID_ALLOCATION -1
--
--/* Structure for FM MURAM information */
--struct muram_info;
--
--struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
--
--unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
-- unsigned long offset);
--
--unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
--
--void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
-- size_t size);
--
--#endif /* __FM_MURAM_EXT */
-diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
-deleted file mode 100644
-index 9f3bb50..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_port.c
-+++ /dev/null
-@@ -1,1791 +0,0 @@
--/*
-- * Copyright 2008 - 2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
--#include "fman_port.h"
--#include "fman.h"
--#include "fman_sp.h"
--
--#include <linux/io.h>
--#include <linux/slab.h>
--#include <linux/module.h>
--#include <linux/interrupt.h>
--#include <linux/of_platform.h>
--#include <linux/of_address.h>
--#include <linux/delay.h>
--#include <linux/libfdt_env.h>
--
--/* Queue ID */
--#define DFLT_FQ_ID 0x00FFFFFF
--
--/* General defines */
--#define PORT_BMI_FIFO_UNITS 0x100
--
--#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) \
-- min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS)
--
--#define PORT_CG_MAP_NUM 8
--#define PORT_PRS_RESULT_WORDS_NUM 8
--#define PORT_IC_OFFSET_UNITS 0x10
--
--#define MIN_EXT_BUF_SIZE 64
--
--#define BMI_PORT_REGS_OFFSET 0
--#define QMI_PORT_REGS_OFFSET 0x400
--
--/* Default values */
--#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
-- DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN
--
--#define DFLT_PORT_CUT_BYTES_FROM_END 4
--
--#define DFLT_PORT_ERRORS_TO_DISCARD FM_PORT_FRM_ERR_CLS_DISCARD
--#define DFLT_PORT_MAX_FRAME_LENGTH 9600
--
--#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size) \
-- MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)
--
--#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size) \
-- (major == 6 ? \
-- MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) : \
-- (MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4)) \
--
--#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS 0
--
--/* QMI defines */
--#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f
--
--#define QMI_PORT_CFG_EN 0x80000000
--#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000
--
--#define QMI_DEQ_CFG_PRI 0x80000000
--#define QMI_DEQ_CFG_TYPE1 0x10000000
--#define QMI_DEQ_CFG_TYPE2 0x20000000
--#define QMI_DEQ_CFG_TYPE3 0x30000000
--#define QMI_DEQ_CFG_PREFETCH_PARTIAL 0x01000000
--#define QMI_DEQ_CFG_PREFETCH_FULL 0x03000000
--#define QMI_DEQ_CFG_SP_MASK 0xf
--#define QMI_DEQ_CFG_SP_SHIFT 20
--
--#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type) \
-- (_type == FMAN_PORT_TYPE_TX ? 0x1400 : 0x400)
--
--/* BMI defins */
--#define BMI_EBD_EN 0x80000000
--
--#define BMI_PORT_CFG_EN 0x80000000
--
--#define BMI_PORT_STATUS_BSY 0x80000000
--
--#define BMI_DMA_ATTR_SWP_SHIFT FMAN_SP_DMA_ATTR_SWP_SHIFT
--#define BMI_DMA_ATTR_WRITE_OPTIMIZE FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
--
--#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16
--#define BMI_RX_FIFO_THRESHOLD_ETHE 0x80000000
--
--#define BMI_FRAME_END_CS_IGNORE_SHIFT 24
--#define BMI_FRAME_END_CS_IGNORE_MASK 0x0000001f
--
--#define BMI_RX_FRAME_END_CUT_SHIFT 16
--#define BMI_RX_FRAME_END_CUT_MASK 0x0000001f
--
--#define BMI_IC_TO_EXT_SHIFT FMAN_SP_IC_TO_EXT_SHIFT
--#define BMI_IC_TO_EXT_MASK 0x0000001f
--#define BMI_IC_FROM_INT_SHIFT FMAN_SP_IC_FROM_INT_SHIFT
--#define BMI_IC_FROM_INT_MASK 0x0000000f
--#define BMI_IC_SIZE_MASK 0x0000001f
--
--#define BMI_INT_BUF_MARG_SHIFT 28
--#define BMI_INT_BUF_MARG_MASK 0x0000000f
--#define BMI_EXT_BUF_MARG_START_SHIFT FMAN_SP_EXT_BUF_MARG_START_SHIFT
--#define BMI_EXT_BUF_MARG_START_MASK 0x000001ff
--#define BMI_EXT_BUF_MARG_END_MASK 0x000001ff
--
--#define BMI_CMD_MR_LEAC 0x00200000
--#define BMI_CMD_MR_SLEAC 0x00100000
--#define BMI_CMD_MR_MA 0x00080000
--#define BMI_CMD_MR_DEAS 0x00040000
--#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \
-- BMI_CMD_MR_SLEAC | \
-- BMI_CMD_MR_MA | \
-- BMI_CMD_MR_DEAS)
--#define BMI_CMD_TX_MR_DEF 0
--
--#define BMI_CMD_ATTR_ORDER 0x80000000
--#define BMI_CMD_ATTR_SYNC 0x02000000
--#define BMI_CMD_ATTR_COLOR_SHIFT 26
--
--#define BMI_FIFO_PIPELINE_DEPTH_SHIFT 12
--#define BMI_FIFO_PIPELINE_DEPTH_MASK 0x0000000f
--#define BMI_NEXT_ENG_FD_BITS_SHIFT 24
--
--#define BMI_EXT_BUF_POOL_VALID FMAN_SP_EXT_BUF_POOL_VALID
--#define BMI_EXT_BUF_POOL_EN_COUNTER FMAN_SP_EXT_BUF_POOL_EN_COUNTER
--#define BMI_EXT_BUF_POOL_BACKUP FMAN_SP_EXT_BUF_POOL_BACKUP
--#define BMI_EXT_BUF_POOL_ID_SHIFT 16
--#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000
--#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16
--
--#define BMI_TX_FIFO_MIN_FILL_SHIFT 16
--
--#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
--#define BMI_FIFO_THRESHOLD ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
--
--#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed) \
-- ((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
--
--#define RX_ERRS_TO_ENQ \
-- (FM_PORT_FRM_ERR_DMA | \
-- FM_PORT_FRM_ERR_PHYSICAL | \
-- FM_PORT_FRM_ERR_SIZE | \
-- FM_PORT_FRM_ERR_EXTRACTION | \
-- FM_PORT_FRM_ERR_NO_SCHEME | \
-- FM_PORT_FRM_ERR_PRS_TIMEOUT | \
-- FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \
-- FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \
-- FM_PORT_FRM_ERR_PRS_HDR_ERR | \
-- FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW | \
-- FM_PORT_FRM_ERR_IPRE)
--
--/* NIA defines */
--#define NIA_ORDER_RESTOR 0x00800000
--#define NIA_ENG_BMI 0x00500000
--#define NIA_ENG_QMI_ENQ 0x00540000
--#define NIA_ENG_QMI_DEQ 0x00580000
--
--#define NIA_BMI_AC_ENQ_FRAME 0x00000002
--#define NIA_BMI_AC_TX_RELEASE 0x000002C0
--#define NIA_BMI_AC_RELEASE 0x000000C0
--#define NIA_BMI_AC_TX 0x00000274
--#define NIA_BMI_AC_FETCH_ALL_FRAME 0x0000020c
--
--/* Port IDs */
--#define TX_10G_PORT_BASE 0x30
--#define RX_10G_PORT_BASE 0x10
--
--/* BMI Rx port register map */
--struct fman_port_rx_bmi_regs {
-- u32 fmbm_rcfg; /* Rx Configuration */
-- u32 fmbm_rst; /* Rx Status */
-- u32 fmbm_rda; /* Rx DMA attributes */
-- u32 fmbm_rfp; /* Rx FIFO Parameters */
-- u32 fmbm_rfed; /* Rx Frame End Data */
-- u32 fmbm_ricp; /* Rx Internal Context Parameters */
-- u32 fmbm_rim; /* Rx Internal Buffer Margins */
-- u32 fmbm_rebm; /* Rx External Buffer Margins */
-- u32 fmbm_rfne; /* Rx Frame Next Engine */
-- u32 fmbm_rfca; /* Rx Frame Command Attributes. */
-- u32 fmbm_rfpne; /* Rx Frame Parser Next Engine */
-- u32 fmbm_rpso; /* Rx Parse Start Offset */
-- u32 fmbm_rpp; /* Rx Policer Profile */
-- u32 fmbm_rccb; /* Rx Coarse Classification Base */
-- u32 fmbm_reth; /* Rx Excessive Threshold */
-- u32 reserved003c[1]; /* (0x03C 0x03F) */
-- u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM];
-- /* Rx Parse Results Array Init */
-- u32 fmbm_rfqid; /* Rx Frame Queue ID */
-- u32 fmbm_refqid; /* Rx Error Frame Queue ID */
-- u32 fmbm_rfsdm; /* Rx Frame Status Discard Mask */
-- u32 fmbm_rfsem; /* Rx Frame Status Error Mask */
-- u32 fmbm_rfene; /* Rx Frame Enqueue Next Engine */
-- u32 reserved0074[0x2]; /* (0x074-0x07C) */
-- u32 fmbm_rcmne; /* Rx Frame Continuous Mode Next Engine */
-- u32 reserved0080[0x20]; /* (0x080 0x0FF) */
-- u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
-- /* Buffer Manager pool Information- */
-- u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM]; /* Allocate Counter- */
-- u32 reserved0130[8]; /* 0x130/0x140 - 0x15F reserved - */
-- u32 fmbm_rcgm[PORT_CG_MAP_NUM]; /* Congestion Group Map */
-- u32 fmbm_mpd; /* BM Pool Depletion */
-- u32 reserved0184[0x1F]; /* (0x184 0x1FF) */
-- u32 fmbm_rstc; /* Rx Statistics Counters */
-- u32 fmbm_rfrc; /* Rx Frame Counter */
-- u32 fmbm_rfbc; /* Rx Bad Frames Counter */
-- u32 fmbm_rlfc; /* Rx Large Frames Counter */
-- u32 fmbm_rffc; /* Rx Filter Frames Counter */
-- u32 fmbm_rfdc; /* Rx Frame Discard Counter */
-- u32 fmbm_rfldec; /* Rx Frames List DMA Error Counter */
-- u32 fmbm_rodc; /* Rx Out of Buffers Discard nntr */
-- u32 fmbm_rbdc; /* Rx Buffers Deallocate Counter */
-- u32 fmbm_rpec; /* RX Prepare to enqueue Counte */
-- u32 reserved0224[0x16]; /* (0x224 0x27F) */
-- u32 fmbm_rpc; /* Rx Performance Counters */
-- u32 fmbm_rpcp; /* Rx Performance Count Parameters */
-- u32 fmbm_rccn; /* Rx Cycle Counter */
-- u32 fmbm_rtuc; /* Rx Tasks Utilization Counter */
-- u32 fmbm_rrquc; /* Rx Receive Queue Utilization cntr */
-- u32 fmbm_rduc; /* Rx DMA Utilization Counter */
-- u32 fmbm_rfuc; /* Rx FIFO Utilization Counter */
-- u32 fmbm_rpac; /* Rx Pause Activation Counter */
-- u32 reserved02a0[0x18]; /* (0x2A0 0x2FF) */
-- u32 fmbm_rdcfg[0x3]; /* Rx Debug Configuration */
-- u32 fmbm_rgpr; /* Rx General Purpose Register */
-- u32 reserved0310[0x3a];
--};
--
--/* BMI Tx port register map */
--struct fman_port_tx_bmi_regs {
-- u32 fmbm_tcfg; /* Tx Configuration */
-- u32 fmbm_tst; /* Tx Status */
-- u32 fmbm_tda; /* Tx DMA attributes */
-- u32 fmbm_tfp; /* Tx FIFO Parameters */
-- u32 fmbm_tfed; /* Tx Frame End Data */
-- u32 fmbm_ticp; /* Tx Internal Context Parameters */
-- u32 fmbm_tfdne; /* Tx Frame Dequeue Next Engine. */
-- u32 fmbm_tfca; /* Tx Frame Command attribute. */
-- u32 fmbm_tcfqid; /* Tx Confirmation Frame Queue ID. */
-- u32 fmbm_tefqid; /* Tx Frame Error Queue ID */
-- u32 fmbm_tfene; /* Tx Frame Enqueue Next Engine */
-- u32 fmbm_trlmts; /* Tx Rate Limiter Scale */
-- u32 fmbm_trlmt; /* Tx Rate Limiter */
-- u32 reserved0034[0x0e]; /* (0x034-0x6c) */
-- u32 fmbm_tccb; /* Tx Coarse Classification base */
-- u32 fmbm_tfne; /* Tx Frame Next Engine */
-- u32 fmbm_tpfcm[0x02];
-- /* Tx Priority based Flow Control (PFC) Mapping */
-- u32 fmbm_tcmne; /* Tx Frame Continuous Mode Next Engine */
-- u32 reserved0080[0x60]; /* (0x080-0x200) */
-- u32 fmbm_tstc; /* Tx Statistics Counters */
-- u32 fmbm_tfrc; /* Tx Frame Counter */
-- u32 fmbm_tfdc; /* Tx Frames Discard Counter */
-- u32 fmbm_tfledc; /* Tx Frame len error discard cntr */
-- u32 fmbm_tfufdc; /* Tx Frame unsprt frmt discard cntr */
-- u32 fmbm_tbdc; /* Tx Buffers Deallocate Counter */
-- u32 reserved0218[0x1A]; /* (0x218-0x280) */
-- u32 fmbm_tpc; /* Tx Performance Counters */
-- u32 fmbm_tpcp; /* Tx Performance Count Parameters */
-- u32 fmbm_tccn; /* Tx Cycle Counter */
-- u32 fmbm_ttuc; /* Tx Tasks Utilization Counter */
-- u32 fmbm_ttcquc; /* Tx Transmit conf Q util Counter */
-- u32 fmbm_tduc; /* Tx DMA Utilization Counter */
-- u32 fmbm_tfuc; /* Tx FIFO Utilization Counter */
-- u32 reserved029c[16]; /* (0x29C-0x2FF) */
-- u32 fmbm_tdcfg[0x3]; /* Tx Debug Configuration */
-- u32 fmbm_tgpr; /* Tx General Purpose Register */
-- u32 reserved0310[0x3a]; /* (0x310-0x3FF) */
--};
--
--/* BMI port register map */
--union fman_port_bmi_regs {
-- struct fman_port_rx_bmi_regs rx;
-- struct fman_port_tx_bmi_regs tx;
--};
--
--/* QMI port register map */
--struct fman_port_qmi_regs {
-- u32 fmqm_pnc; /* PortID n Configuration Register */
-- u32 fmqm_pns; /* PortID n Status Register */
-- u32 fmqm_pnts; /* PortID n Task Status Register */
-- u32 reserved00c[4]; /* 0xn00C - 0xn01B */
-- u32 fmqm_pnen; /* PortID n Enqueue NIA Register */
-- u32 fmqm_pnetfc; /* PortID n Enq Total Frame Counter */
-- u32 reserved024[2]; /* 0xn024 - 0x02B */
-- u32 fmqm_pndn; /* PortID n Dequeue NIA Register */
-- u32 fmqm_pndc; /* PortID n Dequeue Config Register */
-- u32 fmqm_pndtfc; /* PortID n Dequeue tot Frame cntr */
-- u32 fmqm_pndfdc; /* PortID n Dequeue FQID Dflt Cntr */
-- u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
--};
--
--/* QMI dequeue prefetch modes */
--enum fman_port_deq_prefetch {
-- FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
-- FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */
-- FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */
--};
--
--/* A structure for defining FM port resources */
--struct fman_port_rsrc {
-- u32 num; /* Committed required resource */
-- u32 extra; /* Extra (not committed) required resource */
--};
--
--enum fman_port_dma_swap {
-- FMAN_PORT_DMA_NO_SWAP, /* No swap, transfer data as is */
-- FMAN_PORT_DMA_SWAP_LE,
-- /* The transferred data should be swapped in PPC Little Endian mode */
-- FMAN_PORT_DMA_SWAP_BE
-- /* The transferred data should be swapped in Big Endian mode */
--};
--
--/* Default port color */
--enum fman_port_color {
-- FMAN_PORT_COLOR_GREEN, /* Default port color is green */
-- FMAN_PORT_COLOR_YELLOW, /* Default port color is yellow */
-- FMAN_PORT_COLOR_RED, /* Default port color is red */
-- FMAN_PORT_COLOR_OVERRIDE /* Ignore color */
--};
--
--/* QMI dequeue from the SP channel - types */
--enum fman_port_deq_type {
-- FMAN_PORT_DEQ_BY_PRI,
-- /* Priority precedence and Intra-Class scheduling */
-- FMAN_PORT_DEQ_ACTIVE_FQ,
-- /* Active FQ precedence and Intra-Class scheduling */
-- FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
-- /* Active FQ precedence and override Intra-Class scheduling */
--};
--
--/* External buffer pools configuration */
--struct fman_port_bpools {
-- u8 count; /* Num of pools to set up */
-- bool counters_enable; /* Enable allocate counters */
-- u8 grp_bp_depleted_num;
-- /* Number of depleted pools - if reached the BMI indicates
-- * the MAC to send a pause frame
-- */
-- struct {
-- u8 bpid; /* BM pool ID */
-- u16 size;
-- /* Pool's size - must be in ascending order */
-- bool is_backup;
-- /* If this is a backup pool */
-- bool grp_bp_depleted;
-- /* Consider this buffer in multiple pools depletion criteria */
-- bool single_bp_depleted;
-- /* Consider this buffer in single pool depletion criteria */
-- } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
--};
--
--struct fman_port_cfg {
-- u32 dflt_fqid;
-- u32 err_fqid;
-- u8 deq_sp;
-- bool deq_high_priority;
-- enum fman_port_deq_type deq_type;
-- enum fman_port_deq_prefetch deq_prefetch_option;
-- u16 deq_byte_cnt;
-- u8 cheksum_last_bytes_ignore;
-- u8 rx_cut_end_bytes;
-- struct fman_buf_pool_depletion buf_pool_depletion;
-- struct fman_ext_pools ext_buf_pools;
-- u32 tx_fifo_min_level;
-- u32 tx_fifo_low_comf_level;
-- u32 rx_pri_elevation;
-- u32 rx_fifo_thr;
-- struct fman_sp_buf_margins buf_margins;
-- u32 int_buf_start_margin;
-- struct fman_sp_int_context_data_copy int_context;
-- u32 discard_mask;
-- u32 err_mask;
-- struct fman_buffer_prefix_content buffer_prefix_content;
-- bool dont_release_buf;
--
-- u8 rx_fd_bits;
-- u32 tx_fifo_deq_pipeline_depth;
-- bool errata_A006320;
-- bool excessive_threshold_register;
-- bool fmbm_tfne_has_features;
--
-- enum fman_port_dma_swap dma_swap_data;
-- enum fman_port_color color;
--};
--
--struct fman_port_rx_pools_params {
-- u8 num_of_pools;
-- u16 second_largest_buf_size;
-- u16 largest_buf_size;
--};
--
--struct fman_port_dts_params {
-- void __iomem *base_addr; /* FMan port virtual memory */
-- enum fman_port_type type; /* Port type */
-- u16 speed; /* Port speed */
-- u8 id; /* HW Port Id */
-- u32 qman_channel_id; /* QMan channel id (non RX only) */
-- struct fman *fman; /* FMan Handle */
--};
--
--struct fman_port {
-- void *fm;
-- struct device *dev;
-- struct fman_rev_info rev_info;
-- u8 port_id;
-- enum fman_port_type port_type;
-- u16 port_speed;
--
-- union fman_port_bmi_regs __iomem *bmi_regs;
-- struct fman_port_qmi_regs __iomem *qmi_regs;
--
-- struct fman_sp_buffer_offsets buffer_offsets;
--
-- u8 internal_buf_offset;
-- struct fman_ext_pools ext_buf_pools;
--
-- u16 max_frame_length;
-- struct fman_port_rsrc open_dmas;
-- struct fman_port_rsrc tasks;
-- struct fman_port_rsrc fifo_bufs;
-- struct fman_port_rx_pools_params rx_pools_params;
--
-- struct fman_port_cfg *cfg;
-- struct fman_port_dts_params dts_params;
--
-- u8 ext_pools_num;
-- u32 max_port_fifo_size;
-- u32 max_num_of_ext_pools;
-- u32 max_num_of_sub_portals;
-- u32 bm_max_num_of_pools;
--};
--
--static int init_bmi_rx(struct fman_port *port)
--{
-- struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx;
-- struct fman_port_cfg *cfg = port->cfg;
-- u32 tmp;
--
-- /* DMA attributes */
-- tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
-- /* Enable write optimization */
-- tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
-- iowrite32be(tmp, ®s->fmbm_rda);
--
-- /* Rx FIFO parameters */
-- tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) <<
-- BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
-- tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1;
-- iowrite32be(tmp, ®s->fmbm_rfp);
--
-- if (cfg->excessive_threshold_register)
-- /* always allow access to the extra resources */
-- iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, ®s->fmbm_reth);
--
-- /* Frame end data */
-- tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
-- BMI_FRAME_END_CS_IGNORE_SHIFT;
-- tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) <<
-- BMI_RX_FRAME_END_CUT_SHIFT;
-- if (cfg->errata_A006320)
-- tmp &= 0xffe0ffff;
-- iowrite32be(tmp, ®s->fmbm_rfed);
--
-- /* Internal context parameters */
-- tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
-- BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
-- tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
-- BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
-- tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
-- BMI_IC_SIZE_MASK;
-- iowrite32be(tmp, ®s->fmbm_ricp);
--
-- /* Internal buffer offset */
-- tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) &
-- BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT;
-- iowrite32be(tmp, ®s->fmbm_rim);
--
-- /* External buffer margins */
-- tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
-- BMI_EXT_BUF_MARG_START_SHIFT;
-- tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
-- iowrite32be(tmp, ®s->fmbm_rebm);
--
-- /* Frame attributes */
-- tmp = BMI_CMD_RX_MR_DEF;
-- tmp |= BMI_CMD_ATTR_ORDER;
-- tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
-- /* Synchronization request */
-- tmp |= BMI_CMD_ATTR_SYNC;
--
-- iowrite32be(tmp, ®s->fmbm_rfca);
--
-- /* NIA */
-- tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
--
-- tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
-- iowrite32be(tmp, ®s->fmbm_rfne);
--
-- /* Enqueue NIA */
-- iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, ®s->fmbm_rfene);
--
-- /* Default/error queues */
-- iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), ®s->fmbm_rfqid);
-- iowrite32be((cfg->err_fqid & DFLT_FQ_ID), ®s->fmbm_refqid);
--
-- /* Discard/error masks */
-- iowrite32be(cfg->discard_mask, ®s->fmbm_rfsdm);
-- iowrite32be(cfg->err_mask, ®s->fmbm_rfsem);
--
-- return 0;
--}
--
--static int init_bmi_tx(struct fman_port *port)
--{
-- struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx;
-- struct fman_port_cfg *cfg = port->cfg;
-- u32 tmp;
--
-- /* Tx Configuration register */
-- tmp = 0;
-- iowrite32be(tmp, ®s->fmbm_tcfg);
--
-- /* DMA attributes */
-- tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
-- iowrite32be(tmp, ®s->fmbm_tda);
--
-- /* Tx FIFO parameters */
-- tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) <<
-- BMI_TX_FIFO_MIN_FILL_SHIFT;
-- tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) &
-- BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
-- tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1;
-- iowrite32be(tmp, ®s->fmbm_tfp);
--
-- /* Frame end data */
-- tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
-- BMI_FRAME_END_CS_IGNORE_SHIFT;
-- iowrite32be(tmp, ®s->fmbm_tfed);
--
-- /* Internal context parameters */
-- tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
-- BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
-- tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
-- BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
-- tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
-- BMI_IC_SIZE_MASK;
-- iowrite32be(tmp, ®s->fmbm_ticp);
--
-- /* Frame attributes */
-- tmp = BMI_CMD_TX_MR_DEF;
-- tmp |= BMI_CMD_ATTR_ORDER;
-- tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
-- iowrite32be(tmp, ®s->fmbm_tfca);
--
-- /* Dequeue NIA + enqueue NIA */
-- iowrite32be(NIA_ENG_QMI_DEQ, ®s->fmbm_tfdne);
-- iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, ®s->fmbm_tfene);
-- if (cfg->fmbm_tfne_has_features)
-- iowrite32be(!cfg->dflt_fqid ?
-- BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
-- NIA_BMI_AC_FETCH_ALL_FRAME, ®s->fmbm_tfne);
-- if (!cfg->dflt_fqid && cfg->dont_release_buf) {
-- iowrite32be(DFLT_FQ_ID, ®s->fmbm_tcfqid);
-- iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
-- ®s->fmbm_tfene);
-- if (cfg->fmbm_tfne_has_features)
-- iowrite32be(ioread32be(®s->fmbm_tfne) & ~BMI_EBD_EN,
-- ®s->fmbm_tfne);
-- }
--
-- /* Confirmation/error queues */
-- if (cfg->dflt_fqid || !cfg->dont_release_buf)
-- iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, ®s->fmbm_tcfqid);
-- iowrite32be((cfg->err_fqid & DFLT_FQ_ID), ®s->fmbm_tefqid);
--
-- return 0;
--}
--
--static int init_qmi(struct fman_port *port)
--{
-- struct fman_port_qmi_regs __iomem *regs = port->qmi_regs;
-- struct fman_port_cfg *cfg = port->cfg;
-- u32 tmp;
--
-- /* Rx port configuration */
-- if (port->port_type == FMAN_PORT_TYPE_RX) {
-- /* Enqueue NIA */
-- iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, ®s->fmqm_pnen);
-- return 0;
-- }
--
-- /* Continue with Tx port configuration */
-- if (port->port_type == FMAN_PORT_TYPE_TX) {
-- /* Enqueue NIA */
-- iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
-- ®s->fmqm_pnen);
-- /* Dequeue NIA */
-- iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, ®s->fmqm_pndn);
-- }
--
-- /* Dequeue Configuration register */
-- tmp = 0;
-- if (cfg->deq_high_priority)
-- tmp |= QMI_DEQ_CFG_PRI;
--
-- switch (cfg->deq_type) {
-- case FMAN_PORT_DEQ_BY_PRI:
-- tmp |= QMI_DEQ_CFG_TYPE1;
-- break;
-- case FMAN_PORT_DEQ_ACTIVE_FQ:
-- tmp |= QMI_DEQ_CFG_TYPE2;
-- break;
-- case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
-- tmp |= QMI_DEQ_CFG_TYPE3;
-- break;
-- default:
-- return -EINVAL;
-- }
--
-- switch (cfg->deq_prefetch_option) {
-- case FMAN_PORT_DEQ_NO_PREFETCH:
-- break;
-- case FMAN_PORT_DEQ_PART_PREFETCH:
-- tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
-- break;
-- case FMAN_PORT_DEQ_FULL_PREFETCH:
-- tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
-- break;
-- default:
-- return -EINVAL;
-- }
--
-- tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
-- tmp |= cfg->deq_byte_cnt;
-- iowrite32be(tmp, ®s->fmqm_pndc);
--
-- return 0;
--}
--
--static int init(struct fman_port *port)
--{
-- int err;
--
-- /* Init BMI registers */
-- switch (port->port_type) {
-- case FMAN_PORT_TYPE_RX:
-- err = init_bmi_rx(port);
-- break;
-- case FMAN_PORT_TYPE_TX:
-- err = init_bmi_tx(port);
-- break;
-- default:
-- return -EINVAL;
-- }
--
-- if (err)
-- return err;
--
-- /* Init QMI registers */
-- err = init_qmi(port);
-- return err;
--
-- return 0;
--}
--
--static int set_bpools(const struct fman_port *port,
-- const struct fman_port_bpools *bp)
--{
-- u32 __iomem *bp_reg, *bp_depl_reg;
-- u32 tmp;
-- u8 i, max_bp_num;
-- bool grp_depl_used = false, rx_port;
--
-- switch (port->port_type) {
-- case FMAN_PORT_TYPE_RX:
-- max_bp_num = port->ext_pools_num;
-- rx_port = true;
-- bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
-- bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
-- break;
-- default:
-- return -EINVAL;
-- }
--
-- if (rx_port) {
-- /* Check buffers are provided in ascending order */
-- for (i = 0; (i < (bp->count - 1) &&
-- (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
-- if (bp->bpool[i].size > bp->bpool[i + 1].size)
-- return -EINVAL;
-- }
-- }
--
-- /* Set up external buffers pools */
-- for (i = 0; i < bp->count; i++) {
-- tmp = BMI_EXT_BUF_POOL_VALID;
-- tmp |= ((u32)bp->bpool[i].bpid <<
-- BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
--
-- if (rx_port) {
-- if (bp->counters_enable)
-- tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
--
-- if (bp->bpool[i].is_backup)
-- tmp |= BMI_EXT_BUF_POOL_BACKUP;
--
-- tmp |= (u32)bp->bpool[i].size;
-- }
--
-- iowrite32be(tmp, &bp_reg[i]);
-- }
--
-- /* Clear unused pools */
-- for (i = bp->count; i < max_bp_num; i++)
-- iowrite32be(0, &bp_reg[i]);
--
-- /* Pools depletion */
-- tmp = 0;
-- for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
-- if (bp->bpool[i].grp_bp_depleted) {
-- grp_depl_used = true;
-- tmp |= 0x80000000 >> i;
-- }
--
-- if (bp->bpool[i].single_bp_depleted)
-- tmp |= 0x80 >> i;
-- }
--
-- if (grp_depl_used)
-- tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
-- BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
--
-- iowrite32be(tmp, bp_depl_reg);
-- return 0;
--}
--
--static bool is_init_done(struct fman_port_cfg *cfg)
--{
-- /* Checks if FMan port driver parameters were initialized */
-- if (!cfg)
-- return true;
--
-- return false;
--}
--
--static int verify_size_of_fifo(struct fman_port *port)
--{
-- u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0;
--
-- /* TX Ports */
-- if (port->port_type == FMAN_PORT_TYPE_TX) {
-- min_fifo_size_required = (u32)
-- (roundup(port->max_frame_length,
-- FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS));
--
-- min_fifo_size_required +=
-- port->cfg->tx_fifo_deq_pipeline_depth *
-- FMAN_BMI_FIFO_UNITS;
--
-- opt_fifo_size_for_b2b = min_fifo_size_required;
--
-- /* Add some margin for back-to-back capability to improve
-- * performance, allows the hardware to pipeline new frame dma
-- * while the previous frame not yet transmitted.
-- */
-- if (port->port_speed == 10000)
-- opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
-- else
-- opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS;
-- }
--
-- /* RX Ports */
-- else if (port->port_type == FMAN_PORT_TYPE_RX) {
-- if (port->rev_info.major >= 6)
-- min_fifo_size_required = (u32)
-- (roundup(port->max_frame_length,
-- FMAN_BMI_FIFO_UNITS) +
-- (5 * FMAN_BMI_FIFO_UNITS));
-- /* 4 according to spec + 1 for FOF>0 */
-- else
-- min_fifo_size_required = (u32)
-- (roundup(min(port->max_frame_length,
-- port->rx_pools_params.largest_buf_size),
-- FMAN_BMI_FIFO_UNITS) +
-- (7 * FMAN_BMI_FIFO_UNITS));
--
-- opt_fifo_size_for_b2b = min_fifo_size_required;
--
-- /* Add some margin for back-to-back capability to improve
-- * performance,allows the hardware to pipeline new frame dma
-- * while the previous frame not yet transmitted.
-- */
-- if (port->port_speed == 10000)
-- opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
-- else
-- opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
-- }
--
-- WARN_ON(min_fifo_size_required <= 0);
-- WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required);
--
-- /* Verify the size */
-- if (port->fifo_bufs.num < min_fifo_size_required)
-- dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
-- __func__, min_fifo_size_required);
-- else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
-- dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n",
-- __func__, opt_fifo_size_for_b2b);
--
-- return 0;
--}
--
--static int set_ext_buffer_pools(struct fman_port *port)
--{
-- struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools;
-- struct fman_buf_pool_depletion *buf_pool_depletion =
-- &port->cfg->buf_pool_depletion;
-- u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM];
-- u16 sizes_array[BM_MAX_NUM_OF_POOLS];
-- int i = 0, j = 0, err;
-- struct fman_port_bpools bpools;
--
-- memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM);
-- memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS);
-- memcpy(&port->ext_buf_pools, ext_buf_pools,
-- sizeof(struct fman_ext_pools));
--
-- fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools,
-- ordered_array,
-- sizes_array);
--
-- memset(&bpools, 0, sizeof(struct fman_port_bpools));
-- bpools.count = ext_buf_pools->num_of_pools_used;
-- bpools.counters_enable = true;
-- for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
-- bpools.bpool[i].bpid = ordered_array[i];
-- bpools.bpool[i].size = sizes_array[ordered_array[i]];
-- }
--
-- /* save pools parameters for later use */
-- port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
-- port->rx_pools_params.largest_buf_size =
-- sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
-- port->rx_pools_params.second_largest_buf_size =
-- sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
--
-- /* FMBM_RMPD reg. - pool depletion */
-- if (buf_pool_depletion->pools_grp_mode_enable) {
-- bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
-- for (i = 0; i < port->bm_max_num_of_pools; i++) {
-- if (buf_pool_depletion->pools_to_consider[i]) {
-- for (j = 0; j < ext_buf_pools->
-- num_of_pools_used; j++) {
-- if (i == ordered_array[j]) {
-- bpools.bpool[j].
-- grp_bp_depleted = true;
-- break;
-- }
-- }
-- }
-- }
-- }
--
-- if (buf_pool_depletion->single_pool_mode_enable) {
-- for (i = 0; i < port->bm_max_num_of_pools; i++) {
-- if (buf_pool_depletion->
-- pools_to_consider_for_single_mode[i]) {
-- for (j = 0; j < ext_buf_pools->
-- num_of_pools_used; j++) {
-- if (i == ordered_array[j]) {
-- bpools.bpool[j].
-- single_bp_depleted = true;
-- break;
-- }
-- }
-- }
-- }
-- }
--
-- err = set_bpools(port, &bpools);
-- if (err != 0) {
-- dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
-- return -EINVAL;
-- }
--
-- return 0;
--}
--
--static int init_low_level_driver(struct fman_port *port)
--{
-- struct fman_port_cfg *cfg = port->cfg;
-- u32 tmp_val;
--
-- switch (port->port_type) {
-- case FMAN_PORT_TYPE_RX:
-- cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
-- break;
-- default:
-- break;
-- }
--
-- tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ?
-- (port->internal_buf_offset / OFFSET_UNITS + 1) :
-- (port->internal_buf_offset / OFFSET_UNITS));
-- port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS);
-- port->cfg->int_buf_start_margin = port->internal_buf_offset;
--
-- if (init(port) != 0) {
-- dev_err(port->dev, "%s: fman port initialization failed\n",
-- __func__);
-- return -ENODEV;
-- }
--
-- /* The code bellow is a trick so the FM will not release the buffer
-- * to BM nor will try to enqueue the frame to QM
-- */
-- if (port->port_type == FMAN_PORT_TYPE_TX) {
-- if (!cfg->dflt_fqid && cfg->dont_release_buf) {
-- /* override fmbm_tcfqid 0 with a false non-0 value.
-- * This will force FM to act according to tfene.
-- * Otherwise, if fmbm_tcfqid is 0 the FM will release
-- * buffers to BM regardless of fmbm_tfene
-- */
-- iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
-- iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
-- &port->bmi_regs->tx.fmbm_tfene);
-- }
-- }
--
-- return 0;
--}
--
--static int fill_soc_specific_params(struct fman_port *port)
--{
-- u32 bmi_max_fifo_size;
--
-- bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm);
-- port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size);
-- port->bm_max_num_of_pools = 64;
--
-- /* P4080 - Major 2
-- * P2041/P3041/P5020/P5040 - Major 3
-- * Tx/Bx - Major 6
-- */
-- switch (port->rev_info.major) {
-- case 2:
-- case 3:
-- port->max_num_of_ext_pools = 4;
-- port->max_num_of_sub_portals = 12;
-- break;
--
-- case 6:
-- port->max_num_of_ext_pools = 8;
-- port->max_num_of_sub_portals = 16;
-- break;
--
-- default:
-- dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
-- return -EINVAL;
-- }
--
-- return 0;
--}
--
--static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type,
-- u16 speed)
--{
-- switch (type) {
-- case FMAN_PORT_TYPE_RX:
-- case FMAN_PORT_TYPE_TX:
-- switch (speed) {
-- case 10000:
-- return 4;
-- case 1000:
-- if (major >= 6)
-- return 2;
-- else
-- return 1;
-- default:
-- return 0;
-- }
-- default:
-- return 0;
-- }
--}
--
--static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type,
-- u16 speed)
--{
-- switch (type) {
-- case FMAN_PORT_TYPE_RX:
-- case FMAN_PORT_TYPE_TX:
-- switch (speed) {
-- case 10000:
-- return 16;
-- case 1000:
-- if (major >= 6)
-- return 4;
-- else
-- return 3;
-- default:
-- return 0;
-- }
-- default:
-- return 0;
-- }
--}
--
--static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type,
-- u16 speed)
--{
-- switch (type) {
-- case FMAN_PORT_TYPE_RX:
-- /* FMan V3 */
-- if (major >= 6)
-- return 0;
--
-- /* FMan V2 */
-- if (speed == 10000)
-- return 8;
-- else
-- return 2;
-- case FMAN_PORT_TYPE_TX:
-- default:
-- return 0;
-- }
--}
--
--static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type,
-- u16 speed)
--{
-- int val;
--
-- if (major >= 6) {
-- switch (type) {
-- case FMAN_PORT_TYPE_TX:
-- if (speed == 10000)
-- val = 12;
-- else
-- val = 3;
-- break;
-- case FMAN_PORT_TYPE_RX:
-- if (speed == 10000)
-- val = 8;
-- else
-- val = 2;
-- break;
-- default:
-- return 0;
-- }
-- } else {
-- switch (type) {
-- case FMAN_PORT_TYPE_TX:
-- case FMAN_PORT_TYPE_RX:
-- if (speed == 10000)
-- val = 8;
-- else
-- val = 1;
-- break;
-- default:
-- val = 0;
-- }
-- }
--
-- return val;
--}
--
--static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type,
-- u16 speed)
--{
-- /* FMan V3 */
-- if (major >= 6)
-- return 0;
--
-- /* FMan V2 */
-- switch (type) {
-- case FMAN_PORT_TYPE_RX:
-- case FMAN_PORT_TYPE_TX:
-- if (speed == 10000)
-- return 8;
-- else
-- return 1;
-- default:
-- return 0;
-- }
--}
--
--static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type,
-- u16 speed)
--{
-- int val;
--
-- if (major >= 6) {
-- switch (type) {
-- case FMAN_PORT_TYPE_TX:
-- if (speed == 10000)
-- val = 64;
-- else
-- val = 50;
-- break;
-- case FMAN_PORT_TYPE_RX:
-- if (speed == 10000)
-- val = 96;
-- else
-- val = 50;
-- break;
-- default:
-- val = 0;
-- }
-- } else {
-- switch (type) {
-- case FMAN_PORT_TYPE_TX:
-- if (speed == 10000)
-- val = 48;
-- else
-- val = 44;
-- break;
-- case FMAN_PORT_TYPE_RX:
-- if (speed == 10000)
-- val = 48;
-- else
-- val = 45;
-- break;
-- default:
-- val = 0;
-- }
-- }
--
-- return val;
--}
--
--static void set_dflt_cfg(struct fman_port *port,
-- struct fman_port_params *port_params)
--{
-- struct fman_port_cfg *cfg = port->cfg;
--
-- cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
-- cfg->color = FMAN_PORT_COLOR_GREEN;
-- cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
-- cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
-- cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
-- cfg->tx_fifo_low_comf_level = (5 * 1024);
-- cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
-- cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
-- cfg->tx_fifo_deq_pipeline_depth =
-- BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
-- cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type);
--
-- cfg->rx_pri_elevation =
-- DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size);
-- port->cfg->rx_fifo_thr =
-- DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major,
-- port->max_port_fifo_size);
--
-- if ((port->rev_info.major == 6) &&
-- ((port->rev_info.minor == 0) || (port->rev_info.minor == 3)))
-- cfg->errata_A006320 = true;
--
-- /* Excessive Threshold register - exists for pre-FMv3 chips only */
-- if (port->rev_info.major < 6)
-- cfg->excessive_threshold_register = true;
-- else
-- cfg->fmbm_tfne_has_features = true;
--
-- cfg->buffer_prefix_content.data_align =
-- DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
--}
--
--static void set_rx_dflt_cfg(struct fman_port *port,
-- struct fman_port_params *port_params)
--{
-- port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD;
--
-- memcpy(&port->cfg->ext_buf_pools,
-- &port_params->specific_params.rx_params.ext_buf_pools,
-- sizeof(struct fman_ext_pools));
-- port->cfg->err_fqid =
-- port_params->specific_params.rx_params.err_fqid;
-- port->cfg->dflt_fqid =
-- port_params->specific_params.rx_params.dflt_fqid;
--}
--
--static void set_tx_dflt_cfg(struct fman_port *port,
-- struct fman_port_params *port_params,
-- struct fman_port_dts_params *dts_params)
--{
-- port->cfg->tx_fifo_deq_pipeline_depth =
-- get_dflt_fifo_deq_pipeline_depth(port->rev_info.major,
-- port->port_type,
-- port->port_speed);
-- port->cfg->err_fqid =
-- port_params->specific_params.non_rx_params.err_fqid;
-- port->cfg->deq_sp =
-- (u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK);
-- port->cfg->dflt_fqid =
-- port_params->specific_params.non_rx_params.dflt_fqid;
-- port->cfg->deq_high_priority = true;
--}
--
--/**
-- * fman_port_config
-- * @port: Pointer to the port structure
-- * @params: Pointer to data structure of parameters
-- *
-- * Creates a descriptor for the FM PORT module.
-- * The routine returns a pointer to the FM PORT object.
-- * This descriptor must be passed as first parameter to all other FM PORT
-- * function calls.
-- * No actual initialization or configuration of FM hardware is done by this
-- * routine.
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_port_config(struct fman_port *port, struct fman_port_params *params)
--{
-- void __iomem *base_addr = port->dts_params.base_addr;
-- int err;
--
-- /* Allocate the FM driver's parameters structure */
-- port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
-- if (!port->cfg)
-- goto err_params;
--
-- /* Initialize FM port parameters which will be kept by the driver */
-- port->port_type = port->dts_params.type;
-- port->port_speed = port->dts_params.speed;
-- port->port_id = port->dts_params.id;
-- port->fm = port->dts_params.fman;
-- port->ext_pools_num = (u8)8;
--
-- /* get FM revision */
-- fman_get_revision(port->fm, &port->rev_info);
--
-- err = fill_soc_specific_params(port);
-- if (err)
-- goto err_port_cfg;
--
-- switch (port->port_type) {
-- case FMAN_PORT_TYPE_RX:
-- set_rx_dflt_cfg(port, params);
-- case FMAN_PORT_TYPE_TX:
-- set_tx_dflt_cfg(port, params, &port->dts_params);
-- default:
-- set_dflt_cfg(port, params);
-- }
--
-- /* Continue with other parameters */
-- /* set memory map pointers */
-- port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
-- port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
--
-- port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
-- /* resource distribution. */
--
-- port->fifo_bufs.num =
-- get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type,
-- port->port_speed) * FMAN_BMI_FIFO_UNITS;
-- port->fifo_bufs.extra =
-- DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS;
--
-- port->open_dmas.num =
-- get_dflt_num_of_open_dmas(port->rev_info.major,
-- port->port_type, port->port_speed);
-- port->open_dmas.extra =
-- get_dflt_extra_num_of_open_dmas(port->rev_info.major,
-- port->port_type, port->port_speed);
-- port->tasks.num =
-- get_dflt_num_of_tasks(port->rev_info.major,
-- port->port_type, port->port_speed);
-- port->tasks.extra =
-- get_dflt_extra_num_of_tasks(port->rev_info.major,
-- port->port_type, port->port_speed);
--
-- /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata
-- * workaround
-- */
-- if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) &&
-- (((port->port_type == FMAN_PORT_TYPE_TX) &&
-- (port->port_speed == 1000)))) {
-- port->open_dmas.num = 16;
-- port->open_dmas.extra = 0;
-- }
--
-- if (port->rev_info.major >= 6 &&
-- port->port_type == FMAN_PORT_TYPE_TX &&
-- port->port_speed == 1000) {
-- /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
-- * workaround
-- */
-- if (port->rev_info.major >= 6) {
-- u32 reg;
--
-- reg = 0x00001013;
-- iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
-- }
-- }
--
-- return 0;
--
--err_port_cfg:
-- kfree(port->cfg);
--err_params:
-- kfree(port);
-- return -EINVAL;
--}
--EXPORT_SYMBOL(fman_port_config);
--
--/**
-- * fman_port_init
-- * port: A pointer to a FM Port module.
-- * Initializes the FM PORT module by defining the software structure and
-- * configuring the hardware registers.
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_port_init(struct fman_port *port)
--{
-- struct fman_port_cfg *cfg;
-- int err;
-- struct fman_port_init_params params;
--
-- if (is_init_done(port->cfg))
-- return -EINVAL;
--
-- err = fman_sp_build_buffer_struct(&port->cfg->int_context,
-- &port->cfg->buffer_prefix_content,
-- &port->cfg->buf_margins,
-- &port->buffer_offsets,
-- &port->internal_buf_offset);
-- if (err)
-- return err;
--
-- cfg = port->cfg;
--
-- if (port->port_type == FMAN_PORT_TYPE_RX) {
-- /* Call the external Buffer routine which also checks fifo
-- * size and updates it if necessary
-- */
-- /* define external buffer pools and pool depletion */
-- err = set_ext_buffer_pools(port);
-- if (err)
-- return err;
-- /* check if the largest external buffer pool is large enough */
-- if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
-- cfg->buf_margins.end_margins >
-- port->rx_pools_params.largest_buf_size) {
-- dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
-- __func__, cfg->buf_margins.start_margins,
-- cfg->buf_margins.end_margins,
-- port->rx_pools_params.largest_buf_size);
-- return -EINVAL;
-- }
-- }
--
-- /* Call FM module routine for communicating parameters */
-- memset(¶ms, 0, sizeof(params));
-- params.port_id = port->port_id;
-- params.port_type = port->port_type;
-- params.port_speed = port->port_speed;
-- params.num_of_tasks = (u8)port->tasks.num;
-- params.num_of_extra_tasks = (u8)port->tasks.extra;
-- params.num_of_open_dmas = (u8)port->open_dmas.num;
-- params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
--
-- if (port->fifo_bufs.num) {
-- err = verify_size_of_fifo(port);
-- if (err)
-- return err;
-- }
-- params.size_of_fifo = port->fifo_bufs.num;
-- params.extra_size_of_fifo = port->fifo_bufs.extra;
-- params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
-- params.max_frame_length = port->max_frame_length;
--
-- err = fman_set_port_params(port->fm, ¶ms);
-- if (err)
-- return err;
--
-- err = init_low_level_driver(port);
-- if (err)
-- return err;
--
-- kfree(port->cfg);
-- port->cfg = NULL;
--
-- return 0;
--}
--EXPORT_SYMBOL(fman_port_init);
--
--/**
-- * fman_port_cfg_buf_prefix_content
-- * @port A pointer to a FM Port module.
-- * @buffer_prefix_content A structure of parameters describing
-- * the structure of the buffer.
-- * Out parameter:
-- * Start margin - offset of data from
-- * start of external buffer.
-- * Defines the structure, size and content of the application buffer.
-- * The prefix, in Tx ports, if 'pass_prs_result', the application should set
-- * a value to their offsets in the prefix of the FM will save the first
-- * 'priv_data_size', than, depending on 'pass_prs_result' and
-- * 'pass_time_stamp', copy parse result and timeStamp, and the packet itself
-- * (in this order), to the application buffer, and to offset.
-- * Calling this routine changes the buffer margins definitions in the internal
-- * driver data base from its default configuration:
-- * Data size: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
-- * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
-- * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
-- * May be used for all ports
-- *
-- * Allowed only following fman_port_config() and before fman_port_init().
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_port_cfg_buf_prefix_content(struct fman_port *port,
-- struct fman_buffer_prefix_content *
-- buffer_prefix_content)
--{
-- if (is_init_done(port->cfg))
-- return -EINVAL;
--
-- memcpy(&port->cfg->buffer_prefix_content,
-- buffer_prefix_content,
-- sizeof(struct fman_buffer_prefix_content));
-- /* if data_align was not initialized by user,
-- * we return to driver's default
-- */
-- if (!port->cfg->buffer_prefix_content.data_align)
-- port->cfg->buffer_prefix_content.data_align =
-- DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
--
-- return 0;
--}
--EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
--
--/**
-- * fman_port_disable
-- * port: A pointer to a FM Port module.
-- *
-- * Gracefully disable an FM port. The port will not start new tasks after all
-- * tasks associated with the port are terminated.
-- *
-- * This is a blocking routine, it returns after port is gracefully stopped,
-- * i.e. the port will not except new frames, but it will finish all frames
-- * or tasks which were already began.
-- * Allowed only following fman_port_init().
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_port_disable(struct fman_port *port)
--{
-- u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
-- u32 tmp;
-- bool rx_port, failure = false;
-- int count;
--
-- if (!is_init_done(port->cfg))
-- return -EINVAL;
--
-- switch (port->port_type) {
-- case FMAN_PORT_TYPE_RX:
-- bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
-- bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
-- rx_port = true;
-- break;
-- case FMAN_PORT_TYPE_TX:
-- bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
-- bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
-- rx_port = false;
-- break;
-- default:
-- return -EINVAL;
-- }
--
-- /* Disable QMI */
-- if (!rx_port) {
-- tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
-- iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
--
-- /* Wait for QMI to finish FD handling */
-- count = 100;
-- do {
-- udelay(10);
-- tmp = ioread32be(&port->qmi_regs->fmqm_pns);
-- } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
--
-- if (count == 0) {
-- /* Timeout */
-- failure = true;
-- }
-- }
--
-- /* Disable BMI */
-- tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
-- iowrite32be(tmp, bmi_cfg_reg);
--
-- /* Wait for graceful stop end */
-- count = 500;
-- do {
-- udelay(10);
-- tmp = ioread32be(bmi_status_reg);
-- } while ((tmp & BMI_PORT_STATUS_BSY) && --count);
--
-- if (count == 0) {
-- /* Timeout */
-- failure = true;
-- }
--
-- if (failure)
-- dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
-- __func__, port->port_id);
--
-- return 0;
--}
--EXPORT_SYMBOL(fman_port_disable);
--
--/**
-- * fman_port_enable
-- * port: A pointer to a FM Port module.
-- *
-- * A runtime routine provided to allow disable/enable of port.
-- *
-- * Allowed only following fman_port_init().
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_port_enable(struct fman_port *port)
--{
-- u32 __iomem *bmi_cfg_reg;
-- u32 tmp;
-- bool rx_port;
--
-- if (!is_init_done(port->cfg))
-- return -EINVAL;
--
-- switch (port->port_type) {
-- case FMAN_PORT_TYPE_RX:
-- bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
-- rx_port = true;
-- break;
-- case FMAN_PORT_TYPE_TX:
-- bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
-- rx_port = false;
-- break;
-- default:
-- return -EINVAL;
-- }
--
-- /* Enable QMI */
-- if (!rx_port) {
-- tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
-- iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
-- }
--
-- /* Enable BMI */
-- tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
-- iowrite32be(tmp, bmi_cfg_reg);
--
-- return 0;
--}
--EXPORT_SYMBOL(fman_port_enable);
--
--/**
-- * fman_port_bind
-- * dev: FMan Port OF device pointer
-- *
-- * Bind to a specific FMan Port.
-- *
-- * Allowed only after the port was created.
-- *
-- * Return: A pointer to the FMan port device.
-- */
--struct fman_port *fman_port_bind(struct device *dev)
--{
-- return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
--}
--EXPORT_SYMBOL(fman_port_bind);
--
--/**
-- * fman_port_get_qman_channel_id
-- * port: Pointer to the FMan port devuce
-- *
-- * Get the QMan channel ID for the specific port
-- *
-- * Return: QMan channel ID
-- */
--u32 fman_port_get_qman_channel_id(struct fman_port *port)
--{
-- return port->dts_params.qman_channel_id;
--}
--EXPORT_SYMBOL(fman_port_get_qman_channel_id);
--
--static int fman_port_probe(struct platform_device *of_dev)
--{
-- struct fman_port *port;
-- struct fman *fman;
-- struct device_node *fm_node, *port_node;
-- struct resource res;
-- struct resource *dev_res;
-- u32 val;
-- int err = 0, lenp;
-- enum fman_port_type port_type;
-- u16 port_speed;
-- u8 port_id;
--
-- port = kzalloc(sizeof(*port), GFP_KERNEL);
-- if (!port)
-- return -ENOMEM;
--
-- port->dev = &of_dev->dev;
--
-- port_node = of_node_get(of_dev->dev.of_node);
--
-- /* Get the FM node */
-- fm_node = of_get_parent(port_node);
-- if (!fm_node) {
-- dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
-- err = -ENODEV;
-- goto return_err;
-- }
--
-- fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
-- of_node_put(fm_node);
-- if (!fman) {
-- err = -EINVAL;
-- goto return_err;
-- }
--
-- err = of_property_read_u32(port_node, "cell-index", &val);
-- if (err) {
-- dev_err(port->dev, "%s: reading cell-index for %s failed\n",
-- __func__, port_node->full_name);
-- err = -EINVAL;
-- goto return_err;
-- }
-- port_id = (u8)val;
-- port->dts_params.id = port_id;
--
-- if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
-- port_type = FMAN_PORT_TYPE_TX;
-- port_speed = 1000;
-- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
-- port_speed = 10000;
--
-- } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
-- if (port_id >= TX_10G_PORT_BASE)
-- port_speed = 10000;
-- else
-- port_speed = 1000;
-- port_type = FMAN_PORT_TYPE_TX;
--
-- } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
-- port_type = FMAN_PORT_TYPE_RX;
-- port_speed = 1000;
-- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
-- port_speed = 10000;
--
-- } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
-- if (port_id >= RX_10G_PORT_BASE)
-- port_speed = 10000;
-- else
-- port_speed = 1000;
-- port_type = FMAN_PORT_TYPE_RX;
--
-- } else {
-- dev_err(port->dev, "%s: Illegal port type\n", __func__);
-- err = -EINVAL;
-- goto return_err;
-- }
--
-- port->dts_params.type = port_type;
-- port->dts_params.speed = port_speed;
--
-- if (port_type == FMAN_PORT_TYPE_TX) {
-- u32 qman_channel_id;
--
-- qman_channel_id = fman_get_qman_channel_id(fman, port_id);
-- if (qman_channel_id == 0) {
-- dev_err(port->dev, "%s: incorrect qman-channel-id\n",
-- __func__);
-- err = -EINVAL;
-- goto return_err;
-- }
-- port->dts_params.qman_channel_id = qman_channel_id;
-- }
--
-- err = of_address_to_resource(port_node, 0, &res);
-- if (err < 0) {
-- dev_err(port->dev, "%s: of_address_to_resource() failed\n",
-- __func__);
-- err = -ENOMEM;
-- goto return_err;
-- }
--
-- port->dts_params.fman = fman;
--
-- of_node_put(port_node);
--
-- dev_res = __devm_request_region(port->dev, &res, res.start,
-- resource_size(&res), "fman-port");
-- if (!dev_res) {
-- dev_err(port->dev, "%s: __devm_request_region() failed\n",
-- __func__);
-- err = -EINVAL;
-- goto free_port;
-- }
--
-- port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
-- resource_size(&res));
-- if (!port->dts_params.base_addr)
-- dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
--
-- dev_set_drvdata(&of_dev->dev, port);
--
-- return 0;
--
--return_err:
-- of_node_put(port_node);
--free_port:
-- kfree(port);
-- return err;
--}
--
--static const struct of_device_id fman_port_match[] = {
-- {.compatible = "fsl,fman-v3-port-rx"},
-- {.compatible = "fsl,fman-v2-port-rx"},
-- {.compatible = "fsl,fman-v3-port-tx"},
-- {.compatible = "fsl,fman-v2-port-tx"},
-- {}
--};
--
--MODULE_DEVICE_TABLE(of, fman_port_match);
--
--static struct platform_driver fman_port_driver = {
-- .driver = {
-- .name = "fsl-fman-port",
-- .of_match_table = fman_port_match,
-- },
-- .probe = fman_port_probe,
--};
--
--static int __init fman_port_load(void)
--{
-- int err;
--
-- pr_debug("FSL DPAA FMan driver\n");
--
-- err = platform_driver_register(&fman_port_driver);
-- if (err < 0)
-- pr_err("Error, platform_driver_register() = %d\n", err);
--
-- return err;
--}
--module_init(fman_port_load);
--
--static void __exit fman_port_unload(void)
--{
-- platform_driver_unregister(&fman_port_driver);
--}
--module_exit(fman_port_unload);
--
--MODULE_LICENSE("Dual BSD/GPL");
--MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
-diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
-deleted file mode 100644
-index 8ba9017..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_port.h
-+++ /dev/null
-@@ -1,151 +0,0 @@
--/*
-- * Copyright 2008 - 2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#ifndef __FMAN_PORT_H
--#define __FMAN_PORT_H
--
--#include "fman.h"
--
--/* FM Port API
-- * The FM uses a general module called "port" to represent a Tx port (MAC),
-- * an Rx port (MAC).
-- * The number of ports in an FM varies between SOCs.
-- * The SW driver manages these ports as sub-modules of the FM,i.e. after an
-- * FM is initialized, its ports may be initialized and operated upon.
-- * The port is initialized aware of its type, but other functions on a port
-- * may be indifferent to its type. When necessary, the driver verifies
-- * coherence and returns error if applicable.
-- * On initialization, user specifies the port type and it's index (relative
-- * to the port's type) - always starting at 0.
-- */
--
--/* FM Frame error */
--/* Frame Descriptor errors */
--/* Not for Rx-Port! Unsupported Format */
--#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT FM_FD_ERR_UNSUPPORTED_FORMAT
--/* Not for Rx-Port! Length Error */
--#define FM_PORT_FRM_ERR_LENGTH FM_FD_ERR_LENGTH
--/* DMA Data error */
--#define FM_PORT_FRM_ERR_DMA FM_FD_ERR_DMA
--/* non Frame-Manager error; probably come from SEC that was chained to FM */
--#define FM_PORT_FRM_ERR_NON_FM FM_FD_RX_STATUS_ERR_NON_FM
-- /* IPR error */
--#define FM_PORT_FRM_ERR_IPRE (FM_FD_ERR_IPR & ~FM_FD_IPR)
--/* IPR non-consistent-sp */
--#define FM_PORT_FRM_ERR_IPR_NCSP (FM_FD_ERR_IPR_NCSP & \
-- ~FM_FD_IPR)
--
--/* Rx FIFO overflow, FCS error, code error, running disparity
-- * error (SGMII and TBI modes), FIFO parity error.
-- * PHY Sequence error, PHY error control character detected.
-- */
--#define FM_PORT_FRM_ERR_PHYSICAL FM_FD_ERR_PHYSICAL
--/* Frame too long OR Frame size exceeds max_length_frame */
--#define FM_PORT_FRM_ERR_SIZE FM_FD_ERR_SIZE
--/* indicates a classifier "drop" operation */
--#define FM_PORT_FRM_ERR_CLS_DISCARD FM_FD_ERR_CLS_DISCARD
--/* Extract Out of Frame */
--#define FM_PORT_FRM_ERR_EXTRACTION FM_FD_ERR_EXTRACTION
--/* No Scheme Selected */
--#define FM_PORT_FRM_ERR_NO_SCHEME FM_FD_ERR_NO_SCHEME
--/* Keysize Overflow */
--#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW FM_FD_ERR_KEYSIZE_OVERFLOW
--/* Frame color is red */
--#define FM_PORT_FRM_ERR_COLOR_RED FM_FD_ERR_COLOR_RED
--/* Frame color is yellow */
--#define FM_PORT_FRM_ERR_COLOR_YELLOW FM_FD_ERR_COLOR_YELLOW
--/* Parser Time out Exceed */
--#define FM_PORT_FRM_ERR_PRS_TIMEOUT FM_FD_ERR_PRS_TIMEOUT
--/* Invalid Soft Parser instruction */
--#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT FM_FD_ERR_PRS_ILL_INSTRUCT
--/* Header error was identified during parsing */
--#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR
--/* Frame parsed beyind 256 first bytes */
--#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
--/* FPM Frame Processing Timeout Exceeded */
--#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001
--
--struct fman_port;
--
--/* A structure for additional Rx port parameters */
--struct fman_port_rx_params {
-- u32 err_fqid; /* Error Queue Id. */
-- u32 dflt_fqid; /* Default Queue Id. */
-- /* Which external buffer pools are used
-- * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes.
-- */
-- struct fman_ext_pools ext_buf_pools;
--};
--
--/* A structure for additional non-Rx port parameters */
--struct fman_port_non_rx_params {
-- /* Error Queue Id. */
-- u32 err_fqid;
-- /* For Tx - Default Confirmation queue, 0 means no Tx confirmation
-- * for processed frames. For OP port - default Rx queue.
-- */
-- u32 dflt_fqid;
--};
--
--/* A union for additional parameters depending on port type */
--union fman_port_specific_params {
-- /* Rx port parameters structure */
-- struct fman_port_rx_params rx_params;
-- /* Non-Rx port parameters structure */
-- struct fman_port_non_rx_params non_rx_params;
--};
--
--/* A structure representing FM initialization parameters */
--struct fman_port_params {
-- /* Virtual Address of memory mapped FM Port registers. */
-- void *fm;
-- union fman_port_specific_params specific_params;
-- /* Additional parameters depending on port type. */
--};
--
--int fman_port_config(struct fman_port *port, struct fman_port_params *params);
--
--int fman_port_init(struct fman_port *port);
--
--int fman_port_cfg_buf_prefix_content(struct fman_port *port,
-- struct fman_buffer_prefix_content
-- *buffer_prefix_content);
--
--int fman_port_disable(struct fman_port *port);
--
--int fman_port_enable(struct fman_port *port);
--
--u32 fman_port_get_qman_channel_id(struct fman_port *port);
--
--struct fman_port *fman_port_bind(struct device *dev);
--
--#endif /* __FMAN_PORT_H */
-diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
-deleted file mode 100644
-index 248f5bc..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_sp.c
-+++ /dev/null
-@@ -1,169 +0,0 @@
--/*
-- * Copyright 2008 - 2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#include "fman_sp.h"
--#include "fman.h"
--
--void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
-- *fm_ext_pools,
-- u8 *ordered_array,
-- u16 *sizes_array)
--{
-- u16 buf_size = 0;
-- int i = 0, j = 0, k = 0;
--
-- /* First we copy the external buffers pools information
-- * to an ordered local array
-- */
-- for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) {
-- /* get pool size */
-- buf_size = fm_ext_pools->ext_buf_pool[i].size;
--
-- /* keep sizes in an array according to poolId
-- * for direct access
-- */
-- sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size;
--
-- /* save poolId in an ordered array according to size */
-- for (j = 0; j <= i; j++) {
-- /* this is the next free place in the array */
-- if (j == i)
-- ordered_array[i] =
-- fm_ext_pools->ext_buf_pool[i].id;
-- else {
-- /* find the right place for this poolId */
-- if (buf_size < sizes_array[ordered_array[j]]) {
-- /* move the pool_ids one place ahead
-- * to make room for this poolId
-- */
-- for (k = i; k > j; k--)
-- ordered_array[k] =
-- ordered_array[k - 1];
--
-- /* now k==j, this is the place for
-- * the new size
-- */
-- ordered_array[k] =
-- fm_ext_pools->ext_buf_pool[i].id;
-- break;
-- }
-- }
-- }
-- }
--}
--EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
--
--int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
-- int_context_data_copy,
-- struct fman_buffer_prefix_content *
-- buffer_prefix_content,
-- struct fman_sp_buf_margins *buf_margins,
-- struct fman_sp_buffer_offsets *buffer_offsets,
-- u8 *internal_buf_offset)
--{
-- u32 tmp;
--
-- /* Align start of internal context data to 16 byte */
-- int_context_data_copy->ext_buf_offset = (u16)
-- ((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ?
-- ((buffer_prefix_content->priv_data_size + OFFSET_UNITS) &
-- ~(u16)(OFFSET_UNITS - 1)) :
-- buffer_prefix_content->priv_data_size);
--
-- /* Translate margin and int_context params to FM parameters */
-- /* Initialize with illegal value. Later we'll set legal values. */
-- buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE;
-- buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE;
-- buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE;
--
-- /* Internally the driver supports 4 options
-- * 1. prsResult/timestamp/hashResult selection (in fact 8 options,
-- * but for simplicity we'll
-- * relate to it as 1).
-- * 2. All IC context (from AD) not including debug.
-- */
--
-- /* This case covers the options under 1 */
-- /* Copy size must be in 16-byte granularity. */
-- int_context_data_copy->size =
-- (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) +
-- ((buffer_prefix_content->pass_time_stamp ||
-- buffer_prefix_content->pass_hash_result) ? 16 : 0));
--
-- /* Align start of internal context data to 16 byte */
-- int_context_data_copy->int_context_offset =
-- (u8)(buffer_prefix_content->pass_prs_result ? 32 :
-- ((buffer_prefix_content->pass_time_stamp ||
-- buffer_prefix_content->pass_hash_result) ? 64 : 0));
--
-- if (buffer_prefix_content->pass_prs_result)
-- buffer_offsets->prs_result_offset =
-- int_context_data_copy->ext_buf_offset;
-- if (buffer_prefix_content->pass_time_stamp)
-- buffer_offsets->time_stamp_offset =
-- buffer_prefix_content->pass_prs_result ?
-- (int_context_data_copy->ext_buf_offset +
-- sizeof(struct fman_prs_result)) :
-- int_context_data_copy->ext_buf_offset;
-- if (buffer_prefix_content->pass_hash_result)
-- /* If PR is not requested, whether TS is
-- * requested or not, IC will be copied from TS
-- */
-- buffer_offsets->hash_result_offset =
-- buffer_prefix_content->pass_prs_result ?
-- (int_context_data_copy->ext_buf_offset +
-- sizeof(struct fman_prs_result) + 8) :
-- int_context_data_copy->ext_buf_offset + 8;
--
-- if (int_context_data_copy->size)
-- buf_margins->start_margins =
-- (u16)(int_context_data_copy->ext_buf_offset +
-- int_context_data_copy->size);
-- else
-- /* No Internal Context passing, STartMargin is
-- * immediately after private_info
-- */
-- buf_margins->start_margins =
-- buffer_prefix_content->priv_data_size;
--
-- /* align data start */
-- tmp = (u32)(buf_margins->start_margins %
-- buffer_prefix_content->data_align);
-- if (tmp)
-- buf_margins->start_margins +=
-- (buffer_prefix_content->data_align - tmp);
-- buffer_offsets->data_offset = buf_margins->start_margins;
--
-- return 0;
--}
--EXPORT_SYMBOL(fman_sp_build_buffer_struct);
--
-diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
-deleted file mode 100644
-index 820b7f6..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_sp.h
-+++ /dev/null
-@@ -1,103 +0,0 @@
--/*
-- * Copyright 2008 - 2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#ifndef __FM_SP_H
--#define __FM_SP_H
--
--#include "fman.h"
--#include <linux/types.h>
--
--#define ILLEGAL_BASE (~0)
--
--/* defaults */
--#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN 64
--
--/* Registers bit fields */
--#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER 0x40000000
--#define FMAN_SP_EXT_BUF_POOL_VALID 0x80000000
--#define FMAN_SP_EXT_BUF_POOL_BACKUP 0x20000000
--#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE 0x00100000
--#define FMAN_SP_SG_DISABLE 0x80000000
--
--/* shifts */
--#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
--#define FMAN_SP_DMA_ATTR_SWP_SHIFT 30
--#define FMAN_SP_IC_TO_EXT_SHIFT 16
--#define FMAN_SP_IC_FROM_INT_SHIFT 8
--
--/* structure for defining internal context copying */
--struct fman_sp_int_context_data_copy {
-- /* < Offset in External buffer to which internal
-- * context is copied to (Rx) or taken from (Tx, Op).
-- */
-- u16 ext_buf_offset;
-- /* Offset within internal context to copy from
-- * (Rx) or to copy to (Tx, Op).
-- */
-- u8 int_context_offset;
-- /* Internal offset size to be copied */
-- u16 size;
--};
--
--/* struct for defining external buffer margins */
--struct fman_sp_buf_margins {
-- /* Number of bytes to be left at the beginning
-- * of the external buffer (must be divisible by 16)
-- */
-- u16 start_margins;
-- /* number of bytes to be left at the end
-- * of the external buffer(must be divisible by 16)
-- */
-- u16 end_margins;
--};
--
--struct fman_sp_buffer_offsets {
-- u32 data_offset;
-- u32 prs_result_offset;
-- u32 time_stamp_offset;
-- u32 hash_result_offset;
--};
--
--int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy
-- *int_context_data_copy,
-- struct fman_buffer_prefix_content
-- *buffer_prefix_content,
-- struct fman_sp_buf_margins *buf_margins,
-- struct fman_sp_buffer_offsets
-- *buffer_offsets,
-- u8 *internal_buf_offset);
--
--void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
-- *fm_ext_pools,
-- u8 *ordered_array,
-- u16 *sizes_array);
--
--#endif /* __FM_SP_H */
-diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
-deleted file mode 100644
-index 4b0f3a5..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
-+++ /dev/null
-@@ -1,783 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
--#include "fman_tgec.h"
--#include "fman.h"
--
--#include <linux/slab.h>
--#include <linux/bitrev.h>
--#include <linux/io.h>
--#include <linux/crc32.h>
--
--/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
--#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
--
--/* Command and Configuration Register (COMMAND_CONFIG) */
--#define CMD_CFG_NO_LEN_CHK 0x00020000
--#define CMD_CFG_PAUSE_IGNORE 0x00000100
--#define CMF_CFG_CRC_FWD 0x00000040
--#define CMD_CFG_PROMIS_EN 0x00000010
--#define CMD_CFG_RX_EN 0x00000002
--#define CMD_CFG_TX_EN 0x00000001
--
--/* Interrupt Mask Register (IMASK) */
--#define TGEC_IMASK_MDIO_SCAN_EVENT 0x00010000
--#define TGEC_IMASK_MDIO_CMD_CMPL 0x00008000
--#define TGEC_IMASK_REM_FAULT 0x00004000
--#define TGEC_IMASK_LOC_FAULT 0x00002000
--#define TGEC_IMASK_TX_ECC_ER 0x00001000
--#define TGEC_IMASK_TX_FIFO_UNFL 0x00000800
--#define TGEC_IMASK_TX_FIFO_OVFL 0x00000400
--#define TGEC_IMASK_TX_ER 0x00000200
--#define TGEC_IMASK_RX_FIFO_OVFL 0x00000100
--#define TGEC_IMASK_RX_ECC_ER 0x00000080
--#define TGEC_IMASK_RX_JAB_FRM 0x00000040
--#define TGEC_IMASK_RX_OVRSZ_FRM 0x00000020
--#define TGEC_IMASK_RX_RUNT_FRM 0x00000010
--#define TGEC_IMASK_RX_FRAG_FRM 0x00000008
--#define TGEC_IMASK_RX_LEN_ER 0x00000004
--#define TGEC_IMASK_RX_CRC_ER 0x00000002
--#define TGEC_IMASK_RX_ALIGN_ER 0x00000001
--
--/* Hashtable Control Register (HASHTABLE_CTRL) */
--#define TGEC_HASH_MCAST_SHIFT 23
--#define TGEC_HASH_MCAST_EN 0x00000200
--#define TGEC_HASH_ADR_MSK 0x000001ff
--
--#define DEFAULT_TX_IPG_LENGTH 12
--#define DEFAULT_MAX_FRAME_LENGTH 0x600
--#define DEFAULT_PAUSE_QUANT 0xf000
--
--/* number of pattern match registers (entries) */
--#define TGEC_NUM_OF_PADDRS 1
--
--/* Group address bit indication */
--#define GROUP_ADDRESS 0x0000010000000000LL
--
--/* Hash table size (= 32 bits*8 regs) */
--#define TGEC_HASH_TABLE_SIZE 512
--
--/* tGEC memory map */
--struct tgec_regs {
-- u32 tgec_id; /* 0x000 Controller ID */
-- u32 reserved001[1]; /* 0x004 */
-- u32 command_config; /* 0x008 Control and configuration */
-- u32 mac_addr_0; /* 0x00c Lower 32 bits of the MAC adr */
-- u32 mac_addr_1; /* 0x010 Upper 16 bits of the MAC adr */
-- u32 maxfrm; /* 0x014 Maximum frame length */
-- u32 pause_quant; /* 0x018 Pause quanta */
-- u32 rx_fifo_sections; /* 0x01c */
-- u32 tx_fifo_sections; /* 0x020 */
-- u32 rx_fifo_almost_f_e; /* 0x024 */
-- u32 tx_fifo_almost_f_e; /* 0x028 */
-- u32 hashtable_ctrl; /* 0x02c Hash table control */
-- u32 mdio_cfg_status; /* 0x030 */
-- u32 mdio_command; /* 0x034 */
-- u32 mdio_data; /* 0x038 */
-- u32 mdio_regaddr; /* 0x03c */
-- u32 status; /* 0x040 */
-- u32 tx_ipg_len; /* 0x044 Transmitter inter-packet-gap */
-- u32 mac_addr_2; /* 0x048 Lower 32 bits of 2nd MAC adr */
-- u32 mac_addr_3; /* 0x04c Upper 16 bits of 2nd MAC adr */
-- u32 rx_fifo_ptr_rd; /* 0x050 */
-- u32 rx_fifo_ptr_wr; /* 0x054 */
-- u32 tx_fifo_ptr_rd; /* 0x058 */
-- u32 tx_fifo_ptr_wr; /* 0x05c */
-- u32 imask; /* 0x060 Interrupt mask */
-- u32 ievent; /* 0x064 Interrupt event */
-- u32 udp_port; /* 0x068 Defines a UDP Port number */
-- u32 type_1588v2; /* 0x06c Type field for 1588v2 */
-- u32 reserved070[4]; /* 0x070 */
-- /* 10Ge Statistics Counter */
-- u32 tfrm_u; /* 80 aFramesTransmittedOK */
-- u32 tfrm_l; /* 84 aFramesTransmittedOK */
-- u32 rfrm_u; /* 88 aFramesReceivedOK */
-- u32 rfrm_l; /* 8c aFramesReceivedOK */
-- u32 rfcs_u; /* 90 aFrameCheckSequenceErrors */
-- u32 rfcs_l; /* 94 aFrameCheckSequenceErrors */
-- u32 raln_u; /* 98 aAlignmentErrors */
-- u32 raln_l; /* 9c aAlignmentErrors */
-- u32 txpf_u; /* A0 aPAUSEMACCtrlFramesTransmitted */
-- u32 txpf_l; /* A4 aPAUSEMACCtrlFramesTransmitted */
-- u32 rxpf_u; /* A8 aPAUSEMACCtrlFramesReceived */
-- u32 rxpf_l; /* Ac aPAUSEMACCtrlFramesReceived */
-- u32 rlong_u; /* B0 aFrameTooLongErrors */
-- u32 rlong_l; /* B4 aFrameTooLongErrors */
-- u32 rflr_u; /* B8 aInRangeLengthErrors */
-- u32 rflr_l; /* Bc aInRangeLengthErrors */
-- u32 tvlan_u; /* C0 VLANTransmittedOK */
-- u32 tvlan_l; /* C4 VLANTransmittedOK */
-- u32 rvlan_u; /* C8 VLANReceivedOK */
-- u32 rvlan_l; /* Cc VLANReceivedOK */
-- u32 toct_u; /* D0 if_out_octets */
-- u32 toct_l; /* D4 if_out_octets */
-- u32 roct_u; /* D8 if_in_octets */
-- u32 roct_l; /* Dc if_in_octets */
-- u32 ruca_u; /* E0 if_in_ucast_pkts */
-- u32 ruca_l; /* E4 if_in_ucast_pkts */
-- u32 rmca_u; /* E8 ifInMulticastPkts */
-- u32 rmca_l; /* Ec ifInMulticastPkts */
-- u32 rbca_u; /* F0 ifInBroadcastPkts */
-- u32 rbca_l; /* F4 ifInBroadcastPkts */
-- u32 terr_u; /* F8 if_out_errors */
-- u32 terr_l; /* Fc if_out_errors */
-- u32 reserved100[2]; /* 100-108 */
-- u32 tuca_u; /* 108 if_out_ucast_pkts */
-- u32 tuca_l; /* 10c if_out_ucast_pkts */
-- u32 tmca_u; /* 110 ifOutMulticastPkts */
-- u32 tmca_l; /* 114 ifOutMulticastPkts */
-- u32 tbca_u; /* 118 ifOutBroadcastPkts */
-- u32 tbca_l; /* 11c ifOutBroadcastPkts */
-- u32 rdrp_u; /* 120 etherStatsDropEvents */
-- u32 rdrp_l; /* 124 etherStatsDropEvents */
-- u32 reoct_u; /* 128 etherStatsOctets */
-- u32 reoct_l; /* 12c etherStatsOctets */
-- u32 rpkt_u; /* 130 etherStatsPkts */
-- u32 rpkt_l; /* 134 etherStatsPkts */
-- u32 trund_u; /* 138 etherStatsUndersizePkts */
-- u32 trund_l; /* 13c etherStatsUndersizePkts */
-- u32 r64_u; /* 140 etherStatsPkts64Octets */
-- u32 r64_l; /* 144 etherStatsPkts64Octets */
-- u32 r127_u; /* 148 etherStatsPkts65to127Octets */
-- u32 r127_l; /* 14c etherStatsPkts65to127Octets */
-- u32 r255_u; /* 150 etherStatsPkts128to255Octets */
-- u32 r255_l; /* 154 etherStatsPkts128to255Octets */
-- u32 r511_u; /* 158 etherStatsPkts256to511Octets */
-- u32 r511_l; /* 15c etherStatsPkts256to511Octets */
-- u32 r1023_u; /* 160 etherStatsPkts512to1023Octets */
-- u32 r1023_l; /* 164 etherStatsPkts512to1023Octets */
-- u32 r1518_u; /* 168 etherStatsPkts1024to1518Octets */
-- u32 r1518_l; /* 16c etherStatsPkts1024to1518Octets */
-- u32 r1519x_u; /* 170 etherStatsPkts1519toX */
-- u32 r1519x_l; /* 174 etherStatsPkts1519toX */
-- u32 trovr_u; /* 178 etherStatsOversizePkts */
-- u32 trovr_l; /* 17c etherStatsOversizePkts */
-- u32 trjbr_u; /* 180 etherStatsJabbers */
-- u32 trjbr_l; /* 184 etherStatsJabbers */
-- u32 trfrg_u; /* 188 etherStatsFragments */
-- u32 trfrg_l; /* 18C etherStatsFragments */
-- u32 rerr_u; /* 190 if_in_errors */
-- u32 rerr_l; /* 194 if_in_errors */
--};
--
--struct tgec_cfg {
-- bool pause_ignore;
-- bool promiscuous_mode_enable;
-- u16 max_frame_length;
-- u16 pause_quant;
-- u32 tx_ipg_length;
--};
--
--struct fman_mac {
-- /* Pointer to the memory mapped registers. */
-- struct tgec_regs __iomem *regs;
-- /* MAC address of device; */
-- u64 addr;
-- u16 max_speed;
-- void *dev_id; /* device cookie used by the exception cbs */
-- fman_mac_exception_cb *exception_cb;
-- fman_mac_exception_cb *event_cb;
-- /* pointer to driver's global address hash table */
-- struct eth_hash_t *multicast_addr_hash;
-- /* pointer to driver's individual address hash table */
-- struct eth_hash_t *unicast_addr_hash;
-- u8 mac_id;
-- u32 exceptions;
-- struct tgec_cfg *cfg;
-- void *fm;
-- struct fman_rev_info fm_rev_info;
--};
--
--static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
--{
-- u32 tmp0, tmp1;
--
-- tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
-- tmp1 = (u32)(adr[4] | adr[5] << 8);
-- iowrite32be(tmp0, ®s->mac_addr_0);
-- iowrite32be(tmp1, ®s->mac_addr_1);
--}
--
--static void set_dflts(struct tgec_cfg *cfg)
--{
-- cfg->promiscuous_mode_enable = false;
-- cfg->pause_ignore = false;
-- cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
-- cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
-- cfg->pause_quant = DEFAULT_PAUSE_QUANT;
--}
--
--static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
-- u32 exception_mask)
--{
-- u32 tmp;
--
-- /* Config */
-- tmp = CMF_CFG_CRC_FWD;
-- if (cfg->promiscuous_mode_enable)
-- tmp |= CMD_CFG_PROMIS_EN;
-- if (cfg->pause_ignore)
-- tmp |= CMD_CFG_PAUSE_IGNORE;
-- /* Payload length check disable */
-- tmp |= CMD_CFG_NO_LEN_CHK;
-- iowrite32be(tmp, ®s->command_config);
--
-- /* Max Frame Length */
-- iowrite32be((u32)cfg->max_frame_length, ®s->maxfrm);
-- /* Pause Time */
-- iowrite32be(cfg->pause_quant, ®s->pause_quant);
--
-- /* clear all pending events and set-up interrupts */
-- iowrite32be(0xffffffff, ®s->ievent);
-- iowrite32be(ioread32be(®s->imask) | exception_mask, ®s->imask);
--
-- return 0;
--}
--
--static int check_init_parameters(struct fman_mac *tgec)
--{
-- if (tgec->max_speed < SPEED_10000) {
-- pr_err("10G MAC driver only support 10G speed\n");
-- return -EINVAL;
-- }
-- if (tgec->addr == 0) {
-- pr_err("Ethernet 10G MAC Must have valid MAC Address\n");
-- return -EINVAL;
-- }
-- if (!tgec->exception_cb) {
-- pr_err("uninitialized exception_cb\n");
-- return -EINVAL;
-- }
-- if (!tgec->event_cb) {
-- pr_err("uninitialized event_cb\n");
-- return -EINVAL;
-- }
--
-- return 0;
--}
--
--static int get_exception_flag(enum fman_mac_exceptions exception)
--{
-- u32 bit_mask;
--
-- switch (exception) {
-- case FM_MAC_EX_10G_MDIO_SCAN_EVENT:
-- bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT;
-- break;
-- case FM_MAC_EX_10G_MDIO_CMD_CMPL:
-- bit_mask = TGEC_IMASK_MDIO_CMD_CMPL;
-- break;
-- case FM_MAC_EX_10G_REM_FAULT:
-- bit_mask = TGEC_IMASK_REM_FAULT;
-- break;
-- case FM_MAC_EX_10G_LOC_FAULT:
-- bit_mask = TGEC_IMASK_LOC_FAULT;
-- break;
-- case FM_MAC_EX_10G_TX_ECC_ER:
-- bit_mask = TGEC_IMASK_TX_ECC_ER;
-- break;
-- case FM_MAC_EX_10G_TX_FIFO_UNFL:
-- bit_mask = TGEC_IMASK_TX_FIFO_UNFL;
-- break;
-- case FM_MAC_EX_10G_TX_FIFO_OVFL:
-- bit_mask = TGEC_IMASK_TX_FIFO_OVFL;
-- break;
-- case FM_MAC_EX_10G_TX_ER:
-- bit_mask = TGEC_IMASK_TX_ER;
-- break;
-- case FM_MAC_EX_10G_RX_FIFO_OVFL:
-- bit_mask = TGEC_IMASK_RX_FIFO_OVFL;
-- break;
-- case FM_MAC_EX_10G_RX_ECC_ER:
-- bit_mask = TGEC_IMASK_RX_ECC_ER;
-- break;
-- case FM_MAC_EX_10G_RX_JAB_FRM:
-- bit_mask = TGEC_IMASK_RX_JAB_FRM;
-- break;
-- case FM_MAC_EX_10G_RX_OVRSZ_FRM:
-- bit_mask = TGEC_IMASK_RX_OVRSZ_FRM;
-- break;
-- case FM_MAC_EX_10G_RX_RUNT_FRM:
-- bit_mask = TGEC_IMASK_RX_RUNT_FRM;
-- break;
-- case FM_MAC_EX_10G_RX_FRAG_FRM:
-- bit_mask = TGEC_IMASK_RX_FRAG_FRM;
-- break;
-- case FM_MAC_EX_10G_RX_LEN_ER:
-- bit_mask = TGEC_IMASK_RX_LEN_ER;
-- break;
-- case FM_MAC_EX_10G_RX_CRC_ER:
-- bit_mask = TGEC_IMASK_RX_CRC_ER;
-- break;
-- case FM_MAC_EX_10G_RX_ALIGN_ER:
-- bit_mask = TGEC_IMASK_RX_ALIGN_ER;
-- break;
-- default:
-- bit_mask = 0;
-- break;
-- }
--
-- return bit_mask;
--}
--
--static void tgec_err_exception(void *handle)
--{
-- struct fman_mac *tgec = (struct fman_mac *)handle;
-- struct tgec_regs __iomem *regs = tgec->regs;
-- u32 event;
--
-- /* do not handle MDIO events */
-- event = ioread32be(®s->ievent) &
-- ~(TGEC_IMASK_MDIO_SCAN_EVENT |
-- TGEC_IMASK_MDIO_CMD_CMPL);
--
-- event &= ioread32be(®s->imask);
--
-- iowrite32be(event, ®s->ievent);
--
-- if (event & TGEC_IMASK_REM_FAULT)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT);
-- if (event & TGEC_IMASK_LOC_FAULT)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT);
-- if (event & TGEC_IMASK_TX_ECC_ER)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
-- if (event & TGEC_IMASK_TX_FIFO_UNFL)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL);
-- if (event & TGEC_IMASK_TX_FIFO_OVFL)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL);
-- if (event & TGEC_IMASK_TX_ER)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER);
-- if (event & TGEC_IMASK_RX_FIFO_OVFL)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL);
-- if (event & TGEC_IMASK_RX_ECC_ER)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
-- if (event & TGEC_IMASK_RX_JAB_FRM)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM);
-- if (event & TGEC_IMASK_RX_OVRSZ_FRM)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM);
-- if (event & TGEC_IMASK_RX_RUNT_FRM)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM);
-- if (event & TGEC_IMASK_RX_FRAG_FRM)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM);
-- if (event & TGEC_IMASK_RX_LEN_ER)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER);
-- if (event & TGEC_IMASK_RX_CRC_ER)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER);
-- if (event & TGEC_IMASK_RX_ALIGN_ER)
-- tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER);
--}
--
--static void free_init_resources(struct fman_mac *tgec)
--{
-- fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
-- FMAN_INTR_TYPE_ERR);
--
-- /* release the driver's group hash table */
-- free_hash_table(tgec->multicast_addr_hash);
-- tgec->multicast_addr_hash = NULL;
--
-- /* release the driver's individual hash table */
-- free_hash_table(tgec->unicast_addr_hash);
-- tgec->unicast_addr_hash = NULL;
--}
--
--static bool is_init_done(struct tgec_cfg *cfg)
--{
-- /* Checks if tGEC driver parameters were initialized */
-- if (!cfg)
-- return true;
--
-- return false;
--}
--
--int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
-- u32 tmp;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->command_config);
-- if (mode & COMM_MODE_RX)
-- tmp |= CMD_CFG_RX_EN;
-- if (mode & COMM_MODE_TX)
-- tmp |= CMD_CFG_TX_EN;
-- iowrite32be(tmp, ®s->command_config);
--
-- return 0;
--}
--
--int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
-- u32 tmp;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->command_config);
-- if (mode & COMM_MODE_RX)
-- tmp &= ~CMD_CFG_RX_EN;
-- if (mode & COMM_MODE_TX)
-- tmp &= ~CMD_CFG_TX_EN;
-- iowrite32be(tmp, ®s->command_config);
--
-- return 0;
--}
--
--int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
-- u32 tmp;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- tmp = ioread32be(®s->command_config);
-- if (new_val)
-- tmp |= CMD_CFG_PROMIS_EN;
-- else
-- tmp &= ~CMD_CFG_PROMIS_EN;
-- iowrite32be(tmp, ®s->command_config);
--
-- return 0;
--}
--
--int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
--{
-- if (is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- tgec->cfg->max_frame_length = new_val;
--
-- return 0;
--}
--
--int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
-- u16 pause_time, u16 __maybe_unused thresh_time)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- iowrite32be((u32)pause_time, &regs->pause_quant);
--
-- return 0;
--}
--
--int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
-- u32 tmp;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- tmp = ioread32be(&regs->command_config);
-- if (!en)
-- tmp |= CMD_CFG_PAUSE_IGNORE;
-- else
-- tmp &= ~CMD_CFG_PAUSE_IGNORE;
-- iowrite32be(tmp, &regs->command_config);
--
-- return 0;
--}
--
--int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
--{
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
-- set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
--
-- return 0;
--}
--
--int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
-- struct eth_hash_entry *hash_entry;
-- u32 crc = 0xFFFFFFFF, hash;
-- u64 addr;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- addr = ENET_ADDR_TO_UINT64(*eth_addr);
--
-- if (!(addr & GROUP_ADDRESS)) {
-- /* Unicast addresses not supported in hash */
-- pr_err("Unicast Address\n");
-- return -EINVAL;
-- }
-- /* CRC calculation */
-- crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
-- crc = bitrev32(crc);
-- /* Take 9 MSB bits */
-- hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
--
-- /* Create element to be added to the driver hash table */
-- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
-- if (!hash_entry)
-- return -ENOMEM;
-- hash_entry->addr = addr;
-- INIT_LIST_HEAD(&hash_entry->node);
--
-- list_add_tail(&hash_entry->node,
-- &tgec->multicast_addr_hash->lsts[hash]);
-- iowrite32be((hash | TGEC_HASH_MCAST_EN), &regs->hashtable_ctrl);
--
-- return 0;
--}
--
--int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
-- struct eth_hash_entry *hash_entry = NULL;
-- struct list_head *pos;
-- u32 crc = 0xFFFFFFFF, hash;
-- u64 addr;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- addr = ((*(u64 *)eth_addr) >> 16);
--
-- /* CRC calculation */
-- crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
-- crc = bitrev32(crc);
-- /* Take 9 MSB bits */
-- hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
--
-- list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
-- hash_entry = ETH_HASH_ENTRY_OBJ(pos);
-- if (hash_entry->addr == addr) {
-- list_del_init(&hash_entry->node);
-- kfree(hash_entry);
-- break;
-- }
-- }
-- if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
-- iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
-- &regs->hashtable_ctrl);
--
-- return 0;
--}
--
--int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- *mac_version = ioread32be(&regs->tgec_id);
--
-- return 0;
--}
--
--int tgec_set_exception(struct fman_mac *tgec,
-- enum fman_mac_exceptions exception, bool enable)
--{
-- struct tgec_regs __iomem *regs = tgec->regs;
-- u32 bit_mask = 0;
--
-- if (!is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- bit_mask = get_exception_flag(exception);
-- if (bit_mask) {
-- if (enable)
-- tgec->exceptions |= bit_mask;
-- else
-- tgec->exceptions &= ~bit_mask;
-- } else {
-- pr_err("Undefined exception\n");
-- return -EINVAL;
-- }
-- if (enable)
-- iowrite32be(ioread32be(&regs->imask) | bit_mask, &regs->imask);
-- else
-- iowrite32be(ioread32be(&regs->imask) & ~bit_mask, &regs->imask);
--
-- return 0;
--}
--
--int tgec_init(struct fman_mac *tgec)
--{
-- struct tgec_cfg *cfg;
-- enet_addr_t eth_addr;
-- int err;
--
-- if (is_init_done(tgec->cfg))
-- return -EINVAL;
--
-- if (DEFAULT_RESET_ON_INIT &&
-- (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
-- pr_err("Can't reset MAC!\n");
-- return -EINVAL;
-- }
--
-- err = check_init_parameters(tgec);
-- if (err)
-- return err;
--
-- cfg = tgec->cfg;
--
-- MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
-- set_mac_address(tgec->regs, (u8 *)eth_addr);
--
-- /* interrupts */
-- /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
-- if (tgec->fm_rev_info.major <= 2)
-- tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT |
-- TGEC_IMASK_LOC_FAULT);
--
-- err = init(tgec->regs, cfg, tgec->exceptions);
-- if (err) {
-- free_init_resources(tgec);
-- pr_err("TGEC version doesn't support this i/f mode\n");
-- return err;
-- }
--
-- /* Max Frame Length */
-- err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id,
-- cfg->max_frame_length);
-- if (err) {
-- pr_err("Setting max frame length FAILED\n");
-- free_init_resources(tgec);
-- return -EINVAL;
-- }
--
-- /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */
-- if (tgec->fm_rev_info.major == 2) {
-- struct tgec_regs __iomem *regs = tgec->regs;
-- u32 tmp;
--
-- /* restore the default tx ipg Length */
-- tmp = (ioread32be(&regs->tx_ipg_len) &
-- ~TGEC_TX_IPG_LENGTH_MASK) | 12;
--
-- iowrite32be(tmp, &regs->tx_ipg_len);
-- }
--
-- tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
-- if (!tgec->multicast_addr_hash) {
-- free_init_resources(tgec);
-- pr_err("allocation hash table is FAILED\n");
-- return -ENOMEM;
-- }
--
-- tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
-- if (!tgec->unicast_addr_hash) {
-- free_init_resources(tgec);
-- pr_err("allocation hash table is FAILED\n");
-- return -ENOMEM;
-- }
--
-- fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
-- FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec);
--
-- kfree(cfg);
-- tgec->cfg = NULL;
--
-- return 0;
--}
--
--int tgec_free(struct fman_mac *tgec)
--{
-- free_init_resources(tgec);
--
-- kfree(tgec->cfg);
-- kfree(tgec);
--
-- return 0;
--}
--
--struct fman_mac *tgec_config(struct fman_mac_params *params)
--{
-- struct fman_mac *tgec;
-- struct tgec_cfg *cfg;
-- void __iomem *base_addr;
--
-- base_addr = params->base_addr;
-- /* allocate memory for the UCC GETH data structure. */
-- tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
-- if (!tgec)
-- return NULL;
--
-- /* allocate memory for the 10G MAC driver parameters data structure. */
-- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-- if (!cfg) {
-- tgec_free(tgec);
-- return NULL;
-- }
--
-- /* Plant parameter structure pointer */
-- tgec->cfg = cfg;
--
-- set_dflts(cfg);
--
-- tgec->regs = base_addr;
-- tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
-- tgec->max_speed = params->max_speed;
-- tgec->mac_id = params->mac_id;
-- tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
-- TGEC_IMASK_REM_FAULT |
-- TGEC_IMASK_LOC_FAULT |
-- TGEC_IMASK_TX_ECC_ER |
-- TGEC_IMASK_TX_FIFO_UNFL |
-- TGEC_IMASK_TX_FIFO_OVFL |
-- TGEC_IMASK_TX_ER |
-- TGEC_IMASK_RX_FIFO_OVFL |
-- TGEC_IMASK_RX_ECC_ER |
-- TGEC_IMASK_RX_JAB_FRM |
-- TGEC_IMASK_RX_OVRSZ_FRM |
-- TGEC_IMASK_RX_RUNT_FRM |
-- TGEC_IMASK_RX_FRAG_FRM |
-- TGEC_IMASK_RX_CRC_ER |
-- TGEC_IMASK_RX_ALIGN_ER);
-- tgec->exception_cb = params->exception_cb;
-- tgec->event_cb = params->event_cb;
-- tgec->dev_id = params->dev_id;
-- tgec->fm = params->fm;
--
-- /* Save FMan revision */
-- fman_get_revision(tgec->fm, &tgec->fm_rev_info);
--
-- return tgec;
--}
-diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
-deleted file mode 100644
-index 514bba9..0000000
---- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
-+++ /dev/null
-@@ -1,55 +0,0 @@
--/*
-- * Copyright 2008-2015 Freescale Semiconductor Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#ifndef __TGEC_H
--#define __TGEC_H
--
--#include "fman_mac.h"
--
--struct fman_mac *tgec_config(struct fman_mac_params *params);
--int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
--int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
--int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
--int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
--int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
--int tgec_init(struct fman_mac *tgec);
--int tgec_free(struct fman_mac *tgec);
--int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
--int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
-- u16 pause_time, u16 thresh_time);
--int tgec_set_exception(struct fman_mac *tgec,
-- enum fman_mac_exceptions exception, bool enable);
--int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
--int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
--int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
--
--#endif /* __TGEC_H */
-diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
-deleted file mode 100644
-index 736db9d..0000000
---- a/drivers/net/ethernet/freescale/fman/mac.c
-+++ /dev/null
-@@ -1,950 +0,0 @@
--/* Copyright 2008-2015 Freescale Semiconductor, Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
--
--#include <linux/init.h>
--#include <linux/module.h>
--#include <linux/of_address.h>
--#include <linux/of_platform.h>
--#include <linux/of_net.h>
--#include <linux/of_mdio.h>
--#include <linux/device.h>
--#include <linux/phy.h>
--#include <linux/netdevice.h>
--#include <linux/phy_fixed.h>
--#include <linux/etherdevice.h>
--#include <linux/libfdt_env.h>
--
--#include "mac.h"
--#include "fman_mac.h"
--#include "fman_dtsec.h"
--#include "fman_tgec.h"
--#include "fman_memac.h"
--
--MODULE_LICENSE("Dual BSD/GPL");
--MODULE_DESCRIPTION("FSL FMan MAC API based driver");
--
--struct mac_priv_s {
-- struct device *dev;
-- void __iomem *vaddr;
-- u8 cell_index;
-- phy_interface_t phy_if;
-- struct fman *fman;
-- struct device_node *phy_node;
-- struct device_node *internal_phy_node;
-- /* List of multicast addresses */
-- struct list_head mc_addr_list;
-- struct platform_device *eth_dev;
-- struct fixed_phy_status *fixed_link;
-- u16 speed;
-- u16 max_speed;
--
-- int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
-- int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
--};
--
--struct mac_address {
-- u8 addr[ETH_ALEN];
-- struct list_head list;
--};
--
--static void mac_exception(void *handle, enum fman_mac_exceptions ex)
--{
-- struct mac_device *mac_dev;
-- struct mac_priv_s *priv;
--
-- mac_dev = handle;
-- priv = mac_dev->priv;
--
-- if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
-- /* don't flag RX FIFO after the first */
-- mac_dev->set_exception(mac_dev->fman_mac,
-- FM_MAC_EX_10G_RX_FIFO_OVFL, false);
-- dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
-- }
--
-- dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
-- __func__, ex);
--}
--
--static void set_fman_mac_params(struct mac_device *mac_dev,
-- struct fman_mac_params *params)
--{
-- struct mac_priv_s *priv = mac_dev->priv;
--
-- params->base_addr = (typeof(params->base_addr))
-- devm_ioremap(priv->dev, mac_dev->res->start,
-- resource_size(mac_dev->res));
-- memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
-- params->max_speed = priv->max_speed;
-- params->phy_if = priv->phy_if;
-- params->basex_if = false;
-- params->mac_id = priv->cell_index;
-- params->fm = (void *)priv->fman;
-- params->exception_cb = mac_exception;
-- params->event_cb = mac_exception;
-- params->dev_id = mac_dev;
-- params->internal_phy_node = priv->internal_phy_node;
--}
--
--static int tgec_initialization(struct mac_device *mac_dev)
--{
-- int err;
-- struct mac_priv_s *priv;
-- struct fman_mac_params params;
-- u32 version;
--
-- priv = mac_dev->priv;
--
-- set_fman_mac_params(mac_dev, &params);
--
-- mac_dev->fman_mac = tgec_config(&params);
-- if (!mac_dev->fman_mac) {
-- err = -EINVAL;
-- goto _return;
-- }
--
-- err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- err = tgec_init(mac_dev->fman_mac);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- /* For 10G MAC, disable Tx ECC exception */
-- err = mac_dev->set_exception(mac_dev->fman_mac,
-- FM_MAC_EX_10G_TX_ECC_ER, false);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- err = tgec_get_version(mac_dev->fman_mac, &version);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
--
-- goto _return;
--
--_return_fm_mac_free:
-- tgec_free(mac_dev->fman_mac);
--
--_return:
-- return err;
--}
--
--static int dtsec_initialization(struct mac_device *mac_dev)
--{
-- int err;
-- struct mac_priv_s *priv;
-- struct fman_mac_params params;
-- u32 version;
--
-- priv = mac_dev->priv;
--
-- set_fman_mac_params(mac_dev, &params);
--
-- mac_dev->fman_mac = dtsec_config(&params);
-- if (!mac_dev->fman_mac) {
-- err = -EINVAL;
-- goto _return;
-- }
--
-- err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- err = dtsec_init(mac_dev->fman_mac);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- /* For 1G MAC, disable by default the MIB counters overflow interrupt */
-- err = mac_dev->set_exception(mac_dev->fman_mac,
-- FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- err = dtsec_get_version(mac_dev->fman_mac, &version);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
--
-- goto _return;
--
--_return_fm_mac_free:
-- dtsec_free(mac_dev->fman_mac);
--
--_return:
-- return err;
--}
--
--static int memac_initialization(struct mac_device *mac_dev)
--{
-- int err;
-- struct mac_priv_s *priv;
-- struct fman_mac_params params;
--
-- priv = mac_dev->priv;
--
-- set_fman_mac_params(mac_dev, &params);
--
-- if (priv->max_speed == SPEED_10000)
-- params.phy_if = PHY_INTERFACE_MODE_XGMII;
--
-- mac_dev->fman_mac = memac_config(&params);
-- if (!mac_dev->fman_mac) {
-- err = -EINVAL;
-- goto _return;
-- }
--
-- err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- err = memac_init(mac_dev->fman_mac);
-- if (err < 0)
-- goto _return_fm_mac_free;
--
-- dev_info(priv->dev, "FMan MEMAC\n");
--
-- goto _return;
--
--_return_fm_mac_free:
-- memac_free(mac_dev->fman_mac);
--
--_return:
-- return err;
--}
--
--static int start(struct mac_device *mac_dev)
--{
-- int err;
-- struct phy_device *phy_dev = mac_dev->phy_dev;
-- struct mac_priv_s *priv = mac_dev->priv;
--
-- err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
-- if (!err && phy_dev)
-- phy_start(phy_dev);
--
-- return err;
--}
--
--static int stop(struct mac_device *mac_dev)
--{
-- struct mac_priv_s *priv = mac_dev->priv;
--
-- if (mac_dev->phy_dev)
-- phy_stop(mac_dev->phy_dev);
--
-- return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
--}
--
--static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
--{
-- struct mac_priv_s *priv;
-- struct mac_address *old_addr, *tmp;
-- struct netdev_hw_addr *ha;
-- int err;
-- enet_addr_t *addr;
--
-- priv = mac_dev->priv;
--
-- /* Clear previous address list */
-- list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
-- addr = (enet_addr_t *)old_addr->addr;
-- err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
-- if (err < 0)
-- return err;
--
-- list_del(&old_addr->list);
-- kfree(old_addr);
-- }
--
-- /* Add all the addresses from the new list */
-- netdev_for_each_mc_addr(ha, net_dev) {
-- addr = (enet_addr_t *)ha->addr;
-- err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
-- if (err < 0)
-- return err;
--
-- tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
-- if (!tmp)
-- return -ENOMEM;
--
-- ether_addr_copy(tmp->addr, ha->addr);
-- list_add(&tmp->list, &priv->mc_addr_list);
-- }
-- return 0;
--}
--
--/**
-- * fman_set_mac_active_pause
-- * @mac_dev: A pointer to the MAC device
-- * @rx: Pause frame setting for RX
-- * @tx: Pause frame setting for TX
-- *
-- * Set the MAC RX/TX PAUSE frames settings
-- *
-- * Avoid redundant calls to FMD, if the MAC driver already contains the desired
-- * active PAUSE settings. Otherwise, the new active settings should be reflected
-- * in FMan.
-- *
-- * Return: 0 on success; Error code otherwise.
-- */
--int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
--{
-- struct fman_mac *fman_mac = mac_dev->fman_mac;
-- int err = 0;
--
-- if (rx != mac_dev->rx_pause_active) {
-- err = mac_dev->set_rx_pause(fman_mac, rx);
-- if (likely(err == 0))
-- mac_dev->rx_pause_active = rx;
-- }
--
-- if (tx != mac_dev->tx_pause_active) {
-- u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
-- FSL_FM_PAUSE_TIME_DISABLE);
--
-- err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
--
-- if (likely(err == 0))
-- mac_dev->tx_pause_active = tx;
-- }
--
-- return err;
--}
--EXPORT_SYMBOL(fman_set_mac_active_pause);
--
--/**
-- * fman_get_pause_cfg
-- * @mac_dev: A pointer to the MAC device
-- * @rx: Return value for RX setting
-- * @tx: Return value for TX setting
-- *
-- * Determine the MAC RX/TX PAUSE frames settings based on PHY
-- * autonegotiation or values set by eththool.
-- *
-- * Return: Pointer to FMan device.
-- */
--void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
-- bool *tx_pause)
--{
-- struct phy_device *phy_dev = mac_dev->phy_dev;
-- u16 lcl_adv, rmt_adv;
-- u8 flowctrl;
--
-- *rx_pause = *tx_pause = false;
--
-- if (!phy_dev->duplex)
-- return;
--
-- /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
-- * are those set by ethtool.
-- */
-- if (!mac_dev->autoneg_pause) {
-- *rx_pause = mac_dev->rx_pause_req;
-- *tx_pause = mac_dev->tx_pause_req;
-- return;
-- }
--
-- /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
-- * settings depend on the result of the link negotiation.
-- */
--
-- /* get local capabilities */
-- lcl_adv = 0;
-- if (phy_dev->advertising & ADVERTISED_Pause)
-- lcl_adv |= ADVERTISE_PAUSE_CAP;
-- if (phy_dev->advertising & ADVERTISED_Asym_Pause)
-- lcl_adv |= ADVERTISE_PAUSE_ASYM;
--
-- /* get link partner capabilities */
-- rmt_adv = 0;
-- if (phy_dev->pause)
-- rmt_adv |= LPA_PAUSE_CAP;
-- if (phy_dev->asym_pause)
-- rmt_adv |= LPA_PAUSE_ASYM;
--
-- /* Calculate TX/RX settings based on local and peer advertised
-- * symmetric/asymmetric PAUSE capabilities.
-- */
-- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-- if (flowctrl & FLOW_CTRL_RX)
-- *rx_pause = true;
-- if (flowctrl & FLOW_CTRL_TX)
-- *tx_pause = true;
--}
--EXPORT_SYMBOL(fman_get_pause_cfg);
--
--static void adjust_link_void(struct net_device *net_dev)
--{
--}
--
--static void adjust_link_dtsec(struct net_device *net_dev)
--{
-- struct device *dev = net_dev->dev.parent;
-- struct dpaa_eth_data *eth_data = dev->platform_data;
-- struct mac_device *mac_dev = eth_data->mac_dev;
-- struct phy_device *phy_dev = mac_dev->phy_dev;
-- struct fman_mac *fman_mac;
-- bool rx_pause, tx_pause;
-- int err;
--
-- fman_mac = mac_dev->fman_mac;
-- if (!phy_dev->link) {
-- dtsec_restart_autoneg(fman_mac);
--
-- return;
-- }
--
-- dtsec_adjust_link(fman_mac, phy_dev->speed);
-- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
-- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
-- if (err < 0)
-- netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
--}
--
--static void adjust_link_memac(struct net_device *net_dev)
--{
-- struct device *dev = net_dev->dev.parent;
-- struct dpaa_eth_data *eth_data = dev->platform_data;
-- struct mac_device *mac_dev = eth_data->mac_dev;
-- struct phy_device *phy_dev = mac_dev->phy_dev;
-- struct fman_mac *fman_mac;
-- bool rx_pause, tx_pause;
-- int err;
--
-- fman_mac = mac_dev->fman_mac;
-- memac_adjust_link(fman_mac, phy_dev->speed);
--
-- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
-- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
-- if (err < 0)
-- netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
--}
--
--/* Initializes driver's PHY state, and attaches to the PHY.
-- * Returns 0 on success.
-- */
--static struct phy_device *init_phy(struct net_device *net_dev,
-- struct mac_device *mac_dev,
-- void (*adj_lnk)(struct net_device *))
--{
-- struct phy_device *phy_dev;
-- struct mac_priv_s *priv = mac_dev->priv;
--
-- phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0,
-- priv->phy_if);
-- if (!phy_dev) {
-- netdev_err(net_dev, "Could not connect to PHY\n");
-- return NULL;
-- }
--
-- /* Remove any features not supported by the controller */
-- phy_dev->supported &= mac_dev->if_support;
-- /* Enable the symmetric and asymmetric PAUSE frame advertisements,
-- * as most of the PHY drivers do not enable them by default.
-- */
-- phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-- phy_dev->advertising = phy_dev->supported;
--
-- mac_dev->phy_dev = phy_dev;
--
-- return phy_dev;
--}
--
--static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
-- struct mac_device *mac_dev)
--{
-- return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
--}
--
--static struct phy_device *tgec_init_phy(struct net_device *net_dev,
-- struct mac_device *mac_dev)
--{
-- return init_phy(net_dev, mac_dev, adjust_link_void);
--}
--
--static struct phy_device *memac_init_phy(struct net_device *net_dev,
-- struct mac_device *mac_dev)
--{
-- return init_phy(net_dev, mac_dev, &adjust_link_memac);
--}
--
--static void setup_dtsec(struct mac_device *mac_dev)
--{
-- mac_dev->init_phy = dtsec_init_phy;
-- mac_dev->init = dtsec_initialization;
-- mac_dev->set_promisc = dtsec_set_promiscuous;
-- mac_dev->change_addr = dtsec_modify_mac_address;
-- mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
-- mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
-- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
-- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
-- mac_dev->set_exception = dtsec_set_exception;
-- mac_dev->set_multi = set_multi;
-- mac_dev->start = start;
-- mac_dev->stop = stop;
--
-- mac_dev->priv->enable = dtsec_enable;
-- mac_dev->priv->disable = dtsec_disable;
--}
--
--static void setup_tgec(struct mac_device *mac_dev)
--{
-- mac_dev->init_phy = tgec_init_phy;
-- mac_dev->init = tgec_initialization;
-- mac_dev->set_promisc = tgec_set_promiscuous;
-- mac_dev->change_addr = tgec_modify_mac_address;
-- mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
-- mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
-- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
-- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
-- mac_dev->set_exception = tgec_set_exception;
-- mac_dev->set_multi = set_multi;
-- mac_dev->start = start;
-- mac_dev->stop = stop;
--
-- mac_dev->priv->enable = tgec_enable;
-- mac_dev->priv->disable = tgec_disable;
--}
--
--static void setup_memac(struct mac_device *mac_dev)
--{
-- mac_dev->init_phy = memac_init_phy;
-- mac_dev->init = memac_initialization;
-- mac_dev->set_promisc = memac_set_promiscuous;
-- mac_dev->change_addr = memac_modify_mac_address;
-- mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
-- mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
-- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
-- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
-- mac_dev->set_exception = memac_set_exception;
-- mac_dev->set_multi = set_multi;
-- mac_dev->start = start;
-- mac_dev->stop = stop;
--
-- mac_dev->priv->enable = memac_enable;
-- mac_dev->priv->disable = memac_disable;
--}
--
--#define DTSEC_SUPPORTED \
-- (SUPPORTED_10baseT_Half \
-- | SUPPORTED_10baseT_Full \
-- | SUPPORTED_100baseT_Half \
-- | SUPPORTED_100baseT_Full \
-- | SUPPORTED_Autoneg \
-- | SUPPORTED_Pause \
-- | SUPPORTED_Asym_Pause \
-- | SUPPORTED_MII)
--
--static DEFINE_MUTEX(eth_lock);
--
--static const u16 phy2speed[] = {
-- [PHY_INTERFACE_MODE_MII] = SPEED_100,
-- [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
-- [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
-- [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
-- [PHY_INTERFACE_MODE_RMII] = SPEED_100,
-- [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
-- [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
-- [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
-- [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
-- [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
-- [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
--};
--
--static struct platform_device *dpaa_eth_add_device(int fman_id,
-- struct mac_device *mac_dev,
-- struct device_node *node)
--{
-- struct platform_device *pdev;
-- struct dpaa_eth_data data;
-- struct mac_priv_s *priv;
-- static int dpaa_eth_dev_cnt;
-- int ret;
--
-- priv = mac_dev->priv;
--
-- data.mac_dev = mac_dev;
-- data.mac_hw_id = priv->cell_index;
-- data.fman_hw_id = fman_id;
-- data.mac_node = node;
--
-- mutex_lock(&eth_lock);
--
-- pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
-- if (!pdev) {
-- ret = -ENOMEM;
-- goto no_mem;
-- }
--
-- ret = platform_device_add_data(pdev, &data, sizeof(data));
-- if (ret)
-- goto err;
--
-- ret = platform_device_add(pdev);
-- if (ret)
-- goto err;
--
-- dpaa_eth_dev_cnt++;
-- mutex_unlock(&eth_lock);
--
-- return pdev;
--
--err:
-- platform_device_put(pdev);
--no_mem:
-- mutex_unlock(&eth_lock);
--
-- return ERR_PTR(ret);
--}
--
--static const struct of_device_id mac_match[] = {
-- { .compatible = "fsl,fman-dtsec" },
-- { .compatible = "fsl,fman-xgec" },
-- { .compatible = "fsl,fman-memac" },
-- {}
--};
--MODULE_DEVICE_TABLE(of, mac_match);
--
--static int mac_probe(struct platform_device *_of_dev)
--{
-- int err, i, nph;
-- struct device *dev;
-- struct device_node *mac_node, *dev_node;
-- struct mac_device *mac_dev;
-- struct platform_device *of_dev;
-- struct resource res;
-- struct mac_priv_s *priv;
-- const u8 *mac_addr;
-- u32 val;
-- u8 fman_id;
-- int phy_if;
--
-- dev = &_of_dev->dev;
-- mac_node = dev->of_node;
--
-- mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
-- if (!mac_dev) {
-- err = -ENOMEM;
-- dev_err(dev, "devm_kzalloc() = %d\n", err);
-- goto _return;
-- }
-- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-- if (!priv) {
-- err = -ENOMEM;
-- goto _return;
-- }
--
-- /* Save private information */
-- mac_dev->priv = priv;
-- priv->dev = dev;
--
-- if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
-- setup_dtsec(mac_dev);
-- priv->internal_phy_node = of_parse_phandle(mac_node,
-- "tbi-handle", 0);
-- } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
-- setup_tgec(mac_dev);
-- } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
-- setup_memac(mac_dev);
-- priv->internal_phy_node = of_parse_phandle(mac_node,
-- "pcsphy-handle", 0);
-- } else {
-- dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
-- mac_node->full_name);
-- err = -EINVAL;
-- goto _return;
-- }
--
-- /* Register mac_dev */
-- dev_set_drvdata(dev, mac_dev);
--
-- INIT_LIST_HEAD(&priv->mc_addr_list);
--
-- /* Get the FM node */
-- dev_node = of_get_parent(mac_node);
-- if (!dev_node) {
-- dev_err(dev, "of_get_parent(%s) failed\n",
-- mac_node->full_name);
-- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-- }
--
-- of_dev = of_find_device_by_node(dev_node);
-- if (!of_dev) {
-- dev_err(dev, "of_find_device_by_node(%s) failed\n",
-- dev_node->full_name);
-- err = -EINVAL;
-- goto _return_of_node_put;
-- }
--
-- /* Get the FMan cell-index */
-- err = of_property_read_u32(dev_node, "cell-index", &val);
-- if (err) {
-- dev_err(dev, "failed to read cell-index for %s\n",
-- dev_node->full_name);
-- err = -EINVAL;
-- goto _return_of_node_put;
-- }
-- /* cell-index 0 => FMan id 1 */
-- fman_id = (u8)(val + 1);
--
-- priv->fman = fman_bind(&of_dev->dev);
-- if (!priv->fman) {
-- dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name);
-- err = -ENODEV;
-- goto _return_of_node_put;
-- }
--
-- of_node_put(dev_node);
--
-- /* Get the address of the memory mapped registers */
-- err = of_address_to_resource(mac_node, 0, &res);
-- if (err < 0) {
-- dev_err(dev, "of_address_to_resource(%s) = %d\n",
-- mac_node->full_name, err);
-- goto _return_dev_set_drvdata;
-- }
--
-- mac_dev->res = __devm_request_region(dev,
-- fman_get_mem_region(priv->fman),
-- res.start, res.end + 1 - res.start,
-- "mac");
-- if (!mac_dev->res) {
-- dev_err(dev, "__devm_request_mem_region(mac) failed\n");
-- err = -EBUSY;
-- goto _return_dev_set_drvdata;
-- }
--
-- priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
-- mac_dev->res->end + 1 - mac_dev->res->start);
-- if (!priv->vaddr) {
-- dev_err(dev, "devm_ioremap() failed\n");
-- err = -EIO;
-- goto _return_dev_set_drvdata;
-- }
--
-- if (!of_device_is_available(mac_node)) {
-- devm_iounmap(dev, priv->vaddr);
-- __devm_release_region(dev, fman_get_mem_region(priv->fman),
-- res.start, res.end + 1 - res.start);
-- devm_kfree(dev, mac_dev);
-- dev_set_drvdata(dev, NULL);
-- return -ENODEV;
-- }
--
-- /* Get the cell-index */
-- err = of_property_read_u32(mac_node, "cell-index", &val);
-- if (err) {
-- dev_err(dev, "failed to read cell-index for %s\n",
-- mac_node->full_name);
-- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-- }
-- priv->cell_index = (u8)val;
--
-- /* Get the MAC address */
-- mac_addr = of_get_mac_address(mac_node);
-- if (!mac_addr) {
-- dev_err(dev, "of_get_mac_address(%s) failed\n",
-- mac_node->full_name);
-- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-- }
-- memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
--
-- /* Get the port handles */
-- nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
-- if (unlikely(nph < 0)) {
-- dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n",
-- mac_node->full_name);
-- err = nph;
-- goto _return_dev_set_drvdata;
-- }
--
-- if (nph != ARRAY_SIZE(mac_dev->port)) {
-- dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n",
-- mac_node->full_name);
-- err = -EINVAL;
-- goto _return_dev_set_drvdata;
-- }
--
-- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
-- /* Find the port node */
-- dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
-- if (!dev_node) {
-- dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
-- mac_node->full_name);
-- err = -EINVAL;
-- goto _return_of_node_put;
-- }
--
-- of_dev = of_find_device_by_node(dev_node);
-- if (!of_dev) {
-- dev_err(dev, "of_find_device_by_node(%s) failed\n",
-- dev_node->full_name);
-- err = -EINVAL;
-- goto _return_of_node_put;
-- }
--
-- mac_dev->port[i] = fman_port_bind(&of_dev->dev);
-- if (!mac_dev->port[i]) {
-- dev_err(dev, "dev_get_drvdata(%s) failed\n",
-- dev_node->full_name);
-- err = -EINVAL;
-- goto _return_of_node_put;
-- }
-- of_node_put(dev_node);
-- }
--
-- /* Get the PHY connection type */
-- phy_if = of_get_phy_mode(mac_node);
-- if (phy_if < 0) {
-- dev_warn(dev,
-- "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
-- mac_node->full_name);
-- phy_if = PHY_INTERFACE_MODE_SGMII;
-- }
-- priv->phy_if = phy_if;
--
-- priv->speed = phy2speed[priv->phy_if];
-- priv->max_speed = priv->speed;
-- mac_dev->if_support = DTSEC_SUPPORTED;
-- /* We don't support half-duplex in SGMII mode */
-- if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
-- mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
-- SUPPORTED_100baseT_Half);
--
-- /* Gigabit support (no half-duplex) */
-- if (priv->max_speed == 1000)
-- mac_dev->if_support |= SUPPORTED_1000baseT_Full;
--
-- /* The 10G interface only supports one mode */
-- if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
-- mac_dev->if_support = SUPPORTED_10000baseT_Full;
--
-- /* Get the rest of the PHY information */
-- priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
-- if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) {
-- struct phy_device *phy;
--
-- err = of_phy_register_fixed_link(mac_node);
-- if (err)
-- goto _return_dev_set_drvdata;
--
-- priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
-- GFP_KERNEL);
-- if (!priv->fixed_link)
-- goto _return_dev_set_drvdata;
--
-- priv->phy_node = of_node_get(mac_node);
-- phy = of_phy_find_device(priv->phy_node);
-- if (!phy)
-- goto _return_dev_set_drvdata;
--
-- priv->fixed_link->link = phy->link;
-- priv->fixed_link->speed = phy->speed;
-- priv->fixed_link->duplex = phy->duplex;
-- priv->fixed_link->pause = phy->pause;
-- priv->fixed_link->asym_pause = phy->asym_pause;
--
-- put_device(&phy->mdio.dev);
-- }
--
-- err = mac_dev->init(mac_dev);
-- if (err < 0) {
-- dev_err(dev, "mac_dev->init() = %d\n", err);
-- of_node_put(priv->phy_node);
-- goto _return_dev_set_drvdata;
-- }
--
-- /* pause frame autonegotiation enabled */
-- mac_dev->autoneg_pause = true;
--
-- /* By intializing the values to false, force FMD to enable PAUSE frames
-- * on RX and TX
-- */
-- mac_dev->rx_pause_req = true;
-- mac_dev->tx_pause_req = true;
-- mac_dev->rx_pause_active = false;
-- mac_dev->tx_pause_active = false;
-- err = fman_set_mac_active_pause(mac_dev, true, true);
-- if (err < 0)
-- dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
--
-- dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
-- mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
-- mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
--
-- priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node);
-- if (IS_ERR(priv->eth_dev)) {
-- dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
-- priv->cell_index);
-- priv->eth_dev = NULL;
-- }
--
-- goto _return;
--
--_return_of_node_put:
-- of_node_put(dev_node);
--_return_dev_set_drvdata:
-- kfree(priv->fixed_link);
-- dev_set_drvdata(dev, NULL);
--_return:
-- return err;
--}
--
--static struct platform_driver mac_driver = {
-- .driver = {
-- .name = KBUILD_MODNAME,
-- .of_match_table = mac_match,
-- },
-- .probe = mac_probe,
--};
--
--builtin_platform_driver(mac_driver);
-diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
-deleted file mode 100644
-index d7313f0..0000000
---- a/drivers/net/ethernet/freescale/fman/mac.h
-+++ /dev/null
-@@ -1,98 +0,0 @@
--/* Copyright 2008-2015 Freescale Semiconductor, Inc.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--#ifndef __MAC_H
--#define __MAC_H
--
--#include <linux/device.h>
--#include <linux/if_ether.h>
--#include <linux/phy.h>
--#include <linux/list.h>
--
--#include "fman_port.h"
--#include "fman.h"
--#include "fman_mac.h"
--
--struct fman_mac;
--struct mac_priv_s;
--
--struct mac_device {
-- struct resource *res;
-- u8 addr[ETH_ALEN];
-- struct fman_port *port[2];
-- u32 if_support;
-- struct phy_device *phy_dev;
--
-- bool autoneg_pause;
-- bool rx_pause_req;
-- bool tx_pause_req;
-- bool rx_pause_active;
-- bool tx_pause_active;
-- bool promisc;
--
-- struct phy_device *(*init_phy)(struct net_device *net_dev,
-- struct mac_device *mac_dev);
-- int (*init)(struct mac_device *mac_dev);
-- int (*start)(struct mac_device *mac_dev);
-- int (*stop)(struct mac_device *mac_dev);
-- int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
-- int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
-- int (*set_multi)(struct net_device *net_dev,
-- struct mac_device *mac_dev);
-- int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
-- int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
-- u16 pause_time, u16 thresh_time);
-- int (*set_exception)(struct fman_mac *mac_dev,
-- enum fman_mac_exceptions exception, bool enable);
-- int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
-- enet_addr_t *eth_addr);
-- int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
-- enet_addr_t *eth_addr);
--
-- struct fman_mac *fman_mac;
-- struct mac_priv_s *priv;
--};
--
--struct dpaa_eth_data {
-- struct device_node *mac_node;
-- struct mac_device *mac_dev;
-- int mac_hw_id;
-- int fman_hw_id;
--};
--
--extern const char *mac_driver_description;
--
--int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
--
--void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
-- bool *tx_pause);
--
--#endif /* __MAC_H */
-diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
-index 4b86260..9b3639e 100644
---- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
-+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
-@@ -60,9 +60,6 @@ module_param(fs_enet_debug, int, 0);
- MODULE_PARM_DESC(fs_enet_debug,
- "Freescale bitmapped debugging message enable value");
-
--#define RX_RING_SIZE 32
--#define TX_RING_SIZE 64
--
- #ifdef CONFIG_NET_POLL_CONTROLLER
- static void fs_enet_netpoll(struct net_device *dev);
- #endif
-@@ -82,113 +79,20 @@ static void skb_align(struct sk_buff *skb, int align)
- skb_reserve(skb, align - off);
- }
-
--/* NAPI function */
--static int fs_enet_napi(struct napi_struct *napi, int budget)
-+/* NAPI receive function */
-+static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
- {
- struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
- struct net_device *dev = fep->ndev;
- const struct fs_platform_info *fpi = fep->fpi;
- cbd_t __iomem *bdp;
-- struct sk_buff *skb, *skbn;
-+ struct sk_buff *skb, *skbn, *skbt;
- int received = 0;
- u16 pkt_len, sc;
- int curidx;
-- int dirtyidx, do_wake, do_restart;
-- int tx_left = TX_RING_SIZE;
--
-- spin_lock(&fep->tx_lock);
-- bdp = fep->dirty_tx;
--
-- /* clear status bits for napi*/
-- (*fep->ops->napi_clear_event)(dev);
--
-- do_wake = do_restart = 0;
-- while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
-- dirtyidx = bdp - fep->tx_bd_base;
--
-- if (fep->tx_free == fep->tx_ring)
-- break;
--
-- skb = fep->tx_skbuff[dirtyidx];
--
-- /*
-- * Check for errors.
-- */
-- if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
-- BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
--
-- if (sc & BD_ENET_TX_HB) /* No heartbeat */
-- fep->stats.tx_heartbeat_errors++;
-- if (sc & BD_ENET_TX_LC) /* Late collision */
-- fep->stats.tx_window_errors++;
-- if (sc & BD_ENET_TX_RL) /* Retrans limit */
-- fep->stats.tx_aborted_errors++;
-- if (sc & BD_ENET_TX_UN) /* Underrun */
-- fep->stats.tx_fifo_errors++;
-- if (sc & BD_ENET_TX_CSL) /* Carrier lost */
-- fep->stats.tx_carrier_errors++;
--
-- if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
-- fep->stats.tx_errors++;
-- do_restart = 1;
-- }
-- } else
-- fep->stats.tx_packets++;
--
-- if (sc & BD_ENET_TX_READY) {
-- dev_warn(fep->dev,
-- "HEY! Enet xmit interrupt and TX_READY.\n");
-- }
--
-- /*
-- * Deferred means some collisions occurred during transmit,
-- * but we eventually sent the packet OK.
-- */
-- if (sc & BD_ENET_TX_DEF)
-- fep->stats.collisions++;
--
-- /* unmap */
-- if (fep->mapped_as_page[dirtyidx])
-- dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
-- CBDR_DATLEN(bdp), DMA_TO_DEVICE);
-- else
-- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-- CBDR_DATLEN(bdp), DMA_TO_DEVICE);
-
-- /*
-- * Free the sk buffer associated with this last transmit.
-- */
-- if (skb) {
-- dev_kfree_skb(skb);
-- fep->tx_skbuff[dirtyidx] = NULL;
-- }
--
-- /*
-- * Update pointer to next buffer descriptor to be transmitted.
-- */
-- if ((sc & BD_ENET_TX_WRAP) == 0)
-- bdp++;
-- else
-- bdp = fep->tx_bd_base;
--
-- /*
-- * Since we have freed up a buffer, the ring is no longer
-- * full.
-- */
-- if (++fep->tx_free == MAX_SKB_FRAGS)
-- do_wake = 1;
-- tx_left--;
-- }
--
-- fep->dirty_tx = bdp;
--
-- if (do_restart)
-- (*fep->ops->tx_restart)(dev);
--
-- spin_unlock(&fep->tx_lock);
--
-- if (do_wake)
-- netif_wake_queue(dev);
-+ if (budget <= 0)
-+ return received;
-
- /*
- * First, grab all of the stats for the incoming packet.
-@@ -196,8 +100,10 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
- */
- bdp = fep->cur_rx;
-
-- while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
-- received < budget) {
-+ /* clear RX status bits for napi*/
-+ (*fep->ops->napi_clear_rx_event)(dev);
-+
-+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
- curidx = bdp - fep->rx_bd_base;
-
- /*
-@@ -226,10 +132,21 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
- if (sc & BD_ENET_RX_OV)
- fep->stats.rx_crc_errors++;
-
-- skbn = fep->rx_skbuff[curidx];
-+ skb = fep->rx_skbuff[curidx];
-+
-+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-+ DMA_FROM_DEVICE);
-+
-+ skbn = skb;
-+
- } else {
- skb = fep->rx_skbuff[curidx];
-
-+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-+ DMA_FROM_DEVICE);
-+
- /*
- * Process the incoming frame.
- */
-@@ -244,31 +161,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
- skb_reserve(skbn, 2); /* align IP header */
- skb_copy_from_linear_data(skb,
- skbn->data, pkt_len);
-- swap(skb, skbn);
-- dma_sync_single_for_cpu(fep->dev,
-- CBDR_BUFADDR(bdp),
-- L1_CACHE_ALIGN(pkt_len),
-- DMA_FROM_DEVICE);
-+ /* swap */
-+ skbt = skb;
-+ skb = skbn;
-+ skbn = skbt;
- }
- } else {
- skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
-
-- if (skbn) {
-- dma_addr_t dma;
--
-+ if (skbn)
- skb_align(skbn, ENET_RX_ALIGN);
--
-- dma_unmap_single(fep->dev,
-- CBDR_BUFADDR(bdp),
-- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-- DMA_FROM_DEVICE);
--
-- dma = dma_map_single(fep->dev,
-- skbn->data,
-- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-- DMA_FROM_DEVICE);
-- CBDW_BUFADDR(bdp, dma);
-- }
- }
-
- if (skbn != NULL) {
-@@ -283,6 +185,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
- }
-
- fep->rx_skbuff[curidx] = skbn;
-+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
-+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
-+ DMA_FROM_DEVICE));
- CBDW_DATLEN(bdp, 0);
- CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
-
-@@ -295,19 +200,134 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
- bdp = fep->rx_bd_base;
-
- (*fep->ops->rx_bd_done)(dev);
-+
-+ if (received >= budget)
-+ break;
- }
-
- fep->cur_rx = bdp;
-
-- if (received < budget && tx_left) {
-+ if (received < budget) {
- /* done */
- napi_complete(napi);
-- (*fep->ops->napi_enable)(dev);
-+ (*fep->ops->napi_enable_rx)(dev);
-+ }
-+ return received;
-+}
-
-- return received;
-+static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
-+{
-+ struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
-+ napi_tx);
-+ struct net_device *dev = fep->ndev;
-+ cbd_t __iomem *bdp;
-+ struct sk_buff *skb;
-+ int dirtyidx, do_wake, do_restart;
-+ u16 sc;
-+ int has_tx_work = 0;
-+
-+ spin_lock(&fep->tx_lock);
-+ bdp = fep->dirty_tx;
-+
-+ /* clear TX status bits for napi*/
-+ (*fep->ops->napi_clear_tx_event)(dev);
-+
-+ do_wake = do_restart = 0;
-+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
-+ dirtyidx = bdp - fep->tx_bd_base;
-+
-+ if (fep->tx_free == fep->tx_ring)
-+ break;
-+
-+ skb = fep->tx_skbuff[dirtyidx];
-+
-+ /*
-+ * Check for errors.
-+ */
-+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
-+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
-+
-+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
-+ fep->stats.tx_heartbeat_errors++;
-+ if (sc & BD_ENET_TX_LC) /* Late collision */
-+ fep->stats.tx_window_errors++;
-+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
-+ fep->stats.tx_aborted_errors++;
-+ if (sc & BD_ENET_TX_UN) /* Underrun */
-+ fep->stats.tx_fifo_errors++;
-+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
-+ fep->stats.tx_carrier_errors++;
-+
-+ if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
-+ fep->stats.tx_errors++;
-+ do_restart = 1;
-+ }
-+ } else
-+ fep->stats.tx_packets++;
-+
-+ if (sc & BD_ENET_TX_READY) {
-+ dev_warn(fep->dev,
-+ "HEY! Enet xmit interrupt and TX_READY.\n");
-+ }
-+
-+ /*
-+ * Deferred means some collisions occurred during transmit,
-+ * but we eventually sent the packet OK.
-+ */
-+ if (sc & BD_ENET_TX_DEF)
-+ fep->stats.collisions++;
-+
-+ /* unmap */
-+ if (fep->mapped_as_page[dirtyidx])
-+ dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
-+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
-+ else
-+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
-+
-+ /*
-+ * Free the sk buffer associated with this last transmit.
-+ */
-+ if (skb) {
-+ dev_kfree_skb(skb);
-+ fep->tx_skbuff[dirtyidx] = NULL;
-+ }
-+
-+ /*
-+ * Update pointer to next buffer descriptor to be transmitted.
-+ */
-+ if ((sc & BD_ENET_TX_WRAP) == 0)
-+ bdp++;
-+ else
-+ bdp = fep->tx_bd_base;
-+
-+ /*
-+ * Since we have freed up a buffer, the ring is no longer
-+ * full.
-+ */
-+ if (++fep->tx_free >= MAX_SKB_FRAGS)
-+ do_wake = 1;
-+ has_tx_work = 1;
-+ }
-+
-+ fep->dirty_tx = bdp;
-+
-+ if (do_restart)
-+ (*fep->ops->tx_restart)(dev);
-+
-+ if (!has_tx_work) {
-+ napi_complete(napi);
-+ (*fep->ops->napi_enable_tx)(dev);
- }
-
-- return budget;
-+ spin_unlock(&fep->tx_lock);
-+
-+ if (do_wake)
-+ netif_wake_queue(dev);
-+
-+ if (has_tx_work)
-+ return budget;
-+ return 0;
- }
-
- /*
-@@ -333,18 +353,18 @@ fs_enet_interrupt(int irq, void *dev_id)
- nr++;
-
- int_clr_events = int_events;
-- int_clr_events &= ~fep->ev_napi;
-+ int_clr_events &= ~fep->ev_napi_rx;
-
- (*fep->ops->clear_int_events)(dev, int_clr_events);
-
- if (int_events & fep->ev_err)
- (*fep->ops->ev_error)(dev, int_events);
-
-- if (int_events & fep->ev) {
-+ if (int_events & fep->ev_rx) {
- napi_ok = napi_schedule_prep(&fep->napi);
-
-- (*fep->ops->napi_disable)(dev);
-- (*fep->ops->clear_int_events)(dev, fep->ev_napi);
-+ (*fep->ops->napi_disable_rx)(dev);
-+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
-
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
-@@ -352,6 +372,17 @@ fs_enet_interrupt(int irq, void *dev_id)
- __napi_schedule(&fep->napi);
- }
-
-+ if (int_events & fep->ev_tx) {
-+ napi_ok = napi_schedule_prep(&fep->napi_tx);
-+
-+ (*fep->ops->napi_disable_tx)(dev);
-+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
-+
-+ /* NOTE: it is possible for FCCs in NAPI mode */
-+ /* to submit a spurious interrupt while in poll */
-+ if (napi_ok)
-+ __napi_schedule(&fep->napi_tx);
-+ }
- }
-
- handled = nr > 0;
-@@ -459,9 +490,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
- {
- struct sk_buff *new_skb;
-
-- if (skb_linearize(skb))
-- return NULL;
--
- /* Alloc new skb */
- new_skb = netdev_alloc_skb(dev, skb->len + 4);
- if (!new_skb)
-@@ -487,27 +515,12 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
- cbd_t __iomem *bdp;
- int curidx;
- u16 sc;
-- int nr_frags;
-+ int nr_frags = skb_shinfo(skb)->nr_frags;
- skb_frag_t *frag;
- int len;
--#ifdef CONFIG_FS_ENET_MPC5121_FEC
-- int is_aligned = 1;
-- int i;
--
-- if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
-- is_aligned = 0;
-- } else {
-- nr_frags = skb_shinfo(skb)->nr_frags;
-- frag = skb_shinfo(skb)->frags;
-- for (i = 0; i < nr_frags; i++, frag++) {
-- if (!IS_ALIGNED(frag->page_offset, 4)) {
-- is_aligned = 0;
-- break;
-- }
-- }
-- }
-
-- if (!is_aligned) {
-+#ifdef CONFIG_FS_ENET_MPC5121_FEC
-+ if (((unsigned long)skb->data) & 0x3) {
- skb = tx_skb_align_workaround(dev, skb);
- if (!skb) {
- /*
-@@ -519,7 +532,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
- }
- }
- #endif
--
- spin_lock(&fep->tx_lock);
-
- /*
-@@ -527,7 +539,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
- */
- bdp = fep->cur_tx;
-
-- nr_frags = skb_shinfo(skb)->nr_frags;
- if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
- netif_stop_queue(dev);
- spin_unlock(&fep->tx_lock);
-@@ -558,8 +569,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
- frag = skb_shinfo(skb)->frags;
- while (nr_frags) {
- CBDC_SC(bdp,
-- BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
-- BD_ENET_TX_TC);
-+ BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
- CBDS_SC(bdp, BD_ENET_TX_READY);
-
- if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
-@@ -624,15 +634,14 @@ static void fs_timeout(struct net_device *dev)
- spin_lock_irqsave(&fep->lock, flags);
-
- if (dev->flags & IFF_UP) {
-- phy_stop(dev->phydev);
-+ phy_stop(fep->phydev);
- (*fep->ops->stop)(dev);
- (*fep->ops->restart)(dev);
-- phy_start(dev->phydev);
-+ phy_start(fep->phydev);
- }
-
-- phy_start(dev->phydev);
-- wake = fep->tx_free >= MAX_SKB_FRAGS &&
-- !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
-+ phy_start(fep->phydev);
-+ wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
- spin_unlock_irqrestore(&fep->lock, flags);
-
- if (wake)
-@@ -645,7 +654,7 @@ static void fs_timeout(struct net_device *dev)
- static void generic_adjust_link(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
-- struct phy_device *phydev = dev->phydev;
-+ struct phy_device *phydev = fep->phydev;
- int new_state = 0;
-
- if (phydev->link) {
-@@ -714,6 +723,8 @@ static int fs_init_phy(struct net_device *dev)
- return -ENODEV;
- }
-
-+ fep->phydev = phydev;
-+
- return 0;
- }
-
-@@ -724,10 +735,11 @@ static int fs_enet_open(struct net_device *dev)
- int err;
-
- /* to initialize the fep->cur_rx,... */
-- /* not doing this, will cause a crash in fs_enet_napi */
-+ /* not doing this, will cause a crash in fs_enet_rx_napi */
- fs_init_bds(fep->ndev);
-
- napi_enable(&fep->napi);
-+ napi_enable(&fep->napi_tx);
-
- /* Install our interrupt handler. */
- r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
-@@ -735,6 +747,7 @@ static int fs_enet_open(struct net_device *dev)
- if (r != 0) {
- dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
- napi_disable(&fep->napi);
-+ napi_disable(&fep->napi_tx);
- return -EINVAL;
- }
-
-@@ -742,9 +755,10 @@ static int fs_enet_open(struct net_device *dev)
- if (err) {
- free_irq(fep->interrupt, dev);
- napi_disable(&fep->napi);
-+ napi_disable(&fep->napi_tx);
- return err;
- }
-- phy_start(dev->phydev);
-+ phy_start(fep->phydev);
-
- netif_start_queue(dev);
-
-@@ -759,7 +773,8 @@ static int fs_enet_close(struct net_device *dev)
- netif_stop_queue(dev);
- netif_carrier_off(dev);
- napi_disable(&fep->napi);
-- phy_stop(dev->phydev);
-+ napi_disable(&fep->napi_tx);
-+ phy_stop(fep->phydev);
-
- spin_lock_irqsave(&fep->lock, flags);
- spin_lock(&fep->tx_lock);
-@@ -768,7 +783,8 @@ static int fs_enet_close(struct net_device *dev)
- spin_unlock_irqrestore(&fep->lock, flags);
-
- /* release any irqs */
-- phy_disconnect(dev->phydev);
-+ phy_disconnect(fep->phydev);
-+ fep->phydev = NULL;
- free_irq(fep->interrupt, dev);
-
- return 0;
-@@ -813,82 +829,64 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- regs->version = 0;
- }
-
--static int fs_nway_reset(struct net_device *dev)
-+static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
- {
-- return 0;
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+
-+ if (!fep->phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_gset(fep->phydev, cmd);
- }
-
--static u32 fs_get_msglevel(struct net_device *dev)
-+static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
-- return fep->msg_enable;
-+
-+ if (!fep->phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_sset(fep->phydev, cmd);
- }
-
--static void fs_set_msglevel(struct net_device *dev, u32 value)
-+static int fs_nway_reset(struct net_device *dev)
- {
-- struct fs_enet_private *fep = netdev_priv(dev);
-- fep->msg_enable = value;
-+ return 0;
- }
-
--static int fs_get_tunable(struct net_device *dev,
-- const struct ethtool_tunable *tuna, void *data)
-+static u32 fs_get_msglevel(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
-- struct fs_platform_info *fpi = fep->fpi;
-- int ret = 0;
--
-- switch (tuna->id) {
-- case ETHTOOL_RX_COPYBREAK:
-- *(u32 *)data = fpi->rx_copybreak;
-- break;
-- default:
-- ret = -EINVAL;
-- break;
-- }
--
-- return ret;
-+ return fep->msg_enable;
- }
-
--static int fs_set_tunable(struct net_device *dev,
-- const struct ethtool_tunable *tuna, const void *data)
-+static void fs_set_msglevel(struct net_device *dev, u32 value)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
-- struct fs_platform_info *fpi = fep->fpi;
-- int ret = 0;
--
-- switch (tuna->id) {
-- case ETHTOOL_RX_COPYBREAK:
-- fpi->rx_copybreak = *(u32 *)data;
-- break;
-- default:
-- ret = -EINVAL;
-- break;
-- }
--
-- return ret;
-+ fep->msg_enable = value;
- }
-
- static const struct ethtool_ops fs_ethtool_ops = {
- .get_drvinfo = fs_get_drvinfo,
- .get_regs_len = fs_get_regs_len,
-+ .get_settings = fs_get_settings,
-+ .set_settings = fs_set_settings,
- .nway_reset = fs_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_msglevel = fs_get_msglevel,
- .set_msglevel = fs_set_msglevel,
- .get_regs = fs_get_regs,
- .get_ts_info = ethtool_op_get_ts_info,
-- .get_link_ksettings = phy_ethtool_get_link_ksettings,
-- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-- .get_tunable = fs_get_tunable,
-- .set_tunable = fs_set_tunable,
- };
-
- static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- {
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+
- if (!netif_running(dev))
- return -EINVAL;
-
-- return phy_mii_ioctl(dev->phydev, rq, cmd);
-+ return phy_mii_ioctl(fep->phydev, rq, cmd);
- }
-
- extern int fs_mii_connect(struct net_device *dev);
-@@ -948,8 +946,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
- fpi->cp_command = *data;
- }
-
-- fpi->rx_ring = RX_RING_SIZE;
-- fpi->tx_ring = TX_RING_SIZE;
-+ fpi->rx_ring = 32;
-+ fpi->tx_ring = 64;
- fpi->rx_copybreak = 240;
- fpi->napi_weight = 17;
- fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
-@@ -980,7 +978,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
- err = clk_prepare_enable(clk);
- if (err) {
- ret = err;
-- goto out_deregister_fixed_link;
-+ goto out_free_fpi;
- }
- fpi->clk_per = clk;
- }
-@@ -1033,7 +1031,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
-
- ndev->netdev_ops = &fs_enet_netdev_ops;
- ndev->watchdog_timeo = 2 * HZ;
-- netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
-+ netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
-+ netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
-
- ndev->ethtool_ops = &fs_ethtool_ops;
-
-@@ -1061,9 +1060,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
- of_node_put(fpi->phy_node);
- if (fpi->clk_per)
- clk_disable_unprepare(fpi->clk_per);
--out_deregister_fixed_link:
-- if (of_phy_is_fixed_link(ofdev->dev.of_node))
-- of_phy_deregister_fixed_link(ofdev->dev.of_node);
- out_free_fpi:
- kfree(fpi);
- return ret;
-@@ -1082,8 +1078,6 @@ static int fs_enet_remove(struct platform_device *ofdev)
- of_node_put(fep->fpi->phy_node);
- if (fep->fpi->clk_per)
- clk_disable_unprepare(fep->fpi->clk_per);
-- if (of_phy_is_fixed_link(ofdev->dev.of_node))
-- of_phy_deregister_fixed_link(ofdev->dev.of_node);
- free_netdev(ndev);
- return 0;
- }
-diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
-index fee24c8..f184d8f 100644
---- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
-+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
-@@ -81,9 +81,12 @@ struct fs_ops {
- void (*adjust_link)(struct net_device *dev);
- void (*restart)(struct net_device *dev);
- void (*stop)(struct net_device *dev);
-- void (*napi_clear_event)(struct net_device *dev);
-- void (*napi_enable)(struct net_device *dev);
-- void (*napi_disable)(struct net_device *dev);
-+ void (*napi_clear_rx_event)(struct net_device *dev);
-+ void (*napi_enable_rx)(struct net_device *dev);
-+ void (*napi_disable_rx)(struct net_device *dev);
-+ void (*napi_clear_tx_event)(struct net_device *dev);
-+ void (*napi_enable_tx)(struct net_device *dev);
-+ void (*napi_disable_tx)(struct net_device *dev);
- void (*rx_bd_done)(struct net_device *dev);
- void (*tx_kickstart)(struct net_device *dev);
- u32 (*get_int_events)(struct net_device *dev);
-@@ -119,6 +122,7 @@ struct phy_info {
-
- struct fs_enet_private {
- struct napi_struct napi;
-+ struct napi_struct napi_tx;
- struct device *dev; /* pointer back to the device (must be initialized first) */
- struct net_device *ndev;
- spinlock_t lock; /* during all ops except TX pckt processing */
-@@ -145,11 +149,14 @@ struct fs_enet_private {
- unsigned int last_mii_status;
- int interrupt;
-
-+ struct phy_device *phydev;
- int oldduplex, oldspeed, oldlink; /* current settings */
-
- /* event masks */
-- u32 ev_napi; /* mask of NAPI events */
-- u32 ev; /* event mask */
-+ u32 ev_napi_rx; /* mask of NAPI rx events */
-+ u32 ev_napi_tx; /* mask of NAPI rx events */
-+ u32 ev_rx; /* rx event mask */
-+ u32 ev_tx; /* tx event mask */
- u32 ev_err; /* error event mask */
-
- u16 bd_rx_empty; /* mask of BD rx empty */
-diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
-index 120c758..08f5b91 100644
---- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
-+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
-@@ -90,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
- int ret = -EINVAL;
-
- fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-- if (!fep->interrupt)
-+ if (fep->interrupt == NO_IRQ)
- goto out;
-
- fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
-@@ -124,8 +124,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
- return ret;
- }
-
--#define FCC_NAPI_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB | FCC_ENET_TXB)
--#define FCC_EVENT (FCC_ENET_RXF | FCC_ENET_TXB)
-+#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
-+#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
-+#define FCC_RX_EVENT (FCC_ENET_RXF)
-+#define FCC_TX_EVENT (FCC_ENET_TXB)
- #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
-
- static int setup_data(struct net_device *dev)
-@@ -135,8 +137,10 @@ static int setup_data(struct net_device *dev)
- if (do_pd_setup(fep) != 0)
- return -EINVAL;
-
-- fep->ev_napi = FCC_NAPI_EVENT_MSK;
-- fep->ev = FCC_EVENT;
-+ fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
-+ fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
-+ fep->ev_rx = FCC_RX_EVENT;
-+ fep->ev_tx = FCC_TX_EVENT;
- fep->ev_err = FCC_ERR_EVENT_MSK;
-
- return 0;
-@@ -366,7 +370,7 @@ static void restart(struct net_device *dev)
-
- /* adjust to speed (for RMII mode) */
- if (fpi->use_rmii) {
-- if (dev->phydev->speed == 100)
-+ if (fep->phydev->speed == 100)
- C8(fcccp, fcc_gfemr, 0x20);
- else
- S8(fcccp, fcc_gfemr, 0x20);
-@@ -392,7 +396,7 @@ static void restart(struct net_device *dev)
- S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
-
- /* adjust to duplex mode */
-- if (dev->phydev->duplex)
-+ if (fep->phydev->duplex)
- S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
- else
- C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
-@@ -420,28 +424,52 @@ static void stop(struct net_device *dev)
- fs_cleanup_bds(dev);
- }
-
--static void napi_clear_event_fs(struct net_device *dev)
-+static void napi_clear_rx_event(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
-- W16(fccp, fcc_fcce, FCC_NAPI_EVENT_MSK);
-+ W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
- }
-
--static void napi_enable_fs(struct net_device *dev)
-+static void napi_enable_rx(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
-- S16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
-+ S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
- }
-
--static void napi_disable_fs(struct net_device *dev)
-+static void napi_disable_rx(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
-- C16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
-+ C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
-+}
-+
-+static void napi_clear_tx_event(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ fcc_t __iomem *fccp = fep->fcc.fccp;
-+
-+ W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
-+}
-+
-+static void napi_enable_tx(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ fcc_t __iomem *fccp = fep->fcc.fccp;
-+
-+ S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
-+}
-+
-+static void napi_disable_tx(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ fcc_t __iomem *fccp = fep->fcc.fccp;
-+
-+ C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
- }
-
- static void rx_bd_done(struct net_device *dev)
-@@ -524,7 +552,7 @@ static void tx_restart(struct net_device *dev)
- cbd_t __iomem *prev_bd;
- cbd_t __iomem *last_tx_bd;
-
-- last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
-+ last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
-
- /* get the current bd held in TBPTR and scan back from this point */
- recheck_bd = curr_tbptr = (cbd_t __iomem *)
-@@ -567,9 +595,12 @@ const struct fs_ops fs_fcc_ops = {
- .set_multicast_list = set_multicast_list,
- .restart = restart,
- .stop = stop,
-- .napi_clear_event = napi_clear_event_fs,
-- .napi_enable = napi_enable_fs,
-- .napi_disable = napi_disable_fs,
-+ .napi_clear_rx_event = napi_clear_rx_event,
-+ .napi_enable_rx = napi_enable_rx,
-+ .napi_disable_rx = napi_disable_rx,
-+ .napi_clear_tx_event = napi_clear_tx_event,
-+ .napi_enable_tx = napi_enable_tx,
-+ .napi_disable_tx = napi_disable_tx,
- .rx_bd_done = rx_bd_done,
- .tx_kickstart = tx_kickstart,
- .get_int_events = get_int_events,
-diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
-index 777beff..b34214e 100644
---- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
-+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
-@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
- struct platform_device *ofdev = to_platform_device(fep->dev);
-
- fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-- if (!fep->interrupt)
-+ if (fep->interrupt == NO_IRQ)
- return -EINVAL;
-
- fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
-@@ -109,8 +109,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
- return 0;
- }
-
--#define FEC_NAPI_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
--#define FEC_EVENT (FEC_ENET_RXF | FEC_ENET_TXF)
-+#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
-+#define FEC_RX_EVENT (FEC_ENET_RXF)
-+#define FEC_TX_EVENT (FEC_ENET_TXF)
- #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
- FEC_ENET_BABT | FEC_ENET_EBERR)
-
-@@ -124,8 +126,10 @@ static int setup_data(struct net_device *dev)
- fep->fec.hthi = 0;
- fep->fec.htlo = 0;
-
-- fep->ev_napi = FEC_NAPI_EVENT_MSK;
-- fep->ev = FEC_EVENT;
-+ fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
-+ fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
-+ fep->ev_rx = FEC_RX_EVENT;
-+ fep->ev_tx = FEC_TX_EVENT;
- fep->ev_err = FEC_ERR_EVENT_MSK;
-
- return 0;
-@@ -250,7 +254,7 @@ static void restart(struct net_device *dev)
- int r;
- u32 addrhi, addrlo;
-
-- struct mii_bus *mii = dev->phydev->mdio.bus;
-+ struct mii_bus* mii = fep->phydev->bus;
- struct fec_info* fec_inf = mii->priv;
-
- r = whack_reset(fep->fec.fecp);
-@@ -329,7 +333,7 @@ static void restart(struct net_device *dev)
- /*
- * adjust to duplex mode
- */
-- if (dev->phydev->duplex) {
-+ if (fep->phydev->duplex) {
- FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
- FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
- } else {
-@@ -359,7 +363,7 @@ static void stop(struct net_device *dev)
- const struct fs_platform_info *fpi = fep->fpi;
- struct fec __iomem *fecp = fep->fec.fecp;
-
-- struct fec_info *feci = dev->phydev->mdio.bus->priv;
-+ struct fec_info* feci= fep->phydev->bus->priv;
-
- int i;
-
-@@ -392,28 +396,52 @@ static void stop(struct net_device *dev)
- }
- }
-
--static void napi_clear_event_fs(struct net_device *dev)
-+static void napi_clear_rx_event(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
-- FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
-+ FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
- }
-
--static void napi_enable_fs(struct net_device *dev)
-+static void napi_enable_rx(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
-- FS(fecp, imask, FEC_NAPI_EVENT_MSK);
-+ FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
- }
-
--static void napi_disable_fs(struct net_device *dev)
-+static void napi_disable_rx(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
-- FC(fecp, imask, FEC_NAPI_EVENT_MSK);
-+ FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
-+}
-+
-+static void napi_clear_tx_event(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ struct fec __iomem *fecp = fep->fec.fecp;
-+
-+ FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
-+}
-+
-+static void napi_enable_tx(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ struct fec __iomem *fecp = fep->fec.fecp;
-+
-+ FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
-+}
-+
-+static void napi_disable_tx(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ struct fec __iomem *fecp = fep->fec.fecp;
-+
-+ FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
- }
-
- static void rx_bd_done(struct net_device *dev)
-@@ -485,9 +513,12 @@ const struct fs_ops fs_fec_ops = {
- .set_multicast_list = set_multicast_list,
- .restart = restart,
- .stop = stop,
-- .napi_clear_event = napi_clear_event_fs,
-- .napi_enable = napi_enable_fs,
-- .napi_disable = napi_disable_fs,
-+ .napi_clear_rx_event = napi_clear_rx_event,
-+ .napi_enable_rx = napi_enable_rx,
-+ .napi_disable_rx = napi_disable_rx,
-+ .napi_clear_tx_event = napi_clear_tx_event,
-+ .napi_enable_tx = napi_enable_tx,
-+ .napi_disable_tx = napi_disable_tx,
- .rx_bd_done = rx_bd_done,
- .tx_kickstart = tx_kickstart,
- .get_int_events = get_int_events,
-diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
-index 15abd37..7a184e8 100644
---- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
-+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
-@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
- struct platform_device *ofdev = to_platform_device(fep->dev);
-
- fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-- if (!fep->interrupt)
-+ if (fep->interrupt == NO_IRQ)
- return -EINVAL;
-
- fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
-@@ -115,8 +115,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
- return 0;
- }
-
--#define SCC_NAPI_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
--#define SCC_EVENT (SCCE_ENET_RXF | SCCE_ENET_TXB)
-+#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
-+#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
-+#define SCC_RX_EVENT (SCCE_ENET_RXF)
-+#define SCC_TX_EVENT (SCCE_ENET_TXB)
- #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
-
- static int setup_data(struct net_device *dev)
-@@ -128,8 +130,10 @@ static int setup_data(struct net_device *dev)
- fep->scc.hthi = 0;
- fep->scc.htlo = 0;
-
-- fep->ev_napi = SCC_NAPI_EVENT_MSK;
-- fep->ev = SCC_EVENT | SCCE_ENET_TXE;
-+ fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
-+ fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
-+ fep->ev_rx = SCC_RX_EVENT;
-+ fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
- fep->ev_err = SCC_ERR_EVENT_MSK;
-
- return 0;
-@@ -348,7 +352,7 @@ static void restart(struct net_device *dev)
- W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
-
- /* Set full duplex mode if needed */
-- if (dev->phydev->duplex)
-+ if (fep->phydev->duplex)
- S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
-
- /* Restore multicast and promiscuous settings */
-@@ -375,28 +379,52 @@ static void stop(struct net_device *dev)
- fs_cleanup_bds(dev);
- }
-
--static void napi_clear_event_fs(struct net_device *dev)
-+static void napi_clear_rx_event(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
-- W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
-+ W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
- }
-
--static void napi_enable_fs(struct net_device *dev)
-+static void napi_enable_rx(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
-- S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
-+ S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
- }
-
--static void napi_disable_fs(struct net_device *dev)
-+static void napi_disable_rx(struct net_device *dev)
- {
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
-- C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
-+ C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
-+}
-+
-+static void napi_clear_tx_event(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ scc_t __iomem *sccp = fep->scc.sccp;
-+
-+ W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
-+}
-+
-+static void napi_enable_tx(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ scc_t __iomem *sccp = fep->scc.sccp;
-+
-+ S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
-+}
-+
-+static void napi_disable_tx(struct net_device *dev)
-+{
-+ struct fs_enet_private *fep = netdev_priv(dev);
-+ scc_t __iomem *sccp = fep->scc.sccp;
-+
-+ C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
- }
-
- static void rx_bd_done(struct net_device *dev)
-@@ -469,9 +497,12 @@ const struct fs_ops fs_scc_ops = {
- .set_multicast_list = set_multicast_list,
- .restart = restart,
- .stop = stop,
-- .napi_clear_event = napi_clear_event_fs,
-- .napi_enable = napi_enable_fs,
-- .napi_disable = napi_disable_fs,
-+ .napi_clear_rx_event = napi_clear_rx_event,
-+ .napi_enable_rx = napi_enable_rx,
-+ .napi_disable_rx = napi_disable_rx,
-+ .napi_clear_tx_event = napi_clear_tx_event,
-+ .napi_enable_tx = napi_enable_tx,
-+ .napi_disable_tx = napi_disable_tx,
- .rx_bd_done = rx_bd_done,
- .tx_kickstart = tx_kickstart,
- .get_int_events = get_int_events,
-diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
-index 1f015ed..68a428d 100644
---- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
-+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
-@@ -172,16 +172,23 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
- goto out_free_bus;
-
- new_bus->phy_mask = ~0;
-+ new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
-+ if (!new_bus->irq) {
-+ ret = -ENOMEM;
-+ goto out_unmap_regs;
-+ }
-
- new_bus->parent = &ofdev->dev;
- platform_set_drvdata(ofdev, new_bus);
-
- ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
- if (ret)
-- goto out_unmap_regs;
-+ goto out_free_irqs;
-
- return 0;
-
-+out_free_irqs:
-+ kfree(new_bus->irq);
- out_unmap_regs:
- iounmap(bitbang->dir);
- out_free_bus:
-@@ -198,6 +205,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
- struct bb_info *bitbang = bus->priv;
-
- mdiobus_unregister(bus);
-+ kfree(bus->irq);
- free_mdio_bitbang(bus);
- iounmap(bitbang->dir);
- kfree(bitbang);
-diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
-index a89267b..2be383e 100644
---- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
-+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
-@@ -166,16 +166,23 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
- clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed);
-
- new_bus->phy_mask = ~0;
-+ new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
-+ if (!new_bus->irq) {
-+ ret = -ENOMEM;
-+ goto out_unmap_regs;
-+ }
-
- new_bus->parent = &ofdev->dev;
- platform_set_drvdata(ofdev, new_bus);
-
- ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
- if (ret)
-- goto out_unmap_regs;
-+ goto out_free_irqs;
-
- return 0;
-
-+out_free_irqs:
-+ kfree(new_bus->irq);
- out_unmap_regs:
- iounmap(fec->fecp);
- out_res:
-@@ -193,6 +200,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
- struct fec_info *fec = bus->priv;
-
- mdiobus_unregister(bus);
-+ kfree(bus->irq);
- iounmap(fec->fecp);
- kfree(fec);
- mdiobus_free(bus);
-diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
-index 446c7b3..3c40f6b 100644
---- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
-+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
-@@ -29,7 +29,7 @@
-
- #include <asm/io.h>
- #if IS_ENABLED(CONFIG_UCC_GETH)
--#include <soc/fsl/qe/ucc.h>
-+#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
- #endif
-
- #include "gianfar.h"
-@@ -69,6 +69,7 @@ struct fsl_pq_mdio {
- struct fsl_pq_mdio_priv {
- void __iomem *map;
- struct fsl_pq_mii __iomem *regs;
-+ int irqs[PHY_MAX_ADDR];
- };
-
- /*
-@@ -195,15 +196,13 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
- return 0;
- }
-
--#if IS_ENABLED(CONFIG_GIANFAR)
-+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
- /*
-- * Return the TBIPA address, starting from the address
-- * of the mapped GFAR MDIO registers (struct gfar)
- * This is mildly evil, but so is our hardware for doing this.
- * Also, we have to cast back to struct gfar because of
- * definition weirdness done in gianfar.h.
- */
--static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
-+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
- {
- struct gfar __iomem *enet_regs = p;
-
-@@ -211,15 +210,6 @@ static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
- }
-
- /*
-- * Return the TBIPA address, starting from the address
-- * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
-- */
--static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
--{
-- return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
--}
--
--/*
- * Return the TBIPAR address for an eTSEC2 node
- */
- static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
-@@ -228,14 +218,13 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
- }
- #endif
-
--#if IS_ENABLED(CONFIG_UCC_GETH)
-+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
- /*
-- * Return the TBIPAR address for a QE MDIO node, starting from the address
-- * of the mapped MII registers (struct fsl_pq_mii)
-+ * Return the TBIPAR address for a QE MDIO node
- */
- static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
- {
-- struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
-+ struct fsl_pq_mdio __iomem *mdio = p;
-
- return &mdio->utbipar;
- }
-@@ -306,19 +295,19 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
- #endif
-
- static const struct of_device_id fsl_pq_mdio_match[] = {
--#if IS_ENABLED(CONFIG_GIANFAR)
-+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
- {
- .compatible = "fsl,gianfar-tbi",
- .data = &(struct fsl_pq_mdio_data) {
- .mii_offset = 0,
-- .get_tbipa = get_gfar_tbipa_from_mii,
-+ .get_tbipa = get_gfar_tbipa,
- },
- },
- {
- .compatible = "fsl,gianfar-mdio",
- .data = &(struct fsl_pq_mdio_data) {
- .mii_offset = 0,
-- .get_tbipa = get_gfar_tbipa_from_mii,
-+ .get_tbipa = get_gfar_tbipa,
- },
- },
- {
-@@ -326,7 +315,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
- .compatible = "gianfar",
- .data = &(struct fsl_pq_mdio_data) {
- .mii_offset = offsetof(struct fsl_pq_mdio, mii),
-- .get_tbipa = get_gfar_tbipa_from_mdio,
-+ .get_tbipa = get_gfar_tbipa,
- },
- },
- {
-@@ -344,7 +333,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
- },
- },
- #endif
--#if IS_ENABLED(CONFIG_UCC_GETH)
-+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
- {
- .compatible = "fsl,ucc-mdio",
- .data = &(struct fsl_pq_mdio_data) {
-@@ -400,6 +389,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
- new_bus->read = &fsl_pq_mdio_read;
- new_bus->write = &fsl_pq_mdio_write;
- new_bus->reset = &fsl_pq_mdio_reset;
-+ new_bus->irq = priv->irqs;
-
- err = of_address_to_resource(np, 0, &res);
- if (err < 0) {
-@@ -455,16 +445,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
-
- tbipa = data->get_tbipa(priv->map);
-
-- /*
-- * Add consistency check to make sure TBI is contained
-- * within the mapped range (not because we would get a
-- * segfault, rather to catch bugs in computing TBI
-- * address). Print error message but continue anyway.
-- */
-- if ((void *)tbipa > priv->map + resource_size(&res) - 4)
-- dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
-- ((void *)tbipa - priv->map) + 4);
--
- iowrite32be(be32_to_cpup(prop), tbipa);
- }
- }
-diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
-index 9061c2f..4ee080d 100644
---- a/drivers/net/ethernet/freescale/gianfar.c
-+++ b/drivers/net/ethernet/freescale/gianfar.c
-@@ -107,17 +107,17 @@
-
- #include "gianfar.h"
-
--#define TX_TIMEOUT (5*HZ)
-+#define TX_TIMEOUT (1*HZ)
-
--const char gfar_driver_version[] = "2.0";
-+const char gfar_driver_version[] = "1.3";
-
- static int gfar_enet_open(struct net_device *dev);
- static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
- static void gfar_reset_task(struct work_struct *work);
- static void gfar_timeout(struct net_device *dev);
- static int gfar_close(struct net_device *dev);
--static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
-- int alloc_cnt);
-+static struct sk_buff *gfar_new_skb(struct net_device *dev,
-+ dma_addr_t *bufaddr);
- static int gfar_set_mac_address(struct net_device *dev);
- static int gfar_change_mtu(struct net_device *dev, int new_mtu);
- static irqreturn_t gfar_error(int irq, void *dev_id);
-@@ -141,7 +141,8 @@ static void gfar_netpoll(struct net_device *dev);
- #endif
- int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
- static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
--static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
-+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-+ int amount_pull, struct napi_struct *napi);
- static void gfar_halt_nodisable(struct gfar_private *priv);
- static void gfar_clear_exact_match(struct net_device *dev);
- static void gfar_set_mac_for_addr(struct net_device *dev, int num,
-@@ -168,15 +169,17 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- bdp->lstatus = cpu_to_be32(lstatus);
- }
-
--static void gfar_init_bds(struct net_device *ndev)
-+static int gfar_init_bds(struct net_device *ndev)
- {
- struct gfar_private *priv = netdev_priv(ndev);
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- struct gfar_priv_tx_q *tx_queue = NULL;
- struct gfar_priv_rx_q *rx_queue = NULL;
- struct txbd8 *txbdp;
-+ struct rxbd8 *rxbdp;
- u32 __iomem *rfbptr;
- int i, j;
-+ dma_addr_t bufaddr;
-
- for (i = 0; i < priv->num_tx_queues; i++) {
- tx_queue = priv->tx_queue[i];
-@@ -204,26 +207,40 @@ static void gfar_init_bds(struct net_device *ndev)
- rfbptr = ®s->rfbptr0;
- for (i = 0; i < priv->num_rx_queues; i++) {
- rx_queue = priv->rx_queue[i];
-+ rx_queue->cur_rx = rx_queue->rx_bd_base;
-+ rx_queue->skb_currx = 0;
-+ rxbdp = rx_queue->rx_bd_base;
-
-- rx_queue->next_to_clean = 0;
-- rx_queue->next_to_use = 0;
-- rx_queue->next_to_alloc = 0;
-+ for (j = 0; j < rx_queue->rx_ring_size; j++) {
-+ struct sk_buff *skb = rx_queue->rx_skbuff[j];
-
-- /* make sure next_to_clean != next_to_use after this
-- * by leaving at least 1 unused descriptor
-- */
-- gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
-+ if (skb) {
-+ bufaddr = be32_to_cpu(rxbdp->bufPtr);
-+ } else {
-+ skb = gfar_new_skb(ndev, &bufaddr);
-+ if (!skb) {
-+ netdev_err(ndev, "Can't allocate RX buffers\n");
-+ return -ENOMEM;
-+ }
-+ rx_queue->rx_skbuff[j] = skb;
-+ }
-+
-+ gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
-+ rxbdp++;
-+ }
-
- rx_queue->rfbptr = rfbptr;
- rfbptr += 2;
- }
-+
-+ return 0;
- }
-
- static int gfar_alloc_skb_resources(struct net_device *ndev)
- {
- void *vaddr;
- dma_addr_t addr;
-- int i, j;
-+ int i, j, k;
- struct gfar_private *priv = netdev_priv(ndev);
- struct device *dev = priv->dev;
- struct gfar_priv_tx_q *tx_queue = NULL;
-@@ -262,8 +279,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
- rx_queue = priv->rx_queue[i];
- rx_queue->rx_bd_base = vaddr;
- rx_queue->rx_bd_dma_base = addr;
-- rx_queue->ndev = ndev;
-- rx_queue->dev = dev;
-+ rx_queue->dev = ndev;
- addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
- vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
- }
-@@ -278,20 +294,25 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
- if (!tx_queue->tx_skbuff)
- goto cleanup;
-
-- for (j = 0; j < tx_queue->tx_ring_size; j++)
-- tx_queue->tx_skbuff[j] = NULL;
-+ for (k = 0; k < tx_queue->tx_ring_size; k++)
-+ tx_queue->tx_skbuff[k] = NULL;
- }
-
- for (i = 0; i < priv->num_rx_queues; i++) {
- rx_queue = priv->rx_queue[i];
-- rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
-- sizeof(*rx_queue->rx_buff),
-- GFP_KERNEL);
-- if (!rx_queue->rx_buff)
-+ rx_queue->rx_skbuff =
-+ kmalloc_array(rx_queue->rx_ring_size,
-+ sizeof(*rx_queue->rx_skbuff),
-+ GFP_KERNEL);
-+ if (!rx_queue->rx_skbuff)
- goto cleanup;
-+
-+ for (j = 0; j < rx_queue->rx_ring_size; j++)
-+ rx_queue->rx_skbuff[j] = NULL;
- }
-
-- gfar_init_bds(ndev);
-+ if (gfar_init_bds(ndev))
-+ goto cleanup;
-
- return 0;
-
-@@ -333,16 +354,28 @@ static void gfar_init_rqprm(struct gfar_private *priv)
- }
- }
-
--static void gfar_rx_offload_en(struct gfar_private *priv)
-+static void gfar_rx_buff_size_config(struct gfar_private *priv)
- {
-+ int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-+
- /* set this when rx hw offload (TOE) functions are being used */
- priv->uses_rxfcb = 0;
-
- if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
- priv->uses_rxfcb = 1;
-
-- if (priv->hwts_rx_en || priv->rx_filer_enable)
-+ if (priv->hwts_rx_en)
- priv->uses_rxfcb = 1;
-+
-+ if (priv->uses_rxfcb)
-+ frame_size += GMAC_FCB_LEN;
-+
-+ frame_size += priv->padding;
-+
-+ frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-+ INCREMENTAL_BUFFER_SIZE;
-+
-+ priv->rx_buffer_size = frame_size;
- }
-
- static void gfar_mac_rx_config(struct gfar_private *priv)
-@@ -351,7 +384,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
- u32 rctrl = 0;
-
- if (priv->rx_filer_enable) {
-- rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
-+ rctrl |= RCTRL_FILREN;
- /* Program the RIR0 reg with the required distribution */
- if (priv->poll_mode == GFAR_SQ_POLLING)
- gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0);
-@@ -483,15 +516,6 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
- return &dev->stats;
- }
-
--static int gfar_set_mac_addr(struct net_device *dev, void *p)
--{
-- eth_mac_addr(dev, p);
--
-- gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
--
-- return 0;
--}
--
- static const struct net_device_ops gfar_netdev_ops = {
- .ndo_open = gfar_enet_open,
- .ndo_start_xmit = gfar_start_xmit,
-@@ -502,7 +526,7 @@ static const struct net_device_ops gfar_netdev_ops = {
- .ndo_tx_timeout = gfar_timeout,
- .ndo_do_ioctl = gfar_ioctl,
- .ndo_get_stats = gfar_get_stats,
-- .ndo_set_mac_address = gfar_set_mac_addr,
-+ .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = gfar_netpoll,
-@@ -532,6 +556,22 @@ static void gfar_ints_enable(struct gfar_private *priv)
- }
- }
-
-+static void lock_tx_qs(struct gfar_private *priv)
-+{
-+ int i;
-+
-+ for (i = 0; i < priv->num_tx_queues; i++)
-+ spin_lock(&priv->tx_queue[i]->txlock);
-+}
-+
-+static void unlock_tx_qs(struct gfar_private *priv)
-+{
-+ int i;
-+
-+ for (i = 0; i < priv->num_tx_queues; i++)
-+ spin_unlock(&priv->tx_queue[i]->txlock);
-+}
-+
- static int gfar_alloc_tx_queues(struct gfar_private *priv)
- {
- int i;
-@@ -560,8 +600,9 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
- if (!priv->rx_queue[i])
- return -ENOMEM;
-
-+ priv->rx_queue[i]->rx_skbuff = NULL;
- priv->rx_queue[i]->qindex = i;
-- priv->rx_queue[i]->ndev = priv->ndev;
-+ priv->rx_queue[i]->dev = priv->ndev;
- }
- return 0;
- }
-@@ -647,9 +688,9 @@ static int gfar_parse_group(struct device_node *np,
- if (model && strcasecmp(model, "FEC")) {
- gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
- gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
-- if (!gfar_irq(grp, TX)->irq ||
-- !gfar_irq(grp, RX)->irq ||
-- !gfar_irq(grp, ER)->irq)
-+ if (gfar_irq(grp, TX)->irq == NO_IRQ ||
-+ gfar_irq(grp, RX)->irq == NO_IRQ ||
-+ gfar_irq(grp, ER)->irq == NO_IRQ)
- return -EINVAL;
- }
-
-@@ -738,6 +779,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
- struct gfar_private *priv = NULL;
- struct device_node *np = ofdev->dev.of_node;
- struct device_node *child = NULL;
-+ struct property *stash;
- u32 stash_len = 0;
- u32 stash_idx = 0;
- unsigned int num_tx_qs, num_rx_qs;
-@@ -853,7 +895,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
- goto err_grp_init;
- }
-
-- if (of_property_read_bool(np, "bd-stash")) {
-+ stash = of_find_property(np, "bd-stash", NULL);
-+
-+ if (stash) {
- priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
- priv->bd_stash_en = 1;
- }
-@@ -891,8 +935,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
- FSL_GIANFAR_DEV_HAS_VLAN |
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-- FSL_GIANFAR_DEV_HAS_TIMER |
-- FSL_GIANFAR_DEV_HAS_RX_FILER;
-+ FSL_GIANFAR_DEV_HAS_TIMER;
-
- err = of_property_read_string(np, "phy-connection-type", &ctype);
-
-@@ -905,9 +948,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
- if (of_find_property(np, "fsl,magic-packet", NULL))
- priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
-
-- if (of_get_property(np, "fsl,wake-on-filer", NULL))
-- priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
--
- priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
-
- /* In the case of a fixed PHY, the DT node associated
-@@ -999,7 +1039,7 @@ static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
-
- static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- {
-- struct phy_device *phydev = dev->phydev;
-+ struct gfar_private *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-@@ -1009,10 +1049,10 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- if (cmd == SIOCGHWTSTAMP)
- return gfar_hwtstamp_get(dev, rq);
-
-- if (!phydev)
-+ if (!priv->phydev)
- return -ENODEV;
-
-- return phy_mii_ioctl(phydev, rq, cmd);
-+ return phy_mii_ioctl(priv->phydev, rq, cmd);
- }
-
- static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
-@@ -1111,10 +1151,8 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
-
- if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
- priv->errata |= GFAR_ERRATA_12;
-- /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
- if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
-- ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
-- ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
-+ ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
- priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
- }
- #endif
-@@ -1156,11 +1194,12 @@ void gfar_mac_reset(struct gfar_private *priv)
-
- udelay(3);
-
-- gfar_rx_offload_en(priv);
-+ /* Compute rx_buff_size based on config flags */
-+ gfar_rx_buff_size_config(priv);
-
- /* Initialize the max receive frame/buffer lengths */
-- gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE);
-- gfar_write(®s->mrblr, GFAR_RXB_SIZE);
-+ gfar_write(®s->maxfrm, priv->rx_buffer_size);
-+ gfar_write(®s->mrblr, priv->rx_buffer_size);
-
- /* Initialize the Minimum Frame Length Register */
- gfar_write(®s->minflr, MINFLR_INIT_SETTINGS);
-@@ -1168,11 +1207,12 @@ void gfar_mac_reset(struct gfar_private *priv)
- /* Initialize MACCFG2. */
- tempval = MACCFG2_INIT_SETTINGS;
-
-- /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
-- * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
-- * and by checking RxBD[LG] and discarding larger than MAXFRM.
-+ /* If the mtu is larger than the max size for standard
-+ * ethernet frames (ie, a jumbo frame), then set maccfg2
-+ * to allow huge frames, and to check the length
- */
-- if (gfar_has_errata(priv, GFAR_ERRATA_74))
-+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-+ gfar_has_errata(priv, GFAR_ERRATA_74))
- tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
-
- gfar_write(®s->maccfg2, tempval);
-@@ -1312,7 +1352,6 @@ static void gfar_init_addr_hash_table(struct gfar_private *priv)
- */
- static int gfar_probe(struct platform_device *ofdev)
- {
-- struct device_node *np = ofdev->dev.of_node;
- struct net_device *dev = NULL;
- struct gfar_private *priv = NULL;
- int err = 0, i;
-@@ -1328,6 +1367,7 @@ static int gfar_probe(struct platform_device *ofdev)
- priv->dev = &ofdev->dev;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
-+ spin_lock_init(&priv->bflock);
- INIT_WORK(&priv->reset_task, gfar_reset_task);
-
- platform_set_drvdata(ofdev, priv);
-@@ -1348,12 +1388,12 @@ static int gfar_probe(struct platform_device *ofdev)
- if (priv->poll_mode == GFAR_SQ_POLLING) {
- netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
-- netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
-+ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
- gfar_poll_tx_sq, 2);
- } else {
- netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx, GFAR_DEV_WEIGHT);
-- netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
-+ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
- gfar_poll_tx, 2);
- }
- }
-@@ -1371,8 +1411,6 @@ static int gfar_probe(struct platform_device *ofdev)
- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
-
-- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
--
- gfar_init_addr_hash_table(priv);
-
- /* Insert receive time stamps into padding alignment bytes */
-@@ -1383,6 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev)
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
-
-+ priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-+
- /* Initializing some of the rx/tx queue level parameters */
- for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
-@@ -1397,9 +1437,8 @@ static int gfar_probe(struct platform_device *ofdev)
- priv->rx_queue[i]->rxic = DEFAULT_RXIC;
- }
-
-- /* Always enable rx filer if available */
-- priv->rx_filer_enable =
-- (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
-+ /* always enable rx filer */
-+ priv->rx_filer_enable = 1;
- /* Enable most messages by default */
- priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
- /* use pritority h/w tx queue scheduling for single queue devices */
-@@ -1420,14 +1459,9 @@ static int gfar_probe(struct platform_device *ofdev)
- goto register_fail;
- }
-
-- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
-- priv->wol_supported |= GFAR_WOL_MAGIC;
--
-- if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
-- priv->rx_filer_enable)
-- priv->wol_supported |= GFAR_WOL_FILER_UCAST;
--
-- device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
-+ device_init_wakeup(&dev->dev,
-+ priv->device_flags &
-+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
-
- /* fill out IRQ number and name fields */
- for (i = 0; i < priv->num_grps; i++) {
-@@ -1463,8 +1497,6 @@ static int gfar_probe(struct platform_device *ofdev)
- return 0;
-
- register_fail:
-- if (of_phy_is_fixed_link(np))
-- of_phy_deregister_fixed_link(np);
- unmap_group_regs(priv);
- gfar_free_rx_queues(priv);
- gfar_free_tx_queues(priv);
-@@ -1477,16 +1509,11 @@ static int gfar_probe(struct platform_device *ofdev)
- static int gfar_remove(struct platform_device *ofdev)
- {
- struct gfar_private *priv = platform_get_drvdata(ofdev);
-- struct device_node *np = ofdev->dev.of_node;
-
- of_node_put(priv->phy_node);
- of_node_put(priv->tbi_node);
-
- unregister_netdev(priv->ndev);
--
-- if (of_phy_is_fixed_link(np))
-- of_phy_deregister_fixed_link(np);
--
- unmap_group_regs(priv);
- gfar_free_rx_queues(priv);
- gfar_free_tx_queues(priv);
-@@ -1497,153 +1524,53 @@ static int gfar_remove(struct platform_device *ofdev)
-
- #ifdef CONFIG_PM
-
--static void __gfar_filer_disable(struct gfar_private *priv)
--{
-- struct gfar __iomem *regs = priv->gfargrp[0].regs;
-- u32 temp;
--
-- temp = gfar_read(&regs->rctrl);
-- temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
-- gfar_write(&regs->rctrl, temp);
--}
--
--static void __gfar_filer_enable(struct gfar_private *priv)
--{
-- struct gfar __iomem *regs = priv->gfargrp[0].regs;
-- u32 temp;
--
-- temp = gfar_read(&regs->rctrl);
-- temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
-- gfar_write(&regs->rctrl, temp);
--}
--
--/* Filer rules implementing wol capabilities */
--static void gfar_filer_config_wol(struct gfar_private *priv)
--{
-- unsigned int i;
-- u32 rqfcr;
--
-- __gfar_filer_disable(priv);
--
-- /* clear the filer table, reject any packet by default */
-- rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
-- for (i = 0; i <= MAX_FILER_IDX; i++)
-- gfar_write_filer(priv, i, rqfcr, 0);
--
-- i = 0;
-- if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
-- /* unicast packet, accept it */
-- struct net_device *ndev = priv->ndev;
-- /* get the default rx queue index */
-- u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
-- u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
-- (ndev->dev_addr[1] << 8) |
-- ndev->dev_addr[2];
--
-- rqfcr = (qindex << 10) | RQFCR_AND |
-- RQFCR_CMP_EXACT | RQFCR_PID_DAH;
--
-- gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
--
-- dest_mac_addr = (ndev->dev_addr[3] << 16) |
-- (ndev->dev_addr[4] << 8) |
-- ndev->dev_addr[5];
-- rqfcr = (qindex << 10) | RQFCR_GPI |
-- RQFCR_CMP_EXACT | RQFCR_PID_DAL;
-- gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
-- }
--
-- __gfar_filer_enable(priv);
--}
--
--static void gfar_filer_restore_table(struct gfar_private *priv)
--{
-- u32 rqfcr, rqfpr;
-- unsigned int i;
--
-- __gfar_filer_disable(priv);
--
-- for (i = 0; i <= MAX_FILER_IDX; i++) {
-- rqfcr = priv->ftp_rqfcr[i];
-- rqfpr = priv->ftp_rqfpr[i];
-- gfar_write_filer(priv, i, rqfcr, rqfpr);
-- }
--
-- __gfar_filer_enable(priv);
--}
--
--/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
--static void gfar_start_wol_filer(struct gfar_private *priv)
--{
-- struct gfar __iomem *regs = priv->gfargrp[0].regs;
-- u32 tempval;
-- int i = 0;
--
-- /* Enable Rx hw queues */
-- gfar_write(&regs->rqueue, priv->rqueue);
--
-- /* Initialize DMACTRL to have WWR and WOP */
-- tempval = gfar_read(&regs->dmactrl);
-- tempval |= DMACTRL_INIT_SETTINGS;
-- gfar_write(&regs->dmactrl, tempval);
--
-- /* Make sure we aren't stopped */
-- tempval = gfar_read(&regs->dmactrl);
-- tempval &= ~DMACTRL_GRS;
-- gfar_write(&regs->dmactrl, tempval);
--
-- for (i = 0; i < priv->num_grps; i++) {
-- regs = priv->gfargrp[i].regs;
-- /* Clear RHLT, so that the DMA starts polling now */
-- gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
-- /* enable the Filer General Purpose Interrupt */
-- gfar_write(&regs->imask, IMASK_FGPI);
-- }
--
-- /* Enable Rx DMA */
-- tempval = gfar_read(&regs->maccfg1);
-- tempval |= MACCFG1_RX_EN;
-- gfar_write(&regs->maccfg1, tempval);
--}
--
- static int gfar_suspend(struct device *dev)
- {
- struct gfar_private *priv = dev_get_drvdata(dev);
- struct net_device *ndev = priv->ndev;
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
-+ unsigned long flags;
- u32 tempval;
-- u16 wol = priv->wol_opts;
-
-- if (!netif_running(ndev))
-- return 0;
-+ int magic_packet = priv->wol_en &&
-+ (priv->device_flags &
-+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
-
-- disable_napi(priv);
-- netif_tx_lock(ndev);
- netif_device_detach(ndev);
-- netif_tx_unlock(ndev);
-
-- gfar_halt(priv);
-+ if (netif_running(ndev)) {
-
-- if (wol & GFAR_WOL_MAGIC) {
-- /* Enable interrupt on Magic Packet */
-- gfar_write(&regs->imask, IMASK_MAG);
-+ local_irq_save(flags);
-+ lock_tx_qs(priv);
-
-- /* Enable Magic Packet mode */
-- tempval = gfar_read(&regs->maccfg2);
-- tempval |= MACCFG2_MPEN;
-- gfar_write(&regs->maccfg2, tempval);
-+ gfar_halt_nodisable(priv);
-
-- /* re-enable the Rx block */
-+ /* Disable Tx, and Rx if wake-on-LAN is disabled. */
- tempval = gfar_read(&regs->maccfg1);
-- tempval |= MACCFG1_RX_EN;
-+
-+ tempval &= ~MACCFG1_TX_EN;
-+
-+ if (!magic_packet)
-+ tempval &= ~MACCFG1_RX_EN;
-+
- gfar_write(&regs->maccfg1, tempval);
-
-- } else if (wol & GFAR_WOL_FILER_UCAST) {
-- gfar_filer_config_wol(priv);
-- gfar_start_wol_filer(priv);
-+ unlock_tx_qs(priv);
-+ local_irq_restore(flags);
-
-- } else {
-- phy_stop(ndev->phydev);
-+ disable_napi(priv);
-+
-+ if (magic_packet) {
-+ /* Enable interrupt on Magic Packet */
-+ gfar_write(&regs->imask, IMASK_MAG);
-+
-+ /* Enable Magic Packet mode */
-+ tempval = gfar_read(&regs->maccfg2);
-+ tempval |= MACCFG2_MPEN;
-+ gfar_write(&regs->maccfg2, tempval);
-+ } else {
-+ phy_stop(priv->phydev);
-+ }
- }
-
- return 0;
-@@ -1654,30 +1581,37 @@ static int gfar_resume(struct device *dev)
- struct gfar_private *priv = dev_get_drvdata(dev);
- struct net_device *ndev = priv->ndev;
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
-+ unsigned long flags;
- u32 tempval;
-- u16 wol = priv->wol_opts;
-+ int magic_packet = priv->wol_en &&
-+ (priv->device_flags &
-+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
-
-- if (!netif_running(ndev))
-+ if (!netif_running(ndev)) {
-+ netif_device_attach(ndev);
- return 0;
-+ }
-
-- if (wol & GFAR_WOL_MAGIC) {
-- /* Disable Magic Packet mode */
-- tempval = gfar_read(&regs->maccfg2);
-- tempval &= ~MACCFG2_MPEN;
-- gfar_write(&regs->maccfg2, tempval);
-+ if (!magic_packet && priv->phydev)
-+ phy_start(priv->phydev);
-
-- } else if (wol & GFAR_WOL_FILER_UCAST) {
-- /* need to stop rx only, tx is already down */
-- gfar_halt(priv);
-- gfar_filer_restore_table(priv);
-+ /* Disable Magic Packet mode, in case something
-+ * else woke us up.
-+ */
-+ local_irq_save(flags);
-+ lock_tx_qs(priv);
-
-- } else {
-- phy_start(ndev->phydev);
-- }
-+ tempval = gfar_read(&regs->maccfg2);
-+ tempval &= ~MACCFG2_MPEN;
-+ gfar_write(&regs->maccfg2, tempval);
-
- gfar_start(priv);
-
-+ unlock_tx_qs(priv);
-+ local_irq_restore(flags);
-+
- netif_device_attach(ndev);
-+
- enable_napi(priv);
-
- return 0;
-@@ -1694,7 +1628,10 @@ static int gfar_restore(struct device *dev)
- return 0;
- }
-
-- gfar_init_bds(ndev);
-+ if (gfar_init_bds(ndev)) {
-+ free_skb_resources(priv);
-+ return -ENOMEM;
-+ }
-
- gfar_mac_reset(priv);
-
-@@ -1706,8 +1643,8 @@ static int gfar_restore(struct device *dev)
- priv->oldspeed = 0;
- priv->oldduplex = -1;
-
-- if (ndev->phydev)
-- phy_start(ndev->phydev);
-+ if (priv->phydev)
-+ phy_start(priv->phydev);
-
- netif_device_attach(ndev);
- enable_napi(priv);
-@@ -1786,7 +1723,6 @@ static int init_phy(struct net_device *dev)
- priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- GFAR_SUPPORTED_GBIT : 0;
- phy_interface_t interface;
-- struct phy_device *phydev;
-
- priv->oldlink = 0;
- priv->oldspeed = 0;
-@@ -1794,9 +1730,9 @@ static int init_phy(struct net_device *dev)
-
- interface = gfar_get_interface(dev);
-
-- phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
-- interface);
-- if (!phydev) {
-+ priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
-+ interface);
-+ if (!priv->phydev) {
- dev_err(&dev->dev, "could not attach to PHY\n");
- return -ENODEV;
- }
-@@ -1805,11 +1741,11 @@ static int init_phy(struct net_device *dev)
- gfar_configure_serdes(dev);
-
- /* Remove any features not supported by the controller */
-- phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
-- phydev->advertising = phydev->supported;
-+ priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
-+ priv->phydev->advertising = priv->phydev->supported;
-
- /* Add support for flow control, but don't advertise it by default */
-- phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-+ priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
-
- return 0;
- }
-@@ -1844,10 +1780,8 @@ static void gfar_configure_serdes(struct net_device *dev)
- * everything for us? Resetting it takes the link down and requires
- * several seconds for it to come back.
- */
-- if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
-- put_device(&tbiphy->mdio.dev);
-+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
- return;
-- }
-
- /* Single clk mode, mii mode off(for serdes communication) */
- phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
-@@ -1859,8 +1793,6 @@ static void gfar_configure_serdes(struct net_device *dev)
- phy_write(tbiphy, MII_BMCR,
- BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
- BMCR_SPEED1000);
--
-- put_device(&tbiphy->mdio.dev);
- }
-
- static int __gfar_is_rx_idle(struct gfar_private *priv)
-@@ -1953,7 +1885,7 @@ void stop_gfar(struct net_device *dev)
- /* disable ints and gracefully shut down Rx/Tx DMA */
- gfar_halt(priv);
-
-- phy_stop(dev->phydev);
-+ phy_stop(priv->phydev);
-
- free_skb_resources(priv);
- }
-@@ -1990,32 +1922,26 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
-
- static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
- {
-+ struct rxbd8 *rxbdp;
-+ struct gfar_private *priv = netdev_priv(rx_queue->dev);
- int i;
-
-- struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
--
-- if (rx_queue->skb)
-- dev_kfree_skb(rx_queue->skb);
-+ rxbdp = rx_queue->rx_bd_base;
-
- for (i = 0; i < rx_queue->rx_ring_size; i++) {
-- struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
--
-+ if (rx_queue->rx_skbuff[i]) {
-+ dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
-+ priv->rx_buffer_size,
-+ DMA_FROM_DEVICE);
-+ dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
-+ rx_queue->rx_skbuff[i] = NULL;
-+ }
- rxbdp->lstatus = 0;
- rxbdp->bufPtr = 0;
- rxbdp++;
--
-- if (!rxb->page)
-- continue;
--
-- dma_unmap_single(rx_queue->dev, rxb->dma,
-- PAGE_SIZE, DMA_FROM_DEVICE);
-- __free_page(rxb->page);
--
-- rxb->page = NULL;
- }
--
-- kfree(rx_queue->rx_buff);
-- rx_queue->rx_buff = NULL;
-+ kfree(rx_queue->rx_skbuff);
-+ rx_queue->rx_skbuff = NULL;
- }
-
- /* If there are any tx skbs or rx skbs still around, free them.
-@@ -2040,7 +1966,7 @@ static void free_skb_resources(struct gfar_private *priv)
-
- for (i = 0; i < priv->num_rx_queues; i++) {
- rx_queue = priv->rx_queue[i];
-- if (rx_queue->rx_buff)
-+ if (rx_queue->rx_skbuff)
- free_skb_rx_queue(rx_queue);
- }
-
-@@ -2085,7 +2011,7 @@ void gfar_start(struct gfar_private *priv)
-
- gfar_ints_enable(priv);
-
-- netif_trans_update(priv->ndev); /* prevent tx timeout */
-+ priv->ndev->trans_start = jiffies; /* prevent tx timeout */
- }
-
- static void free_grp_irqs(struct gfar_priv_grp *grp)
-@@ -2116,8 +2042,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
-
- goto err_irq_fail;
- }
-- enable_irq_wake(gfar_irq(grp, ER)->irq);
--
- err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
- gfar_irq(grp, TX)->name, grp);
- if (err < 0) {
-@@ -2132,8 +2056,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
- gfar_irq(grp, RX)->irq);
- goto rx_irq_fail;
- }
-- enable_irq_wake(gfar_irq(grp, RX)->irq);
--
- } else {
- err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
- gfar_irq(grp, TX)->name, grp);
-@@ -2142,7 +2064,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
- gfar_irq(grp, TX)->irq);
- goto err_irq_fail;
- }
-- enable_irq_wake(gfar_irq(grp, TX)->irq);
- }
-
- return 0;
-@@ -2208,12 +2129,7 @@ int startup_gfar(struct net_device *ndev)
- /* Start Rx/Tx DMA and enable the interrupts */
- gfar_start(priv);
-
-- /* force link state update after mac reset */
-- priv->oldlink = 0;
-- priv->oldspeed = 0;
-- priv->oldduplex = -1;
--
-- phy_start(ndev->phydev);
-+ phy_start(priv->phydev);
-
- enable_napi(priv);
-
-@@ -2242,6 +2158,8 @@ static int gfar_enet_open(struct net_device *dev)
- if (err)
- return err;
-
-+ device_set_wakeup_enable(&dev->dev, priv->wol_en);
-+
- return err;
- }
-
-@@ -2283,7 +2201,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
- fcb->flags = flags;
- }
-
--static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
-+void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
- {
- fcb->flags |= TXFCB_VLN;
- fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
-@@ -2333,10 +2251,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
- struct txfcb *fcb = NULL;
- struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
- u32 lstatus;
-- skb_frag_t *frag;
- int i, rq = 0;
- int do_tstamp, do_csum, do_vlan;
- u32 bufaddr;
-+ unsigned long flags;
- unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
-
- rq = skb->queue_mapping;
-@@ -2401,6 +2319,52 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
- txbdp = txbdp_start = tx_queue->cur_tx;
- lstatus = be32_to_cpu(txbdp->lstatus);
-
-+ /* Time stamp insertion requires one additional TxBD */
-+ if (unlikely(do_tstamp))
-+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
-+ tx_queue->tx_ring_size);
-+
-+ if (nr_frags == 0) {
-+ if (unlikely(do_tstamp)) {
-+ u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
-+
-+ lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-+ txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
-+ } else {
-+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-+ }
-+ } else {
-+ /* Place the fragment addresses and lengths into the TxBDs */
-+ for (i = 0; i < nr_frags; i++) {
-+ unsigned int frag_len;
-+ /* Point at the next BD, wrapping as needed */
-+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-+
-+ frag_len = skb_shinfo(skb)->frags[i].size;
-+
-+ lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
-+ BD_LFLAG(TXBD_READY);
-+
-+ /* Handle the last BD specially */
-+ if (i == nr_frags - 1)
-+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-+
-+ bufaddr = skb_frag_dma_map(priv->dev,
-+ &skb_shinfo(skb)->frags[i],
-+ 0,
-+ frag_len,
-+ DMA_TO_DEVICE);
-+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
-+ goto dma_map_err;
-+
-+ /* set the TxBD length and buffer pointer */
-+ txbdp->bufPtr = cpu_to_be32(bufaddr);
-+ txbdp->lstatus = cpu_to_be32(lstatus);
-+ }
-+
-+ lstatus = be32_to_cpu(txbdp_start->lstatus);
-+ }
-+
- /* Add TxPAL between FCB and frame if required */
- if (unlikely(do_tstamp)) {
- skb_push(skb, GMAC_TXPAL_LEN);
-@@ -2435,6 +2399,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
- if (do_vlan)
- gfar_tx_vlan(skb, fcb);
-
-+ /* Setup tx hardware time stamping if requested */
-+ if (unlikely(do_tstamp)) {
-+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-+ fcb->ptp = 1;
-+ }
-+
- bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
-@@ -2442,47 +2412,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
-
- txbdp_start->bufPtr = cpu_to_be32(bufaddr);
-
-- /* Time stamp insertion requires one additional TxBD */
-- if (unlikely(do_tstamp))
-- txbdp_tstamp = txbdp = next_txbd(txbdp, base,
-- tx_queue->tx_ring_size);
--
-- if (likely(!nr_frags)) {
-- if (likely(!do_tstamp))
-- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-- } else {
-- u32 lstatus_start = lstatus;
--
-- /* Place the fragment addresses and lengths into the TxBDs */
-- frag = &skb_shinfo(skb)->frags[0];
-- for (i = 0; i < nr_frags; i++, frag++) {
-- unsigned int size;
--
-- /* Point at the next BD, wrapping as needed */
-- txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
--
-- size = skb_frag_size(frag);
--
-- lstatus = be32_to_cpu(txbdp->lstatus) | size |
-- BD_LFLAG(TXBD_READY);
--
-- /* Handle the last BD specially */
-- if (i == nr_frags - 1)
-- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
--
-- bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
-- size, DMA_TO_DEVICE);
-- if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
-- goto dma_map_err;
--
-- /* set the TxBD length and buffer pointer */
-- txbdp->bufPtr = cpu_to_be32(bufaddr);
-- txbdp->lstatus = cpu_to_be32(lstatus);
-- }
--
-- lstatus = lstatus_start;
-- }
--
- /* If time stamping is requested one additional TxBD must be set up. The
- * first TxBD points to the FCB and must have a data length of
- * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
-@@ -2493,25 +2422,31 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
-
- bufaddr = be32_to_cpu(txbdp_start->bufPtr);
- bufaddr += fcb_len;
--
- lstatus_ts |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_len);
-- if (!nr_frags)
-- lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-
- txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
- txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
- lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
--
-- /* Setup tx hardware time stamping */
-- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-- fcb->ptp = 1;
- } else {
- lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
- }
-
- netdev_tx_sent_queue(txq, bytes_sent);
-
-+ /* We can work in parallel with gfar_clean_tx_ring(), except
-+ * when modifying num_txbdfree. Note that we didn't grab the lock
-+ * when we were reading the num_txbdfree and checking for available
-+ * space, that's because outside of this function it can only grow,
-+ * and once we've got needed space, it cannot suddenly disappear.
-+ *
-+ * The lock also protects us from gfar_error(), which can modify
-+ * regs->tstat and thus retrigger the transfers, which is why we
-+ * also must grab the lock before setting ready bit for the first
-+ * to be transmitted BD.
-+ */
-+ spin_lock_irqsave(&tx_queue->txlock, flags);
-+
- gfar_wmb();
-
- txbdp_start->lstatus = cpu_to_be32(lstatus);
-@@ -2528,15 +2463,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
-
- tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-
-- /* We can work in parallel with gfar_clean_tx_ring(), except
-- * when modifying num_txbdfree. Note that we didn't grab the lock
-- * when we were reading the num_txbdfree and checking for available
-- * space, that's because outside of this function it can only grow.
-- */
-- spin_lock_bh(&tx_queue->txlock);
- /* reduce TxBD free count */
- tx_queue->num_txbdfree -= (nr_txbds);
-- spin_unlock_bh(&tx_queue->txlock);
-
- /* If the next BD still needs to be cleaned up, then the bds
- * are full. We need to tell the kernel to stop sending us stuff.
-@@ -2550,6 +2478,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
- /* Tell the DMA to go go go */
- gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
-
-+ /* Unlock priv */
-+ spin_unlock_irqrestore(&tx_queue->txlock, flags);
-+
- return NETDEV_TX_OK;
-
- dma_map_err:
-@@ -2582,7 +2513,8 @@ static int gfar_close(struct net_device *dev)
- stop_gfar(dev);
-
- /* Disconnect from the PHY */
-- phy_disconnect(dev->phydev);
-+ phy_disconnect(priv->phydev);
-+ priv->phydev = NULL;
-
- gfar_free_irq(priv);
-
-@@ -2602,7 +2534,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
- struct gfar_private *priv = netdev_priv(dev);
- int frame_size = new_mtu + ETH_HLEN;
-
-- if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
-+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
- netif_err(priv, drv, dev, "Invalid MTU setting\n");
- return -EINVAL;
- }
-@@ -2656,6 +2588,15 @@ static void gfar_timeout(struct net_device *dev)
- schedule_work(&priv->reset_task);
- }
-
-+static void gfar_align_skb(struct sk_buff *skb)
-+{
-+ /* We need the data buffer to be aligned properly. We will reserve
-+ * as many bytes as needed to align the data properly
-+ */
-+ skb_reserve(skb, RXBUF_ALIGNMENT -
-+ (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-+}
-+
- /* Interrupt Handler for Transmit complete */
- static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
- {
-@@ -2681,6 +2622,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
- skb_dirtytx = tx_queue->skb_dirtytx;
-
- while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
-+ unsigned long flags;
-
- frags = skb_shinfo(skb)->nr_frags;
-
-@@ -2713,11 +2655,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
-
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
- struct skb_shared_hwtstamps shhwtstamps;
-- u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
-- ~0x7UL);
-+ u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-- shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
-+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
- skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
- skb_tstamp_tx(skb, &shhwtstamps);
- gfar_clear_txbd_status(bdp);
-@@ -2745,9 +2686,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
- TX_RING_MOD_MASK(tx_ring_size);
-
- howmany++;
-- spin_lock(&tx_queue->txlock);
-+ spin_lock_irqsave(&tx_queue->txlock, flags);
- tx_queue->num_txbdfree += nr_txbds;
-- spin_unlock(&tx_queue->txlock);
-+ spin_unlock_irqrestore(&tx_queue->txlock, flags);
- }
-
- /* If we freed a buffer, we can restart transmission, if necessary */
-@@ -2763,85 +2704,49 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
- netdev_tx_completed_queue(txq, howmany, bytes_sent);
- }
-
--static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
-+static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
- {
-- struct page *page;
-- dma_addr_t addr;
--
-- page = dev_alloc_page();
-- if (unlikely(!page))
-- return false;
--
-- addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-- if (unlikely(dma_mapping_error(rxq->dev, addr))) {
-- __free_page(page);
--
-- return false;
-- }
-+ struct gfar_private *priv = netdev_priv(dev);
-+ struct sk_buff *skb;
-
-- rxb->dma = addr;
-- rxb->page = page;
-- rxb->page_offset = 0;
-+ skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
-+ if (!skb)
-+ return NULL;
-
-- return true;
--}
-+ gfar_align_skb(skb);
-
--static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
--{
-- struct gfar_private *priv = netdev_priv(rx_queue->ndev);
-- struct gfar_extra_stats *estats = &priv->extra_stats;
--
-- netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
-- atomic64_inc(&estats->rx_alloc_err);
-+ return skb;
- }
-
--static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
-- int alloc_cnt)
-+static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
- {
-- struct rxbd8 *bdp;
-- struct gfar_rx_buff *rxb;
-- int i;
--
-- i = rx_queue->next_to_use;
-- bdp = &rx_queue->rx_bd_base[i];
-- rxb = &rx_queue->rx_buff[i];
--
-- while (alloc_cnt--) {
-- /* try reuse page */
-- if (unlikely(!rxb->page)) {
-- if (unlikely(!gfar_new_page(rx_queue, rxb))) {
-- gfar_rx_alloc_err(rx_queue);
-- break;
-- }
-- }
-+ struct gfar_private *priv = netdev_priv(dev);
-+ struct sk_buff *skb;
-+ dma_addr_t addr;
-
-- /* Setup the new RxBD */
-- gfar_init_rxbdp(rx_queue, bdp,
-- rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
-+ skb = gfar_alloc_skb(dev);
-+ if (!skb)
-+ return NULL;
-
-- /* Update to the next pointer */
-- bdp++;
-- rxb++;
--
-- if (unlikely(++i == rx_queue->rx_ring_size)) {
-- i = 0;
-- bdp = rx_queue->rx_bd_base;
-- rxb = rx_queue->rx_buff;
-- }
-+ addr = dma_map_single(priv->dev, skb->data,
-+ priv->rx_buffer_size, DMA_FROM_DEVICE);
-+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
-+ dev_kfree_skb_any(skb);
-+ return NULL;
- }
-
-- rx_queue->next_to_use = i;
-- rx_queue->next_to_alloc = i;
-+ *bufaddr = addr;
-+ return skb;
- }
-
--static void count_errors(u32 lstatus, struct net_device *ndev)
-+static inline void count_errors(unsigned short status, struct net_device *dev)
- {
-- struct gfar_private *priv = netdev_priv(ndev);
-- struct net_device_stats *stats = &ndev->stats;
-+ struct gfar_private *priv = netdev_priv(dev);
-+ struct net_device_stats *stats = &dev->stats;
- struct gfar_extra_stats *estats = &priv->extra_stats;
-
- /* If the packet was truncated, none of the other errors matter */
-- if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
-+ if (status & RXBD_TRUNCATED) {
- stats->rx_length_errors++;
-
- atomic64_inc(&estats->rx_trunc);
-@@ -2849,25 +2754,25 @@ static void count_errors(u32 lstatus, struct net_device *ndev)
- return;
- }
- /* Count the errors, if there were any */
-- if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
-+ if (status & (RXBD_LARGE | RXBD_SHORT)) {
- stats->rx_length_errors++;
-
-- if (lstatus & BD_LFLAG(RXBD_LARGE))
-+ if (status & RXBD_LARGE)
- atomic64_inc(&estats->rx_large);
- else
- atomic64_inc(&estats->rx_short);
- }
-- if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
-+ if (status & RXBD_NONOCTET) {
- stats->rx_frame_errors++;
- atomic64_inc(&estats->rx_nonoctet);
- }
-- if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
-+ if (status & RXBD_CRCERR) {
- atomic64_inc(&estats->rx_crcerr);
- stats->rx_crc_errors++;
- }
-- if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
-+ if (status & RXBD_OVERRUN) {
- atomic64_inc(&estats->rx_overrun);
-- stats->rx_over_errors++;
-+ stats->rx_crc_errors++;
- }
- }
-
-@@ -2875,14 +2780,7 @@ irqreturn_t gfar_receive(int irq, void *grp_id)
- {
- struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
- unsigned long flags;
-- u32 imask, ievent;
--
-- ievent = gfar_read(&grp->regs->ievent);
--
-- if (unlikely(ievent & IEVENT_FGPI)) {
-- gfar_write(&grp->regs->ievent, IEVENT_FGPI);
-- return IRQ_HANDLED;
-- }
-+ u32 imask;
-
- if (likely(napi_schedule_prep(&grp->napi_rx))) {
- spin_lock_irqsave(&grp->grplock, flags);
-@@ -2925,101 +2823,6 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
- return IRQ_HANDLED;
- }
-
--static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
-- struct sk_buff *skb, bool first)
--{
-- unsigned int size = lstatus & BD_LENGTH_MASK;
-- struct page *page = rxb->page;
-- bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
--
-- /* Remove the FCS from the packet length */
-- if (last)
-- size -= ETH_FCS_LEN;
--
-- if (likely(first)) {
-- skb_put(skb, size);
-- } else {
-- /* the last fragments' length contains the full frame length */
-- if (last)
-- size -= skb->len;
--
-- /* in case the last fragment consisted only of the FCS */
-- if (size > 0)
-- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-- rxb->page_offset + RXBUF_ALIGNMENT,
-- size, GFAR_RXB_TRUESIZE);
-- }
--
-- /* try reuse page */
-- if (unlikely(page_count(page) != 1))
-- return false;
--
-- /* change offset to the other half */
-- rxb->page_offset ^= GFAR_RXB_TRUESIZE;
--
-- page_ref_inc(page);
--
-- return true;
--}
--
--static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
-- struct gfar_rx_buff *old_rxb)
--{
-- struct gfar_rx_buff *new_rxb;
-- u16 nta = rxq->next_to_alloc;
--
-- new_rxb = &rxq->rx_buff[nta];
--
-- /* find next buf that can reuse a page */
-- nta++;
-- rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
--
-- /* copy page reference */
-- *new_rxb = *old_rxb;
--
-- /* sync for use by the device */
-- dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
-- old_rxb->page_offset,
-- GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
--}
--
--static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
-- u32 lstatus, struct sk_buff *skb)
--{
-- struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
-- struct page *page = rxb->page;
-- bool first = false;
--
-- if (likely(!skb)) {
-- void *buff_addr = page_address(page) + rxb->page_offset;
--
-- skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
-- if (unlikely(!skb)) {
-- gfar_rx_alloc_err(rx_queue);
-- return NULL;
-- }
-- skb_reserve(skb, RXBUF_ALIGNMENT);
-- first = true;
-- }
--
-- dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
-- GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
--
-- if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
-- /* reuse the free half of the page */
-- gfar_reuse_rx_page(rx_queue, rxb);
-- } else {
-- /* page cannot be reused, unmap it */
-- dma_unmap_page(rx_queue->dev, rxb->dma,
-- PAGE_SIZE, DMA_FROM_DEVICE);
-- }
--
-- /* clear rxb content */
-- rxb->page = NULL;
--
-- return skb;
--}
--
- static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
- {
- /* If valid headers were found, and valid sums
-@@ -3034,9 +2837,10 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
- }
-
- /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
--static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
-+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-+ int amount_pull, struct napi_struct *napi)
- {
-- struct gfar_private *priv = netdev_priv(ndev);
-+ struct gfar_private *priv = netdev_priv(dev);
- struct rxfcb *fcb = NULL;
-
- /* fcb is at the beginning if exists */
-@@ -3045,8 +2849,10 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
- /* Remove the FCB from the skb
- * Remove the padded bytes, if there are any
- */
-- if (priv->uses_rxfcb)
-- skb_pull(skb, GMAC_FCB_LEN);
-+ if (amount_pull) {
-+ skb_record_rx_queue(skb, fcb->rq);
-+ skb_pull(skb, amount_pull);
-+ }
-
- /* Get receive timestamp from the skb */
- if (priv->hwts_rx_en) {
-@@ -3054,26 +2860,30 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
- u64 *ns = (u64 *) skb->data;
-
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-- shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
-+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
- }
-
- if (priv->padding)
- skb_pull(skb, priv->padding);
-
-- if (ndev->features & NETIF_F_RXCSUM)
-+ if (dev->features & NETIF_F_RXCSUM)
- gfar_rx_checksum(skb, fcb);
-
- /* Tell the skb what kind of packet this is */
-- skb->protocol = eth_type_trans(skb, ndev);
-+ skb->protocol = eth_type_trans(skb, dev);
-
- /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
- * Even if vlan rx accel is disabled, on some chips
- * RXFCB_VLN is pseudo randomly set.
- */
-- if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- be16_to_cpu(fcb->flags) & RXFCB_VLN)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- be16_to_cpu(fcb->vlctl));
-+
-+ /* Send the packet up the stack */
-+ napi_gro_receive(napi, skb);
-+
- }
-
- /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
-@@ -3082,89 +2892,91 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
- */
- int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
- {
-- struct net_device *ndev = rx_queue->ndev;
-- struct gfar_private *priv = netdev_priv(ndev);
-- struct rxbd8 *bdp;
-- int i, howmany = 0;
-- struct sk_buff *skb = rx_queue->skb;
-- int cleaned_cnt = gfar_rxbd_unused(rx_queue);
-- unsigned int total_bytes = 0, total_pkts = 0;
-+ struct net_device *dev = rx_queue->dev;
-+ struct rxbd8 *bdp, *base;
-+ struct sk_buff *skb;
-+ int pkt_len;
-+ int amount_pull;
-+ int howmany = 0;
-+ struct gfar_private *priv = netdev_priv(dev);
-
- /* Get the first full descriptor */
-- i = rx_queue->next_to_clean;
-+ bdp = rx_queue->cur_rx;
-+ base = rx_queue->rx_bd_base;
-
-- while (rx_work_limit--) {
-- u32 lstatus;
-+ amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
-
-- if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
-- gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
-- cleaned_cnt = 0;
-- }
--
-- bdp = &rx_queue->rx_bd_base[i];
-- lstatus = be32_to_cpu(bdp->lstatus);
-- if (lstatus & BD_LFLAG(RXBD_EMPTY))
-- break;
-+ while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
-+ struct sk_buff *newskb;
-+ dma_addr_t bufaddr;
-
-- /* order rx buffer descriptor reads */
- rmb();
-
-- /* fetch next to clean buffer from the ring */
-- skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
-- if (unlikely(!skb))
-- break;
--
-- cleaned_cnt++;
-- howmany++;
-+ /* Add another skb for the future */
-+ newskb = gfar_new_skb(dev, &bufaddr);
-
-- if (unlikely(++i == rx_queue->rx_ring_size))
-- i = 0;
-+ skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
-
-- rx_queue->next_to_clean = i;
--
-- /* fetch next buffer if not the last in frame */
-- if (!(lstatus & BD_LFLAG(RXBD_LAST)))
-- continue;
-+ dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
-+ priv->rx_buffer_size, DMA_FROM_DEVICE);
-+
-+ if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
-+ be16_to_cpu(bdp->length) > priv->rx_buffer_size))
-+ bdp->status = cpu_to_be16(RXBD_LARGE);
-+
-+ /* We drop the frame if we failed to allocate a new buffer */
-+ if (unlikely(!newskb ||
-+ !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
-+ be16_to_cpu(bdp->status) & RXBD_ERR)) {
-+ count_errors(be16_to_cpu(bdp->status), dev);
-+
-+ if (unlikely(!newskb)) {
-+ newskb = skb;
-+ bufaddr = be32_to_cpu(bdp->bufPtr);
-+ } else if (skb)
-+ dev_kfree_skb(skb);
-+ } else {
-+ /* Increment the number of packets */
-+ rx_queue->stats.rx_packets++;
-+ howmany++;
-+
-+ if (likely(skb)) {
-+ pkt_len = be16_to_cpu(bdp->length) -
-+ ETH_FCS_LEN;
-+ /* Remove the FCS from the packet length */
-+ skb_put(skb, pkt_len);
-+ rx_queue->stats.rx_bytes += pkt_len;
-+ skb_record_rx_queue(skb, rx_queue->qindex);
-+ gfar_process_frame(dev, skb, amount_pull,
-+ &rx_queue->grp->napi_rx);
-
-- if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
-- count_errors(lstatus, ndev);
-+ } else {
-+ netif_warn(priv, rx_err, dev, "Missing skb!\n");
-+ rx_queue->stats.rx_dropped++;
-+ atomic64_inc(&priv->extra_stats.rx_skbmissing);
-+ }
-
-- /* discard faulty buffer */
-- dev_kfree_skb(skb);
-- skb = NULL;
-- rx_queue->stats.rx_dropped++;
-- continue;
- }
-
-- /* Increment the number of packets */
-- total_pkts++;
-- total_bytes += skb->len;
-+ rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
-
-- skb_record_rx_queue(skb, rx_queue->qindex);
-+ /* Setup the new bdp */
-+ gfar_init_rxbdp(rx_queue, bdp, bufaddr);
-
-- gfar_process_frame(ndev, skb);
-+ /* Update Last Free RxBD pointer for LFC */
-+ if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
-+ gfar_write(rx_queue->rfbptr, (u32)bdp);
-
-- /* Send the packet up the stack */
-- napi_gro_receive(&rx_queue->grp->napi_rx, skb);
-+ /* Update to the next pointer */
-+ bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
-
-- skb = NULL;
-+ /* update to point at the next skb */
-+ rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
-+ RX_RING_MOD_MASK(rx_queue->rx_ring_size);
- }
-
-- /* Store incomplete frames for completion */
-- rx_queue->skb = skb;
--
-- rx_queue->stats.rx_packets += total_pkts;
-- rx_queue->stats.rx_bytes += total_bytes;
--
-- if (cleaned_cnt)
-- gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
--
-- /* Update Last Free RxBD pointer for LFC */
-- if (unlikely(priv->tx_actual_en)) {
-- u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
--
-- gfar_write(rx_queue->rfbptr, bdp_dma);
-- }
-+ /* Update the current rxbd pointer to be the next one */
-+ rx_queue->cur_rx = bdp;
-
- return howmany;
- }
-@@ -3396,7 +3208,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
- static void adjust_link(struct net_device *dev)
- {
- struct gfar_private *priv = netdev_priv(dev);
-- struct phy_device *phydev = dev->phydev;
-+ struct phy_device *phydev = priv->phydev;
-
- if (unlikely(phydev->link != priv->oldlink ||
- (phydev->link && (phydev->duplex != priv->oldduplex ||
-@@ -3599,19 +3411,30 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
- if (events & IEVENT_CRL)
- dev->stats.tx_aborted_errors++;
- if (events & IEVENT_XFUN) {
-+ unsigned long flags;
-+
- netif_dbg(priv, tx_err, dev,
- "TX FIFO underrun, packet dropped\n");
- dev->stats.tx_dropped++;
- atomic64_inc(&priv->extra_stats.tx_underrun);
-
-- schedule_work(&priv->reset_task);
-+ local_irq_save(flags);
-+ lock_tx_qs(priv);
-+
-+ /* Reactivate the Tx Queues */
-+		/* Reactivate the Tx Queues */
-+		gfar_write(&regs->tstat, gfargrp->tstat);
-+
-+ unlock_tx_qs(priv);
-+ local_irq_restore(flags);
- }
- netif_dbg(priv, tx_err, dev, "Transmit Error\n");
- }
- if (events & IEVENT_BSY) {
-- dev->stats.rx_over_errors++;
-+ dev->stats.rx_errors++;
- atomic64_inc(&priv->extra_stats.rx_bsy);
-
-+ gfar_receive(irq, grp_id);
-+
- netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
-			  gfar_read(&regs->rstat));
- }
-@@ -3637,8 +3460,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
-
- static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
- {
-- struct net_device *ndev = priv->ndev;
-- struct phy_device *phydev = ndev->phydev;
-+ struct phy_device *phydev = priv->phydev;
- u32 val = 0;
-
- if (!phydev->duplex)
-@@ -3678,10 +3500,10 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
- static noinline void gfar_update_link_state(struct gfar_private *priv)
- {
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
-- struct net_device *ndev = priv->ndev;
-- struct phy_device *phydev = ndev->phydev;
-+ struct phy_device *phydev = priv->phydev;
- struct gfar_priv_rx_q *rx_queue = NULL;
- int i;
-+ struct rxbd8 *bdp;
-
- if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
- return;
-@@ -3738,11 +3560,15 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
- /* Turn last free buffer recording on */
- if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
- for (i = 0; i < priv->num_rx_queues; i++) {
-- u32 bdp_dma;
--
- rx_queue = priv->rx_queue[i];
-- bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
-- gfar_write(rx_queue->rfbptr, bdp_dma);
-+ bdp = rx_queue->cur_rx;
-+ /* skip to previous bd */
-+ bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
-+ rx_queue->rx_bd_base,
-+ rx_queue->rx_ring_size);
-+
-+ if (rx_queue->rfbptr)
-+ gfar_write(rx_queue->rfbptr, (u32)bdp);
- }
-
- priv->tx_actual_en = 1;
-diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
-index 6e8a9c8..daa1d37 100644
---- a/drivers/net/ethernet/freescale/gianfar.h
-+++ b/drivers/net/ethernet/freescale/gianfar.h
-@@ -71,6 +71,11 @@ struct ethtool_rx_list {
- /* Number of bytes to align the rx bufs to */
- #define RXBUF_ALIGNMENT 64
-
-+/* The number of bytes which composes a unit for the purpose of
-+ * allocating data buffers. ie-for any given MTU, the data buffer
-+ * will be the next highest multiple of 512 bytes. */
-+#define INCREMENTAL_BUFFER_SIZE 512
-+
- #define PHY_INIT_TIMEOUT 100000
-
- #define DRV_NAME "gfar-enet"
-@@ -87,8 +92,6 @@ extern const char gfar_driver_version[];
- #define DEFAULT_TX_RING_SIZE 256
- #define DEFAULT_RX_RING_SIZE 256
-
--#define GFAR_RX_BUFF_ALLOC 16
--
- #define GFAR_RX_MAX_RING_SIZE 256
- #define GFAR_TX_MAX_RING_SIZE 256
-
-@@ -100,15 +103,11 @@ extern const char gfar_driver_version[];
- #define DEFAULT_RX_LFC_THR 16
- #define DEFAULT_LFC_PTVVAL 4
-
--/* prevent fragmenation by HW in DSA environments */
--#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
--#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
-- + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
--#define GFAR_RXB_TRUESIZE 2048
--
-+#define DEFAULT_RX_BUFFER_SIZE 1536
- #define TX_RING_MOD_MASK(size) (size-1)
- #define RX_RING_MOD_MASK(size) (size-1)
--#define GFAR_JUMBO_FRAME_SIZE 9600
-+#define JUMBO_BUFFER_SIZE 9728
-+#define JUMBO_FRAME_SIZE 9600
-
- #define DEFAULT_FIFO_TX_THR 0x100
- #define DEFAULT_FIFO_TX_STARVE 0x40
-@@ -341,7 +340,6 @@ extern const char gfar_driver_version[];
- #define IEVENT_MAG 0x00000800
- #define IEVENT_GRSC 0x00000100
- #define IEVENT_RXF0 0x00000080
--#define IEVENT_FGPI 0x00000010
- #define IEVENT_FIR 0x00000008
- #define IEVENT_FIQ 0x00000004
- #define IEVENT_DPE 0x00000002
-@@ -374,7 +372,6 @@ extern const char gfar_driver_version[];
- #define IMASK_MAG 0x00000800
- #define IMASK_GRSC 0x00000100
- #define IMASK_RXFEN0 0x00000080
--#define IMASK_FGPI 0x00000010
- #define IMASK_FIR 0x00000008
- #define IMASK_FIQ 0x00000004
- #define IMASK_DPE 0x00000002
-@@ -543,9 +540,6 @@ extern const char gfar_driver_version[];
-
- #define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
-
--#define GFAR_WOL_MAGIC 0x00000001
--#define GFAR_WOL_FILER_UCAST 0x00000002
--
- struct txbd8
- {
- union {
-@@ -646,7 +640,6 @@ struct rmon_mib
- };
-
- struct gfar_extra_stats {
-- atomic64_t rx_alloc_err;
- atomic64_t rx_large;
- atomic64_t rx_short;
- atomic64_t rx_nonoctet;
-@@ -658,6 +651,7 @@ struct gfar_extra_stats {
- atomic64_t eberr;
- atomic64_t tx_babt;
- atomic64_t tx_underrun;
-+ atomic64_t rx_skbmissing;
- atomic64_t tx_timeout;
- };
-
-@@ -923,8 +917,6 @@ struct gfar {
- #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
- #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
- #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
--#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
--#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000
-
- #if (MAXGROUPS == 2)
- #define DEFAULT_MAPPING 0xAA
-@@ -1020,42 +1012,34 @@ struct rx_q_stats {
- unsigned long rx_dropped;
- };
-
--struct gfar_rx_buff {
-- dma_addr_t dma;
-- struct page *page;
-- unsigned int page_offset;
--};
--
- /**
- * struct gfar_priv_rx_q - per rx queue structure
-- * @rx_buff: Array of buffer info metadata structs
-+ * @rx_skbuff: skb pointers
-+ * @skb_currx: currently use skb pointer
- * @rx_bd_base: First rx buffer descriptor
-- * @next_to_use: index of the next buffer to be alloc'd
-- * @next_to_clean: index of the next buffer to be cleaned
-+ * @cur_rx: Next free rx ring entry
- * @qindex: index of this queue
-- * @ndev: back pointer to net_device
-+ * @dev: back pointer to the dev structure
- * @rx_ring_size: Rx ring size
- * @rxcoalescing: enable/disable rx-coalescing
- * @rxic: receive interrupt coalescing vlaue
- */
-
- struct gfar_priv_rx_q {
-- struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
-+ struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
-+ dma_addr_t rx_bd_dma_base;
- struct rxbd8 *rx_bd_base;
-- struct net_device *ndev;
-- struct device *dev;
-- u16 rx_ring_size;
-- u16 qindex;
-- struct gfar_priv_grp *grp;
-- u16 next_to_clean;
-- u16 next_to_use;
-- u16 next_to_alloc;
-- struct sk_buff *skb;
-+ struct rxbd8 *cur_rx;
-+ struct net_device *dev;
-+ struct gfar_priv_grp *grp;
- struct rx_q_stats stats;
-- u32 __iomem *rfbptr;
-+ u16 skb_currx;
-+ u16 qindex;
-+ unsigned int rx_ring_size;
-+ /* RX Coalescing values */
- unsigned char rxcoalescing;
- unsigned long rxic;
-- dma_addr_t rx_bd_dma_base;
-+ u32 __iomem *rfbptr;
- };
-
- enum gfar_irqinfo_id {
-@@ -1125,6 +1109,7 @@ struct gfar_private {
- struct device *dev;
- struct net_device *ndev;
- enum gfar_errata errata;
-+ unsigned int rx_buffer_size;
-
- u16 uses_rxfcb;
- u16 padding;
-@@ -1154,11 +1139,15 @@ struct gfar_private {
- phy_interface_t interface;
- struct device_node *phy_node;
- struct device_node *tbi_node;
-+ struct phy_device *phydev;
- struct mii_bus *mii_bus;
- int oldspeed;
- int oldduplex;
- int oldlink;
-
-+ /* Bitfield update lock */
-+ spinlock_t bflock;
-+
- uint32_t msg_enable;
-
- struct work_struct reset_task;
-@@ -1168,6 +1157,8 @@ struct gfar_private {
- extended_hash:1,
- bd_stash_en:1,
- rx_filer_enable:1,
-+ /* Wake-on-LAN enabled */
-+ wol_en:1,
- /* Enable priorty based Tx scheduling in Hw */
- prio_sched_en:1,
- /* Flow control flags */
-@@ -1196,10 +1187,6 @@ struct gfar_private {
- u32 __iomem *hash_regs[16];
- int hash_width;
-
-- /* wake-on-lan settings */
-- u16 wol_opts;
-- u16 wol_supported;
--
- /*Filer table*/
- unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
- unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
-@@ -1308,28 +1295,6 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
- bdp->lstatus = cpu_to_be32(lstatus);
- }
-
--static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
--{
-- if (rxq->next_to_clean > rxq->next_to_use)
-- return rxq->next_to_clean - rxq->next_to_use - 1;
--
-- return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
--}
--
--static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
--{
-- struct rxbd8 *bdp;
-- u32 bdp_dma;
-- int i;
--
-- i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
-- bdp = &rxq->rx_bd_base[i];
-- bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
-- bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
--
-- return bdp_dma;
--}
--
- irqreturn_t gfar_receive(int irq, void *dev_id);
- int startup_gfar(struct net_device *dev);
- void stop_gfar(struct net_device *dev);
-diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
-index 56588f2..fda12fb 100644
---- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
-+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
-@@ -61,8 +61,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo);
-
- static const char stat_gstrings[][ETH_GSTRING_LEN] = {
-- /* extra stats */
-- "rx-allocation-errors",
- "rx-large-frame-errors",
- "rx-short-frame-errors",
- "rx-non-octet-errors",
-@@ -74,8 +72,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
- "ethernet-bus-error",
- "tx-babbling-errors",
- "tx-underrun-errors",
-+ "rx-skb-missing-errors",
- "tx-timeout-errors",
-- /* rmon stats */
- "tx-rx-64-frames",
- "tx-rx-65-127-frames",
- "tx-rx-128-255-frames",
-@@ -182,6 +180,42 @@ static void gfar_gdrvinfo(struct net_device *dev,
- sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
-+ drvinfo->regdump_len = 0;
-+ drvinfo->eedump_len = 0;
-+}
-+
-+
-+static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
-+{
-+ struct gfar_private *priv = netdev_priv(dev);
-+ struct phy_device *phydev = priv->phydev;
-+
-+ if (NULL == phydev)
-+ return -ENODEV;
-+
-+ return phy_ethtool_sset(phydev, cmd);
-+}
-+
-+
-+/* Return the current settings in the ethtool_cmd structure */
-+static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-+{
-+ struct gfar_private *priv = netdev_priv(dev);
-+ struct phy_device *phydev = priv->phydev;
-+ struct gfar_priv_rx_q *rx_queue = NULL;
-+ struct gfar_priv_tx_q *tx_queue = NULL;
-+
-+ if (NULL == phydev)
-+ return -ENODEV;
-+ tx_queue = priv->tx_queue[0];
-+ rx_queue = priv->rx_queue[0];
-+
-+ /* etsec-1.7 and older versions have only one txic
-+ * and rxic regs although they support multiple queues */
-+ cmd->maxtxpkt = get_icft_value(tx_queue->txic);
-+ cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
-+
-+ return phy_ethtool_gset(phydev, cmd);
- }
-
- /* Return the length of the register structure */
-@@ -208,12 +242,10 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
- unsigned int usecs)
- {
-- struct net_device *ndev = priv->ndev;
-- struct phy_device *phydev = ndev->phydev;
- unsigned int count;
-
- /* The timer is different, depending on the interface speed */
-- switch (phydev->speed) {
-+ switch (priv->phydev->speed) {
- case SPEED_1000:
- count = GFAR_GBIT_TIME;
- break;
-@@ -235,12 +267,10 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
- static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
- unsigned int ticks)
- {
-- struct net_device *ndev = priv->ndev;
-- struct phy_device *phydev = ndev->phydev;
- unsigned int count;
-
- /* The timer is different, depending on the interface speed */
-- switch (phydev->speed) {
-+ switch (priv->phydev->speed) {
- case SPEED_1000:
- count = GFAR_GBIT_TIME;
- break;
-@@ -274,7 +304,7 @@ static int gfar_gcoalesce(struct net_device *dev,
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
- return -EOPNOTSUPP;
-
-- if (!dev->phydev)
-+ if (NULL == priv->phydev)
- return -ENODEV;
-
- rx_queue = priv->rx_queue[0];
-@@ -335,7 +365,7 @@ static int gfar_scoalesce(struct net_device *dev,
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
- return -EOPNOTSUPP;
-
-- if (!dev->phydev)
-+ if (NULL == priv->phydev)
- return -ENODEV;
-
- /* Check the bounds of the values */
-@@ -499,7 +529,7 @@ static int gfar_spauseparam(struct net_device *dev,
- struct ethtool_pauseparam *epause)
- {
- struct gfar_private *priv = netdev_priv(dev);
-- struct phy_device *phydev = dev->phydev;
-+ struct phy_device *phydev = priv->phydev;
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 oldadv, newadv;
-
-@@ -612,49 +642,31 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
- {
- struct gfar_private *priv = netdev_priv(dev);
-
-- wol->supported = 0;
-- wol->wolopts = 0;
--
-- if (priv->wol_supported & GFAR_WOL_MAGIC)
-- wol->supported |= WAKE_MAGIC;
--
-- if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
-- wol->supported |= WAKE_UCAST;
--
-- if (priv->wol_opts & GFAR_WOL_MAGIC)
-- wol->wolopts |= WAKE_MAGIC;
--
-- if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
-- wol->wolopts |= WAKE_UCAST;
-+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
-+ wol->supported = WAKE_MAGIC;
-+ wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
-+ } else {
-+ wol->supported = wol->wolopts = 0;
-+ }
- }
-
- static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
- {
- struct gfar_private *priv = netdev_priv(dev);
-- u16 wol_opts = 0;
-- int err;
-+ unsigned long flags;
-
-- if (!priv->wol_supported && wol->wolopts)
-+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
-+ wol->wolopts != 0)
- return -EINVAL;
-
-- if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
-+ if (wol->wolopts & ~WAKE_MAGIC)
- return -EINVAL;
-
-- if (wol->wolopts & WAKE_MAGIC) {
-- wol_opts |= GFAR_WOL_MAGIC;
-- } else {
-- if (wol->wolopts & WAKE_UCAST)
-- wol_opts |= GFAR_WOL_FILER_UCAST;
-- }
--
-- wol_opts &= priv->wol_supported;
-- priv->wol_opts = 0;
--
-- err = device_set_wakeup_enable(priv->dev, wol_opts);
-- if (err)
-- return err;
-+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
-
-- priv->wol_opts = wol_opts;
-+ spin_lock_irqsave(&priv->bflock, flags);
-+ priv->wol_en = !!device_may_wakeup(&dev->dev);
-+ spin_unlock_irqrestore(&priv->bflock, flags);
-
- return 0;
- }
-@@ -665,14 +677,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
- u32 fcr = 0x0, fpr = FPR_FILER_MASK;
-
- if (ethflow & RXH_L2DA) {
-- fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
-+ fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
- priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
- priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
- gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
- priv->cur_filer_idx = priv->cur_filer_idx - 1;
-
-- fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
-+ fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
- priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
- priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
-@@ -891,6 +903,27 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
- return 0;
- }
-
-+static int gfar_comp_asc(const void *a, const void *b)
-+{
-+ return memcmp(a, b, 4);
-+}
-+
-+static int gfar_comp_desc(const void *a, const void *b)
-+{
-+ return -memcmp(a, b, 4);
-+}
-+
-+static void gfar_swap(void *a, void *b, int size)
-+{
-+ u32 *_a = a;
-+ u32 *_b = b;
-+
-+ swap(_a[0], _b[0]);
-+ swap(_a[1], _b[1]);
-+ swap(_a[2], _b[2]);
-+ swap(_a[3], _b[3]);
-+}
-+
- /* Write a mask to filer cache */
- static void gfar_set_mask(u32 mask, struct filer_table *tab)
- {
-@@ -1240,6 +1273,310 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
- return 0;
- }
-
-+/* Copy size filer entries */
-+static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
-+ struct gfar_filer_entry src[0], s32 size)
-+{
-+ while (size > 0) {
-+ size--;
-+ dst[size].ctrl = src[size].ctrl;
-+ dst[size].prop = src[size].prop;
-+ }
-+}
-+
-+/* Delete the contents of the filer-table between start and end
-+ * and collapse them
-+ */
-+static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-+{
-+ int length;
-+
-+ if (end > MAX_FILER_CACHE_IDX || end < begin)
-+ return -EINVAL;
-+
-+ end++;
-+ length = end - begin;
-+
-+ /* Copy */
-+ while (end < tab->index) {
-+ tab->fe[begin].ctrl = tab->fe[end].ctrl;
-+ tab->fe[begin++].prop = tab->fe[end++].prop;
-+
-+ }
-+ /* Fill up with don't cares */
-+ while (begin < tab->index) {
-+ tab->fe[begin].ctrl = 0x60;
-+ tab->fe[begin].prop = 0xFFFFFFFF;
-+ begin++;
-+ }
-+
-+ tab->index -= length;
-+ return 0;
-+}
-+
-+/* Make space on the wanted location */
-+static int gfar_expand_filer_entries(u32 begin, u32 length,
-+ struct filer_table *tab)
-+{
-+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
-+ begin > MAX_FILER_CACHE_IDX)
-+ return -EINVAL;
-+
-+ gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
-+ tab->index - length + 1);
-+
-+ tab->index += length;
-+ return 0;
-+}
-+
-+static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-+{
-+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
-+ start++) {
-+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
-+ (RQFCR_AND | RQFCR_CLE))
-+ return start;
-+ }
-+ return -1;
-+}
-+
-+static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-+{
-+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
-+ start++) {
-+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
-+ (RQFCR_CLE))
-+ return start;
-+ }
-+ return -1;
-+}
-+
-+/* Uses hardwares clustering option to reduce
-+ * the number of filer table entries
-+ */
-+static void gfar_cluster_filer(struct filer_table *tab)
-+{
-+ s32 i = -1, j, iend, jend;
-+
-+ while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
-+ j = i;
-+ while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
-+ /* The cluster entries self and the previous one
-+ * (a mask) must be identical!
-+ */
-+ if (tab->fe[i].ctrl != tab->fe[j].ctrl)
-+ break;
-+ if (tab->fe[i].prop != tab->fe[j].prop)
-+ break;
-+ if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
-+ break;
-+ if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
-+ break;
-+ iend = gfar_get_next_cluster_end(i, tab);
-+ jend = gfar_get_next_cluster_end(j, tab);
-+ if (jend == -1 || iend == -1)
-+ break;
-+
-+ /* First we make some free space, where our cluster
-+ * element should be. Then we copy it there and finally
-+ * delete in from its old location.
-+ */
-+ if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
-+ -EINVAL)
-+ break;
-+
-+ gfar_copy_filer_entries(&(tab->fe[iend + 1]),
-+ &(tab->fe[jend + 1]), jend - j);
-+
-+ if (gfar_trim_filer_entries(jend - 1,
-+ jend + (jend - j),
-+ tab) == -EINVAL)
-+ return;
-+
-+ /* Mask out cluster bit */
-+ tab->fe[iend].ctrl &= ~(RQFCR_CLE);
-+ }
-+ }
-+}
-+
-+/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-+static void gfar_swap_bits(struct gfar_filer_entry *a1,
-+ struct gfar_filer_entry *a2,
-+ struct gfar_filer_entry *b1,
-+ struct gfar_filer_entry *b2, u32 mask)
-+{
-+ u32 temp[4];
-+ temp[0] = a1->ctrl & mask;
-+ temp[1] = a2->ctrl & mask;
-+ temp[2] = b1->ctrl & mask;
-+ temp[3] = b2->ctrl & mask;
-+
-+ a1->ctrl &= ~mask;
-+ a2->ctrl &= ~mask;
-+ b1->ctrl &= ~mask;
-+ b2->ctrl &= ~mask;
-+
-+ a1->ctrl |= temp[1];
-+ a2->ctrl |= temp[0];
-+ b1->ctrl |= temp[3];
-+ b2->ctrl |= temp[2];
-+}
-+
-+/* Generate a list consisting of masks values with their start and
-+ * end of validity and block as indicator for parts belonging
-+ * together (glued by ANDs) in mask_table
-+ */
-+static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
-+ struct filer_table *tab)
-+{
-+ u32 i, and_index = 0, block_index = 1;
-+
-+ for (i = 0; i < tab->index; i++) {
-+
-+ /* LSByte of control = 0 sets a mask */
-+ if (!(tab->fe[i].ctrl & 0xF)) {
-+ mask_table[and_index].mask = tab->fe[i].prop;
-+ mask_table[and_index].start = i;
-+ mask_table[and_index].block = block_index;
-+ if (and_index >= 1)
-+ mask_table[and_index - 1].end = i - 1;
-+ and_index++;
-+ }
-+ /* cluster starts and ends will be separated because they should
-+ * hold their position
-+ */
-+ if (tab->fe[i].ctrl & RQFCR_CLE)
-+ block_index++;
-+ /* A not set AND indicates the end of a depended block */
-+ if (!(tab->fe[i].ctrl & RQFCR_AND))
-+ block_index++;
-+ }
-+
-+ mask_table[and_index - 1].end = i - 1;
-+
-+ return and_index;
-+}
-+
-+/* Sorts the entries of mask_table by the values of the masks.
-+ * Important: The 0xFF80 flags of the first and last entry of a
-+ * block must hold their position (which queue, CLusterEnable, ReJEct,
-+ * AND)
-+ */
-+static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
-+ struct filer_table *temp_table, u32 and_index)
-+{
-+ /* Pointer to compare function (_asc or _desc) */
-+ int (*gfar_comp)(const void *, const void *);
-+
-+ u32 i, size = 0, start = 0, prev = 1;
-+ u32 old_first, old_last, new_first, new_last;
-+
-+ gfar_comp = &gfar_comp_desc;
-+
-+ for (i = 0; i < and_index; i++) {
-+ if (prev != mask_table[i].block) {
-+ old_first = mask_table[start].start + 1;
-+ old_last = mask_table[i - 1].end;
-+ sort(mask_table + start, size,
-+ sizeof(struct gfar_mask_entry),
-+ gfar_comp, &gfar_swap);
-+
-+ /* Toggle order for every block. This makes the
-+ * thing more efficient!
-+ */
-+ if (gfar_comp == gfar_comp_desc)
-+ gfar_comp = &gfar_comp_asc;
-+ else
-+ gfar_comp = &gfar_comp_desc;
-+
-+ new_first = mask_table[start].start + 1;
-+ new_last = mask_table[i - 1].end;
-+
-+ gfar_swap_bits(&temp_table->fe[new_first],
-+ &temp_table->fe[old_first],
-+ &temp_table->fe[new_last],
-+ &temp_table->fe[old_last],
-+ RQFCR_QUEUE | RQFCR_CLE |
-+ RQFCR_RJE | RQFCR_AND);
-+
-+ start = i;
-+ size = 0;
-+ }
-+ size++;
-+ prev = mask_table[i].block;
-+ }
-+}
-+
-+/* Reduces the number of masks needed in the filer table to save entries
-+ * This is done by sorting the masks of a depended block. A depended block is
-+ * identified by gluing ANDs or CLE. The sorting order toggles after every
-+ * block. Of course entries in scope of a mask must change their location with
-+ * it.
-+ */
-+static int gfar_optimize_filer_masks(struct filer_table *tab)
-+{
-+ struct filer_table *temp_table;
-+ struct gfar_mask_entry *mask_table;
-+
-+ u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
-+ s32 ret = 0;
-+
-+ /* We need a copy of the filer table because
-+ * we want to change its order
-+ */
-+ temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
-+ if (temp_table == NULL)
-+ return -ENOMEM;
-+
-+ mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
-+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
-+
-+ if (mask_table == NULL) {
-+ ret = -ENOMEM;
-+ goto end;
-+ }
-+
-+ and_index = gfar_generate_mask_table(mask_table, tab);
-+
-+ gfar_sort_mask_table(mask_table, temp_table, and_index);
-+
-+ /* Now we can copy the data from our duplicated filer table to
-+ * the real one in the order the mask table says
-+ */
-+ for (i = 0; i < and_index; i++) {
-+ size = mask_table[i].end - mask_table[i].start + 1;
-+ gfar_copy_filer_entries(&(tab->fe[j]),
-+ &(temp_table->fe[mask_table[i].start]), size);
-+ j += size;
-+ }
-+
-+ /* And finally we just have to check for duplicated masks and drop the
-+ * second ones
-+ */
-+ for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
-+ if (tab->fe[i].ctrl == 0x80) {
-+ previous_mask = i++;
-+ break;
-+ }
-+ }
-+ for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
-+ if (tab->fe[i].ctrl == 0x80) {
-+ if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
-+ /* Two identical ones found!
-+ * So drop the second one!
-+ */
-+ gfar_trim_filer_entries(i, i, tab);
-+ } else
-+ /* Not identical! */
-+ previous_mask = i;
-+ }
-+ }
-+
-+ kfree(mask_table);
-+end: kfree(temp_table);
-+ return ret;
-+}
-+
- /* Write the bit-pattern from software's buffer to hardware registers */
- static int gfar_write_filer_table(struct gfar_private *priv,
- struct filer_table *tab)
-@@ -1249,10 +1586,11 @@ static int gfar_write_filer_table(struct gfar_private *priv,
- return -EBUSY;
-
- /* Fill regular entries */
-- for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
-+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
-+ i++)
- gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
- /* Fill the rest with fall-troughs */
-- for (; i < MAX_FILER_IDX; i++)
-+ for (; i < MAX_FILER_IDX - 1; i++)
- gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
- /* Last entry must be default accept
- * because that's what people expect
-@@ -1286,6 +1624,7 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
- {
- struct ethtool_flow_spec_container *j;
- struct filer_table *tab;
-+ s32 i = 0;
- s32 ret = 0;
-
- /* So index is set to zero, too! */
-@@ -1310,6 +1649,17 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
- }
- }
-
-+ i = tab->index;
-+
-+ /* Optimizations to save entries */
-+ gfar_cluster_filer(tab);
-+ gfar_optimize_filer_masks(tab);
-+
-+ pr_debug("\tSummary:\n"
-+ "\tData on hardware: %d\n"
-+ "\tCompression rate: %d%%\n",
-+ tab->index, 100 - (100 * tab->index) / i);
-+
- /* Write everything to hardware */
- ret = gfar_write_filer_table(priv, tab);
- if (ret == -EBUSY) {
-@@ -1375,14 +1725,13 @@ static int gfar_add_cls(struct gfar_private *priv,
- }
-
- process:
-- priv->rx_list.count++;
- ret = gfar_process_filer_changes(priv);
- if (ret)
- goto clean_list;
-+ priv->rx_list.count++;
- return ret;
-
- clean_list:
-- priv->rx_list.count--;
- list_del(&temp->list);
- clean_mem:
- kfree(temp);
-@@ -1535,6 +1884,8 @@ static int gfar_get_ts_info(struct net_device *dev,
- }
-
- const struct ethtool_ops gfar_ethtool_ops = {
-+ .get_settings = gfar_gsettings,
-+ .set_settings = gfar_ssettings,
- .get_drvinfo = gfar_gdrvinfo,
- .get_regs_len = gfar_reglen,
- .get_regs = gfar_get_regs,
-@@ -1557,6 +1908,4 @@ const struct ethtool_ops gfar_ethtool_ops = {
- .set_rxnfc = gfar_set_nfc,
- .get_rxnfc = gfar_get_nfc,
- .get_ts_info = gfar_get_ts_info,
-- .get_link_ksettings = phy_ethtool_get_link_ksettings,
-- .set_link_ksettings = phy_ethtool_set_link_ksettings,
- };
-diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
-index 5779881..8e3cd77 100644
---- a/drivers/net/ethernet/freescale/gianfar_ptp.c
-+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
-@@ -422,6 +422,19 @@ static struct ptp_clock_info ptp_gianfar_caps = {
- .enable = ptp_gianfar_enable,
- };
-
-+/* OF device tree */
-+
-+static int get_of_u32(struct device_node *node, char *str, u32 *val)
-+{
-+ int plen;
-+ const u32 *prop = of_get_property(node, str, &plen);
-+
-+ if (!prop || plen != sizeof(*prop))
-+ return -1;
-+ *val = *prop;
-+ return 0;
-+}
-+
- static int gianfar_ptp_probe(struct platform_device *dev)
- {
- struct device_node *node = dev->dev.of_node;
-@@ -439,28 +452,22 @@ static int gianfar_ptp_probe(struct platform_device *dev)
-
- etsects->caps = ptp_gianfar_caps;
-
-- if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel))
-+ if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
- etsects->cksel = DEFAULT_CKSEL;
-
-- if (of_property_read_u32(node,
-- "fsl,tclk-period", &etsects->tclk_period) ||
-- of_property_read_u32(node,
-- "fsl,tmr-prsc", &etsects->tmr_prsc) ||
-- of_property_read_u32(node,
-- "fsl,tmr-add", &etsects->tmr_add) ||
-- of_property_read_u32(node,
-- "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
-- of_property_read_u32(node,
-- "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
-- of_property_read_u32(node,
-- "fsl,max-adj", &etsects->caps.max_adj)) {
-+ if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
-+ get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
-+ get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
-+ get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
-+ get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
-+ get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
- pr_err("device tree node missing required elements\n");
- goto no_node;
- }
-
- etsects->irq = platform_get_irq(dev, 0);
-
-- if (etsects->irq < 0) {
-+ if (etsects->irq == NO_IRQ) {
- pr_err("irq not in device tree\n");
- goto no_node;
- }
-@@ -550,7 +557,6 @@ static const struct of_device_id match_table[] = {
- { .compatible = "fsl,etsec-ptp" },
- {},
- };
--MODULE_DEVICE_TABLE(of, match_table);
-
- static struct platform_driver gianfar_ptp_driver = {
- .driver = {
-diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
-index f76d332..4dd40e0 100644
---- a/drivers/net/ethernet/freescale/ucc_geth.c
-+++ b/drivers/net/ethernet/freescale/ucc_geth.c
-@@ -40,10 +40,10 @@
- #include <asm/uaccess.h>
- #include <asm/irq.h>
- #include <asm/io.h>
--#include <soc/fsl/qe/immap_qe.h>
--#include <soc/fsl/qe/qe.h>
--#include <soc/fsl/qe/ucc.h>
--#include <soc/fsl/qe/ucc_fast.h>
-+#include <asm/immap_qe.h>
-+#include <asm/qe.h>
-+#include <asm/ucc.h>
-+#include <asm/ucc_fast.h>
- #include <asm/machdep.h>
-
- #include "ucc_geth.h"
-@@ -1384,8 +1384,6 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
- value = phy_read(tbiphy, ENET_TBI_MII_CR);
- value &= ~0x1000; /* Turn off autonegotiation */
- phy_write(tbiphy, ENET_TBI_MII_CR, value);
--
-- put_device(&tbiphy->mdio.dev);
- }
-
- init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
-@@ -1704,10 +1702,8 @@ static void uec_configure_serdes(struct net_device *dev)
- * everything for us? Resetting it takes the link down and requires
- * several seconds for it to come back.
- */
-- if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
-- put_device(&tbiphy->mdio.dev);
-+ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
- return;
-- }
-
- /* Single clk mode, mii mode off(for serdes communication) */
- phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
-@@ -1715,8 +1711,6 @@ static void uec_configure_serdes(struct net_device *dev)
- phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
-
- phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
--
-- put_device(&tbiphy->mdio.dev);
- }
-
- /* Configure the PHY for dev.
-@@ -3756,7 +3750,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
-- pr_err("invalid rx-clock property\n");
-+ pr_err("invalid rx-clock propperty\n");
- return -EINVAL;
- }
- ug_info->uf_info.rx_clock = *prop;
-@@ -3868,8 +3862,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
- dev = alloc_etherdev(sizeof(*ugeth));
-
- if (dev == NULL) {
-- err = -ENOMEM;
-- goto err_deregister_fixed_link;
-+ of_node_put(ug_info->tbi_node);
-+ of_node_put(ug_info->phy_node);
-+ return -ENOMEM;
- }
-
- ugeth = netdev_priv(dev);
-@@ -3906,7 +3901,10 @@ static int ucc_geth_probe(struct platform_device* ofdev)
- if (netif_msg_probe(ugeth))
- pr_err("%s: Cannot register net device, aborting\n",
- dev->name);
-- goto err_free_netdev;
-+ free_netdev(dev);
-+ of_node_put(ug_info->tbi_node);
-+ of_node_put(ug_info->phy_node);
-+ return err;
- }
-
- mac_addr = of_get_mac_address(np);
-@@ -3919,29 +3917,16 @@ static int ucc_geth_probe(struct platform_device* ofdev)
- ugeth->node = np;
-
- return 0;
--
--err_free_netdev:
-- free_netdev(dev);
--err_deregister_fixed_link:
-- if (of_phy_is_fixed_link(np))
-- of_phy_deregister_fixed_link(np);
-- of_node_put(ug_info->tbi_node);
-- of_node_put(ug_info->phy_node);
--
-- return err;
- }
-
- static int ucc_geth_remove(struct platform_device* ofdev)
- {
- struct net_device *dev = platform_get_drvdata(ofdev);
- struct ucc_geth_private *ugeth = netdev_priv(dev);
-- struct device_node *np = ofdev->dev.of_node;
-
- unregister_netdev(dev);
- free_netdev(dev);
- ucc_geth_memclean(ugeth);
-- if (of_phy_is_fixed_link(np))
-- of_phy_deregister_fixed_link(np);
- of_node_put(ugeth->ug_info->tbi_node);
- of_node_put(ugeth->ug_info->phy_node);
-
-diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
-index 5da19b4..75f3371 100644
---- a/drivers/net/ethernet/freescale/ucc_geth.h
-+++ b/drivers/net/ethernet/freescale/ucc_geth.h
-@@ -22,11 +22,11 @@
- #include <linux/list.h>
- #include <linux/if_ether.h>
-
--#include <soc/fsl/qe/immap_qe.h>
--#include <soc/fsl/qe/qe.h>
-+#include <asm/immap_qe.h>
-+#include <asm/qe.h>
-
--#include <soc/fsl/qe/ucc.h>
--#include <soc/fsl/qe/ucc_fast.h>
-+#include <asm/ucc.h>
-+#include <asm/ucc_fast.h>
-
- #define DRV_DESC "QE UCC Gigabit Ethernet Controller"
- #define DRV_NAME "ucc_geth"
-diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
-index 812a968..cc83350 100644
---- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
-+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
-@@ -105,20 +105,23 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
- #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
-
- static int
--uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
-+uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
- {
- struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
-+ struct ucc_geth_info *ug_info = ugeth->ug_info;
-
- if (!phydev)
- return -ENODEV;
-
-- return phy_ethtool_ksettings_get(phydev, cmd);
-+ ecmd->maxtxpkt = 1;
-+ ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
-+
-+ return phy_ethtool_gset(phydev, ecmd);
- }
-
- static int
--uec_set_ksettings(struct net_device *netdev,
-- const struct ethtool_link_ksettings *cmd)
-+uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
- {
- struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
-@@ -126,7 +129,7 @@ uec_set_ksettings(struct net_device *netdev,
- if (!phydev)
- return -ENODEV;
-
-- return phy_ethtool_ksettings_set(phydev, cmd);
-+ return phy_ethtool_sset(phydev, ecmd);
- }
-
- static void
-@@ -348,6 +351,8 @@ uec_get_drvinfo(struct net_device *netdev,
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
-+ drvinfo->eedump_len = 0;
-+ drvinfo->regdump_len = uec_get_regs_len(netdev);
- }
-
- #ifdef CONFIG_PM
-@@ -389,6 +394,8 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
- #endif /* CONFIG_PM */
-
- static const struct ethtool_ops uec_ethtool_ops = {
-+ .get_settings = uec_get_settings,
-+ .set_settings = uec_set_settings,
- .get_drvinfo = uec_get_drvinfo,
- .get_regs_len = uec_get_regs_len,
- .get_regs = uec_get_regs,
-@@ -406,8 +413,6 @@ static const struct ethtool_ops uec_ethtool_ops = {
- .get_wol = uec_get_wol,
- .set_wol = uec_set_wol,
- .get_ts_info = ethtool_op_get_ts_info,
-- .get_link_ksettings = uec_get_ksettings,
-- .set_link_ksettings = uec_set_ksettings,
- };
-
- void uec_set_ethtool_ops(struct net_device *netdev)
-diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
-index e03b30c..7b8fe86 100644
---- a/drivers/net/ethernet/freescale/xgmac_mdio.c
-+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
-@@ -271,8 +271,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
- goto err_ioremap;
- }
-
-- priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
-- "little-endian");
-+ if (of_get_property(pdev->dev.of_node,
-+ "little-endian", NULL))
-+ priv->is_little_endian = true;
-+ else
-+ priv->is_little_endian = false;
-
- ret = of_mdiobus_register(bus, np);
- if (ret) {