]> git.kernelconcepts.de Git - meta-kc-bsp.git/blob - recipes-kernel/linux/linux-karo-4.9.11/ethernet-update-driver.patch
linux-karo: Add initial version of linux-karo 4.9.11 based on NXP kernel tree
[meta-kc-bsp.git] / recipes-kernel / linux / linux-karo-4.9.11 / ethernet-update-driver.patch
1 diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
2 index 2204c57..25e3425 100644
3 --- a/drivers/net/ethernet/freescale/Kconfig
4 +++ b/drivers/net/ethernet/freescale/Kconfig
5 @@ -7,10 +7,11 @@ config NET_VENDOR_FREESCALE
6         default y
7         depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
8                    M523x || M527x || M5272 || M528x || M520x || M532x || \
9 -                  ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
10 -                  ARCH_LAYERSCAPE
11 +                  ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
12         ---help---
13 -         If you have a network (Ethernet) card belonging to this class, say Y.
14 +         If you have a network (Ethernet) card belonging to this class, say Y
15 +         and read the Ethernet-HOWTO, available from
16 +         <http://www.tldp.org/docs.html#howto>.
17  
18           Note that the answer to this question doesn't directly affect the
19           kernel: saying N will just cause the configurator to skip all
20 @@ -22,8 +23,8 @@ if NET_VENDOR_FREESCALE
21  config FEC
22         tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
23         depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
24 -                  ARM || ARM64)
25 -       default y
26 +                  ARCH_MXC || SOC_IMX28)
27 +       default ARCH_MXC || SOC_IMX28 if ARM
28         select PHYLIB
29         select PTP_1588_CLOCK
30         ---help---
31 @@ -54,7 +55,6 @@ config FEC_MPC52xx_MDIO
32           If compiled as module, it will be called fec_mpc52xx_phy.
33  
34  source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
35 -source "drivers/net/ethernet/freescale/fman/Kconfig"
36  
37  config FSL_PQ_MDIO
38         tristate "Freescale PQ MDIO"
39 @@ -85,12 +85,12 @@ config UGETH_TX_ON_DEMAND
40  
41  config GIANFAR
42         tristate "Gianfar Ethernet"
43 +       depends on FSL_SOC
44         select FSL_PQ_MDIO
45         select PHYLIB
46         select CRC32
47         ---help---
48           This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
49 -         and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
50 -         on the 8540.
51 +         and MPC86xx family of chips, and the FEC on the 8540.
52  
53  endif # NET_VENDOR_FREESCALE
54 diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
55 index 7f022dd..71debd1 100644
56 --- a/drivers/net/ethernet/freescale/Makefile
57 +++ b/drivers/net/ethernet/freescale/Makefile
58 @@ -3,10 +3,7 @@
59  #
60  
61  obj-$(CONFIG_FEC) += fec.o
62 -fec-objs :=fec_main.o fec_fixup.o fec_ptp.o
63 -CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
64 -CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
65 -
66 +fec-objs :=fec_main.o fec_ptp.o
67  obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
68  ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
69         obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
70 @@ -20,5 +17,3 @@ gianfar_driver-objs := gianfar.o \
71                 gianfar_ethtool.o
72  obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
73  ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
74 -
75 -obj-$(CONFIG_FSL_FMAN) += fman/
76 diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
77 index 1d7b3cc..ecdc711 100644
78 --- a/drivers/net/ethernet/freescale/fec.h
79 +++ b/drivers/net/ethernet/freescale/fec.h
80 @@ -20,8 +20,8 @@
81  #include <linux/timecounter.h>
82  
83  #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
84 -    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
85 -    defined(CONFIG_ARM64)
86 +    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
87 +    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
88  /*
89   *     Just figures, Motorola would have to change the offsets for
90   *     registers in the same peripheral device on different models
91 @@ -192,45 +192,28 @@
92  
93  /*
94   *     Define the buffer descriptor structure.
95 - *
96 - *     Evidently, ARM SoCs have the FEC block generated in a
97 - *     little endian mode so adjust endianness accordingly.
98   */
99 -#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
100 -#define fec32_to_cpu le32_to_cpu
101 -#define fec16_to_cpu le16_to_cpu
102 -#define cpu_to_fec32 cpu_to_le32
103 -#define cpu_to_fec16 cpu_to_le16
104 -#define __fec32 __le32
105 -#define __fec16 __le16
106 -
107 +#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
108  struct bufdesc {
109 -       __fec16 cbd_datlen;     /* Data length */
110 -       __fec16 cbd_sc;         /* Control and status info */
111 -       __fec32 cbd_bufaddr;    /* Buffer address */
112 +       unsigned short cbd_datlen;      /* Data length */
113 +       unsigned short cbd_sc;  /* Control and status info */
114 +       unsigned long cbd_bufaddr;      /* Buffer address */
115  };
116  #else
117 -#define fec32_to_cpu be32_to_cpu
118 -#define fec16_to_cpu be16_to_cpu
119 -#define cpu_to_fec32 cpu_to_be32
120 -#define cpu_to_fec16 cpu_to_be16
121 -#define __fec32 __be32
122 -#define __fec16 __be16
123 -
124  struct bufdesc {
125 -       __fec16 cbd_sc;         /* Control and status info */
126 -       __fec16 cbd_datlen;     /* Data length */
127 -       __fec32 cbd_bufaddr;    /* Buffer address */
128 +       unsigned short  cbd_sc;                 /* Control and status info */
129 +       unsigned short  cbd_datlen;             /* Data length */
130 +       unsigned long   cbd_bufaddr;            /* Buffer address */
131  };
132  #endif
133  
134  struct bufdesc_ex {
135         struct bufdesc desc;
136 -       __fec32 cbd_esc;
137 -       __fec32 cbd_prot;
138 -       __fec32 cbd_bdu;
139 -       __fec32 ts;
140 -       __fec16 res0[4];
141 +       unsigned long cbd_esc;
142 +       unsigned long cbd_prot;
143 +       unsigned long cbd_bdu;
144 +       unsigned long ts;
145 +       unsigned short res0[4];
146  };
147  
148  /*
149 @@ -294,7 +277,7 @@ struct bufdesc_ex {
150  
151  
152  /* This device has up to three irqs on some platforms */
153 -#define FEC_IRQ_NUM            4
154 +#define FEC_IRQ_NUM            3
155  
156  /* Maximum number of queues supported
157   * ENET with AVB IP can support up to 3 independent tx queues and rx queues.
158 @@ -312,6 +295,12 @@ struct bufdesc_ex {
159  #define FEC_R_BUFF_SIZE(X)     (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
160                                 (((X) == 2) ? \
161                                         FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
162 +#define FEC_R_DES_ACTIVE(X)    (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
163 +                               (((X) == 2) ? \
164 +                                  FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
165 +#define FEC_X_DES_ACTIVE(X)    (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
166 +                               (((X) == 2) ? \
167 +                                  FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
168  
169  #define FEC_DMA_CFG(X)         (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
170  
171 @@ -379,7 +368,6 @@ struct bufdesc_ex {
172  #define FEC_ENET_TS_TIMER       ((uint)0x00008000)
173  
174  #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
175 -#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
176  #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
177  
178  #define FEC_ENET_ETHEREN       ((uint)0x00000002)
179 @@ -448,32 +436,12 @@ struct bufdesc_ex {
180  #define FEC_QUIRK_SINGLE_MDIO          (1 << 11)
181  /* Controller supports RACC register */
182  #define FEC_QUIRK_HAS_RACC             (1 << 12)
183 -/* Controller supports interrupt coalesc */
184 -#define FEC_QUIRK_HAS_COALESCE         (1 << 13)
185 -/* Interrupt doesn't wake CPU from deep idle */
186 -#define FEC_QUIRK_ERR006687            (1 << 14)
187  /*
188   * i.MX6Q/DL ENET cannot wake up system in wait mode because ENET tx & rx
189   * interrupt signal don't connect to GPC. So use pm qos to avoid cpu enter
190   * to wait mode.
191   */
192 -#define FEC_QUIRK_BUG_WAITMODE         (1 << 15)
193 -
194 -/* PHY fixup flag define */
195 -#define FEC_QUIRK_AR8031_FIXUP         (1 << 0)
196 -
197 -struct bufdesc_prop {
198 -       int qid;
199 -       /* Address of Rx and Tx buffers */
200 -       struct bufdesc  *base;
201 -       struct bufdesc  *last;
202 -       struct bufdesc  *cur;
203 -       void __iomem    *reg_desc_active;
204 -       dma_addr_t      dma;
205 -       unsigned short ring_size;
206 -       unsigned char dsize;
207 -       unsigned char dsize_log2;
208 -};
209 +#define FEC_QUIRK_BUG_WAITMODE         (1 << 13)
210  
211  struct fec_enet_stop_mode {
212         struct regmap *gpr;
213 @@ -482,21 +450,32 @@ struct fec_enet_stop_mode {
214  };
215  
216  struct fec_enet_priv_tx_q {
217 -       struct bufdesc_prop bd;
218 +       int index;
219         unsigned char *tx_bounce[TX_RING_SIZE];
220         struct  sk_buff *tx_skbuff[TX_RING_SIZE];
221  
222 +       dma_addr_t      bd_dma;
223 +       struct bufdesc  *tx_bd_base;
224 +       uint tx_ring_size;
225 +
226         unsigned short tx_stop_threshold;
227         unsigned short tx_wake_threshold;
228  
229 +       struct bufdesc  *cur_tx;
230         struct bufdesc  *dirty_tx;
231         char *tso_hdrs;
232         dma_addr_t tso_hdrs_dma;
233  };
234  
235  struct fec_enet_priv_rx_q {
236 -       struct bufdesc_prop bd;
237 +       int index;
238         struct  sk_buff *rx_skbuff[RX_RING_SIZE];
239 +
240 +       dma_addr_t      bd_dma;
241 +       struct bufdesc  *rx_bd_base;
242 +       uint rx_ring_size;
243 +
244 +       struct bufdesc  *cur_rx;
245  };
246  
247  /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
248 @@ -536,20 +515,22 @@ struct fec_enet_private {
249         unsigned long work_ts;
250         unsigned long work_mdio;
251  
252 +       unsigned short bufdesc_size;
253 +
254         struct  platform_device *pdev;
255  
256         int     dev_id;
257  
258         /* Phylib and MDIO interface */
259         struct  mii_bus *mii_bus;
260 +       struct  phy_device *phy_dev;
261         int     mii_timeout;
262         int     mii_bus_share;
263 -       bool    active_in_suspend;
264 +       bool    miibus_up_failed;
265         uint    phy_speed;
266         phy_interface_t phy_interface;
267         struct device_node *phy_node;
268         int     link;
269 -       bool    fixed_link;
270         int     full_duplex;
271         int     speed;
272         struct  completion mdio_done;
273 @@ -559,7 +540,8 @@ struct fec_enet_private {
274         int     wol_flag;
275         int     wake_irq;
276         u32     quirks;
277 -       u32     fixups;
278 +       int phy_reset_gpio;
279 +       int phy_reset_duration;
280  
281         struct  napi_struct napi;
282         int     csum_flags;
283 @@ -602,19 +584,14 @@ struct fec_enet_private {
284         int pps_enable;
285         unsigned int next_counter;
286  
287 -       u64 ethtool_stats[0];
288 -
289         struct fec_enet_stop_mode gpr;
290  };
291  
292  void fec_ptp_init(struct platform_device *pdev);
293 -void fec_ptp_stop(struct platform_device *pdev);
294  void fec_ptp_start_cyclecounter(struct net_device *ndev);
295  int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
296  int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
297  uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
298 -void fec_enet_register_fixup(struct net_device *ndev);
299 -int of_fec_enet_parse_fixup(struct device_node *np);
300  
301  /****************************************************************************/
302  #endif /* FEC_H */
303 diff --git a/drivers/net/ethernet/freescale/fec_fixup.c b/drivers/net/ethernet/freescale/fec_fixup.c
304 deleted file mode 100644
305 index 5a8497c..0000000
306 --- a/drivers/net/ethernet/freescale/fec_fixup.c
307 +++ /dev/null
308 @@ -1,74 +0,0 @@
309 -/*
310 - * Copyright 2017 NXP
311 - *
312 - * This program is free software; you can redistribute it and/or
313 - * modify it under the terms of the GNU General Public License
314 - * as published by the Free Software Foundation; either version 2
315 - * of the License, or (at your option) any later version.
316 - *
317 - * This program is distributed in the hope that it will be useful,
318 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
319 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
320 - * GNU General Public License for more details.
321 - */
322 -
323 -#include <linux/netdevice.h>
324 -#include <linux/phy.h>
325 -#include "fec.h"
326 -
327 -#define PHY_ID_AR8031   0x004dd074
328 -
329 -static int ar8031_phy_fixup(struct phy_device *dev)
330 -{
331 -       u16 val;
332 -
333 -       /* Set RGMII IO voltage to 1.8V */
334 -       phy_write(dev, 0x1d, 0x1f);
335 -       phy_write(dev, 0x1e, 0x8);
336 -
337 -       /* Disable phy AR8031 SmartEEE function */
338 -       phy_write(dev, 0xd, 0x3);
339 -       phy_write(dev, 0xe, 0x805d);
340 -       phy_write(dev, 0xd, 0x4003);
341 -       val = phy_read(dev, 0xe);
342 -       val &= ~(0x1 << 8);
343 -       phy_write(dev, 0xe, val);
344 -
345 -       /* Introduce tx clock delay */
346 -       phy_write(dev, 0x1d, 0x5);
347 -       phy_write(dev, 0x1e, 0x100);
348 -
349 -       return 0;
350 -}
351 -
352 -void fec_enet_register_fixup(struct net_device *ndev)
353 -{
354 -       struct fec_enet_private *fep = netdev_priv(ndev);
355 -       static int registered = 0;
356 -       int err;
357 -
358 -       if (!IS_BUILTIN(CONFIG_PHYLIB))
359 -               return;
360 -
361 -       if (fep->fixups & FEC_QUIRK_AR8031_FIXUP) {
362 -               static int ar8031_registered = 0;
363 -
364 -               if (ar8031_registered)
365 -                       return;
366 -               err = phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
367 -                                       ar8031_phy_fixup);
368 -               if (err)
369 -                       netdev_info(ndev, "Cannot register PHY board fixup\n");
370 -               registered = 1;
371 -       }
372 -}
373 -
374 -int of_fec_enet_parse_fixup(struct device_node *np)
375 -{
376 -       int fixups = 0;
377 -
378 -       if (of_get_property(np, "fsl,ar8031-phy-fixup", NULL))
379 -               fixups |= FEC_QUIRK_AR8031_FIXUP;
380 -
381 -       return fixups;
382 -}
383 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
384 index 41a31f2..15c06df 100644
385 --- a/drivers/net/ethernet/freescale/fec_main.c
386 +++ b/drivers/net/ethernet/freescale/fec_main.c
387 @@ -19,8 +19,6 @@
388   * Copyright (c) 2004-2006 Macq Electronique SA.
389   *
390   * Copyright (C) 2010-2014 Freescale Semiconductor, Inc.
391 - *
392 - * Copyright 2017 NXP
393   */
394  
395  #include <linux/module.h>
396 @@ -48,9 +46,7 @@
397  #include <linux/io.h>
398  #include <linux/irq.h>
399  #include <linux/clk.h>
400 -#include <linux/clk/clk-conf.h>
401  #include <linux/platform_device.h>
402 -#include <linux/mdio.h>
403  #include <linux/phy.h>
404  #include <linux/fec.h>
405  #include <linux/of.h>
406 @@ -68,12 +64,12 @@
407  #include <linux/regmap.h>
408  
409  #include <asm/cacheflush.h>
410 -#include <soc/imx/cpuidle.h>
411  
412  #include "fec.h"
413  
414  static void set_multicast_list(struct net_device *ndev);
415  static void fec_enet_itr_coal_init(struct net_device *ndev);
416 +static void fec_reset_phy(struct platform_device *pdev);
417  
418  #define DRIVER_NAME    "fec"
419  
420 @@ -87,7 +83,6 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {1, 1, 1, 1, 2, 2, 2, 2};
421  #define FEC_ENET_RAEM_V        0x8
422  #define FEC_ENET_RAFL_V        0x8
423  #define FEC_ENET_OPD_V 0xFFF0
424 -#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
425  
426  static struct platform_device_id fec_devtype[] = {
427         {
428 @@ -96,10 +91,10 @@ static struct platform_device_id fec_devtype[] = {
429                 .driver_data = 0,
430         }, {
431                 .name = "imx25-fec",
432 -               .driver_data = FEC_QUIRK_USE_GASKET,
433 +               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
434         }, {
435                 .name = "imx27-fec",
436 -               .driver_data = 0,
437 +               .driver_data = FEC_QUIRK_HAS_RACC,
438         }, {
439                 .name = "imx28-fec",
440                 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
441 @@ -119,20 +114,12 @@ static struct platform_device_id fec_devtype[] = {
442                                 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
443                                 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
444                                 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
445 -                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
446 +                               FEC_QUIRK_HAS_RACC,
447         }, {
448                 .name = "imx6ul-fec",
449                 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
450                                 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
451 -                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
452 -                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
453 -       }, {
454 -               .name = "imx8qm-fec",
455 -               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
456 -                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
457 -                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
458 -                               FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
459 -                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
460 +                               FEC_QUIRK_HAS_VLAN,
461         }, {
462                 /* sentinel */
463         }
464 @@ -147,7 +134,6 @@ enum imx_fec_type {
465         MVF600_FEC,
466         IMX6SX_FEC,
467         IMX6UL_FEC,
468 -       IMX8QM_FEC,
469  };
470  
471  static const struct of_device_id fec_dt_ids[] = {
472 @@ -158,7 +144,6 @@ static const struct of_device_id fec_dt_ids[] = {
473         { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
474         { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
475         { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
476 -       { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
477         { /* sentinel */ }
478  };
479  MODULE_DEVICE_TABLE(of, fec_dt_ids);
480 @@ -196,7 +181,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
481  /* FEC receive acceleration */
482  #define FEC_RACC_IPDIS         (1 << 1)
483  #define FEC_RACC_PRODIS                (1 << 2)
484 -#define FEC_RACC_SHIFT16       BIT(7)
485  #define FEC_RACC_OPTIONS       (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
486  
487  /*
488 @@ -205,8 +189,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
489   * account when setting it.
490   */
491  #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
492 -    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
493 -    defined(CONFIG_ARM64)
494 +    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
495  #define        OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
496  #else
497  #define        OPT_FRAME_SIZE  0
498 @@ -244,38 +227,86 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
499  
500  #define IS_TSO_HEADER(txq, addr) \
501         ((addr >= txq->tso_hdrs_dma) && \
502 -       (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
503 +       (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
504  
505  static int mii_cnt;
506  
507 -static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
508 -                                            struct bufdesc_prop *bd)
509 -{
510 -       return (bdp >= bd->last) ? bd->base
511 -                       : (struct bufdesc *)(((void *)bdp) + bd->dsize);
512 -}
513 +static inline
514 +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
515 +                                     struct fec_enet_private *fep,
516 +                                     int queue_id)
517 +{
518 +       struct bufdesc *new_bd = bdp + 1;
519 +       struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
520 +       struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
521 +       struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
522 +       struct bufdesc_ex *ex_base;
523 +       struct bufdesc *base;
524 +       int ring_size;
525 +
526 +       if (bdp >= txq->tx_bd_base) {
527 +               base = txq->tx_bd_base;
528 +               ring_size = txq->tx_ring_size;
529 +               ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
530 +       } else {
531 +               base = rxq->rx_bd_base;
532 +               ring_size = rxq->rx_ring_size;
533 +               ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
534 +       }
535  
536 -static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
537 -                                            struct bufdesc_prop *bd)
538 -{
539 -       return (bdp <= bd->base) ? bd->last
540 -                       : (struct bufdesc *)(((void *)bdp) - bd->dsize);
541 +       if (fep->bufdesc_ex)
542 +               return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
543 +                       ex_base : ex_new_bd);
544 +       else
545 +               return (new_bd >= (base + ring_size)) ?
546 +                       base : new_bd;
547 +}
548 +
549 +static inline
550 +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
551 +                                     struct fec_enet_private *fep,
552 +                                     int queue_id)
553 +{
554 +       struct bufdesc *new_bd = bdp - 1;
555 +       struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
556 +       struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
557 +       struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
558 +       struct bufdesc_ex *ex_base;
559 +       struct bufdesc *base;
560 +       int ring_size;
561 +
562 +       if (bdp >= txq->tx_bd_base) {
563 +               base = txq->tx_bd_base;
564 +               ring_size = txq->tx_ring_size;
565 +               ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
566 +       } else {
567 +               base = rxq->rx_bd_base;
568 +               ring_size = rxq->rx_ring_size;
569 +               ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
570 +       }
571 +
572 +       if (fep->bufdesc_ex)
573 +               return (struct bufdesc *)((ex_new_bd < ex_base) ?
574 +                       (ex_new_bd + ring_size) : ex_new_bd);
575 +       else
576 +               return (new_bd < base) ? (new_bd + ring_size) : new_bd;
577  }
578  
579 -static int fec_enet_get_bd_index(struct bufdesc *bdp,
580 -                                struct bufdesc_prop *bd)
581 +static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
582 +                               struct fec_enet_private *fep)
583  {
584 -       return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
585 +       return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
586  }
587  
588 -static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
589 +static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
590 +                                       struct fec_enet_priv_tx_q *txq)
591  {
592         int entries;
593  
594 -       entries = (((const char *)txq->dirty_tx -
595 -                       (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
596 +       entries = ((const char *)txq->dirty_tx -
597 +                       (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
598  
599 -       return entries >= 0 ? entries : entries + txq->bd.ring_size;
600 +       return entries >= 0 ? entries : entries + txq->tx_ring_size;
601  }
602  
603  static void swap_buffer(void *bufaddr, int len)
604 @@ -308,20 +339,18 @@ static void fec_dump(struct net_device *ndev)
605         pr_info("Nr     SC     addr       len  SKB\n");
606  
607         txq = fep->tx_queue[0];
608 -       bdp = txq->bd.base;
609 +       bdp = txq->tx_bd_base;
610  
611         do {
612 -               pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
613 +               pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
614                         index,
615 -                       bdp == txq->bd.cur ? 'S' : ' ',
616 +                       bdp == txq->cur_tx ? 'S' : ' ',
617                         bdp == txq->dirty_tx ? 'H' : ' ',
618 -                       fec16_to_cpu(bdp->cbd_sc),
619 -                       fec32_to_cpu(bdp->cbd_bufaddr),
620 -                       fec16_to_cpu(bdp->cbd_datlen),
621 +                       bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
622                         txq->tx_skbuff[index]);
623 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
624 +               bdp = fec_enet_get_nextdesc(bdp, fep, 0);
625                 index++;
626 -       } while (bdp != txq->bd.base);
627 +       } while (bdp != txq->tx_bd_base);
628  }
629  
630  static inline bool is_ipv4_pkt(struct sk_buff *skb)
631 @@ -352,9 +381,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
632                              struct net_device *ndev)
633  {
634         struct fec_enet_private *fep = netdev_priv(ndev);
635 -       struct bufdesc *bdp = txq->bd.cur;
636 +       struct bufdesc *bdp = txq->cur_tx;
637         struct bufdesc_ex *ebdp;
638         int nr_frags = skb_shinfo(skb)->nr_frags;
639 +       unsigned short queue = skb_get_queue_mapping(skb);
640         int frag, frag_len;
641         unsigned short status;
642         unsigned int estatus = 0;
643 @@ -366,10 +396,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
644  
645         for (frag = 0; frag < nr_frags; frag++) {
646                 this_frag = &skb_shinfo(skb)->frags[frag];
647 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
648 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
649                 ebdp = (struct bufdesc_ex *)bdp;
650  
651 -               status = fec16_to_cpu(bdp->cbd_sc);
652 +               status = bdp->cbd_sc;
653                 status &= ~BD_ENET_TX_STATS;
654                 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
655                 frag_len = skb_shinfo(skb)->frags[frag].size;
656 @@ -387,16 +417,16 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
657  
658                 if (fep->bufdesc_ex) {
659                         if (fep->quirks & FEC_QUIRK_HAS_AVB)
660 -                               estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
661 +                               estatus |= FEC_TX_BD_FTYPE(queue);
662                         if (skb->ip_summed == CHECKSUM_PARTIAL)
663                                 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
664                         ebdp->cbd_bdu = 0;
665 -                       ebdp->cbd_esc = cpu_to_fec32(estatus);
666 +                       ebdp->cbd_esc = estatus;
667                 }
668  
669                 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
670  
671 -               index = fec_enet_get_bd_index(bdp, &txq->bd);
672 +               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
673                 if (((unsigned long) bufaddr) & fep->tx_align ||
674                         fep->quirks & FEC_QUIRK_SWAP_FRAME) {
675                         memcpy(txq->tx_bounce[index], bufaddr, frag_len);
676 @@ -409,27 +439,24 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
677                 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
678                                       DMA_TO_DEVICE);
679                 if (dma_mapping_error(&fep->pdev->dev, addr)) {
680 +                       dev_kfree_skb_any(skb);
681                         if (net_ratelimit())
682                                 netdev_err(ndev, "Tx DMA memory map failed\n");
683                         goto dma_mapping_error;
684                 }
685  
686 -               bdp->cbd_bufaddr = cpu_to_fec32(addr);
687 -               bdp->cbd_datlen = cpu_to_fec16(frag_len);
688 -               /* Make sure the updates to rest of the descriptor are
689 -                * performed before transferring ownership.
690 -                */
691 -               wmb();
692 -               bdp->cbd_sc = cpu_to_fec16(status);
693 +               bdp->cbd_bufaddr = addr;
694 +               bdp->cbd_datlen = frag_len;
695 +               bdp->cbd_sc = status;
696         }
697  
698         return bdp;
699  dma_mapping_error:
700 -       bdp = txq->bd.cur;
701 +       bdp = txq->cur_tx;
702         for (i = 0; i < frag; i++) {
703 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
704 -               dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
705 -                                fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
706 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
707 +               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
708 +                               bdp->cbd_datlen, DMA_TO_DEVICE);
709         }
710         return ERR_PTR(-ENOMEM);
711  }
712 @@ -444,11 +471,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
713         dma_addr_t addr;
714         unsigned short status;
715         unsigned short buflen;
716 +       unsigned short queue;
717         unsigned int estatus = 0;
718         unsigned int index;
719         int entries_free;
720  
721 -       entries_free = fec_enet_get_free_txdesc_num(txq);
722 +       entries_free = fec_enet_get_free_txdesc_num(fep, txq);
723         if (entries_free < MAX_SKB_FRAGS + 1) {
724                 dev_kfree_skb_any(skb);
725                 if (net_ratelimit())
726 @@ -463,16 +491,17 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
727         }
728  
729         /* Fill in a Tx ring entry */
730 -       bdp = txq->bd.cur;
731 +       bdp = txq->cur_tx;
732         last_bdp = bdp;
733 -       status = fec16_to_cpu(bdp->cbd_sc);
734 +       status = bdp->cbd_sc;
735         status &= ~BD_ENET_TX_STATS;
736  
737         /* Set buffer length and buffer pointer */
738         bufaddr = skb->data;
739         buflen = skb_headlen(skb);
740  
741 -       index = fec_enet_get_bd_index(bdp, &txq->bd);
742 +       queue = skb_get_queue_mapping(skb);
743 +       index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
744         if (((unsigned long) bufaddr) & fep->tx_align ||
745                 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
746                 memcpy(txq->tx_bounce[index], skb->data, buflen);
747 @@ -493,12 +522,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
748  
749         if (nr_frags) {
750                 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
751 -               if (IS_ERR(last_bdp)) {
752 -                       dma_unmap_single(&fep->pdev->dev, addr,
753 -                                        buflen, DMA_TO_DEVICE);
754 -                       dev_kfree_skb_any(skb);
755 +               if (IS_ERR(last_bdp))
756                         return NETDEV_TX_OK;
757 -               }
758         } else {
759                 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
760                 if (fep->bufdesc_ex) {
761 @@ -508,8 +533,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
762                                 estatus |= BD_ENET_TX_TS;
763                 }
764         }
765 -       bdp->cbd_bufaddr = cpu_to_fec32(addr);
766 -       bdp->cbd_datlen = cpu_to_fec16(buflen);
767  
768         if (fep->bufdesc_ex) {
769  
770 @@ -520,43 +543,41 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
771                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
772  
773                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
774 -                       estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
775 +                       estatus |= FEC_TX_BD_FTYPE(queue);
776  
777                 if (skb->ip_summed == CHECKSUM_PARTIAL)
778                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
779  
780                 ebdp->cbd_bdu = 0;
781 -               ebdp->cbd_esc = cpu_to_fec32(estatus);
782 +               ebdp->cbd_esc = estatus;
783         }
784  
785 -       index = fec_enet_get_bd_index(last_bdp, &txq->bd);
786 +       index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
787         /* Save skb pointer */
788         txq->tx_skbuff[index] = skb;
789  
790 -       /* Make sure the updates to rest of the descriptor are performed before
791 -        * transferring ownership.
792 -        */
793 -       wmb();
794 +       bdp->cbd_datlen = buflen;
795 +       bdp->cbd_bufaddr = addr;
796  
797         /* Send it on its way.  Tell FEC it's ready, interrupt when done,
798          * it's the last BD of the frame, and to put the CRC on the end.
799          */
800         status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
801 -       bdp->cbd_sc = cpu_to_fec16(status);
802 +       bdp->cbd_sc = status;
803  
804         /* If this was the last BD in the ring, start at the beginning again. */
805 -       bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
806 +       bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
807  
808         skb_tx_timestamp(skb);
809  
810         /* Make sure the update to bdp and tx_skbuff are performed before
811 -        * txq->bd.cur.
812 +        * cur_tx.
813          */
814         wmb();
815 -       txq->bd.cur = bdp;
816 +       txq->cur_tx = bdp;
817  
818         /* Trigger transmission start */
819 -       writel(0, txq->bd.reg_desc_active);
820 +       writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
821  
822         return 0;
823  }
824 @@ -569,11 +590,12 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
825  {
826         struct fec_enet_private *fep = netdev_priv(ndev);
827         struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
828 +       unsigned short queue = skb_get_queue_mapping(skb);
829         unsigned short status;
830         unsigned int estatus = 0;
831         dma_addr_t addr;
832  
833 -       status = fec16_to_cpu(bdp->cbd_sc);
834 +       status = bdp->cbd_sc;
835         status &= ~BD_ENET_TX_STATS;
836  
837         status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
838 @@ -595,16 +617,16 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
839                 return NETDEV_TX_BUSY;
840         }
841  
842 -       bdp->cbd_datlen = cpu_to_fec16(size);
843 -       bdp->cbd_bufaddr = cpu_to_fec32(addr);
844 +       bdp->cbd_datlen = size;
845 +       bdp->cbd_bufaddr = addr;
846  
847         if (fep->bufdesc_ex) {
848                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
849 -                       estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
850 +                       estatus |= FEC_TX_BD_FTYPE(queue);
851                 if (skb->ip_summed == CHECKSUM_PARTIAL)
852                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
853                 ebdp->cbd_bdu = 0;
854 -               ebdp->cbd_esc = cpu_to_fec32(estatus);
855 +               ebdp->cbd_esc = estatus;
856         }
857  
858         /* Handle the last BD specially */
859 @@ -613,10 +635,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
860         if (is_last) {
861                 status |= BD_ENET_TX_INTR;
862                 if (fep->bufdesc_ex)
863 -                       ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
864 +                       ebdp->cbd_esc |= BD_ENET_TX_INT;
865         }
866  
867 -       bdp->cbd_sc = cpu_to_fec16(status);
868 +       bdp->cbd_sc = status;
869  
870         return 0;
871  }
872 @@ -629,12 +651,13 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
873         struct fec_enet_private *fep = netdev_priv(ndev);
874         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
875         struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
876 +       unsigned short queue = skb_get_queue_mapping(skb);
877         void *bufaddr;
878         unsigned long dmabuf;
879         unsigned short status;
880         unsigned int estatus = 0;
881  
882 -       status = fec16_to_cpu(bdp->cbd_sc);
883 +       status = bdp->cbd_sc;
884         status &= ~BD_ENET_TX_STATS;
885         status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
886  
887 @@ -658,19 +681,19 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
888                 }
889         }
890  
891 -       bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
892 -       bdp->cbd_datlen = cpu_to_fec16(hdr_len);
893 +       bdp->cbd_bufaddr = dmabuf;
894 +       bdp->cbd_datlen = hdr_len;
895  
896         if (fep->bufdesc_ex) {
897                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
898 -                       estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
899 +                       estatus |= FEC_TX_BD_FTYPE(queue);
900                 if (skb->ip_summed == CHECKSUM_PARTIAL)
901                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
902                 ebdp->cbd_bdu = 0;
903 -               ebdp->cbd_esc = cpu_to_fec32(estatus);
904 +               ebdp->cbd_esc = estatus;
905         }
906  
907 -       bdp->cbd_sc = cpu_to_fec16(status);
908 +       bdp->cbd_sc = status;
909  
910         return 0;
911  }
912 @@ -682,12 +705,13 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
913         struct fec_enet_private *fep = netdev_priv(ndev);
914         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
915         int total_len, data_left;
916 -       struct bufdesc *bdp = txq->bd.cur;
917 +       struct bufdesc *bdp = txq->cur_tx;
918 +       unsigned short queue = skb_get_queue_mapping(skb);
919         struct tso_t tso;
920         unsigned int index = 0;
921         int ret;
922  
923 -       if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
924 +       if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
925                 dev_kfree_skb_any(skb);
926                 if (net_ratelimit())
927                         netdev_err(ndev, "NOT enough BD for TSO!\n");
928 @@ -707,7 +731,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
929         while (total_len > 0) {
930                 char *hdr;
931  
932 -               index = fec_enet_get_bd_index(bdp, &txq->bd);
933 +               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
934                 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
935                 total_len -= data_left;
936  
937 @@ -722,8 +746,9 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
938                         int size;
939  
940                         size = min_t(int, tso.size, data_left);
941 -                       bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
942 -                       index = fec_enet_get_bd_index(bdp, &txq->bd);
943 +                       bdp = fec_enet_get_nextdesc(bdp, fep, queue);
944 +                       index = fec_enet_get_bd_index(txq->tx_bd_base,
945 +                                                     bdp, fep);
946                         ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
947                                                         bdp, index,
948                                                         tso.data, size,
949 @@ -736,22 +761,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
950                         tso_build_data(skb, &tso, size);
951                 }
952  
953 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
954 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
955         }
956  
957         /* Save skb pointer */
958         txq->tx_skbuff[index] = skb;
959  
960         skb_tx_timestamp(skb);
961 -       txq->bd.cur = bdp;
962 +       txq->cur_tx = bdp;
963  
964         /* Trigger transmission start */
965         if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
966 -           !readl(txq->bd.reg_desc_active) ||
967 -           !readl(txq->bd.reg_desc_active) ||
968 -           !readl(txq->bd.reg_desc_active) ||
969 -           !readl(txq->bd.reg_desc_active))
970 -               writel(0, txq->bd.reg_desc_active);
971 +           !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
972 +           !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
973 +           !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
974 +           !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
975 +               writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
976  
977         return 0;
978  
979 @@ -781,7 +806,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
980         if (ret)
981                 return ret;
982  
983 -       entries_free = fec_enet_get_free_txdesc_num(txq);
984 +       entries_free = fec_enet_get_free_txdesc_num(fep, txq);
985         if (entries_free <= txq->tx_stop_threshold)
986                 netif_tx_stop_queue(nq);
987  
988 @@ -802,45 +827,45 @@ static void fec_enet_bd_init(struct net_device *dev)
989         for (q = 0; q < fep->num_rx_queues; q++) {
990                 /* Initialize the receive buffer descriptors. */
991                 rxq = fep->rx_queue[q];
992 -               bdp = rxq->bd.base;
993 +               bdp = rxq->rx_bd_base;
994  
995 -               for (i = 0; i < rxq->bd.ring_size; i++) {
996 +               for (i = 0; i < rxq->rx_ring_size; i++) {
997  
998                         /* Initialize the BD for every fragment in the page. */
999                         if (bdp->cbd_bufaddr)
1000 -                               bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
1001 +                               bdp->cbd_sc = BD_ENET_RX_EMPTY;
1002                         else
1003 -                               bdp->cbd_sc = cpu_to_fec16(0);
1004 -                       bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1005 +                               bdp->cbd_sc = 0;
1006 +                       bdp = fec_enet_get_nextdesc(bdp, fep, q);
1007                 }
1008  
1009                 /* Set the last buffer to wrap */
1010 -               bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
1011 -               bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1012 +               bdp = fec_enet_get_prevdesc(bdp, fep, q);
1013 +               bdp->cbd_sc |= BD_SC_WRAP;
1014  
1015 -               rxq->bd.cur = rxq->bd.base;
1016 +               rxq->cur_rx = rxq->rx_bd_base;
1017         }
1018  
1019         for (q = 0; q < fep->num_tx_queues; q++) {
1020                 /* ...and the same for transmit */
1021                 txq = fep->tx_queue[q];
1022 -               bdp = txq->bd.base;
1023 -               txq->bd.cur = bdp;
1024 +               bdp = txq->tx_bd_base;
1025 +               txq->cur_tx = bdp;
1026  
1027 -               for (i = 0; i < txq->bd.ring_size; i++) {
1028 +               for (i = 0; i < txq->tx_ring_size; i++) {
1029                         /* Initialize the BD for every fragment in the page. */
1030 -                       bdp->cbd_sc = cpu_to_fec16(0);
1031 +                       bdp->cbd_sc = 0;
1032                         if (txq->tx_skbuff[i]) {
1033                                 dev_kfree_skb_any(txq->tx_skbuff[i]);
1034                                 txq->tx_skbuff[i] = NULL;
1035                         }
1036 -                       bdp->cbd_bufaddr = cpu_to_fec32(0);
1037 -                       bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1038 +                       bdp->cbd_bufaddr = 0;
1039 +                       bdp = fec_enet_get_nextdesc(bdp, fep, q);
1040                 }
1041  
1042                 /* Set the last buffer to wrap */
1043 -               bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
1044 -               bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1045 +               bdp = fec_enet_get_prevdesc(bdp, fep, q);
1046 +               bdp->cbd_sc |= BD_SC_WRAP;
1047                 txq->dirty_tx = bdp;
1048         }
1049  }
1050 @@ -851,7 +876,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
1051         int i;
1052  
1053         for (i = 0; i < fep->num_rx_queues; i++)
1054 -               writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1055 +               writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
1056  }
1057  
1058  static void fec_enet_enable_ring(struct net_device *ndev)
1059 @@ -863,7 +888,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
1060  
1061         for (i = 0; i < fep->num_rx_queues; i++) {
1062                 rxq = fep->rx_queue[i];
1063 -               writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1064 +               writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
1065                 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
1066  
1067                 /* enable DMA1/2 */
1068 @@ -874,7 +899,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
1069  
1070         for (i = 0; i < fep->num_tx_queues; i++) {
1071                 txq = fep->tx_queue[i];
1072 -               writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1073 +               writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
1074  
1075                 /* enable DMA1/2 */
1076                 if (i)
1077 @@ -892,7 +917,7 @@ static void fec_enet_reset_skb(struct net_device *ndev)
1078         for (i = 0; i < fep->num_tx_queues; i++) {
1079                 txq = fep->tx_queue[i];
1080  
1081 -               for (j = 0; j < txq->bd.ring_size; j++) {
1082 +               for (j = 0; j < txq->tx_ring_size; j++) {
1083                         if (txq->tx_skbuff[j]) {
1084                                 dev_kfree_skb_any(txq->tx_skbuff[j]);
1085                                 txq->tx_skbuff[j] = NULL;
1086 @@ -930,11 +955,11 @@ fec_restart(struct net_device *ndev)
1087          * enet-mac reset will reset mac address registers too,
1088          * so need to reconfigure it.
1089          */
1090 -       memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1091 -       writel((__force u32)cpu_to_be32(temp_mac[0]),
1092 -              fep->hwp + FEC_ADDR_LOW);
1093 -       writel((__force u32)cpu_to_be32(temp_mac[1]),
1094 -              fep->hwp + FEC_ADDR_HIGH);
1095 +       if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1096 +               memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1097 +               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
1098 +               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
1099 +       }
1100  
1101         /* Clear any outstanding interrupt. */
1102         writel(0xffffffff, fep->hwp + FEC_IEVENT);
1103 @@ -961,16 +986,13 @@ fec_restart(struct net_device *ndev)
1104  
1105  #if !defined(CONFIG_M5272)
1106         if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1107 +               /* set RX checksum */
1108                 val = readl(fep->hwp + FEC_RACC);
1109 -               /* align IP header */
1110 -               val |= FEC_RACC_SHIFT16;
1111                 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1112 -                       /* set RX checksum */
1113                         val |= FEC_RACC_OPTIONS;
1114                 else
1115                         val &= ~FEC_RACC_OPTIONS;
1116                 writel(val, fep->hwp + FEC_RACC);
1117 -               writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1118         }
1119         writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1120  #endif
1121 @@ -995,10 +1017,10 @@ fec_restart(struct net_device *ndev)
1122                         rcntl &= ~(1 << 8);
1123  
1124                 /* 1G, 100M or 10M */
1125 -               if (ndev->phydev) {
1126 -                       if (ndev->phydev->speed == SPEED_1000)
1127 +               if (fep->phy_dev) {
1128 +                       if (fep->phy_dev->speed == SPEED_1000)
1129                                 ecntl |= (1 << 5);
1130 -                       else if (ndev->phydev->speed == SPEED_100)
1131 +                       else if (fep->phy_dev->speed == SPEED_100)
1132                                 rcntl &= ~(1 << 9);
1133                         else
1134                                 rcntl |= (1 << 9);
1135 @@ -1019,7 +1041,7 @@ fec_restart(struct net_device *ndev)
1136                          */
1137                         cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1138                                 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1139 -                       if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1140 +                       if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
1141                                 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1142                         writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1143  
1144 @@ -1033,7 +1055,7 @@ fec_restart(struct net_device *ndev)
1145         /* enable pause frame*/
1146         if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1147             ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1148 -            ndev->phydev && ndev->phydev->pause)) {
1149 +            fep->phy_dev && fep->phy_dev->pause)) {
1150                 rcntl |= FEC_ENET_FCE;
1151  
1152                 /* set FIFO threshold parameter to reduce overrun */
1153 @@ -1213,12 +1235,13 @@ static void
1154  fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1155  {
1156         struct  fec_enet_private *fep;
1157 -       struct bufdesc *bdp;
1158 +       struct bufdesc *bdp, *bdp_t;
1159         unsigned short status;
1160         struct  sk_buff *skb;
1161         struct fec_enet_priv_tx_q *txq;
1162         struct netdev_queue *nq;
1163         int     index = 0;
1164 +       int     i, bdnum;
1165         int     entries_free;
1166  
1167         fep = netdev_priv(ndev);
1168 @@ -1231,27 +1254,37 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1169         bdp = txq->dirty_tx;
1170  
1171         /* get next bdp of dirty_tx */
1172 -       bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1173 +       bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1174  
1175 -       while (bdp != READ_ONCE(txq->bd.cur)) {
1176 -               /* Order the load of bd.cur and cbd_sc */
1177 +       while (bdp != READ_ONCE(txq->cur_tx)) {
1178 +               /* Order the load of cur_tx and cbd_sc */
1179                 rmb();
1180 -               status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1181 +               status = READ_ONCE(bdp->cbd_sc);
1182                 if (status & BD_ENET_TX_READY)
1183                         break;
1184  
1185 -               index = fec_enet_get_bd_index(bdp, &txq->bd);
1186 -
1187 +               bdp_t = bdp;
1188 +               bdnum = 1;
1189 +               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1190                 skb = txq->tx_skbuff[index];
1191 +               while (!skb) {
1192 +                       bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1193 +                       index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1194 +                       skb = txq->tx_skbuff[index];
1195 +                       bdnum++;
1196 +               }
1197 +               if ((status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1198 +                       break;
1199 +
1200 +               for (i = 0; i < bdnum; i++) {
1201 +                       if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1202 +                               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1203 +                                                bdp->cbd_datlen, DMA_TO_DEVICE);
1204 +                       bdp->cbd_bufaddr = 0;
1205 +                       if (i < bdnum - 1)
1206 +                               bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1207 +               }
1208                 txq->tx_skbuff[index] = NULL;
1209 -               if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1210 -                       dma_unmap_single(&fep->pdev->dev,
1211 -                                        fec32_to_cpu(bdp->cbd_bufaddr),
1212 -                                        fec16_to_cpu(bdp->cbd_datlen),
1213 -                                        DMA_TO_DEVICE);
1214 -               bdp->cbd_bufaddr = cpu_to_fec32(0);
1215 -               if (!skb)
1216 -                       goto skb_done;
1217  
1218                 /* Check for errors. */
1219                 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1220 @@ -1278,7 +1311,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1221                         struct skb_shared_hwtstamps shhwtstamps;
1222                         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1223  
1224 -                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1225 +                       fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
1226                         skb_tstamp_tx(skb, &shhwtstamps);
1227                 }
1228  
1229 @@ -1290,7 +1323,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1230  
1231                 /* Free the sk buffer associated with this last transmit */
1232                 dev_kfree_skb_any(skb);
1233 -skb_done:
1234 +
1235                 /* Make sure the update to bdp and tx_skbuff are performed
1236                  * before dirty_tx
1237                  */
1238 @@ -1298,21 +1331,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1239                 txq->dirty_tx = bdp;
1240  
1241                 /* Update pointer to next buffer descriptor to be transmitted */
1242 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1243 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1244  
1245                 /* Since we have freed up a buffer, the ring is no longer full
1246                  */
1247                 if (netif_queue_stopped(ndev)) {
1248 -                       entries_free = fec_enet_get_free_txdesc_num(txq);
1249 +                       entries_free = fec_enet_get_free_txdesc_num(fep, txq);
1250                         if (entries_free >= txq->tx_wake_threshold)
1251                                 netif_tx_wake_queue(nq);
1252                 }
1253         }
1254  
1255         /* ERR006538: Keep the transmitter going */
1256 -       if (bdp != txq->bd.cur &&
1257 -           readl(txq->bd.reg_desc_active) == 0)
1258 -               writel(0, txq->bd.reg_desc_active);
1259 +       if (bdp != txq->cur_tx &&
1260 +           readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
1261 +               writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
1262  }
1263  
1264  static void
1265 @@ -1338,8 +1371,10 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
1266         if (off)
1267                 skb_reserve(skb, fep->rx_align + 1 - off);
1268  
1269 -       bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
1270 -       if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1271 +       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
1272 +                                         FEC_ENET_RX_FRSIZE - fep->rx_align,
1273 +                                         DMA_FROM_DEVICE);
1274 +       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
1275                 if (net_ratelimit())
1276                         netdev_err(ndev, "Rx DMA memory map failed\n");
1277                 return -ENOMEM;
1278 @@ -1361,8 +1396,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1279         if (!new_skb)
1280                 return false;
1281  
1282 -       dma_sync_single_for_cpu(&fep->pdev->dev,
1283 -                               fec32_to_cpu(bdp->cbd_bufaddr),
1284 +       dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
1285                                 FEC_ENET_RX_FRSIZE - fep->rx_align,
1286                                 DMA_FROM_DEVICE);
1287         if (!swap)
1288 @@ -1374,7 +1408,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1289         return true;
1290  }
1291  
1292 -/* During a receive, the bd_rx.cur points to the current incoming buffer.
1293 +/* During a receive, the cur_rx points to the current incoming buffer.
1294   * When we update through the ring, if the next incoming buffer has
1295   * not been given to the system, we just set the empty indicator,
1296   * effectively tossing the packet.
1297 @@ -1407,9 +1441,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1298         /* First, grab all of the stats for the incoming packet.
1299          * These get messed up if we get called due to a busy condition.
1300          */
1301 -       bdp = rxq->bd.cur;
1302 +       bdp = rxq->cur_rx;
1303  
1304 -       while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1305 +       while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
1306  
1307                 if (pkt_received >= budget)
1308                         break;
1309 @@ -1445,10 +1479,10 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1310  
1311                 /* Process the incoming frame. */
1312                 ndev->stats.rx_packets++;
1313 -               pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1314 +               pkt_len = bdp->cbd_datlen;
1315                 ndev->stats.rx_bytes += pkt_len;
1316  
1317 -               index = fec_enet_get_bd_index(bdp, &rxq->bd);
1318 +               index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
1319                 skb = rxq->rx_skbuff[index];
1320  
1321                 /* The packet length includes FCS, but we don't want to
1322 @@ -1463,8 +1497,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1323                                 ndev->stats.rx_dropped++;
1324                                 goto rx_processing_done;
1325                         }
1326 -                       dma_unmap_single(&fep->pdev->dev,
1327 -                                        fec32_to_cpu(bdp->cbd_bufaddr),
1328 +                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1329                                          FEC_ENET_RX_FRSIZE - fep->rx_align,
1330                                          DMA_FROM_DEVICE);
1331                 }
1332 @@ -1472,15 +1505,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1333                 prefetch(skb->data - NET_IP_ALIGN);
1334                 skb_put(skb, pkt_len - 4);
1335                 data = skb->data;
1336 -
1337                 if (!is_copybreak && need_swap)
1338                         swap_buffer(data, pkt_len);
1339  
1340 -#if !defined(CONFIG_M5272)
1341 -               if (fep->quirks & FEC_QUIRK_HAS_RACC)
1342 -                       data = skb_pull_inline(skb, 2);
1343 -#endif
1344 -
1345                 /* Extract the enhanced buffer descriptor */
1346                 ebdp = NULL;
1347                 if (fep->bufdesc_ex)
1348 @@ -1489,8 +1516,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1349                 /* If this is a VLAN packet remove the VLAN Tag */
1350                 vlan_packet_rcvd = false;
1351                 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1352 -                   fep->bufdesc_ex &&
1353 -                   (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1354 +                       fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
1355                         /* Push and remove the vlan tag */
1356                         struct vlan_hdr *vlan_header =
1357                                         (struct vlan_hdr *) (data + ETH_HLEN);
1358 @@ -1506,12 +1532,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1359  
1360                 /* Get receive timestamp from the skb */
1361                 if (fep->hwts_rx_en && fep->bufdesc_ex)
1362 -                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1363 +                       fec_enet_hwtstamp(fep, ebdp->ts,
1364                                           skb_hwtstamps(skb));
1365  
1366                 if (fep->bufdesc_ex &&
1367                     (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1368 -                       if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1369 +                       if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
1370                                 /* don't check it */
1371                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1372                         } else {
1373 @@ -1528,8 +1554,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1374                 napi_gro_receive(&fep->napi, skb);
1375  
1376                 if (is_copybreak) {
1377 -                       dma_sync_single_for_device(&fep->pdev->dev,
1378 -                                                  fec32_to_cpu(bdp->cbd_bufaddr),
1379 +                       dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
1380                                                    FEC_ENET_RX_FRSIZE - fep->rx_align,
1381                                                    DMA_FROM_DEVICE);
1382                 } else {
1383 @@ -1543,30 +1568,26 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1384  
1385                 /* Mark the buffer empty */
1386                 status |= BD_ENET_RX_EMPTY;
1387 +               bdp->cbd_sc = status;
1388  
1389                 if (fep->bufdesc_ex) {
1390                         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1391  
1392 -                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1393 +                       ebdp->cbd_esc = BD_ENET_RX_INT;
1394                         ebdp->cbd_prot = 0;
1395                         ebdp->cbd_bdu = 0;
1396                 }
1397 -               /* Make sure the updates to rest of the descriptor are
1398 -                * performed before transferring ownership.
1399 -                */
1400 -               wmb();
1401 -               bdp->cbd_sc = cpu_to_fec16(status);
1402  
1403                 /* Update BD pointer to next entry */
1404 -               bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1405 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1406  
1407                 /* Doing this here will keep the FEC running while we process
1408                  * incoming frames.  On a heavily loaded network, we should be
1409                  * able to keep up at the expense of system resources.
1410                  */
1411 -               writel(0, rxq->bd.reg_desc_active);
1412 +               writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
1413         }
1414 -       rxq->bd.cur = bdp;
1415 +       rxq->cur_rx = bdp;
1416         return pkt_received;
1417  }
1418  
1419 @@ -1578,15 +1599,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
1420         struct fec_enet_private *fep = netdev_priv(ndev);
1421  
1422         for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
1423 -               int ret;
1424 -
1425 -               ret = fec_enet_rx_queue(ndev,
1426 +               clear_bit(queue_id, &fep->work_rx);
1427 +               pkt_received += fec_enet_rx_queue(ndev,
1428                                         budget - pkt_received, queue_id);
1429 -
1430 -               if (ret < budget - pkt_received)
1431 -                       clear_bit(queue_id, &fep->work_rx);
1432 -
1433 -               pkt_received += ret;
1434         }
1435         return pkt_received;
1436  }
1437 @@ -1631,7 +1646,7 @@ fec_enet_interrupt(int irq, void *dev_id)
1438  
1439                 if (napi_schedule_prep(&fep->napi)) {
1440                         /* Disable the NAPI interrupts */
1441 -                       writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
1442 +                       writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1443                         __napi_schedule(&fep->napi);
1444                 }
1445         }
1446 @@ -1742,7 +1757,7 @@ static void fec_get_mac(struct net_device *ndev)
1447  static void fec_enet_adjust_link(struct net_device *ndev)
1448  {
1449         struct fec_enet_private *fep = netdev_priv(ndev);
1450 -       struct phy_device *phy_dev = ndev->phydev;
1451 +       struct phy_device *phy_dev = fep->phy_dev;
1452         int status_change = 0;
1453  
1454         /* Prevent a state halted on mii error */
1455 @@ -1802,16 +1817,10 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1456  static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1457  {
1458         struct fec_enet_private *fep = bus->priv;
1459 -       struct device *dev = &fep->pdev->dev;
1460         unsigned long time_left;
1461 -       int ret = 0;
1462 -
1463 -       ret = pm_runtime_get_sync(dev);
1464 -       if (ret < 0)
1465 -               return ret;
1466  
1467         fep->mii_timeout = 0;
1468 -       reinit_completion(&fep->mdio_done);
1469 +       init_completion(&fep->mdio_done);
1470  
1471         /* start a read op */
1472         writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
1473 @@ -1824,35 +1833,21 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1474         if (time_left == 0) {
1475                 fep->mii_timeout = 1;
1476                 netdev_err(fep->netdev, "MDIO read timeout\n");
1477 -               ret = -ETIMEDOUT;
1478 -               goto out;
1479 +               return -ETIMEDOUT;
1480         }
1481  
1482 -       ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1483 -
1484 -out:
1485 -       pm_runtime_mark_last_busy(dev);
1486 -       pm_runtime_put_autosuspend(dev);
1487 -
1488 -       return ret;
1489 +       /* return value */
1490 +       return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1491  }
1492  
1493  static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1494                            u16 value)
1495  {
1496         struct fec_enet_private *fep = bus->priv;
1497 -       struct device *dev = &fep->pdev->dev;
1498         unsigned long time_left;
1499 -       int ret;
1500 -
1501 -       ret = pm_runtime_get_sync(dev);
1502 -       if (ret < 0)
1503 -               return ret;
1504 -       else
1505 -               ret = 0;
1506  
1507         fep->mii_timeout = 0;
1508 -       reinit_completion(&fep->mdio_done);
1509 +       init_completion(&fep->mdio_done);
1510  
1511         /* start a write op */
1512         writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
1513 @@ -1866,13 +1861,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1514         if (time_left == 0) {
1515                 fep->mii_timeout = 1;
1516                 netdev_err(fep->netdev, "MDIO write timeout\n");
1517 -               ret  = -ETIMEDOUT;
1518 +               return -ETIMEDOUT;
1519         }
1520  
1521 -       pm_runtime_mark_last_busy(dev);
1522 -       pm_runtime_put_autosuspend(dev);
1523 -
1524 -       return ret;
1525 +       return 0;
1526  }
1527  
1528  static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1529 @@ -1881,10 +1873,18 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1530         int ret;
1531  
1532         if (enable) {
1533 +               ret = clk_prepare_enable(fep->clk_ahb);
1534 +               if (ret)
1535 +                       return ret;
1536 +               ret = clk_prepare_enable(fep->clk_ipg);
1537 +               if (ret)
1538 +                       goto failed_clk_ipg;
1539                 if (fep->clk_enet_out) {
1540                         ret = clk_prepare_enable(fep->clk_enet_out);
1541                         if (ret)
1542 -                               return ret;
1543 +                               goto failed_clk_enet_out;
1544 +
1545 +                       fec_reset_phy(fep->pdev);
1546                 }
1547                 if (fep->clk_ptp) {
1548                         mutex_lock(&fep->ptp_clk_mutex);
1549 @@ -1903,6 +1903,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1550                                 goto failed_clk_ref;
1551                 }
1552         } else {
1553 +               clk_disable_unprepare(fep->clk_ahb);
1554 +               clk_disable_unprepare(fep->clk_ipg);
1555                 if (fep->clk_enet_out)
1556                         clk_disable_unprepare(fep->clk_enet_out);
1557                 if (fep->clk_ptp) {
1558 @@ -1923,27 +1925,23 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1559  failed_clk_ptp:
1560         if (fep->clk_enet_out)
1561                 clk_disable_unprepare(fep->clk_enet_out);
1562 +failed_clk_enet_out:
1563 +               clk_disable_unprepare(fep->clk_ipg);
1564 +failed_clk_ipg:
1565 +               clk_disable_unprepare(fep->clk_ahb);
1566  
1567         return ret;
1568  }
1569  
1570 -static int fec_restore_mii_bus(struct net_device *ndev)
1571 +static void fec_restore_mii_bus(struct net_device *ndev)
1572  {
1573         struct fec_enet_private *fep = netdev_priv(ndev);
1574 -       int ret;
1575 -
1576 -       ret = pm_runtime_get_sync(&fep->pdev->dev);
1577 -       if (ret < 0)
1578 -               return ret;
1579  
1580 +       fec_enet_clk_enable(ndev, true);
1581         writel(0xffc00000, fep->hwp + FEC_IEVENT);
1582         writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1583         writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1584         writel(FEC_ENET_ETHEREN, fep->hwp + FEC_ECNTRL);
1585 -
1586 -       pm_runtime_mark_last_busy(&fep->pdev->dev);
1587 -       pm_runtime_put_autosuspend(&fep->pdev->dev);
1588 -       return 0;
1589  }
1590  
1591  static int fec_enet_mii_probe(struct net_device *ndev)
1592 @@ -1955,6 +1953,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1593         int phy_id;
1594         int dev_id = fep->dev_id;
1595  
1596 +       fep->phy_dev = NULL;
1597 +
1598         if (fep->phy_node) {
1599                 phy_dev = of_phy_connect(ndev, fep->phy_node,
1600                                          &fec_enet_adjust_link, 0,
1601 @@ -1964,7 +1964,11 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1602         } else {
1603                 /* check for attached phy */
1604                 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
1605 -                       if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
1606 +                       if ((fep->mii_bus->phy_mask & (1 << phy_id)))
1607 +                               continue;
1608 +                       if (fep->mii_bus->mdio_map[phy_id] == NULL)
1609 +                               continue;
1610 +                       if (fep->mii_bus->mdio_map[phy_id]->addr == 0)
1611                                 continue;
1612                         if (dev_id--)
1613                                 continue;
1614 @@ -2002,10 +2006,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1615  
1616         phy_dev->advertising = phy_dev->supported;
1617  
1618 +       fep->phy_dev = phy_dev;
1619         fep->link = 0;
1620         fep->full_duplex = 0;
1621  
1622 -       phy_attached_info(phy_dev);
1623 +       netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1624 +                   fep->phy_dev->drv->name, phydev_name(phy_dev),
1625 +                   fep->phy_dev->irq);
1626  
1627         return 0;
1628  }
1629 @@ -2017,7 +2024,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1630         struct net_device *ndev = platform_get_drvdata(pdev);
1631         struct fec_enet_private *fep = netdev_priv(ndev);
1632         struct device_node *node;
1633 -       int err = -ENXIO;
1634 +       int err = -ENXIO, i;
1635         u32 mii_speed, holdtime;
1636  
1637         /*
1638 @@ -2036,7 +2043,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1639          * mdio interface in board design, and need to be configured by
1640          * fec0 mii_bus.
1641          */
1642 -       if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1643 +       if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
1644                 /* fec1 uses fec0 mii_bus */
1645                 if (mii_cnt && fec0_mii_bus) {
1646                         fep->mii_bus = fec0_mii_bus;
1647 @@ -2100,29 +2107,38 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1648         fep->mii_bus->priv = fep;
1649         fep->mii_bus->parent = &pdev->dev;
1650  
1651 +/*     fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1652 +       if (!fep->mii_bus->irq) {
1653 +               err = -ENOMEM;
1654 +               goto err_out_free_mdiobus;
1655 +       }
1656 +*/
1657 +       for (i = 0; i < PHY_MAX_ADDR; i++)
1658 +               fep->mii_bus->irq[i] = PHY_POLL;
1659 +
1660         node = of_get_child_by_name(pdev->dev.of_node, "mdio");
1661         if (node) {
1662                 err = of_mdiobus_register(fep->mii_bus, node);
1663                 of_node_put(node);
1664 -       } else if (fep->phy_node && !fep->fixed_link) {
1665 -               err = -EPROBE_DEFER;
1666         } else {
1667                 err = mdiobus_register(fep->mii_bus);
1668         }
1669  
1670         if (err)
1671 -               goto err_out_free_mdiobus;
1672 +               goto err_out_free_mdio_irq;
1673  
1674         mii_cnt++;
1675  
1676         /* save fec0 mii_bus */
1677 -       if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) {
1678 +       if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1679                 fec0_mii_bus = fep->mii_bus;
1680                 fec_mii_bus_share = &fep->mii_bus_share;
1681         }
1682  
1683         return 0;
1684  
1685 +err_out_free_mdio_irq:
1686 +       /* mii_bus->irq is an embedded array in struct mii_bus (>= v4.5); do not kfree */
1687  err_out_free_mdiobus:
1688         mdiobus_free(fep->mii_bus);
1689  err_out:
1690 @@ -2133,10 +2149,35 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
1691  {
1692         if (--mii_cnt == 0) {
1693                 mdiobus_unregister(fep->mii_bus);
1694 +               /* mii_bus->irq is an embedded array; no kfree needed */
1695                 mdiobus_free(fep->mii_bus);
1696         }
1697  }
1698  
1699 +static int fec_enet_get_settings(struct net_device *ndev,
1700 +                                 struct ethtool_cmd *cmd)
1701 +{
1702 +       struct fec_enet_private *fep = netdev_priv(ndev);
1703 +       struct phy_device *phydev = fep->phy_dev;
1704 +
1705 +       if (!phydev)
1706 +               return -ENODEV;
1707 +
1708 +       return phy_ethtool_gset(phydev, cmd);
1709 +}
1710 +
1711 +static int fec_enet_set_settings(struct net_device *ndev,
1712 +                                struct ethtool_cmd *cmd)
1713 +{
1714 +       struct fec_enet_private *fep = netdev_priv(ndev);
1715 +       struct phy_device *phydev = fep->phy_dev;
1716 +
1717 +       if (!phydev)
1718 +               return -ENODEV;
1719 +
1720 +       return phy_ethtool_sset(phydev, cmd);
1721 +}
1722 +
1723  static void fec_enet_get_drvinfo(struct net_device *ndev,
1724                                  struct ethtool_drvinfo *info)
1725  {
1726 @@ -2163,8 +2204,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
1727  
1728  /* List of registers that can be safety be read to dump them with ethtool */
1729  #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
1730 -       defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
1731 -       defined(CONFIG_ARM64)
1732 +       defined(CONFIG_M520x) || defined(CONFIG_M532x) ||               \
1733 +       defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
1734  static u32 fec_enet_register_offset[] = {
1735         FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
1736         FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
1737 @@ -2270,7 +2311,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1738  {
1739         struct fec_enet_private *fep = netdev_priv(ndev);
1740  
1741 -       if (!ndev->phydev)
1742 +       if (!fep->phy_dev)
1743                 return -ENODEV;
1744  
1745         if (pause->tx_pause != pause->rx_pause) {
1746 @@ -2286,17 +2327,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1747         fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
1748  
1749         if (pause->rx_pause || pause->autoneg) {
1750 -               ndev->phydev->supported |= ADVERTISED_Pause;
1751 -               ndev->phydev->advertising |= ADVERTISED_Pause;
1752 +               fep->phy_dev->supported |= ADVERTISED_Pause;
1753 +               fep->phy_dev->advertising |= ADVERTISED_Pause;
1754         } else {
1755 -               ndev->phydev->supported &= ~ADVERTISED_Pause;
1756 -               ndev->phydev->advertising &= ~ADVERTISED_Pause;
1757 +               fep->phy_dev->supported &= ~ADVERTISED_Pause;
1758 +               fep->phy_dev->advertising &= ~ADVERTISED_Pause;
1759         }
1760  
1761         if (pause->autoneg) {
1762                 if (netif_running(ndev))
1763                         fec_stop(ndev);
1764 -               phy_start_aneg(ndev->phydev);
1765 +               phy_start_aneg(fep->phy_dev);
1766         }
1767         if (netif_running(ndev)) {
1768                 napi_disable(&fep->napi);
1769 @@ -2376,26 +2417,14 @@ static const struct fec_stat {
1770         { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
1771  };
1772  
1773 -#define FEC_STATS_SIZE         (ARRAY_SIZE(fec_stats) * sizeof(u64))
1774 -
1775 -static void fec_enet_update_ethtool_stats(struct net_device *dev)
1776 +static void fec_enet_get_ethtool_stats(struct net_device *dev,
1777 +       struct ethtool_stats *stats, u64 *data)
1778  {
1779         struct fec_enet_private *fep = netdev_priv(dev);
1780         int i;
1781  
1782         for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1783 -               fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
1784 -}
1785 -
1786 -static void fec_enet_get_ethtool_stats(struct net_device *dev,
1787 -                                      struct ethtool_stats *stats, u64 *data)
1788 -{
1789 -       struct fec_enet_private *fep = netdev_priv(dev);
1790 -
1791 -       if (netif_running(dev))
1792 -               fec_enet_update_ethtool_stats(dev);
1793 -
1794 -       memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
1795 +               data[i] = readl(fep->hwp + fec_stats[i].offset);
1796  }
1797  
1798  static void fec_enet_get_strings(struct net_device *netdev,
1799 @@ -2420,17 +2449,12 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
1800                 return -EOPNOTSUPP;
1801         }
1802  }
1803 -
1804 -#else  /* !defined(CONFIG_M5272) */
1805 -#define FEC_STATS_SIZE 0
1806 -static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
1807 -{
1808 -}
1809  #endif /* !defined(CONFIG_M5272) */
1810  
1811  static int fec_enet_nway_reset(struct net_device *dev)
1812  {
1813 -       struct phy_device *phydev = dev->phydev;
1814 +       struct fec_enet_private *fep = netdev_priv(dev);
1815 +       struct phy_device *phydev = fep->phy_dev;
1816  
1817         if (!phydev)
1818                 return -ENODEV;
1819 @@ -2455,6 +2479,9 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
1820         struct fec_enet_private *fep = netdev_priv(ndev);
1821         int rx_itr, tx_itr;
1822  
1823 +       if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1824 +               return;
1825 +
1826         /* Must be greater than zero to avoid unpredictable behavior */
1827         if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
1828             !fep->tx_time_itr || !fep->tx_pkts_itr)
1829 @@ -2477,12 +2504,10 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
1830  
1831         writel(tx_itr, fep->hwp + FEC_TXIC0);
1832         writel(rx_itr, fep->hwp + FEC_RXIC0);
1833 -       if (fep->quirks & FEC_QUIRK_HAS_AVB) {
1834 -               writel(tx_itr, fep->hwp + FEC_TXIC1);
1835 -               writel(rx_itr, fep->hwp + FEC_RXIC1);
1836 -               writel(tx_itr, fep->hwp + FEC_TXIC2);
1837 -               writel(rx_itr, fep->hwp + FEC_RXIC2);
1838 -       }
1839 +       writel(tx_itr, fep->hwp + FEC_TXIC1);
1840 +       writel(rx_itr, fep->hwp + FEC_RXIC1);
1841 +       writel(tx_itr, fep->hwp + FEC_TXIC2);
1842 +       writel(rx_itr, fep->hwp + FEC_RXIC2);
1843  }
1844  
1845  static int
1846 @@ -2490,7 +2515,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
1847  {
1848         struct fec_enet_private *fep = netdev_priv(ndev);
1849  
1850 -       if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
1851 +       if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1852                 return -EOPNOTSUPP;
1853  
1854         ec->rx_coalesce_usecs = fep->rx_time_itr;
1855 @@ -2508,28 +2533,28 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
1856         struct fec_enet_private *fep = netdev_priv(ndev);
1857         unsigned int cycle;
1858  
1859 -       if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
1860 +       if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1861                 return -EOPNOTSUPP;
1862  
1863         if (ec->rx_max_coalesced_frames > 255) {
1864 -               pr_err("Rx coalesced frames exceed hardware limitation\n");
1865 +               pr_err("Rx coalesced frames exceed hardware limitation\n");
1866                 return -EINVAL;
1867         }
1868  
1869         if (ec->tx_max_coalesced_frames > 255) {
1870 -               pr_err("Tx coalesced frame exceed hardware limitation\n");
1871 +               pr_err("Tx coalesced frame exceed hardware limitation\n");
1872                 return -EINVAL;
1873         }
1874  
1875         cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
1876         if (cycle > 0xFFFF) {
1877 -               pr_err("Rx coalesced usec exceed hardware limitation\n");
1878 +               pr_err("Rx coalesced usec exceed hardware limitation\n");
1879                 return -EINVAL;
1880         }
1881  
1882         cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
1883         if (cycle > 0xFFFF) {
1884 -               pr_err("Rx coalesced usec exceed hardware limitation\n");
1885 +               pr_err("Tx coalesced usec exceed hardware limitation\n");
1886                 return -EINVAL;
1887         }
1888  
1889 @@ -2629,6 +2654,8 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1890  }
1891  
1892  static const struct ethtool_ops fec_enet_ethtool_ops = {
1893 +       .get_settings           = fec_enet_get_settings,
1894 +       .set_settings           = fec_enet_set_settings,
1895         .get_drvinfo            = fec_enet_get_drvinfo,
1896         .get_regs_len           = fec_enet_get_regs_len,
1897         .get_regs               = fec_enet_get_regs,
1898 @@ -2648,14 +2675,12 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
1899         .set_tunable            = fec_enet_set_tunable,
1900         .get_wol                = fec_enet_get_wol,
1901         .set_wol                = fec_enet_set_wol,
1902 -       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
1903 -       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
1904  };
1905  
1906  static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1907  {
1908         struct fec_enet_private *fep = netdev_priv(ndev);
1909 -       struct phy_device *phydev = ndev->phydev;
1910 +       struct phy_device *phydev = fep->phy_dev;
1911  
1912         if (!netif_running(ndev))
1913                 return -EINVAL;
1914 @@ -2685,25 +2710,25 @@ static void fec_enet_free_buffers(struct net_device *ndev)
1915  
1916         for (q = 0; q < fep->num_rx_queues; q++) {
1917                 rxq = fep->rx_queue[q];
1918 -               bdp = rxq->bd.base;
1919 -               for (i = 0; i < rxq->bd.ring_size; i++) {
1920 +               bdp = rxq->rx_bd_base;
1921 +               for (i = 0; i < rxq->rx_ring_size; i++) {
1922                         skb = rxq->rx_skbuff[i];
1923                         rxq->rx_skbuff[i] = NULL;
1924                         if (skb) {
1925                                 dma_unmap_single(&fep->pdev->dev,
1926 -                                                fec32_to_cpu(bdp->cbd_bufaddr),
1927 +                                                bdp->cbd_bufaddr,
1928                                                  FEC_ENET_RX_FRSIZE - fep->rx_align,
1929                                                  DMA_FROM_DEVICE);
1930                                 dev_kfree_skb(skb);
1931                         }
1932 -                       bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1933 +                       bdp = fec_enet_get_nextdesc(bdp, fep, q);
1934                 }
1935         }
1936  
1937         for (q = 0; q < fep->num_tx_queues; q++) {
1938                 txq = fep->tx_queue[q];
1939 -               bdp = txq->bd.base;
1940 -               for (i = 0; i < txq->bd.ring_size; i++) {
1941 +               bdp = txq->tx_bd_base;
1942 +               for (i = 0; i < txq->tx_ring_size; i++) {
1943                         kfree(txq->tx_bounce[i]);
1944                         txq->tx_bounce[i] = NULL;
1945                         skb = txq->tx_skbuff[i];
1946 @@ -2722,8 +2747,8 @@ static void fec_enet_free_queue(struct net_device *ndev)
1947         for (i = 0; i < fep->num_tx_queues; i++)
1948                 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
1949                         txq = fep->tx_queue[i];
1950 -                       dma_free_coherent(&fep->pdev->dev,
1951 -                                         txq->bd.ring_size * TSO_HEADER_SIZE,
1952 +                       dma_free_coherent(NULL,
1953 +                                         txq->tx_ring_size * TSO_HEADER_SIZE,
1954                                           txq->tso_hdrs,
1955                                           txq->tso_hdrs_dma);
1956                 }
1957 @@ -2749,15 +2774,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
1958                 }
1959  
1960                 fep->tx_queue[i] = txq;
1961 -               txq->bd.ring_size = TX_RING_SIZE;
1962 -               fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
1963 +               txq->tx_ring_size = TX_RING_SIZE;
1964 +               fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
1965  
1966                 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
1967                 txq->tx_wake_threshold =
1968 -                       (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
1969 +                               (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
1970  
1971 -               txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
1972 -                                       txq->bd.ring_size * TSO_HEADER_SIZE,
1973 +               txq->tso_hdrs = dma_alloc_coherent(NULL,
1974 +                                       txq->tx_ring_size * TSO_HEADER_SIZE,
1975                                         &txq->tso_hdrs_dma,
1976                                         GFP_KERNEL);
1977                 if (!txq->tso_hdrs) {
1978 @@ -2774,8 +2799,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
1979                         goto alloc_failed;
1980                 }
1981  
1982 -               fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
1983 -               fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
1984 +               fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
1985 +               fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
1986         }
1987         return ret;
1988  
1989 @@ -2794,8 +2819,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
1990         struct fec_enet_priv_rx_q *rxq;
1991  
1992         rxq = fep->rx_queue[queue];
1993 -       bdp = rxq->bd.base;
1994 -       for (i = 0; i < rxq->bd.ring_size; i++) {
1995 +       bdp = rxq->rx_bd_base;
1996 +       for (i = 0; i < rxq->rx_ring_size; i++) {
1997                 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
1998                 if (!skb)
1999                         goto err_alloc;
2000 @@ -2806,19 +2831,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
2001                 }
2002  
2003                 rxq->rx_skbuff[i] = skb;
2004 -               bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
2005 +               bdp->cbd_sc = BD_ENET_RX_EMPTY;
2006  
2007                 if (fep->bufdesc_ex) {
2008                         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2009 -                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
2010 +                       ebdp->cbd_esc = BD_ENET_RX_INT;
2011                 }
2012  
2013 -               bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2014 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
2015         }
2016  
2017         /* Set the last buffer to wrap. */
2018 -       bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
2019 -       bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2020 +       bdp = fec_enet_get_prevdesc(bdp, fep, queue);
2021 +       bdp->cbd_sc |= BD_SC_WRAP;
2022         return 0;
2023  
2024   err_alloc:
2025 @@ -2835,26 +2860,26 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
2026         struct fec_enet_priv_tx_q *txq;
2027  
2028         txq = fep->tx_queue[queue];
2029 -       bdp = txq->bd.base;
2030 -       for (i = 0; i < txq->bd.ring_size; i++) {
2031 +       bdp = txq->tx_bd_base;
2032 +       for (i = 0; i < txq->tx_ring_size; i++) {
2033                 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
2034                 if (!txq->tx_bounce[i])
2035                         goto err_alloc;
2036  
2037 -               bdp->cbd_sc = cpu_to_fec16(0);
2038 -               bdp->cbd_bufaddr = cpu_to_fec32(0);
2039 +               bdp->cbd_sc = 0;
2040 +               bdp->cbd_bufaddr = 0;
2041  
2042                 if (fep->bufdesc_ex) {
2043                         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2044 -                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
2045 +                       ebdp->cbd_esc = BD_ENET_TX_INT;
2046                 }
2047  
2048 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
2049 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
2050         }
2051  
2052         /* Set the last buffer to wrap. */
2053 -       bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
2054 -       bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2055 +       bdp = fec_enet_get_prevdesc(bdp, fep, queue);
2056 +       bdp->cbd_sc |= BD_SC_WRAP;
2057  
2058         return 0;
2059  
2060 @@ -2903,14 +2928,10 @@ fec_enet_open(struct net_device *ndev)
2061                                 platform_get_device_id(fep->pdev);
2062         int ret;
2063  
2064 -       ret = pm_runtime_get_sync(&fep->pdev->dev);
2065 -       if (ret < 0)
2066 -               return ret;
2067 -
2068         pinctrl_pm_select_default_state(&fep->pdev->dev);
2069         ret = fec_enet_clk_enable(ndev, true);
2070         if (ret)
2071 -               goto clk_enable;
2072 +               return ret;
2073  
2074         /* I should reset the ring buffers here, but I don't yet know
2075          * a simple way to do that.
2076 @@ -2928,13 +2949,11 @@ fec_enet_open(struct net_device *ndev)
2077         if (ret)
2078                 goto err_enet_mii_probe;
2079  
2080 -       if (fep->quirks & FEC_QUIRK_ERR006687)
2081 -               imx6q_cpuidle_fec_irqs_used();
2082 -
2083         napi_enable(&fep->napi);
2084 -       phy_start(ndev->phydev);
2085 +       phy_start(fep->phy_dev);
2086         netif_tx_start_all_queues(ndev);
2087  
2088 +       pm_runtime_get_sync(ndev->dev.parent);
2089         if ((id_entry->driver_data & FEC_QUIRK_BUG_WAITMODE) &&
2090             !fec_enet_irq_workaround(fep))
2091                 pm_qos_add_request(&fep->pm_qos_req,
2092 @@ -2947,16 +2966,14 @@ fec_enet_open(struct net_device *ndev)
2093  
2094         device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
2095                                  FEC_WOL_FLAG_ENABLE);
2096 +       fep->miibus_up_failed = false;
2097  
2098         return 0;
2099  
2100  err_enet_mii_probe:
2101         fec_enet_free_buffers(ndev);
2102  err_enet_alloc:
2103 -       fec_enet_clk_enable(ndev, false);
2104 -clk_enable:
2105 -       pm_runtime_mark_last_busy(&fep->pdev->dev);
2106 -       pm_runtime_put_autosuspend(&fep->pdev->dev);
2107 +       fep->miibus_up_failed = true;
2108         if (!fep->mii_bus_share)
2109                 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2110         return ret;
2111 @@ -2967,7 +2984,7 @@ fec_enet_close(struct net_device *ndev)
2112  {
2113         struct fec_enet_private *fep = netdev_priv(ndev);
2114  
2115 -       phy_stop(ndev->phydev);
2116 +       phy_stop(fep->phy_dev);
2117  
2118         if (netif_device_present(ndev)) {
2119                 napi_disable(&fep->napi);
2120 @@ -2975,21 +2992,13 @@ fec_enet_close(struct net_device *ndev)
2121                 fec_stop(ndev);
2122         }
2123  
2124 -       phy_disconnect(ndev->phydev);
2125 -       ndev->phydev = NULL;
2126 -
2127 -       if (fep->quirks & FEC_QUIRK_ERR006687)
2128 -               imx6q_cpuidle_fec_irqs_unused();
2129 -
2130 -       fec_enet_update_ethtool_stats(ndev);
2131 +       phy_disconnect(fep->phy_dev);
2132 +       fep->phy_dev = NULL;
2133  
2134         fec_enet_clk_enable(ndev, false);
2135         pm_qos_remove_request(&fep->pm_qos_req);
2136 -       if (!fep->mii_bus_share)
2137 -               pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2138 -       pm_runtime_mark_last_busy(&fep->pdev->dev);
2139 -       pm_runtime_put_autosuspend(&fep->pdev->dev);
2140 -
2141 +       pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2142 +       pm_runtime_put_sync_suspend(ndev->dev.parent);
2143         fec_enet_free_buffers(ndev);
2144  
2145         return 0;
2146 @@ -3005,7 +3014,7 @@ fec_enet_close(struct net_device *ndev)
2147   * this kind of feature?).
2148   */
2149  
2150 -#define FEC_HASH_BITS  6               /* #bits in hash */
2151 +#define HASH_BITS      6               /* #bits in hash */
2152  #define CRC32_POLY     0xEDB88320
2153  
2154  static void set_multicast_list(struct net_device *ndev)
2155 @@ -3014,7 +3023,6 @@ static void set_multicast_list(struct net_device *ndev)
2156         struct netdev_hw_addr *ha;
2157         unsigned int i, bit, data, crc, tmp;
2158         unsigned char hash;
2159 -       unsigned int hash_high, hash_low;
2160  
2161         if (ndev->flags & IFF_PROMISC) {
2162                 tmp = readl(fep->hwp + FEC_R_CNTRL);
2163 @@ -3037,10 +3045,10 @@ static void set_multicast_list(struct net_device *ndev)
2164                 return;
2165         }
2166  
2167 -       /* Add the addresses in hash register
2168 +       /* Clear filter and add the addresses in hash register
2169          */
2170 -       hash_high = 0;
2171 -       hash_low = 0;
2172 +       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2173 +       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2174  
2175         netdev_for_each_mc_addr(ha, ndev) {
2176                 /* calculate crc32 value of mac address */
2177 @@ -3054,20 +3062,21 @@ static void set_multicast_list(struct net_device *ndev)
2178                         }
2179                 }
2180  
2181 -               /* only upper 6 bits (FEC_HASH_BITS) are used
2182 +               /* only upper 6 bits (HASH_BITS) are used
2183                  * which point to specific bit in he hash registers
2184                  */
2185 -               hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2186 +               hash = (crc >> (32 - HASH_BITS)) & 0x3f;
2187  
2188                 if (hash > 31) {
2189 -                       hash_high |= 1 << (hash - 32);
2190 +                       tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2191 +                       tmp |= 1 << (hash - 32);
2192 +                       writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2193                 } else {
2194 -                       hash_low |= 1 << hash;
2195 +                       tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2196 +                       tmp |= 1 << hash;
2197 +                       writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2198                 }
2199         }
2200 -
2201 -       writel_relaxed(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2202 -       writel_relaxed(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2203  }
2204  
2205  /* Set a MAC change in hardware. */
2206 @@ -3122,6 +3131,7 @@ static void fec_poll_controller(struct net_device *dev)
2207  }
2208  #endif
2209  
2210 +#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
2211  static inline void fec_enet_set_netdev_features(struct net_device *netdev,
2212         netdev_features_t features)
2213  {
2214 @@ -3145,7 +3155,7 @@ static int fec_set_features(struct net_device *netdev,
2215         struct fec_enet_private *fep = netdev_priv(netdev);
2216         netdev_features_t changed = features ^ netdev->features;
2217  
2218 -       if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
2219 +       if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
2220                 napi_disable(&fep->napi);
2221                 netif_tx_lock_bh(netdev);
2222                 fec_stop(netdev);
2223 @@ -3209,14 +3219,6 @@ static const struct net_device_ops fec_netdev_ops = {
2224         .ndo_set_features       = fec_set_features,
2225  };
2226  
2227 -static const unsigned short offset_des_active_rxq[] = {
2228 -       FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
2229 -};
2230 -
2231 -static const unsigned short offset_des_active_txq[] = {
2232 -       FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
2233 -};
2234 -
2235   /*
2236    * XXX:  We need to clean up on failure exits here.
2237    *
2238 @@ -3224,16 +3226,14 @@ static const unsigned short offset_des_active_txq[] = {
2239  static int fec_enet_init(struct net_device *ndev)
2240  {
2241         struct fec_enet_private *fep = netdev_priv(ndev);
2242 +       struct fec_enet_priv_tx_q *txq;
2243 +       struct fec_enet_priv_rx_q *rxq;
2244         struct bufdesc *cbd_base;
2245         dma_addr_t bd_dma;
2246         int bd_size;
2247         unsigned int i;
2248 -       unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
2249 -                       sizeof(struct bufdesc);
2250 -       unsigned dsize_log2 = __fls(dsize);
2251  
2252 -       WARN_ON(dsize != (1 << dsize_log2));
2253 -#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
2254 +#if defined(CONFIG_ARM)
2255         fep->rx_align = 0xf;
2256         fep->tx_align = 0xf;
2257  #else
2258 @@ -3243,11 +3243,16 @@ static int fec_enet_init(struct net_device *ndev)
2259  
2260         fec_enet_alloc_queue(ndev);
2261  
2262 -       bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
2263 +       if (fep->bufdesc_ex)
2264 +               fep->bufdesc_size = sizeof(struct bufdesc_ex);
2265 +       else
2266 +               fep->bufdesc_size = sizeof(struct bufdesc);
2267 +       bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
2268 +                       fep->bufdesc_size;
2269  
2270         /* Allocate memory for buffer descriptors. */
2271 -       cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
2272 -                                      GFP_KERNEL);
2273 +       cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
2274 +                                     GFP_KERNEL);
2275         if (!cbd_base) {
2276                 return -ENOMEM;
2277         }
2278 @@ -3261,35 +3266,33 @@ static int fec_enet_init(struct net_device *ndev)
2279  
2280         /* Set receive and transmit descriptor base. */
2281         for (i = 0; i < fep->num_rx_queues; i++) {
2282 -               struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
2283 -               unsigned size = dsize * rxq->bd.ring_size;
2284 -
2285 -               rxq->bd.qid = i;
2286 -               rxq->bd.base = cbd_base;
2287 -               rxq->bd.cur = cbd_base;
2288 -               rxq->bd.dma = bd_dma;
2289 -               rxq->bd.dsize = dsize;
2290 -               rxq->bd.dsize_log2 = dsize_log2;
2291 -               rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
2292 -               bd_dma += size;
2293 -               cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
2294 -               rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
2295 +               rxq = fep->rx_queue[i];
2296 +               rxq->index = i;
2297 +               rxq->rx_bd_base = (struct bufdesc *)cbd_base;
2298 +               rxq->bd_dma = bd_dma;
2299 +               if (fep->bufdesc_ex) {
2300 +                       bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
2301 +                       cbd_base = (struct bufdesc *)
2302 +                               (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
2303 +               } else {
2304 +                       bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
2305 +                       cbd_base += rxq->rx_ring_size;
2306 +               }
2307         }
2308  
2309         for (i = 0; i < fep->num_tx_queues; i++) {
2310 -               struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
2311 -               unsigned size = dsize * txq->bd.ring_size;
2312 -
2313 -               txq->bd.qid = i;
2314 -               txq->bd.base = cbd_base;
2315 -               txq->bd.cur = cbd_base;
2316 -               txq->bd.dma = bd_dma;
2317 -               txq->bd.dsize = dsize;
2318 -               txq->bd.dsize_log2 = dsize_log2;
2319 -               txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
2320 -               bd_dma += size;
2321 -               cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
2322 -               txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
2323 +               txq = fep->tx_queue[i];
2324 +               txq->index = i;
2325 +               txq->tx_bd_base = (struct bufdesc *)cbd_base;
2326 +               txq->bd_dma = bd_dma;
2327 +               if (fep->bufdesc_ex) {
2328 +                       bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
2329 +                       cbd_base = (struct bufdesc *)
2330 +                        (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
2331 +               } else {
2332 +                       bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
2333 +                       cbd_base += txq->tx_ring_size;
2334 +               }
2335         }
2336  
2337  
2338 @@ -3323,60 +3326,62 @@ static int fec_enet_init(struct net_device *ndev)
2339  
2340         fec_restart(ndev);
2341  
2342 -       fec_enet_update_ethtool_stats(ndev);
2343 -
2344         return 0;
2345  }
2346  
2347  #ifdef CONFIG_OF
2348 -static int fec_reset_phy(struct platform_device *pdev)
2349 +static void fec_reset_phy(struct platform_device *pdev)
2350 +{
2351 +       struct net_device *ndev = platform_get_drvdata(pdev);
2352 +       struct fec_enet_private *fep = netdev_priv(ndev);
2353 +
2354 +       if (!gpio_is_valid(fep->phy_reset_gpio))
2355 +               return;
2356 +
2357 +       gpio_set_value_cansleep(fep->phy_reset_gpio, 0);
2358 +       msleep(fep->phy_reset_duration);
2359 +       gpio_set_value_cansleep(fep->phy_reset_gpio, 1);
2360 +}
2361 +
2362 +static int fec_get_reset_gpio(struct platform_device *pdev)
2363  {
2364         int err, phy_reset;
2365 -       bool active_high = false;
2366         int msec = 1;
2367         struct device_node *np = pdev->dev.of_node;
2368 -
2369 -       if (!np)
2370 -               return 0;
2371 -
2372 -       err = of_property_read_u32(np, "phy-reset-duration", &msec);
2373 -       /* A sane reset duration should not be longer than 1s */
2374 -       if (!err && msec > 1000)
2375 -               msec = 1;
2376 +       struct net_device *ndev = platform_get_drvdata(pdev);
2377 +       struct fec_enet_private *fep = netdev_priv(ndev);
2378  
2379         phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
2380 -       if (phy_reset == -EPROBE_DEFER)
2381 +       if (!gpio_is_valid(phy_reset))
2382                 return phy_reset;
2383 -       else if (!gpio_is_valid(phy_reset))
2384 -               return 0;
2385 -
2386 -       active_high = of_property_read_bool(np, "phy-reset-active-high");
2387  
2388         err = devm_gpio_request_one(&pdev->dev, phy_reset,
2389 -                       active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
2390 -                       "phy-reset");
2391 +                                   GPIOF_OUT_INIT_LOW, "phy-reset");
2392         if (err) {
2393                 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
2394                 return err;
2395         }
2396 -
2397 -       if (msec > 20)
2398 -               msleep(msec);
2399 -       else
2400 -               usleep_range(msec * 1000, msec * 1000 + 1000);
2401 -
2402 -       gpio_set_value_cansleep(phy_reset, !active_high);
2403 -
2404 -       return 0;
2405 +
2406 +       of_property_read_u32(np, "phy-reset-duration", &msec);
2407 +       /* A sane reset duration should not be longer than 1s */
2408 +       if (msec > 1000)
2409 +               msec = 1;
2410 +       fep->phy_reset_duration = msec;
2411 +
2412 +       return phy_reset;
2413  }
2414  #else /* CONFIG_OF */
2415 -static int fec_reset_phy(struct platform_device *pdev)
2416 +static void fec_reset_phy(struct platform_device *pdev)
2417  {
2418         /*
2419          * In case of platform probe, the reset has been done
2420          * by machine code.
2421          */
2422 -       return 0;
2423 +}
2424 +
2425 +static inline int fec_get_reset_gpio(struct platform_device *pdev)
2426 +{
2427 +       return -EINVAL;
2428  }
2429  #endif /* CONFIG_OF */
2430  
2431 @@ -3384,6 +3389,7 @@ static void
2432  fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
2433  {
2434         struct device_node *np = pdev->dev.of_node;
2435 +       int err;
2436  
2437         *num_tx = *num_rx = 1;
2438  
2439 @@ -3391,9 +3397,13 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
2440                 return;
2441  
2442         /* parse the num of tx and rx queues */
2443 -       of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
2444 +       err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
2445 +       if (err)
2446 +               *num_tx = 1;
2447  
2448 -       of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
2449 +       err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
2450 +       if (err)
2451 +               *num_rx = 1;
2452  
2453         if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
2454                 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
2455 @@ -3460,13 +3470,11 @@ fec_probe(struct platform_device *pdev)
2456         int num_tx_qs;
2457         int num_rx_qs;
2458  
2459 -       of_dma_configure(&pdev->dev, np);
2460 -
2461         fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
2462  
2463         /* Init network device */
2464 -       ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
2465 -                                 FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
2466 +       ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
2467 +                                 num_tx_qs, num_rx_qs);
2468         if (!ndev)
2469                 return -ENOMEM;
2470  
2471 @@ -3505,13 +3513,14 @@ fec_probe(struct platform_device *pdev)
2472  
2473         platform_set_drvdata(pdev, ndev);
2474  
2475 -       if ((of_machine_is_compatible("fsl,imx6q") ||
2476 -            of_machine_is_compatible("fsl,imx6dl")) &&
2477 -           !of_property_read_bool(np, "fsl,err006687-workaround-present"))
2478 -               fep->quirks |= FEC_QUIRK_ERR006687;
2479 -
2480         fec_enet_of_parse_stop_mode(pdev);
2481  
2482 +       ret = fec_get_reset_gpio(pdev);
2483 +       if (ret == -EPROBE_DEFER)
2484 +               goto gpio_defer;
2485 +       fep->phy_reset_gpio = ret;
2486 +
2487 +
2488         if (of_get_property(np, "fsl,magic-packet", NULL))
2489                 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
2490  
2491 @@ -3524,7 +3533,6 @@ fec_probe(struct platform_device *pdev)
2492                         goto failed_phy;
2493                 }
2494                 phy_node = of_node_get(np);
2495 -               fep->fixed_link = true;
2496         }
2497         fep->phy_node = phy_node;
2498  
2499 @@ -3539,10 +3547,6 @@ fec_probe(struct platform_device *pdev)
2500                 fep->phy_interface = ret;
2501         }
2502  
2503 -#if !defined(CONFIG_ARM64)
2504 -       request_bus_freq(BUS_FREQ_HIGH);
2505 -#endif
2506 -
2507         fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2508         if (IS_ERR(fep->clk_ipg)) {
2509                 ret = PTR_ERR(fep->clk_ipg);
2510 @@ -3577,39 +3581,24 @@ fec_probe(struct platform_device *pdev)
2511                 fep->bufdesc_ex = false;
2512         }
2513  
2514 +       pm_runtime_enable(&pdev->dev);
2515         ret = fec_enet_clk_enable(ndev, true);
2516         if (ret)
2517                 goto failed_clk;
2518  
2519 -       ret = clk_prepare_enable(fep->clk_ipg);
2520 -       if (ret)
2521 -               goto failed_clk_ipg;
2522 -       ret = clk_prepare_enable(fep->clk_ahb);
2523 -       if (ret)
2524 -               goto failed_clk_ahb;
2525 -
2526         fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2527         if (!IS_ERR(fep->reg_phy)) {
2528                 ret = regulator_enable(fep->reg_phy);
2529                 if (ret) {
2530                         dev_err(&pdev->dev,
2531                                 "Failed to enable phy regulator: %d\n", ret);
2532 -                       clk_disable_unprepare(fep->clk_ipg);
2533                         goto failed_regulator;
2534                 }
2535         } else {
2536                 fep->reg_phy = NULL;
2537         }
2538  
2539 -       pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
2540 -       pm_runtime_use_autosuspend(&pdev->dev);
2541 -       pm_runtime_get_noresume(&pdev->dev);
2542 -       pm_runtime_set_active(&pdev->dev);
2543 -       pm_runtime_enable(&pdev->dev);
2544 -
2545 -       ret = fec_reset_phy(pdev);
2546 -       if (ret)
2547 -               goto failed_reset;
2548 +       fec_reset_phy(pdev);
2549  
2550         if (fep->bufdesc_ex)
2551                 fec_ptp_init(pdev);
2552 @@ -3641,15 +3630,9 @@ fec_probe(struct platform_device *pdev)
2553                 fep->wake_irq = fep->irq[0];
2554  
2555         init_completion(&fep->mdio_done);
2556 -
2557 -       /* board only enable one mii bus in default */
2558 -       if (!of_get_property(np, "fsl,mii-exclusive", NULL))
2559 -               fep->quirks |= FEC_QUIRK_SINGLE_MDIO;
2560         ret = fec_enet_mii_init(pdev);
2561 -       if (ret) {
2562 -               dev_id = 0;
2563 +       if (ret)
2564                 goto failed_mii_init;
2565 -       }
2566  
2567         /* Carrier starts down, phylib will bring it up */
2568         netif_carrier_off(ndev);
2569 @@ -3660,11 +3643,6 @@ fec_probe(struct platform_device *pdev)
2570         if (ret)
2571                 goto failed_register;
2572  
2573 -       if (!fep->fixed_link) {
2574 -               fep->fixups = of_fec_enet_parse_fixup(np);
2575 -               fec_enet_register_fixup(ndev);
2576 -       }
2577 -
2578         device_init_wakeup(&ndev->dev, fep->wol_flag &
2579                            FEC_WOL_HAS_MAGIC_PACKET);
2580  
2581 @@ -3673,10 +3651,6 @@ fec_probe(struct platform_device *pdev)
2582  
2583         fep->rx_copybreak = COPYBREAK_DEFAULT;
2584         INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
2585 -
2586 -       pm_runtime_mark_last_busy(&pdev->dev);
2587 -       pm_runtime_put_autosuspend(&pdev->dev);
2588 -
2589         return 0;
2590  
2591  failed_register:
2592 @@ -3684,22 +3658,14 @@ fec_probe(struct platform_device *pdev)
2593  failed_mii_init:
2594  failed_irq:
2595  failed_init:
2596 -       fec_ptp_stop(pdev);
2597         if (fep->reg_phy)
2598                 regulator_disable(fep->reg_phy);
2599 -failed_reset:
2600 -       pm_runtime_put(&pdev->dev);
2601 -       pm_runtime_disable(&pdev->dev);
2602  failed_regulator:
2603 -failed_clk_ahb:
2604 -       clk_disable_unprepare(fep->clk_ipg);
2605 -failed_clk_ipg:
2606         fec_enet_clk_enable(ndev, false);
2607  failed_clk:
2608 -       if (of_phy_is_fixed_link(np))
2609 -               of_phy_deregister_fixed_link(np);
2610  failed_phy:
2611         of_node_put(phy_node);
2612 +gpio_defer:
2613  failed_ioremap:
2614         free_netdev(ndev);
2615  
2616 @@ -3711,16 +3677,15 @@ fec_drv_remove(struct platform_device *pdev)
2617  {
2618         struct net_device *ndev = platform_get_drvdata(pdev);
2619         struct fec_enet_private *fep = netdev_priv(ndev);
2620 -       struct device_node *np = pdev->dev.of_node;
2621  
2622 +       cancel_delayed_work_sync(&fep->time_keep);
2623         cancel_work_sync(&fep->tx_timeout_work);
2624 -       fec_ptp_stop(pdev);
2625         unregister_netdev(ndev);
2626         fec_enet_mii_remove(fep);
2627         if (fep->reg_phy)
2628                 regulator_disable(fep->reg_phy);
2629 -       if (of_phy_is_fixed_link(np))
2630 -               of_phy_deregister_fixed_link(np);
2631 +       if (fep->ptp_clock)
2632 +               ptp_clock_unregister(fep->ptp_clock);
2633         of_node_put(fep->phy_node);
2634         free_netdev(ndev);
2635  
2636 @@ -3731,13 +3696,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
2637  {
2638         struct net_device *ndev = dev_get_drvdata(dev);
2639         struct fec_enet_private *fep = netdev_priv(ndev);
2640 -       int ret = 0;
2641  
2642         rtnl_lock();
2643         if (netif_running(ndev)) {
2644                 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
2645                         fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
2646 -               phy_stop(ndev->phydev);
2647 +               phy_stop(fep->phy_dev);
2648                 napi_disable(&fep->napi);
2649                 netif_tx_lock_bh(ndev);
2650                 netif_device_detach(ndev);
2651 @@ -3751,12 +3715,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
2652                         enable_irq_wake(fep->wake_irq);
2653                 }
2654                 fec_enet_clk_enable(ndev, false);
2655 -               fep->active_in_suspend = !pm_runtime_status_suspended(dev);
2656 -               if (fep->active_in_suspend)
2657 -                       ret = pm_runtime_force_suspend(dev);
2658 -               if (ret < 0)
2659 -                       return ret;
2660 -       } else if (fep->mii_bus_share && !ndev->phydev) {
2661 +       } else if (fep->mii_bus_share && fep->miibus_up_failed && !fep->phy_dev) {
2662 +               fec_enet_clk_enable(ndev, false);
2663                 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2664         }
2665         rtnl_unlock();
2666 @@ -3777,7 +3737,7 @@ static int __maybe_unused fec_resume(struct device *dev)
2667  {
2668         struct net_device *ndev = dev_get_drvdata(dev);
2669         struct fec_enet_private *fep = netdev_priv(ndev);
2670 -       int ret = 0;
2671 +       int ret;
2672         int val;
2673  
2674         if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
2675 @@ -3788,8 +3748,6 @@ static int __maybe_unused fec_resume(struct device *dev)
2676  
2677         rtnl_lock();
2678         if (netif_running(ndev)) {
2679 -               if (fep->active_in_suspend)
2680 -                       pm_runtime_force_resume(dev);
2681                 ret = fec_enet_clk_enable(ndev, true);
2682                 if (ret) {
2683                         rtnl_unlock();
2684 @@ -3812,15 +3770,16 @@ static int __maybe_unused fec_resume(struct device *dev)
2685                 netif_device_attach(ndev);
2686                 netif_tx_unlock_bh(ndev);
2687                 napi_enable(&fep->napi);
2688 -               phy_start(ndev->phydev);
2689 -       } else if (fep->mii_bus_share && !ndev->phydev) {
2690 +               phy_start(fep->phy_dev);
2691 +       } else if (fep->mii_bus_share && !fep->phy_dev) {
2692                 pinctrl_pm_select_default_state(&fep->pdev->dev);
2693 +               fep->miibus_up_failed = true;
2694                 /* And then recovery mii bus */
2695 -               ret = fec_restore_mii_bus(ndev);
2696 +               fec_restore_mii_bus(ndev);
2697         }
2698         rtnl_unlock();
2699  
2700 -       return ret;
2701 +       return 0;
2702  
2703  failed_clk:
2704         if (fep->reg_phy)
2705 @@ -3828,46 +3787,21 @@ static int __maybe_unused fec_resume(struct device *dev)
2706         return ret;
2707  }
2708  
2709 -static int __maybe_unused fec_runtime_suspend(struct device *dev)
2710 +static int fec_runtime_suspend(struct device *dev)
2711  {
2712 -       struct net_device *ndev = dev_get_drvdata(dev);
2713 -       struct fec_enet_private *fep = netdev_priv(ndev);
2714 -
2715 -       clk_disable_unprepare(fep->clk_ahb);
2716 -       clk_disable_unprepare(fep->clk_ipg);
2717 -#if !defined(CONFIG_ARM64)
2718         release_bus_freq(BUS_FREQ_HIGH);
2719 -#endif
2720 -
2721         return 0;
2722  }
2723  
2724 -static int __maybe_unused fec_runtime_resume(struct device *dev)
2725 +static int fec_runtime_resume(struct device *dev)
2726  {
2727 -       struct net_device *ndev = dev_get_drvdata(dev);
2728 -       struct fec_enet_private *fep = netdev_priv(ndev);
2729 -       int ret;
2730 -
2731 -#if !defined(CONFIG_ARM64)
2732         request_bus_freq(BUS_FREQ_HIGH);
2733 -#endif
2734 -       ret = clk_prepare_enable(fep->clk_ahb);
2735 -       if (ret)
2736 -               return ret;
2737 -       ret = clk_prepare_enable(fep->clk_ipg);
2738 -       if (ret)
2739 -               goto failed_clk_ipg;
2740 -
2741         return 0;
2742 -
2743 -failed_clk_ipg:
2744 -       clk_disable_unprepare(fep->clk_ahb);
2745 -       return ret;
2746  }
2747  
2748  static const struct dev_pm_ops fec_pm_ops = {
2749 -       SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
2750         SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
2751 +       SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
2752  };
2753  
2754  static struct platform_driver fec_driver = {
2755 diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
2756 index 446ae9d..afe7f39 100644
2757 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
2758 +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
2759 @@ -66,6 +66,7 @@ struct mpc52xx_fec_priv {
2760         /* MDIO link details */
2761         unsigned int mdio_speed;
2762         struct device_node *phy_node;
2763 +       struct phy_device *phydev;
2764         enum phy_state link;
2765         int seven_wire_mode;
2766  };
2767 @@ -164,7 +165,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
2768  static void mpc52xx_fec_adjust_link(struct net_device *dev)
2769  {
2770         struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2771 -       struct phy_device *phydev = dev->phydev;
2772 +       struct phy_device *phydev = priv->phydev;
2773         int new_state = 0;
2774  
2775         if (phydev->link != PHY_DOWN) {
2776 @@ -214,17 +215,16 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
2777  static int mpc52xx_fec_open(struct net_device *dev)
2778  {
2779         struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2780 -       struct phy_device *phydev = NULL;
2781         int err = -EBUSY;
2782  
2783         if (priv->phy_node) {
2784 -               phydev = of_phy_connect(priv->ndev, priv->phy_node,
2785 -                                       mpc52xx_fec_adjust_link, 0, 0);
2786 -               if (!phydev) {
2787 +               priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
2788 +                                             mpc52xx_fec_adjust_link, 0, 0);
2789 +               if (!priv->phydev) {
2790                         dev_err(&dev->dev, "of_phy_connect failed\n");
2791                         return -ENODEV;
2792                 }
2793 -               phy_start(phydev);
2794 +               phy_start(priv->phydev);
2795         }
2796  
2797         if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
2798 @@ -268,9 +268,10 @@ static int mpc52xx_fec_open(struct net_device *dev)
2799   free_ctrl_irq:
2800         free_irq(dev->irq, dev);
2801   free_phy:
2802 -       if (phydev) {
2803 -               phy_stop(phydev);
2804 -               phy_disconnect(phydev);
2805 +       if (priv->phydev) {
2806 +               phy_stop(priv->phydev);
2807 +               phy_disconnect(priv->phydev);
2808 +               priv->phydev = NULL;
2809         }
2810  
2811         return err;
2812 @@ -279,7 +280,6 @@ static int mpc52xx_fec_open(struct net_device *dev)
2813  static int mpc52xx_fec_close(struct net_device *dev)
2814  {
2815         struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2816 -       struct phy_device *phydev = dev->phydev;
2817  
2818         netif_stop_queue(dev);
2819  
2820 @@ -291,10 +291,11 @@ static int mpc52xx_fec_close(struct net_device *dev)
2821         free_irq(priv->r_irq, dev);
2822         free_irq(priv->t_irq, dev);
2823  
2824 -       if (phydev) {
2825 +       if (priv->phydev) {
2826                 /* power down phy */
2827 -               phy_stop(phydev);
2828 -               phy_disconnect(phydev);
2829 +               phy_stop(priv->phydev);
2830 +               phy_disconnect(priv->phydev);
2831 +               priv->phydev = NULL;
2832         }
2833  
2834         return 0;
2835 @@ -762,6 +763,26 @@ static void mpc52xx_fec_reset(struct net_device *dev)
2836  
2837  /* ethtool interface */
2838  
2839 +static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2840 +{
2841 +       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2842 +
2843 +       if (!priv->phydev)
2844 +               return -ENODEV;
2845 +
2846 +       return phy_ethtool_gset(priv->phydev, cmd);
2847 +}
2848 +
2849 +static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2850 +{
2851 +       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2852 +
2853 +       if (!priv->phydev)
2854 +               return -ENODEV;
2855 +
2856 +       return phy_ethtool_sset(priv->phydev, cmd);
2857 +}
2858 +
2859  static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
2860  {
2861         struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2862 @@ -775,23 +796,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
2863  }
2864  
2865  static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
2866 +       .get_settings = mpc52xx_fec_get_settings,
2867 +       .set_settings = mpc52xx_fec_set_settings,
2868         .get_link = ethtool_op_get_link,
2869         .get_msglevel = mpc52xx_fec_get_msglevel,
2870         .set_msglevel = mpc52xx_fec_set_msglevel,
2871         .get_ts_info = ethtool_op_get_ts_info,
2872 -       .get_link_ksettings = phy_ethtool_get_link_ksettings,
2873 -       .set_link_ksettings = phy_ethtool_set_link_ksettings,
2874  };
2875  
2876  
2877  static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2878  {
2879 -       struct phy_device *phydev = dev->phydev;
2880 +       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2881  
2882 -       if (!phydev)
2883 +       if (!priv->phydev)
2884                 return -ENOTSUPP;
2885  
2886 -       return phy_mii_ioctl(phydev, rq, cmd);
2887 +       return phy_mii_ioctl(priv->phydev, rq, cmd);
2888  }
2889  
2890  static const struct net_device_ops mpc52xx_fec_netdev_ops = {
2891 @@ -1063,23 +1084,27 @@ static struct platform_driver mpc52xx_fec_driver = {
2892  /* Module                                                                   */
2893  /* ======================================================================== */
2894  
2895 -static struct platform_driver * const drivers[] = {
2896 -#ifdef CONFIG_FEC_MPC52xx_MDIO
2897 -       &mpc52xx_fec_mdio_driver,
2898 -#endif
2899 -       &mpc52xx_fec_driver,
2900 -};
2901 -
2902  static int __init
2903  mpc52xx_fec_init(void)
2904  {
2905 -       return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2906 +#ifdef CONFIG_FEC_MPC52xx_MDIO
2907 +       int ret;
2908 +       ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
2909 +       if (ret) {
2910 +               pr_err("failed to register mdio driver\n");
2911 +               return ret;
2912 +       }
2913 +#endif
2914 +       return platform_driver_register(&mpc52xx_fec_driver);
2915  }
2916  
2917  static void __exit
2918  mpc52xx_fec_exit(void)
2919  {
2920 -       platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2921 +       platform_driver_unregister(&mpc52xx_fec_driver);
2922 +#ifdef CONFIG_FEC_MPC52xx_MDIO
2923 +       platform_driver_unregister(&mpc52xx_fec_mdio_driver);
2924 +#endif
2925  }
2926  
2927  
2928 diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2929 index b5497e3..1e647be 100644
2930 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2931 +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2932 @@ -22,6 +22,7 @@
2933  
2934  struct mpc52xx_fec_mdio_priv {
2935         struct mpc52xx_fec __iomem *regs;
2936 +       int mdio_irqs[PHY_MAX_ADDR];
2937  };
2938  
2939  static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
2940 @@ -82,6 +83,9 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
2941         bus->read = mpc52xx_fec_mdio_read;
2942         bus->write = mpc52xx_fec_mdio_write;
2943  
2944 +       /* setup irqs */
2945 +       bus->irq = priv->mdio_irqs;
2946 +
2947         /* setup registers */
2948         err = of_address_to_resource(np, 0, &res);
2949         if (err)
2950 diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
2951 index f9e7446..7a8386a 100644
2952 --- a/drivers/net/ethernet/freescale/fec_ptp.c
2953 +++ b/drivers/net/ethernet/freescale/fec_ptp.c
2954 @@ -112,8 +112,9 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
2955         unsigned long flags;
2956         u32 val, tempval;
2957         int inc;
2958 -       struct timespec64 ts;
2959 +       struct timespec ts;
2960         u64 ns;
2961 +       u32 remainder;
2962         val = 0;
2963  
2964         if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
2965 @@ -162,7 +163,8 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
2966                 tempval = readl(fep->hwp + FEC_ATIME);
2967                 /* Convert the ptp local counter to 1588 timestamp */
2968                 ns = timecounter_cyc2time(&fep->tc, tempval);
2969 -               ts = ns_to_timespec64(ns);
2970 +               ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
2971 +               ts.tv_nsec = remainder;
2972  
2973                 /* The tempval is  less than 3 seconds, and  so val is less than
2974                  * 4 seconds. No overflow for 32bit calculation.
2975 @@ -596,16 +598,6 @@ void fec_ptp_init(struct platform_device *pdev)
2976         schedule_delayed_work(&fep->time_keep, HZ);
2977  }
2978  
2979 -void fec_ptp_stop(struct platform_device *pdev)
2980 -{
2981 -       struct net_device *ndev = platform_get_drvdata(pdev);
2982 -       struct fec_enet_private *fep = netdev_priv(ndev);
2983 -
2984 -       cancel_delayed_work_sync(&fep->time_keep);
2985 -       if (fep->ptp_clock)
2986 -               ptp_clock_unregister(fep->ptp_clock);
2987 -}
2988 -
2989  /**
2990   * fec_ptp_check_pps_event
2991   * @fep: the fec_enet_private structure handle
2992 diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
2993 deleted file mode 100644
2994 index 79b7c84..0000000
2995 --- a/drivers/net/ethernet/freescale/fman/Kconfig
2996 +++ /dev/null
2997 @@ -1,9 +0,0 @@
2998 -config FSL_FMAN
2999 -       tristate "FMan support"
3000 -       depends on FSL_SOC || COMPILE_TEST
3001 -       select GENERIC_ALLOCATOR
3002 -       select PHYLIB
3003 -       default n
3004 -       help
3005 -               Freescale Data-Path Acceleration Architecture Frame Manager
3006 -               (FMan) support
3007 diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
3008 deleted file mode 100644
3009 index 6049177..0000000
3010 --- a/drivers/net/ethernet/freescale/fman/Makefile
3011 +++ /dev/null
3012 @@ -1,9 +0,0 @@
3013 -subdir-ccflags-y +=  -I$(srctree)/drivers/net/ethernet/freescale/fman
3014 -
3015 -obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
3016 -obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
3017 -obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
3018 -
3019 -fsl_fman-objs  := fman_muram.o fman.o fman_sp.o
3020 -fsl_fman_port-objs := fman_port.o
3021 -fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
3022 diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
3023 deleted file mode 100644
3024 index dafd9e1..0000000
3025 --- a/drivers/net/ethernet/freescale/fman/fman.c
3026 +++ /dev/null
3027 @@ -1,2967 +0,0 @@
3028 -/*
3029 - * Copyright 2008-2015 Freescale Semiconductor Inc.
3030 - *
3031 - * Redistribution and use in source and binary forms, with or without
3032 - * modification, are permitted provided that the following conditions are met:
3033 - *     * Redistributions of source code must retain the above copyright
3034 - *       notice, this list of conditions and the following disclaimer.
3035 - *     * Redistributions in binary form must reproduce the above copyright
3036 - *       notice, this list of conditions and the following disclaimer in the
3037 - *       documentation and/or other materials provided with the distribution.
3038 - *     * Neither the name of Freescale Semiconductor nor the
3039 - *       names of its contributors may be used to endorse or promote products
3040 - *       derived from this software without specific prior written permission.
3041 - *
3042 - *
3043 - * ALTERNATIVELY, this software may be distributed under the terms of the
3044 - * GNU General Public License ("GPL") as published by the Free Software
3045 - * Foundation, either version 2 of that License or (at your option) any
3046 - * later version.
3047 - *
3048 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
3049 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
3050 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
3051 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
3052 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
3053 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
3054 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
3055 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3056 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
3057 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3058 - */
3059 -
3060 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3061 -
3062 -#include "fman.h"
3063 -#include "fman_muram.h"
3064 -
3065 -#include <linux/fsl/guts.h>
3066 -#include <linux/slab.h>
3067 -#include <linux/delay.h>
3068 -#include <linux/module.h>
3069 -#include <linux/of_platform.h>
3070 -#include <linux/clk.h>
3071 -#include <linux/of_address.h>
3072 -#include <linux/of_irq.h>
3073 -#include <linux/interrupt.h>
3074 -#include <linux/libfdt_env.h>
3075 -
3076 -/* General defines */
3077 -#define FMAN_LIODN_TBL                 64      /* size of LIODN table */
3078 -#define MAX_NUM_OF_MACS                        10
3079 -#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
3080 -#define BASE_RX_PORTID                 0x08
3081 -#define BASE_TX_PORTID                 0x28
3082 -
3083 -/* Modules registers offsets */
3084 -#define BMI_OFFSET             0x00080000
3085 -#define QMI_OFFSET             0x00080400
3086 -#define DMA_OFFSET             0x000C2000
3087 -#define FPM_OFFSET             0x000C3000
3088 -#define IMEM_OFFSET            0x000C4000
3089 -#define CGP_OFFSET             0x000DB000
3090 -
3091 -/* Exceptions bit map */
3092 -#define EX_DMA_BUS_ERROR               0x80000000
3093 -#define EX_DMA_READ_ECC                        0x40000000
3094 -#define EX_DMA_SYSTEM_WRITE_ECC        0x20000000
3095 -#define EX_DMA_FM_WRITE_ECC            0x10000000
3096 -#define EX_FPM_STALL_ON_TASKS          0x08000000
3097 -#define EX_FPM_SINGLE_ECC              0x04000000
3098 -#define EX_FPM_DOUBLE_ECC              0x02000000
3099 -#define EX_QMI_SINGLE_ECC              0x01000000
3100 -#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000
3101 -#define EX_QMI_DOUBLE_ECC              0x00400000
3102 -#define EX_BMI_LIST_RAM_ECC            0x00200000
3103 -#define EX_BMI_STORAGE_PROFILE_ECC     0x00100000
3104 -#define EX_BMI_STATISTICS_RAM_ECC      0x00080000
3105 -#define EX_IRAM_ECC                    0x00040000
3106 -#define EX_MURAM_ECC                   0x00020000
3107 -#define EX_BMI_DISPATCH_RAM_ECC        0x00010000
3108 -#define EX_DMA_SINGLE_PORT_ECC         0x00008000
3109 -
3110 -/* DMA defines */
3111 -/* masks */
3112 -#define DMA_MODE_BER                   0x00200000
3113 -#define DMA_MODE_ECC                   0x00000020
3114 -#define DMA_MODE_SECURE_PROT           0x00000800
3115 -#define DMA_MODE_AXI_DBG_MASK          0x0F000000
3116 -
3117 -#define DMA_TRANSFER_PORTID_MASK       0xFF000000
3118 -#define DMA_TRANSFER_TNUM_MASK         0x00FF0000
3119 -#define DMA_TRANSFER_LIODN_MASK        0x00000FFF
3120 -
3121 -#define DMA_STATUS_BUS_ERR             0x08000000
3122 -#define DMA_STATUS_READ_ECC            0x04000000
3123 -#define DMA_STATUS_SYSTEM_WRITE_ECC    0x02000000
3124 -#define DMA_STATUS_FM_WRITE_ECC        0x01000000
3125 -#define DMA_STATUS_FM_SPDAT_ECC        0x00080000
3126 -
3127 -#define DMA_MODE_CACHE_OR_SHIFT                30
3128 -#define DMA_MODE_AXI_DBG_SHIFT                 24
3129 -#define DMA_MODE_CEN_SHIFT                     13
3130 -#define DMA_MODE_CEN_MASK                      0x00000007
3131 -#define DMA_MODE_DBG_SHIFT                     7
3132 -#define DMA_MODE_AID_MODE_SHIFT                4
3133 -
3134 -#define DMA_THRESH_COMMQ_SHIFT                 24
3135 -#define DMA_THRESH_READ_INT_BUF_SHIFT          16
3136 -#define DMA_THRESH_READ_INT_BUF_MASK           0x0000003f
3137 -#define DMA_THRESH_WRITE_INT_BUF_MASK          0x0000003f
3138 -
3139 -#define DMA_TRANSFER_PORTID_SHIFT              24
3140 -#define DMA_TRANSFER_TNUM_SHIFT                16
3141 -
3142 -#define DMA_CAM_SIZEOF_ENTRY                   0x40
3143 -#define DMA_CAM_UNITS                          8
3144 -
3145 -#define DMA_LIODN_SHIFT                16
3146 -#define DMA_LIODN_BASE_MASK    0x00000FFF
3147 -
3148 -/* FPM defines */
3149 -#define FPM_EV_MASK_DOUBLE_ECC         0x80000000
3150 -#define FPM_EV_MASK_STALL              0x40000000
3151 -#define FPM_EV_MASK_SINGLE_ECC         0x20000000
3152 -#define FPM_EV_MASK_RELEASE_FM         0x00010000
3153 -#define FPM_EV_MASK_DOUBLE_ECC_EN      0x00008000
3154 -#define FPM_EV_MASK_STALL_EN           0x00004000
3155 -#define FPM_EV_MASK_SINGLE_ECC_EN      0x00002000
3156 -#define FPM_EV_MASK_EXTERNAL_HALT      0x00000008
3157 -#define FPM_EV_MASK_ECC_ERR_HALT       0x00000004
3158 -
3159 -#define FPM_RAM_MURAM_ECC              0x00008000
3160 -#define FPM_RAM_IRAM_ECC               0x00004000
3161 -#define FPM_IRAM_ECC_ERR_EX_EN         0x00020000
3162 -#define FPM_MURAM_ECC_ERR_EX_EN        0x00040000
3163 -#define FPM_RAM_IRAM_ECC_EN            0x40000000
3164 -#define FPM_RAM_RAMS_ECC_EN            0x80000000
3165 -#define FPM_RAM_RAMS_ECC_EN_SRC_SEL    0x08000000
3166 -
3167 -#define FPM_REV1_MAJOR_MASK            0x0000FF00
3168 -#define FPM_REV1_MINOR_MASK            0x000000FF
3169 -
3170 -#define FPM_DISP_LIMIT_SHIFT           24
3171 -
3172 -#define FPM_PRT_FM_CTL1                        0x00000001
3173 -#define FPM_PRT_FM_CTL2                        0x00000002
3174 -#define FPM_PORT_FM_CTL_PORTID_SHIFT   24
3175 -#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT   16
3176 -
3177 -#define FPM_THR1_PRS_SHIFT             24
3178 -#define FPM_THR1_KG_SHIFT              16
3179 -#define FPM_THR1_PLCR_SHIFT            8
3180 -#define FPM_THR1_BMI_SHIFT             0
3181 -
3182 -#define FPM_THR2_QMI_ENQ_SHIFT         24
3183 -#define FPM_THR2_QMI_DEQ_SHIFT         0
3184 -#define FPM_THR2_FM_CTL1_SHIFT         16
3185 -#define FPM_THR2_FM_CTL2_SHIFT         8
3186 -
3187 -#define FPM_EV_MASK_CAT_ERR_SHIFT      1
3188 -#define FPM_EV_MASK_DMA_ERR_SHIFT      0
3189 -
3190 -#define FPM_REV1_MAJOR_SHIFT           8
3191 -
3192 -#define FPM_RSTC_FM_RESET              0x80000000
3193 -#define FPM_RSTC_MAC0_RESET            0x40000000
3194 -#define FPM_RSTC_MAC1_RESET            0x20000000
3195 -#define FPM_RSTC_MAC2_RESET            0x10000000
3196 -#define FPM_RSTC_MAC3_RESET            0x08000000
3197 -#define FPM_RSTC_MAC8_RESET            0x04000000
3198 -#define FPM_RSTC_MAC4_RESET            0x02000000
3199 -#define FPM_RSTC_MAC5_RESET            0x01000000
3200 -#define FPM_RSTC_MAC6_RESET            0x00800000
3201 -#define FPM_RSTC_MAC7_RESET            0x00400000
3202 -#define FPM_RSTC_MAC9_RESET            0x00200000
3203 -
3204 -#define FPM_TS_INT_SHIFT               16
3205 -#define FPM_TS_CTL_EN                  0x80000000
3206 -
3207 -/* BMI defines */
3208 -#define BMI_INIT_START                         0x80000000
3209 -#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC    0x80000000
3210 -#define BMI_ERR_INTR_EN_LIST_RAM_ECC           0x40000000
3211 -#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC     0x20000000
3212 -#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC       0x10000000
3213 -#define BMI_NUM_OF_TASKS_MASK                  0x3F000000
3214 -#define BMI_NUM_OF_EXTRA_TASKS_MASK            0x000F0000
3215 -#define BMI_NUM_OF_DMAS_MASK                   0x00000F00
3216 -#define BMI_NUM_OF_EXTRA_DMAS_MASK             0x0000000F
3217 -#define BMI_FIFO_SIZE_MASK                     0x000003FF
3218 -#define BMI_EXTRA_FIFO_SIZE_MASK               0x03FF0000
3219 -#define BMI_CFG2_DMAS_MASK                     0x0000003F
3220 -#define BMI_CFG2_TASKS_MASK                    0x0000003F
3221 -
3222 -#define BMI_CFG2_TASKS_SHIFT           16
3223 -#define BMI_CFG2_DMAS_SHIFT            0
3224 -#define BMI_CFG1_FIFO_SIZE_SHIFT       16
3225 -#define BMI_NUM_OF_TASKS_SHIFT         24
3226 -#define BMI_EXTRA_NUM_OF_TASKS_SHIFT   16
3227 -#define BMI_NUM_OF_DMAS_SHIFT          8
3228 -#define BMI_EXTRA_NUM_OF_DMAS_SHIFT    0
3229 -
3230 -#define BMI_FIFO_ALIGN                 0x100
3231 -
3232 -#define BMI_EXTRA_FIFO_SIZE_SHIFT      16
3233 -
3234 -/* QMI defines */
3235 -#define QMI_CFG_ENQ_EN                 0x80000000
3236 -#define QMI_CFG_DEQ_EN                 0x40000000
3237 -#define QMI_CFG_EN_COUNTERS            0x10000000
3238 -#define QMI_CFG_DEQ_MASK               0x0000003F
3239 -#define QMI_CFG_ENQ_MASK               0x00003F00
3240 -#define QMI_CFG_ENQ_SHIFT              8
3241 -
3242 -#define QMI_ERR_INTR_EN_DOUBLE_ECC     0x80000000
3243 -#define QMI_ERR_INTR_EN_DEQ_FROM_DEF   0x40000000
3244 -#define QMI_INTR_EN_SINGLE_ECC         0x80000000
3245 -
3246 -#define QMI_GS_HALT_NOT_BUSY           0x00000002
3247 -
3248 -/* IRAM defines */
3249 -#define IRAM_IADD_AIE                  0x80000000
3250 -#define IRAM_READY                     0x80000000
3251 -
3252 -/* Default values */
3253 -#define DEFAULT_CATASTROPHIC_ERR               0
3254 -#define DEFAULT_DMA_ERR                                0
3255 -#define DEFAULT_AID_MODE                       FMAN_DMA_AID_OUT_TNUM
3256 -#define DEFAULT_DMA_COMM_Q_LOW                 0x2A
3257 -#define DEFAULT_DMA_COMM_Q_HIGH                0x3F
3258 -#define DEFAULT_CACHE_OVERRIDE                 0
3259 -#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES         64
3260 -#define DEFAULT_DMA_DBG_CNT_MODE               0
3261 -#define DEFAULT_DMA_SOS_EMERGENCY              0
3262 -#define DEFAULT_DMA_WATCHDOG                   0
3263 -#define DEFAULT_DISP_LIMIT                     0
3264 -#define DEFAULT_PRS_DISP_TH                    16
3265 -#define DEFAULT_PLCR_DISP_TH                   16
3266 -#define DEFAULT_KG_DISP_TH                     16
3267 -#define DEFAULT_BMI_DISP_TH                    16
3268 -#define DEFAULT_QMI_ENQ_DISP_TH                16
3269 -#define DEFAULT_QMI_DEQ_DISP_TH                16
3270 -#define DEFAULT_FM_CTL1_DISP_TH                16
3271 -#define DEFAULT_FM_CTL2_DISP_TH                16
3272 -
3273 -#define DFLT_AXI_DBG_NUM_OF_BEATS              1
3274 -
3275 -#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf)  \
3276 -       ((dma_thresh_max_buf + 1) / 2)
3277 -#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf) \
3278 -       ((dma_thresh_max_buf + 1) * 3 / 4)
3279 -#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf) \
3280 -       ((dma_thresh_max_buf + 1) / 2)
3281 -#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\
3282 -       ((dma_thresh_max_buf + 1) * 3 / 4)
3283 -
3284 -#define DMA_COMM_Q_LOW_FMAN_V3         0x2A
3285 -#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq)           \
3286 -       ((dma_thresh_max_commq + 1) / 2)
3287 -#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq)       \
3288 -       ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 :                \
3289 -       DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))
3290 -
3291 -#define DMA_COMM_Q_HIGH_FMAN_V3        0x3f
3292 -#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq)          \
3293 -       ((dma_thresh_max_commq + 1) * 3 / 4)
3294 -#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq)      \
3295 -       ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 :               \
3296 -       DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))
3297 -
3298 -#define TOTAL_NUM_OF_TASKS_FMAN_V3L    59
3299 -#define TOTAL_NUM_OF_TASKS_FMAN_V3H    124
3300 -#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks)    \
3301 -       ((major == 6) ? ((minor == 1 || minor == 4) ?                   \
3302 -       TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) :    \
3303 -       bmi_max_num_of_tasks)
3304 -
3305 -#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3         64
3306 -#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2         32
3307 -#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major)                     \
3308 -       (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 :          \
3309 -       DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)
3310 -
3311 -#define FM_TIMESTAMP_1_USEC_BIT             8
3312 -
3313 -/* Defines used for enabling/disabling FMan interrupts */
3314 -#define ERR_INTR_EN_DMA         0x00010000
3315 -#define ERR_INTR_EN_FPM         0x80000000
3316 -#define ERR_INTR_EN_BMI         0x00800000
3317 -#define ERR_INTR_EN_QMI         0x00400000
3318 -#define ERR_INTR_EN_MURAM       0x00040000
3319 -#define ERR_INTR_EN_MAC0        0x00004000
3320 -#define ERR_INTR_EN_MAC1        0x00002000
3321 -#define ERR_INTR_EN_MAC2        0x00001000
3322 -#define ERR_INTR_EN_MAC3        0x00000800
3323 -#define ERR_INTR_EN_MAC4        0x00000400
3324 -#define ERR_INTR_EN_MAC5        0x00000200
3325 -#define ERR_INTR_EN_MAC6        0x00000100
3326 -#define ERR_INTR_EN_MAC7        0x00000080
3327 -#define ERR_INTR_EN_MAC8        0x00008000
3328 -#define ERR_INTR_EN_MAC9        0x00000040
3329 -
3330 -#define INTR_EN_QMI             0x40000000
3331 -#define INTR_EN_MAC0            0x00080000
3332 -#define INTR_EN_MAC1            0x00040000
3333 -#define INTR_EN_MAC2            0x00020000
3334 -#define INTR_EN_MAC3            0x00010000
3335 -#define INTR_EN_MAC4            0x00000040
3336 -#define INTR_EN_MAC5            0x00000020
3337 -#define INTR_EN_MAC6            0x00000008
3338 -#define INTR_EN_MAC7            0x00000002
3339 -#define INTR_EN_MAC8            0x00200000
3340 -#define INTR_EN_MAC9            0x00100000
3341 -#define INTR_EN_REV0            0x00008000
3342 -#define INTR_EN_REV1            0x00004000
3343 -#define INTR_EN_REV2            0x00002000
3344 -#define INTR_EN_REV3            0x00001000
3345 -#define INTR_EN_TMR             0x01000000
3346 -
3347 -enum fman_dma_aid_mode {
3348 -       FMAN_DMA_AID_OUT_PORT_ID = 0,             /* 4 LSB of PORT_ID */
3349 -       FMAN_DMA_AID_OUT_TNUM                     /* 4 LSB of TNUM */
3350 -};
3351 -
3352 -struct fman_iram_regs {
3353 -       u32 iadd;       /* FM IRAM instruction address register */
3354 -       u32 idata;      /* FM IRAM instruction data register */
3355 -       u32 itcfg;      /* FM IRAM timing config register */
3356 -       u32 iready;     /* FM IRAM ready register */
3357 -};
3358 -
3359 -struct fman_fpm_regs {
3360 -       u32 fmfp_tnc;           /* FPM TNUM Control 0x00 */
3361 -       u32 fmfp_prc;           /* FPM Port_ID FmCtl Association 0x04 */
3362 -       u32 fmfp_brkc;          /* FPM Breakpoint Control 0x08 */
3363 -       u32 fmfp_mxd;           /* FPM Flush Control 0x0c */
3364 -       u32 fmfp_dist1;         /* FPM Dispatch Thresholds1 0x10 */
3365 -       u32 fmfp_dist2;         /* FPM Dispatch Thresholds2 0x14 */
3366 -       u32 fm_epi;             /* FM Error Pending Interrupts 0x18 */
3367 -       u32 fm_rie;             /* FM Error Interrupt Enable 0x1c */
3368 -       u32 fmfp_fcev[4];       /* FPM FMan-Controller Event 1-4 0x20-0x2f */
3369 -       u32 res0030[4];         /* res 0x30 - 0x3f */
3370 -       u32 fmfp_cee[4];        /* PM FMan-Controller Event 1-4 0x40-0x4f */
3371 -       u32 res0050[4];         /* res 0x50-0x5f */
3372 -       u32 fmfp_tsc1;          /* FPM TimeStamp Control1 0x60 */
3373 -       u32 fmfp_tsc2;          /* FPM TimeStamp Control2 0x64 */
3374 -       u32 fmfp_tsp;           /* FPM Time Stamp 0x68 */
3375 -       u32 fmfp_tsf;           /* FPM Time Stamp Fraction 0x6c */
3376 -       u32 fm_rcr;             /* FM Rams Control 0x70 */
3377 -       u32 fmfp_extc;          /* FPM External Requests Control 0x74 */
3378 -       u32 fmfp_ext1;          /* FPM External Requests Config1 0x78 */
3379 -       u32 fmfp_ext2;          /* FPM External Requests Config2 0x7c */
3380 -       u32 fmfp_drd[16];       /* FPM Data_Ram Data 0-15 0x80 - 0xbf */
3381 -       u32 fmfp_dra;           /* FPM Data Ram Access 0xc0 */
3382 -       u32 fm_ip_rev_1;        /* FM IP Block Revision 1 0xc4 */
3383 -       u32 fm_ip_rev_2;        /* FM IP Block Revision 2 0xc8 */
3384 -       u32 fm_rstc;            /* FM Reset Command 0xcc */
3385 -       u32 fm_cld;             /* FM Classifier Debug 0xd0 */
3386 -       u32 fm_npi;             /* FM Normal Pending Interrupts 0xd4 */
3387 -       u32 fmfp_exte;          /* FPM External Requests Enable 0xd8 */
3388 -       u32 fmfp_ee;            /* FPM Event&Mask 0xdc */
3389 -       u32 fmfp_cev[4];        /* FPM CPU Event 1-4 0xe0-0xef */
3390 -       u32 res00f0[4];         /* res 0xf0-0xff */
3391 -       u32 fmfp_ps[50];        /* FPM Port Status 0x100-0x1c7 */
3392 -       u32 res01c8[14];        /* res 0x1c8-0x1ff */
3393 -       u32 fmfp_clfabc;        /* FPM CLFABC 0x200 */
3394 -       u32 fmfp_clfcc;         /* FPM CLFCC 0x204 */
3395 -       u32 fmfp_clfaval;       /* FPM CLFAVAL 0x208 */
3396 -       u32 fmfp_clfbval;       /* FPM CLFBVAL 0x20c */
3397 -       u32 fmfp_clfcval;       /* FPM CLFCVAL 0x210 */
3398 -       u32 fmfp_clfamsk;       /* FPM CLFAMSK 0x214 */
3399 -       u32 fmfp_clfbmsk;       /* FPM CLFBMSK 0x218 */
3400 -       u32 fmfp_clfcmsk;       /* FPM CLFCMSK 0x21c */
3401 -       u32 fmfp_clfamc;        /* FPM CLFAMC 0x220 */
3402 -       u32 fmfp_clfbmc;        /* FPM CLFBMC 0x224 */
3403 -       u32 fmfp_clfcmc;        /* FPM CLFCMC 0x228 */
3404 -       u32 fmfp_decceh;        /* FPM DECCEH 0x22c */
3405 -       u32 res0230[116];       /* res 0x230 - 0x3ff */
3406 -       u32 fmfp_ts[128];       /* 0x400: FPM Task Status 0x400 - 0x5ff */
3407 -       u32 res0600[0x400 - 384];
3408 -};
3409 -
3410 -struct fman_bmi_regs {
3411 -       u32 fmbm_init;          /* BMI Initialization 0x00 */
3412 -       u32 fmbm_cfg1;          /* BMI Configuration 1 0x04 */
3413 -       u32 fmbm_cfg2;          /* BMI Configuration 2 0x08 */
3414 -       u32 res000c[5];         /* 0x0c - 0x1f */
3415 -       u32 fmbm_ievr;          /* Interrupt Event Register 0x20 */
3416 -       u32 fmbm_ier;           /* Interrupt Enable Register 0x24 */
3417 -       u32 fmbm_ifr;           /* Interrupt Force Register 0x28 */
3418 -       u32 res002c[5];         /* 0x2c - 0x3f */
3419 -       u32 fmbm_arb[8];        /* BMI Arbitration 0x40 - 0x5f */
3420 -       u32 res0060[12];        /* 0x60 - 0x8f */
3421 -       u32 fmbm_dtc[3];        /* Debug Trap Counter 0x90 - 0x9b */
3422 -       u32 res009c;            /* 0x9c */
3423 -       u32 fmbm_dcv[3][4];     /* Debug Compare val 0xa0-0xcf */
3424 -       u32 fmbm_dcm[3][4];     /* Debug Compare Mask 0xd0-0xff */
3425 -       u32 fmbm_gde;           /* BMI Global Debug Enable 0x100 */
3426 -       u32 fmbm_pp[63];        /* BMI Port Parameters 0x104 - 0x1ff */
3427 -       u32 res0200;            /* 0x200 */
3428 -       u32 fmbm_pfs[63];       /* BMI Port FIFO Size 0x204 - 0x2ff */
3429 -       u32 res0300;            /* 0x300 */
3430 -       u32 fmbm_spliodn[63];   /* Port Partition ID 0x304 - 0x3ff */
3431 -};
3432 -
3433 -struct fman_qmi_regs {
3434 -       u32 fmqm_gc;            /* General Configuration Register 0x00 */
3435 -       u32 res0004;            /* 0x04 */
3436 -       u32 fmqm_eie;           /* Error Interrupt Event Register 0x08 */
3437 -       u32 fmqm_eien;          /* Error Interrupt Enable Register 0x0c */
3438 -       u32 fmqm_eif;           /* Error Interrupt Force Register 0x10 */
3439 -       u32 fmqm_ie;            /* Interrupt Event Register 0x14 */
3440 -       u32 fmqm_ien;           /* Interrupt Enable Register 0x18 */
3441 -       u32 fmqm_if;            /* Interrupt Force Register 0x1c */
3442 -       u32 fmqm_gs;            /* Global Status Register 0x20 */
3443 -       u32 fmqm_ts;            /* Task Status Register 0x24 */
3444 -       u32 fmqm_etfc;          /* Enqueue Total Frame Counter 0x28 */
3445 -       u32 fmqm_dtfc;          /* Dequeue Total Frame Counter 0x2c */
3446 -       u32 fmqm_dc0;           /* Dequeue Counter 0 0x30 */
3447 -       u32 fmqm_dc1;           /* Dequeue Counter 1 0x34 */
3448 -       u32 fmqm_dc2;           /* Dequeue Counter 2 0x38 */
3449 -       u32 fmqm_dc3;           /* Dequeue Counter 3 0x3c */
3450 -       u32 fmqm_dfdc;          /* Dequeue FQID from Default Counter 0x40 */
3451 -       u32 fmqm_dfcc;          /* Dequeue FQID from Context Counter 0x44 */
3452 -       u32 fmqm_dffc;          /* Dequeue FQID from FD Counter 0x48 */
3453 -       u32 fmqm_dcc;           /* Dequeue Confirm Counter 0x4c */
3454 -       u32 res0050[7];         /* 0x50 - 0x6b */
3455 -       u32 fmqm_tapc;          /* Tnum Aging Period Control 0x6c */
3456 -       u32 fmqm_dmcvc;         /* Dequeue MAC Command Valid Counter 0x70 */
3457 -       u32 fmqm_difdcc;        /* Dequeue Invalid FD Command Counter 0x74 */
3458 -       u32 fmqm_da1v;          /* Dequeue A1 Valid Counter 0x78 */
3459 -       u32 res007c;            /* 0x7c */
3460 -       u32 fmqm_dtc;           /* 0x80 Debug Trap Counter 0x80 */
3461 -       u32 fmqm_efddd;         /* 0x84 Enqueue Frame desc Dynamic dbg 0x84 */
3462 -       u32 res0088[2];         /* 0x88 - 0x8f */
3463 -       struct {
3464 -               u32 fmqm_dtcfg1;        /* 0x90 dbg trap cfg 1 Register 0x00 */
3465 -               u32 fmqm_dtval1;        /* Debug Trap Value 1 Register 0x04 */
3466 -               u32 fmqm_dtm1;          /* Debug Trap Mask 1 Register 0x08 */
3467 -               u32 fmqm_dtc1;          /* Debug Trap Counter 1 Register 0x0c */
3468 -               u32 fmqm_dtcfg2;        /* dbg Trap cfg 2 Register 0x10 */
3469 -               u32 fmqm_dtval2;        /* Debug Trap Value 2 Register 0x14 */
3470 -               u32 fmqm_dtm2;          /* Debug Trap Mask 2 Register 0x18 */
3471 -               u32 res001c;            /* 0x1c */
3472 -       } dbg_traps[3];                 /* 0x90 - 0xef */
3473 -       u8 res00f0[0x400 - 0xf0];       /* 0xf0 - 0x3ff */
3474 -};
3475 -
3476 -struct fman_dma_regs {
3477 -       u32 fmdmsr;     /* FM DMA status register 0x00 */
3478 -       u32 fmdmmr;     /* FM DMA mode register 0x04 */
3479 -       u32 fmdmtr;     /* FM DMA bus threshold register 0x08 */
3480 -       u32 fmdmhy;     /* FM DMA bus hysteresis register 0x0c */
3481 -       u32 fmdmsetr;   /* FM DMA SOS emergency Threshold Register 0x10 */
3482 -       u32 fmdmtah;    /* FM DMA transfer bus address high reg 0x14 */
3483 -       u32 fmdmtal;    /* FM DMA transfer bus address low reg 0x18 */
3484 -       u32 fmdmtcid;   /* FM DMA transfer bus communication ID reg 0x1c */
3485 -       u32 fmdmra;     /* FM DMA bus internal ram address register 0x20 */
3486 -       u32 fmdmrd;     /* FM DMA bus internal ram data register 0x24 */
3487 -       u32 fmdmwcr;    /* FM DMA CAM watchdog counter value 0x28 */
3488 -       u32 fmdmebcr;   /* FM DMA CAM base in MURAM register 0x2c */
3489 -       u32 fmdmccqdr;  /* FM DMA CAM and CMD Queue Debug reg 0x30 */
3490 -       u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */
3491 -       u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */
3492 -       u32 fmdmcqvr3;  /* FM DMA CMD Queue Value register #3 0x3c */
3493 -       u32 fmdmcqvr4;  /* FM DMA CMD Queue Value register #4 0x40 */
3494 -       u32 fmdmcqvr5;  /* FM DMA CMD Queue Value register #5 0x44 */
3495 -       u32 fmdmsefrc;  /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
3496 -       u32 fmdmsqfrc;  /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
3497 -       u32 fmdmssrc;   /* FM DMA Semaphore SYNC Reject Counter 0x50 */
3498 -       u32 fmdmdcr;    /* FM DMA Debug Counter 0x54 */
3499 -       u32 fmdmemsr;   /* FM DMA Emergency Smoother Register 0x58 */
3500 -       u32 res005c;    /* 0x5c */
3501 -       u32 fmdmplr[FMAN_LIODN_TBL / 2];        /* DMA LIODN regs 0x60-0xdf */
3502 -       u32 res00e0[0x400 - 56];
3503 -};
3504 -
3505 -/* Structure that holds current FMan state.
3506 - * Used for saving run time information.
3507 - */
3508 -struct fman_state_struct {
3509 -       u8 fm_id;
3510 -       u16 fm_clk_freq;
3511 -       struct fman_rev_info rev_info;
3512 -       bool enabled_time_stamp;
3513 -       u8 count1_micro_bit;
3514 -       u8 total_num_of_tasks;
3515 -       u8 accumulated_num_of_tasks;
3516 -       u32 accumulated_fifo_size;
3517 -       u8 accumulated_num_of_open_dmas;
3518 -       u8 accumulated_num_of_deq_tnums;
3519 -       u32 exceptions;
3520 -       u32 extra_fifo_pool_size;
3521 -       u8 extra_tasks_pool_size;
3522 -       u8 extra_open_dmas_pool_size;
3523 -       u16 port_mfl[MAX_NUM_OF_MACS];
3524 -       u16 mac_mfl[MAX_NUM_OF_MACS];
3525 -
3526 -       /* SOC specific */
3527 -       u32 fm_iram_size;
3528 -       /* DMA */
3529 -       u32 dma_thresh_max_commq;
3530 -       u32 dma_thresh_max_buf;
3531 -       u32 max_num_of_open_dmas;
3532 -       /* QMI */
3533 -       u32 qmi_max_num_of_tnums;
3534 -       u32 qmi_def_tnums_thresh;
3535 -       /* BMI */
3536 -       u32 bmi_max_num_of_tasks;
3537 -       u32 bmi_max_fifo_size;
3538 -       /* General */
3539 -       u32 fm_port_num_of_cg;
3540 -       u32 num_of_rx_ports;
3541 -       u32 total_fifo_size;
3542 -
3543 -       u32 qman_channel_base;
3544 -       u32 num_of_qman_channels;
3545 -
3546 -       struct resource *res;
3547 -};
3548 -
3549 -/* Structure that holds FMan initial configuration */
3550 -struct fman_cfg {
3551 -       u8 disp_limit_tsh;
3552 -       u8 prs_disp_tsh;
3553 -       u8 plcr_disp_tsh;
3554 -       u8 kg_disp_tsh;
3555 -       u8 bmi_disp_tsh;
3556 -       u8 qmi_enq_disp_tsh;
3557 -       u8 qmi_deq_disp_tsh;
3558 -       u8 fm_ctl1_disp_tsh;
3559 -       u8 fm_ctl2_disp_tsh;
3560 -       int dma_cache_override;
3561 -       enum fman_dma_aid_mode dma_aid_mode;
3562 -       u32 dma_axi_dbg_num_of_beats;
3563 -       u32 dma_cam_num_of_entries;
3564 -       u32 dma_watchdog;
3565 -       u8 dma_comm_qtsh_asrt_emer;
3566 -       u32 dma_write_buf_tsh_asrt_emer;
3567 -       u32 dma_read_buf_tsh_asrt_emer;
3568 -       u8 dma_comm_qtsh_clr_emer;
3569 -       u32 dma_write_buf_tsh_clr_emer;
3570 -       u32 dma_read_buf_tsh_clr_emer;
3571 -       u32 dma_sos_emergency;
3572 -       int dma_dbg_cnt_mode;
3573 -       int catastrophic_err;
3574 -       int dma_err;
3575 -       u32 exceptions;
3576 -       u16 clk_freq;
3577 -       u32 cam_base_addr;
3578 -       u32 fifo_base_addr;
3579 -       u32 total_fifo_size;
3580 -       u32 total_num_of_tasks;
3581 -       u32 qmi_def_tnums_thresh;
3582 -};
3583 -
3584 -/* Structure that holds information received from device tree */
3585 -struct fman_dts_params {
3586 -       void __iomem *base_addr;                /* FMan virtual address */
3587 -       struct resource *res;                   /* FMan memory resource */
3588 -       u8 id;                                  /* FMan ID */
3589 -
3590 -       int err_irq;                            /* FMan Error IRQ */
3591 -
3592 -       u16 clk_freq;                           /* FMan clock freq (In Mhz) */
3593 -
3594 -       u32 qman_channel_base;                  /* QMan channels base */
3595 -       u32 num_of_qman_channels;               /* Number of QMan channels */
3596 -
3597 -       struct resource muram_res;              /* MURAM resource */
3598 -};
3599 -
3600 -/** fman_exceptions_cb
3601 - * fman                - Pointer to FMan
3602 - * exception   - The exception.
3603 - *
3604 - * Exceptions user callback routine, will be called upon an exception
3605 - * passing the exception identification.
3606 - *
3607 - * Return: irq status
3608 - */
3609 -typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
3610 -                                        enum fman_exceptions exception);
3611 -
3612 -/** fman_bus_error_cb
3613 - * fman                - Pointer to FMan
3614 - * port_id     - Port id
3615 - * addr                - Address that caused the error
3616 - * tnum                - Owner of error
3617 - * liodn       - Logical IO device number
3618 - *
3619 - * Bus error user callback routine, will be called upon bus error,
3620 - * passing parameters describing the errors and the owner.
3621 - *
3622 - * Return: IRQ status
3623 - */
3624 -typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
3625 -                                       u64 addr, u8 tnum, u16 liodn);
3626 -
3627 -struct fman {
3628 -       struct device *dev;
3629 -       void __iomem *base_addr;
3630 -       struct fman_intr_src intr_mng[FMAN_EV_CNT];
3631 -
3632 -       struct fman_fpm_regs __iomem *fpm_regs;
3633 -       struct fman_bmi_regs __iomem *bmi_regs;
3634 -       struct fman_qmi_regs __iomem *qmi_regs;
3635 -       struct fman_dma_regs __iomem *dma_regs;
3636 -       fman_exceptions_cb *exception_cb;
3637 -       fman_bus_error_cb *bus_error_cb;
3638 -       /* Spinlock for FMan use */
3639 -       spinlock_t spinlock;
3640 -       struct fman_state_struct *state;
3641 -
3642 -       struct fman_cfg *cfg;
3643 -       struct muram_info *muram;
3644 -       /* cam section in muram */
3645 -       unsigned long cam_offset;
3646 -       size_t cam_size;
3647 -       /* Fifo in MURAM */
3648 -       unsigned long fifo_offset;
3649 -       size_t fifo_size;
3650 -
3651 -       u32 liodn_base[64];
3652 -       u32 liodn_offset[64];
3653 -
3654 -       struct fman_dts_params dts_params;
3655 -};
3656 -
3657 -static irqreturn_t fman_exceptions(struct fman *fman,
3658 -                                  enum fman_exceptions exception)
3659 -{
3660 -       dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
3661 -               __func__, fman->state->fm_id, exception);
3662 -
3663 -       return IRQ_HANDLED;
3664 -}
3665 -
3666 -static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
3667 -                                 u64 __maybe_unused addr,
3668 -                                 u8 __maybe_unused tnum,
3669 -                                 u16 __maybe_unused liodn)
3670 -{
3671 -       dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
3672 -               __func__, fman->state->fm_id, port_id);
3673 -
3674 -       return IRQ_HANDLED;
3675 -}
3676 -
3677 -static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
3678 -{
3679 -       if (fman->intr_mng[id].isr_cb) {
3680 -               fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
3681 -
3682 -               return IRQ_HANDLED;
3683 -       }
3684 -
3685 -       return IRQ_NONE;
3686 -}
3687 -
3688 -static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
3689 -{
3690 -       u8 sw_port_id = 0;
3691 -
3692 -       if (hw_port_id >= BASE_TX_PORTID)
3693 -               sw_port_id = hw_port_id - BASE_TX_PORTID;
3694 -       else if (hw_port_id >= BASE_RX_PORTID)
3695 -               sw_port_id = hw_port_id - BASE_RX_PORTID;
3696 -       else
3697 -               sw_port_id = 0;
3698 -
3699 -       return sw_port_id;
3700 -}
3701 -
3702 -static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
3703 -                                      u8 port_id)
3704 -{
3705 -       u32 tmp = 0;
3706 -
3707 -       tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
3708 -
3709 -       tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
3710 -
3711 -       /* order restoration */
3712 -       if (port_id % 2)
3713 -               tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
3714 -       else
3715 -               tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
3716 -
3717 -       iowrite32be(tmp, &fpm_rg->fmfp_prc);
3718 -}
3719 -
3720 -static void set_port_liodn(struct fman *fman, u8 port_id,
3721 -                          u32 liodn_base, u32 liodn_ofst)
3722 -{
3723 -       u32 tmp;
3724 -
3725 -       /* set LIODN base for this port */
3726 -       tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
3727 -       if (port_id % 2) {
3728 -               tmp &= ~DMA_LIODN_BASE_MASK;
3729 -               tmp |= liodn_base;
3730 -       } else {
3731 -               tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
3732 -               tmp |= liodn_base << DMA_LIODN_SHIFT;
3733 -       }
3734 -       iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
3735 -       iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
3736 -}
3737 -
3738 -static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
3739 -{
3740 -       u32 tmp;
3741 -
3742 -       tmp = ioread32be(&fpm_rg->fm_rcr);
3743 -       if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
3744 -               iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
3745 -       else
3746 -               iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
3747 -                           FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
3748 -}
3749 -
3750 -static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
3751 -{
3752 -       u32 tmp;
3753 -
3754 -       tmp = ioread32be(&fpm_rg->fm_rcr);
3755 -       if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
3756 -               iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
3757 -       else
3758 -               iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
3759 -                           &fpm_rg->fm_rcr);
3760 -}
3761 -
3762 -static void fman_defconfig(struct fman_cfg *cfg)
3763 -{
3764 -       memset(cfg, 0, sizeof(struct fman_cfg));
3765 -
3766 -       cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
3767 -       cfg->dma_err = DEFAULT_DMA_ERR;
3768 -       cfg->dma_aid_mode = DEFAULT_AID_MODE;
3769 -       cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
3770 -       cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
3771 -       cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
3772 -       cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
3773 -       cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
3774 -       cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
3775 -       cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
3776 -       cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
3777 -       cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
3778 -       cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
3779 -       cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
3780 -       cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
3781 -       cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
3782 -       cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
3783 -       cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
3784 -       cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
3785 -}
3786 -
3787 -static int dma_init(struct fman *fman)
3788 -{
3789 -       struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
3790 -       struct fman_cfg *cfg = fman->cfg;
3791 -       u32 tmp_reg;
3792 -
3793 -       /* Init DMA Registers */
3794 -
3795 -       /* clear status reg events */
3796 -       tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
3797 -                  DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
3798 -       iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);
3799 -
3800 -       /* configure mode register */
3801 -       tmp_reg = 0;
3802 -       tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
3803 -       if (cfg->exceptions & EX_DMA_BUS_ERROR)
3804 -               tmp_reg |= DMA_MODE_BER;
3805 -       if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
3806 -           (cfg->exceptions & EX_DMA_READ_ECC) |
3807 -           (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
3808 -               tmp_reg |= DMA_MODE_ECC;
3809 -       if (cfg->dma_axi_dbg_num_of_beats)
3810 -               tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
3811 -                       ((cfg->dma_axi_dbg_num_of_beats - 1)
3812 -                       << DMA_MODE_AXI_DBG_SHIFT));
3813 -
3814 -       tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
3815 -               DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
3816 -       tmp_reg |= DMA_MODE_SECURE_PROT;
3817 -       tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
3818 -       tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
3819 -
3820 -       iowrite32be(tmp_reg, &dma_rg->fmdmmr);
3821 -
3822 -       /* configure thresholds register */
3823 -       tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
3824 -               DMA_THRESH_COMMQ_SHIFT);
3825 -       tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
3826 -               DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
3827 -       tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
3828 -               DMA_THRESH_WRITE_INT_BUF_MASK;
3829 -
3830 -       iowrite32be(tmp_reg, &dma_rg->fmdmtr);
3831 -
3832 -       /* configure hysteresis register */
3833 -       tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
3834 -               DMA_THRESH_COMMQ_SHIFT);
3835 -       tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
3836 -               DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
3837 -       tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
3838 -               DMA_THRESH_WRITE_INT_BUF_MASK;
3839 -
3840 -       iowrite32be(tmp_reg, &dma_rg->fmdmhy);
3841 -
3842 -       /* configure emergency threshold */
3843 -       iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
3844 -
3845 -       /* configure Watchdog */
3846 -       iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
3847 -
3848 -       iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
3849 -
3850 -       /* Allocate MURAM for CAM */
3851 -       fman->cam_size =
3852 -               (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
3853 -       fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
3854 -       if (IS_ERR_VALUE(fman->cam_offset)) {
3855 -               dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
3856 -                       __func__);
3857 -               return -ENOMEM;
3858 -       }
3859 -
3860 -       if (fman->state->rev_info.major == 2) {
3861 -               u32 __iomem *cam_base_addr;
3862 -
3863 -               fman_muram_free_mem(fman->muram, fman->cam_offset,
3864 -                                   fman->cam_size);
3865 -
3866 -               fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
3867 -               fman->cam_offset = fman_muram_alloc(fman->muram,
3868 -                                                   fman->cam_size);
3869 -               if (IS_ERR_VALUE(fman->cam_offset)) {
3870 -                       dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
3871 -                               __func__);
3872 -                       return -ENOMEM;
3873 -               }
3874 -
3875 -               if (fman->cfg->dma_cam_num_of_entries % 8 ||
3876 -                   fman->cfg->dma_cam_num_of_entries > 32) {
3877 -                       dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
3878 -                               __func__);
3879 -                       return -EINVAL;
3880 -               }
3881 -
3882 -               cam_base_addr = (u32 __iomem *)
3883 -                       fman_muram_offset_to_vbase(fman->muram,
3884 -                                                  fman->cam_offset);
3885 -               iowrite32be(~((1 <<
3886 -                           (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
3887 -                           cam_base_addr);
3888 -       }
3889 -
3890 -       fman->cfg->cam_base_addr = fman->cam_offset;
3891 -
3892 -       return 0;
3893 -}
3894 -
3895 -static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
3896 -{
3897 -       u32 tmp_reg;
3898 -       int i;
3899 -
3900 -       /* Init FPM Registers */
3901 -
3902 -       tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
3903 -       iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
3904 -
3905 -       tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
3906 -                  ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
3907 -                  ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
3908 -                  ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
3909 -       iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
3910 -
3911 -       tmp_reg =
3912 -               (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
3913 -                ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
3914 -                ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
3915 -                ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
3916 -       iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
3917 -
3918 -       /* define exceptions and error behavior */
3919 -       tmp_reg = 0;
3920 -       /* Clear events */
3921 -       tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
3922 -                   FPM_EV_MASK_SINGLE_ECC);
3923 -       /* enable interrupts */
3924 -       if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
3925 -               tmp_reg |= FPM_EV_MASK_STALL_EN;
3926 -       if (cfg->exceptions & EX_FPM_SINGLE_ECC)
3927 -               tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
3928 -       if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
3929 -               tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
3930 -       tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
3931 -       tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
3932 -       /* FMan is not halted upon external halt activation */
3933 -       tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
3934 -       /* Man is not halted upon  Unrecoverable ECC error behavior */
3935 -       tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
3936 -       iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
3937 -
3938 -       /* clear all fmCtls event registers */
3939 -       for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
3940 -               iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
3941 -
3942 -       /* RAM ECC -  enable and clear events */
3943 -       /* first we need to clear all parser memory,
3944 -        * as it is uninitialized and may cause ECC errors
3945 -        */
3946 -       /* event bits */
3947 -       tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
3948 -
3949 -       iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
3950 -
3951 -       tmp_reg = 0;
3952 -       if (cfg->exceptions & EX_IRAM_ECC) {
3953 -               tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
3954 -               enable_rams_ecc(fpm_rg);
3955 -       }
3956 -       if (cfg->exceptions & EX_MURAM_ECC) {
3957 -               tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
3958 -               enable_rams_ecc(fpm_rg);
3959 -       }
3960 -       iowrite32be(tmp_reg, &fpm_rg->fm_rie);
3961 -}
3962 -
3963 -static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
3964 -                    struct fman_cfg *cfg)
3965 -{
3966 -       u32 tmp_reg;
3967 -
3968 -       /* Init BMI Registers */
3969 -
3970 -       /* define common resources */
3971 -       tmp_reg = cfg->fifo_base_addr;
3972 -       tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
3973 -
3974 -       tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
3975 -                   BMI_CFG1_FIFO_SIZE_SHIFT);
3976 -       iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
3977 -
3978 -       tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
3979 -                  BMI_CFG2_TASKS_SHIFT;
3980 -       /* num of DMA's will be dynamically updated when each port is set */
3981 -       iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
3982 -
3983 -       /* define unmaskable exceptions, enable and clear events */
3984 -       tmp_reg = 0;
3985 -       iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
3986 -                   BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
3987 -                   BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
3988 -                   BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);
3989 -
3990 -       if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
3991 -               tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
3992 -       if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
3993 -               tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
3994 -       if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
3995 -               tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
3996 -       if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
3997 -               tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
3998 -       iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
3999 -}
4000 -
4001 -static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
4002 -                    struct fman_cfg *cfg)
4003 -{
4004 -       u32 tmp_reg;
4005 -
4006 -       /* Init QMI Registers */
4007 -
4008 -       /* Clear error interrupt events */
4009 -
4010 -       iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
4011 -                   &qmi_rg->fmqm_eie);
4012 -       tmp_reg = 0;
4013 -       if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
4014 -               tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
4015 -       if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
4016 -               tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
4017 -       /* enable events */
4018 -       iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
4019 -
4020 -       tmp_reg = 0;
4021 -       /* Clear interrupt events */
4022 -       iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
4023 -       if (cfg->exceptions & EX_QMI_SINGLE_ECC)
4024 -               tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
4025 -       /* enable events */
4026 -       iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
4027 -}
4028 -
4029 -static int enable(struct fman *fman, struct fman_cfg *cfg)
4030 -{
4031 -       u32 cfg_reg = 0;
4032 -
4033 -       /* Enable all modules */
4034 -
4035 -       /* clear&enable global counters - calculate reg and save for later,
4036 -        * because it's the same reg for QMI enable
4037 -        */
4038 -       cfg_reg = QMI_CFG_EN_COUNTERS;
4039 -
4040 -       /* Set enqueue and dequeue thresholds */
4041 -       cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
4042 -
4043 -       iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
4044 -       iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
4045 -                   &fman->qmi_regs->fmqm_gc);
4046 -
4047 -       return 0;
4048 -}
4049 -
4050 -static int set_exception(struct fman *fman,
4051 -                        enum fman_exceptions exception, bool enable)
4052 -{
4053 -       u32 tmp;
4054 -
4055 -       switch (exception) {
4056 -       case FMAN_EX_DMA_BUS_ERROR:
4057 -               tmp = ioread32be(&fman->dma_regs->fmdmmr);
4058 -               if (enable)
4059 -                       tmp |= DMA_MODE_BER;
4060 -               else
4061 -                       tmp &= ~DMA_MODE_BER;
4062 -               /* disable bus error */
4063 -               iowrite32be(tmp, &fman->dma_regs->fmdmmr);
4064 -               break;
4065 -       case FMAN_EX_DMA_READ_ECC:
4066 -       case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
4067 -       case FMAN_EX_DMA_FM_WRITE_ECC:
4068 -               tmp = ioread32be(&fman->dma_regs->fmdmmr);
4069 -               if (enable)
4070 -                       tmp |= DMA_MODE_ECC;
4071 -               else
4072 -                       tmp &= ~DMA_MODE_ECC;
4073 -               iowrite32be(tmp, &fman->dma_regs->fmdmmr);
4074 -               break;
4075 -       case FMAN_EX_FPM_STALL_ON_TASKS:
4076 -               tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
4077 -               if (enable)
4078 -                       tmp |= FPM_EV_MASK_STALL_EN;
4079 -               else
4080 -                       tmp &= ~FPM_EV_MASK_STALL_EN;
4081 -               iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
4082 -               break;
4083 -       case FMAN_EX_FPM_SINGLE_ECC:
4084 -               tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
4085 -               if (enable)
4086 -                       tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
4087 -               else
4088 -                       tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
4089 -               iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
4090 -               break;
4091 -       case FMAN_EX_FPM_DOUBLE_ECC:
4092 -               tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
4093 -               if (enable)
4094 -                       tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
4095 -               else
4096 -                       tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
4097 -               iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
4098 -               break;
4099 -       case FMAN_EX_QMI_SINGLE_ECC:
4100 -               tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
4101 -               if (enable)
4102 -                       tmp |= QMI_INTR_EN_SINGLE_ECC;
4103 -               else
4104 -                       tmp &= ~QMI_INTR_EN_SINGLE_ECC;
4105 -               iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
4106 -               break;
4107 -       case FMAN_EX_QMI_DOUBLE_ECC:
4108 -               tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
4109 -               if (enable)
4110 -                       tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
4111 -               else
4112 -                       tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
4113 -               iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
4114 -               break;
4115 -       case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
4116 -               tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
4117 -               if (enable)
4118 -                       tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
4119 -               else
4120 -                       tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
4121 -               iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
4122 -               break;
4123 -       case FMAN_EX_BMI_LIST_RAM_ECC:
4124 -               tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
4125 -               if (enable)
4126 -                       tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
4127 -               else
4128 -                       tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
4129 -               iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
4130 -               break;
4131 -       case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
4132 -               tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
4133 -               if (enable)
4134 -                       tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
4135 -               else
4136 -                       tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
4137 -               iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
4138 -               break;
4139 -       case FMAN_EX_BMI_STATISTICS_RAM_ECC:
4140 -               tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
4141 -               if (enable)
4142 -                       tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
4143 -               else
4144 -                       tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
4145 -               iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
4146 -               break;
4147 -       case FMAN_EX_BMI_DISPATCH_RAM_ECC:
4148 -               tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
4149 -               if (enable)
4150 -                       tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
4151 -               else
4152 -                       tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
4153 -               iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
4154 -               break;
4155 -       case FMAN_EX_IRAM_ECC:
4156 -               tmp = ioread32be(&fman->fpm_regs->fm_rie);
4157 -               if (enable) {
4158 -                       /* enable ECC if not enabled */
4159 -                       enable_rams_ecc(fman->fpm_regs);
4160 -                       /* enable ECC interrupts */
4161 -                       tmp |= FPM_IRAM_ECC_ERR_EX_EN;
4162 -               } else {
4163 -                       /* ECC mechanism may be disabled,
4164 -                        * depending on driver status
4165 -                        */
4166 -                       disable_rams_ecc(fman->fpm_regs);
4167 -                       tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
4168 -               }
4169 -               iowrite32be(tmp, &fman->fpm_regs->fm_rie);
4170 -               break;
4171 -       case FMAN_EX_MURAM_ECC:
4172 -               tmp = ioread32be(&fman->fpm_regs->fm_rie);
4173 -               if (enable) {
4174 -                       /* enable ECC if not enabled */
4175 -                       enable_rams_ecc(fman->fpm_regs);
4176 -                       /* enable ECC interrupts */
4177 -                       tmp |= FPM_MURAM_ECC_ERR_EX_EN;
4178 -               } else {
4179 -                       /* ECC mechanism may be disabled,
4180 -                        * depending on driver status
4181 -                        */
4182 -                       disable_rams_ecc(fman->fpm_regs);
4183 -                       tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
4184 -               }
4185 -               iowrite32be(tmp, &fman->fpm_regs->fm_rie);
4186 -               break;
4187 -       default:
4188 -               return -EINVAL;
4189 -       }
4190 -       return 0;
4191 -}
4192 -
4193 -static void resume(struct fman_fpm_regs __iomem *fpm_rg)
4194 -{
4195 -       u32 tmp;
4196 -
4197 -       tmp = ioread32be(&fpm_rg->fmfp_ee);
4198 -       /* clear tmp_reg event bits in order not to clear standing events */
4199 -       tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
4200 -                FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
4201 -       tmp |= FPM_EV_MASK_RELEASE_FM;
4202 -
4203 -       iowrite32be(tmp, &fpm_rg->fmfp_ee);
4204 -}
4205 -
4206 -static int fill_soc_specific_params(struct fman_state_struct *state)
4207 -{
4208 -       u8 minor = state->rev_info.minor;
4209 -       /* P4080 - Major 2
4210 -        * P2041/P3041/P5020/P5040 - Major 3
4211 -        * Tx/Bx - Major 6
4212 -        */
4213 -       switch (state->rev_info.major) {
4214 -       case 3:
4215 -               state->bmi_max_fifo_size        = 160 * 1024;
4216 -               state->fm_iram_size             = 64 * 1024;
4217 -               state->dma_thresh_max_commq     = 31;
4218 -               state->dma_thresh_max_buf       = 127;
4219 -               state->qmi_max_num_of_tnums     = 64;
4220 -               state->qmi_def_tnums_thresh     = 48;
4221 -               state->bmi_max_num_of_tasks     = 128;
4222 -               state->max_num_of_open_dmas     = 32;
4223 -               state->fm_port_num_of_cg        = 256;
4224 -               state->num_of_rx_ports  = 6;
4225 -               state->total_fifo_size  = 122 * 1024;
4226 -               break;
4227 -
4228 -       case 2:
4229 -               state->bmi_max_fifo_size        = 160 * 1024;
4230 -               state->fm_iram_size             = 64 * 1024;
4231 -               state->dma_thresh_max_commq     = 31;
4232 -               state->dma_thresh_max_buf       = 127;
4233 -               state->qmi_max_num_of_tnums     = 64;
4234 -               state->qmi_def_tnums_thresh     = 48;
4235 -               state->bmi_max_num_of_tasks     = 128;
4236 -               state->max_num_of_open_dmas     = 32;
4237 -               state->fm_port_num_of_cg        = 256;
4238 -               state->num_of_rx_ports  = 5;
4239 -               state->total_fifo_size  = 100 * 1024;
4240 -               break;
4241 -
4242 -       case 6:
4243 -               state->dma_thresh_max_commq     = 83;
4244 -               state->dma_thresh_max_buf       = 127;
4245 -               state->qmi_max_num_of_tnums     = 64;
4246 -               state->qmi_def_tnums_thresh     = 32;
4247 -               state->fm_port_num_of_cg        = 256;
4248 -
4249 -               /* FManV3L */
4250 -               if (minor == 1 || minor == 4) {
4251 -                       state->bmi_max_fifo_size        = 192 * 1024;
4252 -                       state->bmi_max_num_of_tasks     = 64;
4253 -                       state->max_num_of_open_dmas     = 32;
4254 -                       state->num_of_rx_ports          = 5;
4255 -                       if (minor == 1)
4256 -                               state->fm_iram_size     = 32 * 1024;
4257 -                       else
4258 -                               state->fm_iram_size     = 64 * 1024;
4259 -                       state->total_fifo_size          = 156 * 1024;
4260 -               }
4261 -               /* FManV3H */
4262 -               else if (minor == 0 || minor == 2 || minor == 3) {
4263 -                       state->bmi_max_fifo_size        = 384 * 1024;
4264 -                       state->fm_iram_size             = 64 * 1024;
4265 -                       state->bmi_max_num_of_tasks     = 128;
4266 -                       state->max_num_of_open_dmas     = 84;
4267 -                       state->num_of_rx_ports          = 8;
4268 -                       state->total_fifo_size          = 295 * 1024;
4269 -               } else {
4270 -                       pr_err("Unsupported FManv3 version\n");
4271 -                       return -EINVAL;
4272 -               }
4273 -
4274 -               break;
4275 -       default:
4276 -               pr_err("Unsupported FMan version\n");
4277 -               return -EINVAL;
4278 -       }
4279 -
4280 -       return 0;
4281 -}
4282 -
4283 -static bool is_init_done(struct fman_cfg *cfg)
4284 -{
4285 -       /* Checks if FMan driver parameters were initialized */
4286 -       if (!cfg)
4287 -               return true;
4288 -
4289 -       return false;
4290 -}
4291 -
4292 -static void free_init_resources(struct fman *fman)
4293 -{
4294 -       if (fman->cam_offset)
4295 -               fman_muram_free_mem(fman->muram, fman->cam_offset,
4296 -                                   fman->cam_size);
4297 -       if (fman->fifo_offset)
4298 -               fman_muram_free_mem(fman->muram, fman->fifo_offset,
4299 -                                   fman->fifo_size);
4300 -}
4301 -
4302 -static irqreturn_t bmi_err_event(struct fman *fman)
4303 -{
4304 -       u32 event, mask, force;
4305 -       struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
4306 -       irqreturn_t ret = IRQ_NONE;
4307 -
4308 -       event = ioread32be(&bmi_rg->fmbm_ievr);
4309 -       mask = ioread32be(&bmi_rg->fmbm_ier);
4310 -       event &= mask;
4311 -       /* clear the forced events */
4312 -       force = ioread32be(&bmi_rg->fmbm_ifr);
4313 -       if (force & event)
4314 -               iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
4315 -       /* clear the acknowledged events */
4316 -       iowrite32be(event, &bmi_rg->fmbm_ievr);
4317 -
4318 -       if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
4319 -               ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
4320 -       if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
4321 -               ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
4322 -       if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
4323 -               ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
4324 -       if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
4325 -               ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
4326 -
4327 -       return ret;
4328 -}
4329 -
4330 -static irqreturn_t qmi_err_event(struct fman *fman)
4331 -{
4332 -       u32 event, mask, force;
4333 -       struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
4334 -       irqreturn_t ret = IRQ_NONE;
4335 -
4336 -       event = ioread32be(&qmi_rg->fmqm_eie);
4337 -       mask = ioread32be(&qmi_rg->fmqm_eien);
4338 -       event &= mask;
4339 -
4340 -       /* clear the forced events */
4341 -       force = ioread32be(&qmi_rg->fmqm_eif);
4342 -       if (force & event)
4343 -               iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
4344 -       /* clear the acknowledged events */
4345 -       iowrite32be(event, &qmi_rg->fmqm_eie);
4346 -
4347 -       if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
4348 -               ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
4349 -       if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
4350 -               ret = fman->exception_cb(fman,
4351 -                                        FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
4352 -
4353 -       return ret;
4354 -}
4355 -
4356 -static irqreturn_t dma_err_event(struct fman *fman)
4357 -{
4358 -       u32 status, mask, com_id;
4359 -       u8 tnum, port_id, relative_port_id;
4360 -       u16 liodn;
4361 -       struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
4362 -       irqreturn_t ret = IRQ_NONE;
4363 -
4364 -       status = ioread32be(&dma_rg->fmdmsr);
4365 -       mask = ioread32be(&dma_rg->fmdmmr);
4366 -
4367 -       /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
4368 -       if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
4369 -               status &= ~DMA_STATUS_BUS_ERR;
4370 -
4371 -       /* clear relevant bits if mask has no DMA_MODE_ECC */
4372 -       if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
4373 -               status &= ~(DMA_STATUS_FM_SPDAT_ECC |
4374 -                           DMA_STATUS_READ_ECC |
4375 -                           DMA_STATUS_SYSTEM_WRITE_ECC |
4376 -                           DMA_STATUS_FM_WRITE_ECC);
4377 -
4378 -       /* clear set events */
4379 -       iowrite32be(status, &dma_rg->fmdmsr);
4380 -
4381 -       if (status & DMA_STATUS_BUS_ERR) {
4382 -               u64 addr;
4383 -
4384 -               addr = (u64)ioread32be(&dma_rg->fmdmtal);
4385 -               addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);
4386 -
4387 -               com_id = ioread32be(&dma_rg->fmdmtcid);
4388 -               port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
4389 -                              DMA_TRANSFER_PORTID_SHIFT));
4390 -               relative_port_id =
4391 -               hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
4392 -               tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
4393 -                           DMA_TRANSFER_TNUM_SHIFT);
4394 -               liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
4395 -               ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
4396 -                                        liodn);
4397 -       }
4398 -       if (status & DMA_STATUS_FM_SPDAT_ECC)
4399 -               ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
4400 -       if (status & DMA_STATUS_READ_ECC)
4401 -               ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
4402 -       if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
4403 -               ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
4404 -       if (status & DMA_STATUS_FM_WRITE_ECC)
4405 -               ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
4406 -
4407 -       return ret;
4408 -}
4409 -
4410 -static irqreturn_t fpm_err_event(struct fman *fman)
4411 -{
4412 -       u32 event;
4413 -       struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
4414 -       irqreturn_t ret = IRQ_NONE;
4415 -
4416 -       event = ioread32be(&fpm_rg->fmfp_ee);
4417 -       /* clear the all occurred events */
4418 -       iowrite32be(event, &fpm_rg->fmfp_ee);
4419 -
4420 -       if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
4421 -           (event & FPM_EV_MASK_DOUBLE_ECC_EN))
4422 -               ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
4423 -       if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
4424 -               ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
4425 -       if ((event & FPM_EV_MASK_SINGLE_ECC) &&
4426 -           (event & FPM_EV_MASK_SINGLE_ECC_EN))
4427 -               ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
4428 -
4429 -       return ret;
4430 -}
4431 -
4432 -static irqreturn_t muram_err_intr(struct fman *fman)
4433 -{
4434 -       u32 event, mask;
4435 -       struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
4436 -       irqreturn_t ret = IRQ_NONE;
4437 -
4438 -       event = ioread32be(&fpm_rg->fm_rcr);
4439 -       mask = ioread32be(&fpm_rg->fm_rie);
4440 -
4441 -       /* clear MURAM event bit (do not clear IRAM event) */
4442 -       iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
4443 -
4444 -       if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
4445 -               ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
4446 -
4447 -       return ret;
4448 -}
4449 -
4450 -static irqreturn_t qmi_event(struct fman *fman)
4451 -{
4452 -       u32 event, mask, force;
4453 -       struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
4454 -       irqreturn_t ret = IRQ_NONE;
4455 -
4456 -       event = ioread32be(&qmi_rg->fmqm_ie);
4457 -       mask = ioread32be(&qmi_rg->fmqm_ien);
4458 -       event &= mask;
4459 -       /* clear the forced events */
4460 -       force = ioread32be(&qmi_rg->fmqm_if);
4461 -       if (force & event)
4462 -               iowrite32be(force & ~event, &qmi_rg->fmqm_if);
4463 -       /* clear the acknowledged events */
4464 -       iowrite32be(event, &qmi_rg->fmqm_ie);
4465 -
4466 -       if (event & QMI_INTR_EN_SINGLE_ECC)
4467 -               ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
4468 -
4469 -       return ret;
4470 -}
4471 -
4472 -static void enable_time_stamp(struct fman *fman)
4473 -{
4474 -       struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
4475 -       u16 fm_clk_freq = fman->state->fm_clk_freq;
4476 -       u32 tmp, intgr, ts_freq;
4477 -       u64 frac;
4478 -
4479 -       ts_freq = (u32)(1 << fman->state->count1_micro_bit);
4480 -       /* configure timestamp so that bit 8 will count 1 microsecond
4481 -        * Find effective count rate at TIMESTAMP least significant bits:
4482 -        * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
4483 -        * Find frequency ratio between effective count rate and the clock:
4484 -        * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
4485 -        * 256/600 = 0.4266666...
4486 -        */
4487 -
4488 -       intgr = ts_freq / fm_clk_freq;
4489 -       /* we multiply by 2^16 to keep the fraction of the division
4490 -        * we do not div back, since we write this value as a fraction
4491 -        * see spec
4492 -        */
4493 -
4494 -       frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
4495 -       /* we check remainder of the division in order to round up if not int */
4496 -       if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
4497 -               frac++;
4498 -
4499 -       tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
4500 -       iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
4501 -
4502 -       /* enable timestamp with original clock */
4503 -       iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
4504 -       fman->state->enabled_time_stamp = true;
4505 -}
4506 -
4507 -static int clear_iram(struct fman *fman)
4508 -{
4509 -       struct fman_iram_regs __iomem *iram;
4510 -       int i, count;
4511 -
4512 -       iram = fman->base_addr + IMEM_OFFSET;
4513 -
4514 -       /* Enable the auto-increment */
4515 -       iowrite32be(IRAM_IADD_AIE, &iram->iadd);
4516 -       count = 100;
4517 -       do {
4518 -               udelay(1);
4519 -       } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
4520 -       if (count == 0)
4521 -               return -EBUSY;
4522 -
4523 -       for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
4524 -               iowrite32be(0xffffffff, &iram->idata);
4525 -
4526 -       iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
4527 -       count = 100;
4528 -       do {
4529 -               udelay(1);
4530 -       } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
4531 -       if (count == 0)
4532 -               return -EBUSY;
4533 -
4534 -       return 0;
4535 -}
4536 -
4537 -static u32 get_exception_flag(enum fman_exceptions exception)
4538 -{
4539 -       u32 bit_mask;
4540 -
4541 -       switch (exception) {
4542 -       case FMAN_EX_DMA_BUS_ERROR:
4543 -               bit_mask = EX_DMA_BUS_ERROR;
4544 -               break;
4545 -       case FMAN_EX_DMA_SINGLE_PORT_ECC:
4546 -               bit_mask = EX_DMA_SINGLE_PORT_ECC;
4547 -               break;
4548 -       case FMAN_EX_DMA_READ_ECC:
4549 -               bit_mask = EX_DMA_READ_ECC;
4550 -               break;
4551 -       case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
4552 -               bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
4553 -               break;
4554 -       case FMAN_EX_DMA_FM_WRITE_ECC:
4555 -               bit_mask = EX_DMA_FM_WRITE_ECC;
4556 -               break;
4557 -       case FMAN_EX_FPM_STALL_ON_TASKS:
4558 -               bit_mask = EX_FPM_STALL_ON_TASKS;
4559 -               break;
4560 -       case FMAN_EX_FPM_SINGLE_ECC:
4561 -               bit_mask = EX_FPM_SINGLE_ECC;
4562 -               break;
4563 -       case FMAN_EX_FPM_DOUBLE_ECC:
4564 -               bit_mask = EX_FPM_DOUBLE_ECC;
4565 -               break;
4566 -       case FMAN_EX_QMI_SINGLE_ECC:
4567 -               bit_mask = EX_QMI_SINGLE_ECC;
4568 -               break;
4569 -       case FMAN_EX_QMI_DOUBLE_ECC:
4570 -               bit_mask = EX_QMI_DOUBLE_ECC;
4571 -               break;
4572 -       case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
4573 -               bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
4574 -               break;
4575 -       case FMAN_EX_BMI_LIST_RAM_ECC:
4576 -               bit_mask = EX_BMI_LIST_RAM_ECC;
4577 -               break;
4578 -       case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
4579 -               bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
4580 -               break;
4581 -       case FMAN_EX_BMI_STATISTICS_RAM_ECC:
4582 -               bit_mask = EX_BMI_STATISTICS_RAM_ECC;
4583 -               break;
4584 -       case FMAN_EX_BMI_DISPATCH_RAM_ECC:
4585 -               bit_mask = EX_BMI_DISPATCH_RAM_ECC;
4586 -               break;
4587 -       case FMAN_EX_MURAM_ECC:
4588 -               bit_mask = EX_MURAM_ECC;
4589 -               break;
4590 -       default:
4591 -               bit_mask = 0;
4592 -               break;
4593 -       }
4594 -
4595 -       return bit_mask;
4596 -}
4597 -
4598 -static int get_module_event(enum fman_event_modules module, u8 mod_id,
4599 -                           enum fman_intr_type intr_type)
4600 -{
4601 -       int event;
4602 -
4603 -       switch (module) {
4604 -       case FMAN_MOD_MAC:
4605 -               if (intr_type == FMAN_INTR_TYPE_ERR)
4606 -                       event = FMAN_EV_ERR_MAC0 + mod_id;
4607 -               else
4608 -                       event = FMAN_EV_MAC0 + mod_id;
4609 -               break;
4610 -       case FMAN_MOD_FMAN_CTRL:
4611 -               if (intr_type == FMAN_INTR_TYPE_ERR)
4612 -                       event = FMAN_EV_CNT;
4613 -               else
4614 -                       event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
4615 -               break;
4616 -       case FMAN_MOD_DUMMY_LAST:
4617 -               event = FMAN_EV_CNT;
4618 -               break;
4619 -       default:
4620 -               event = FMAN_EV_CNT;
4621 -               break;
4622 -       }
4623 -
4624 -       return event;
4625 -}
4626 -
4627 -static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
4628 -                           u32 *extra_size_of_fifo)
4629 -{
4630 -       struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
4631 -       u32 fifo = *size_of_fifo;
4632 -       u32 extra_fifo = *extra_size_of_fifo;
4633 -       u32 tmp;
4634 -
4635 -       /* if this is the first time a port requires extra_fifo_pool_size,
4636 -        * the total extra_fifo_pool_size must be initialized to 1 buffer per
4637 -        * port
4638 -        */
4639 -       if (extra_fifo && !fman->state->extra_fifo_pool_size)
4640 -               fman->state->extra_fifo_pool_size =
4641 -                       fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;
4642 -
4643 -       fman->state->extra_fifo_pool_size =
4644 -               max(fman->state->extra_fifo_pool_size, extra_fifo);
4645 -
4646 -       /* check that there are enough uncommitted fifo size */
4647 -       if ((fman->state->accumulated_fifo_size + fifo) >
4648 -           (fman->state->total_fifo_size -
4649 -           fman->state->extra_fifo_pool_size)) {
4650 -               dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
4651 -                       __func__);
4652 -               return -EAGAIN;
4653 -       }
4654 -
4655 -       /* Read, modify and write to HW */
4656 -       tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
4657 -              ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
4658 -              BMI_EXTRA_FIFO_SIZE_SHIFT);
4659 -       iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
4660 -
4661 -       /* update accumulated */
4662 -       fman->state->accumulated_fifo_size += fifo;
4663 -
4664 -       return 0;
4665 -}
4666 -
4667 -static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
4668 -                           u8 *num_of_extra_tasks)
4669 -{
4670 -       struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
4671 -       u8 tasks = *num_of_tasks;
4672 -       u8 extra_tasks = *num_of_extra_tasks;
4673 -       u32 tmp;
4674 -
4675 -       if (extra_tasks)
4676 -               fman->state->extra_tasks_pool_size =
4677 -               max(fman->state->extra_tasks_pool_size, extra_tasks);
4678 -
4679 -       /* check that there are enough uncommitted tasks */
4680 -       if ((fman->state->accumulated_num_of_tasks + tasks) >
4681 -           (fman->state->total_num_of_tasks -
4682 -            fman->state->extra_tasks_pool_size)) {
4683 -               dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
4684 -                       __func__, fman->state->fm_id);
4685 -               return -EAGAIN;
4686 -       }
4687 -       /* update accumulated */
4688 -       fman->state->accumulated_num_of_tasks += tasks;
4689 -
4690 -       /* Write to HW */
4691 -       tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
4692 -           ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
4693 -       tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
4694 -               (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
4695 -       iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
4696 -
4697 -       return 0;
4698 -}
4699 -
4700 -static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
4701 -                               u8 *num_of_open_dmas,
4702 -                               u8 *num_of_extra_open_dmas)
4703 -{
4704 -       struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
4705 -       u8 open_dmas = *num_of_open_dmas;
4706 -       u8 extra_open_dmas = *num_of_extra_open_dmas;
4707 -       u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
4708 -       u32 tmp;
4709 -
4710 -       if (!open_dmas) {
4711 -               /* Configuration according to values in the HW.
4712 -                * read the current number of open Dma's
4713 -                */
4714 -               tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
4715 -               current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
4716 -                                        BMI_EXTRA_NUM_OF_DMAS_SHIFT);
4717 -
4718 -               tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
4719 -               current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
4720 -                                  BMI_NUM_OF_DMAS_SHIFT) + 1);
4721 -
4722 -               /* This is the first configuration and user did not
4723 -                * specify value (!open_dmas), reset values will be used
4724 -                * and we just save these values for resource management
4725 -                */
4726 -               fman->state->extra_open_dmas_pool_size =
4727 -                       (u8)max(fman->state->extra_open_dmas_pool_size,
4728 -                               current_extra_val);
4729 -               fman->state->accumulated_num_of_open_dmas += current_val;
4730 -               *num_of_open_dmas = current_val;
4731 -               *num_of_extra_open_dmas = current_extra_val;
4732 -               return 0;
4733 -       }
4734 -
4735 -       if (extra_open_dmas > current_extra_val)
4736 -               fman->state->extra_open_dmas_pool_size =
4737 -                   (u8)max(fman->state->extra_open_dmas_pool_size,
4738 -                           extra_open_dmas);
4739 -
4740 -       if ((fman->state->rev_info.major < 6) &&
4741 -           (fman->state->accumulated_num_of_open_dmas - current_val +
4742 -            open_dmas > fman->state->max_num_of_open_dmas)) {
4743 -               dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
4744 -                       __func__, fman->state->fm_id);
4745 -               return -EAGAIN;
4746 -       } else if ((fman->state->rev_info.major >= 6) &&
4747 -                  !((fman->state->rev_info.major == 6) &&
4748 -                  (fman->state->rev_info.minor == 0)) &&
4749 -                  (fman->state->accumulated_num_of_open_dmas -
4750 -                  current_val + open_dmas >
4751 -                  fman->state->dma_thresh_max_commq + 1)) {
4752 -               dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
4753 -                       __func__, fman->state->fm_id,
4754 -                      fman->state->dma_thresh_max_commq + 1);
4755 -               return -EAGAIN;
4756 -       }
4757 -
4758 -       WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
4759 -       /* update acummulated */
4760 -       fman->state->accumulated_num_of_open_dmas -= current_val;
4761 -       fman->state->accumulated_num_of_open_dmas += open_dmas;
4762 -
4763 -       if (fman->state->rev_info.major < 6)
4764 -               total_num_dmas =
4765 -                   (u8)(fman->state->accumulated_num_of_open_dmas +
4766 -                   fman->state->extra_open_dmas_pool_size);
4767 -
4768 -       /* calculate reg */
4769 -       tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
4770 -           ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
4771 -       tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
4772 -                          (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
4773 -       iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
4774 -
4775 -       /* update total num of DMA's with committed number of open DMAS,
4776 -        * and max uncommitted pool.
4777 -        */
4778 -       if (total_num_dmas) {
4779 -               tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
4780 -               tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
4781 -               iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
4782 -       }
4783 -
4784 -       return 0;
4785 -}
4786 -
4787 -static int fman_config(struct fman *fman)
4788 -{
4789 -       void __iomem *base_addr;
4790 -       int err;
4791 -
4792 -       base_addr = fman->dts_params.base_addr;
4793 -
4794 -       fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
4795 -       if (!fman->state)
4796 -               goto err_fm_state;
4797 -
4798 -       /* Allocate the FM driver's parameters structure */
4799 -       fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
4800 -       if (!fman->cfg)
4801 -               goto err_fm_drv;
4802 -
4803 -       /* Initialize MURAM block */
4804 -       fman->muram =
4805 -               fman_muram_init(fman->dts_params.muram_res.start,
4806 -                               resource_size(&fman->dts_params.muram_res));
4807 -       if (!fman->muram)
4808 -               goto err_fm_soc_specific;
4809 -
4810 -       /* Initialize FM parameters which will be kept by the driver */
4811 -       fman->state->fm_id = fman->dts_params.id;
4812 -       fman->state->fm_clk_freq = fman->dts_params.clk_freq;
4813 -       fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
4814 -       fman->state->num_of_qman_channels =
4815 -               fman->dts_params.num_of_qman_channels;
4816 -       fman->state->res = fman->dts_params.res;
4817 -       fman->exception_cb = fman_exceptions;
4818 -       fman->bus_error_cb = fman_bus_error;
4819 -       fman->fpm_regs = base_addr + FPM_OFFSET;
4820 -       fman->bmi_regs = base_addr + BMI_OFFSET;
4821 -       fman->qmi_regs = base_addr + QMI_OFFSET;
4822 -       fman->dma_regs = base_addr + DMA_OFFSET;
4823 -       fman->base_addr = base_addr;
4824 -
4825 -       spin_lock_init(&fman->spinlock);
4826 -       fman_defconfig(fman->cfg);
4827 -
4828 -       fman->state->extra_fifo_pool_size = 0;
4829 -       fman->state->exceptions = (EX_DMA_BUS_ERROR                 |
4830 -                                       EX_DMA_READ_ECC              |
4831 -                                       EX_DMA_SYSTEM_WRITE_ECC      |
4832 -                                       EX_DMA_FM_WRITE_ECC          |
4833 -                                       EX_FPM_STALL_ON_TASKS        |
4834 -                                       EX_FPM_SINGLE_ECC            |
4835 -                                       EX_FPM_DOUBLE_ECC            |
4836 -                                       EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
4837 -                                       EX_BMI_LIST_RAM_ECC          |
4838 -                                       EX_BMI_STORAGE_PROFILE_ECC   |
4839 -                                       EX_BMI_STATISTICS_RAM_ECC    |
4840 -                                       EX_MURAM_ECC                 |
4841 -                                       EX_BMI_DISPATCH_RAM_ECC      |
4842 -                                       EX_QMI_DOUBLE_ECC            |
4843 -                                       EX_QMI_SINGLE_ECC);
4844 -
4845 -       /* Read FMan revision for future use*/
4846 -       fman_get_revision(fman, &fman->state->rev_info);
4847 -
4848 -       err = fill_soc_specific_params(fman->state);
4849 -       if (err)
4850 -               goto err_fm_soc_specific;
4851 -
4852 -       /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
4853 -       if (fman->state->rev_info.major >= 6)
4854 -               fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
4855 -
4856 -       fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
4857 -
4858 -       fman->state->total_num_of_tasks =
4859 -       (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
4860 -                                   fman->state->rev_info.minor,
4861 -                                   fman->state->bmi_max_num_of_tasks);
4862 -
4863 -       if (fman->state->rev_info.major < 6) {
4864 -               fman->cfg->dma_comm_qtsh_clr_emer =
4865 -               (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
4866 -                                       fman->state->dma_thresh_max_commq);
4867 -
4868 -               fman->cfg->dma_comm_qtsh_asrt_emer =
4869 -               (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
4870 -                                        fman->state->dma_thresh_max_commq);
4871 -
4872 -               fman->cfg->dma_cam_num_of_entries =
4873 -               DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);
4874 -
4875 -               fman->cfg->dma_read_buf_tsh_clr_emer =
4876 -               DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
4877 -
4878 -               fman->cfg->dma_read_buf_tsh_asrt_emer =
4879 -               DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
4880 -
4881 -               fman->cfg->dma_write_buf_tsh_clr_emer =
4882 -               DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
4883 -
4884 -               fman->cfg->dma_write_buf_tsh_asrt_emer =
4885 -               DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
4886 -
4887 -               fman->cfg->dma_axi_dbg_num_of_beats =
4888 -               DFLT_AXI_DBG_NUM_OF_BEATS;
4889 -       }
4890 -
4891 -       return 0;
4892 -
4893 -err_fm_soc_specific:
4894 -       kfree(fman->cfg);
4895 -err_fm_drv:
4896 -       kfree(fman->state);
4897 -err_fm_state:
4898 -       kfree(fman);
4899 -       return -EINVAL;
4900 -}
4901 -
4902 -static int fman_reset(struct fman *fman)
4903 -{
4904 -       u32 count;
4905 -       int err = 0;
4906 -
4907 -       if (fman->state->rev_info.major < 6) {
4908 -               iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
4909 -               /* Wait for reset completion */
4910 -               count = 100;
4911 -               do {
4912 -                       udelay(1);
4913 -               } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
4914 -                        FPM_RSTC_FM_RESET) && --count);
4915 -               if (count == 0)
4916 -                       err = -EBUSY;
4917 -
4918 -               goto _return;
4919 -       } else {
4920 -               struct device_node *guts_node;
4921 -               struct ccsr_guts __iomem *guts_regs;
4922 -               u32 devdisr2, reg;
4923 -
4924 -               /* Errata A007273 */
4925 -               guts_node =
4926 -                       of_find_compatible_node(NULL, NULL,
4927 -                                               "fsl,qoriq-device-config-2.0");
4928 -               if (!guts_node) {
4929 -                       dev_err(fman->dev, "%s: Couldn't find guts node\n",
4930 -                               __func__);
4931 -                       goto guts_node;
4932 -               }
4933 -
4934 -               guts_regs = of_iomap(guts_node, 0);
4935 -               if (!guts_regs) {
4936 -                       dev_err(fman->dev, "%s: Couldn't map %s regs\n",
4937 -                               __func__, guts_node->full_name);
4938 -                       goto guts_regs;
4939 -               }
4940 -#define FMAN1_ALL_MACS_MASK    0xFCC00000
4941 -#define FMAN2_ALL_MACS_MASK    0x000FCC00
4942 -               /* Read current state */
4943 -               devdisr2 = ioread32be(&guts_regs->devdisr2);
4944 -               if (fman->dts_params.id == 0)
4945 -                       reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
4946 -               else
4947 -                       reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
4948 -
4949 -               /* Enable all MACs */
4950 -               iowrite32be(reg, &guts_regs->devdisr2);
4951 -
4952 -               /* Perform FMan reset */
4953 -               iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
4954 -
4955 -               /* Wait for reset completion */
4956 -               count = 100;
4957 -               do {
4958 -                       udelay(1);
4959 -               } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
4960 -                        FPM_RSTC_FM_RESET) && --count);
4961 -               if (count == 0) {
4962 -                       iounmap(guts_regs);
4963 -                       of_node_put(guts_node);
4964 -                       err = -EBUSY;
4965 -                       goto _return;
4966 -               }
4967 -
4968 -               /* Restore devdisr2 value */
4969 -               iowrite32be(devdisr2, &guts_regs->devdisr2);
4970 -
4971 -               iounmap(guts_regs);
4972 -               of_node_put(guts_node);
4973 -
4974 -               goto _return;
4975 -
4976 -guts_regs:
4977 -               of_node_put(guts_node);
4978 -guts_node:
4979 -               dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
4980 -                       __func__);
4981 -       }
4982 -_return:
4983 -       return err;
4984 -}
4985 -
4986 -static int fman_init(struct fman *fman)
4987 -{
4988 -       struct fman_cfg *cfg = NULL;
4989 -       int err = 0, i, count;
4990 -
4991 -       if (is_init_done(fman->cfg))
4992 -               return -EINVAL;
4993 -
4994 -       fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
4995 -
4996 -       cfg = fman->cfg;
4997 -
4998 -       /* clear revision-dependent non existing exception */
4999 -       if (fman->state->rev_info.major < 6)
5000 -               fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC;
5001 -
5002 -       if (fman->state->rev_info.major >= 6)
5003 -               fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC;
5004 -
5005 -       /* clear CPG */
5006 -       memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
5007 -                 fman->state->fm_port_num_of_cg);
5008 -
5009 -       /* Save LIODN info before FMan reset
5010 -        * Skipping non-existent port 0 (i = 1)
5011 -        */
5012 -       for (i = 1; i < FMAN_LIODN_TBL; i++) {
5013 -               u32 liodn_base;
5014 -
5015 -               fman->liodn_offset[i] =
5016 -                       ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
5017 -               liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
5018 -               if (i % 2) {
5019 -                       /* FMDM_PLR LSB holds LIODN base for odd ports */
5020 -                       liodn_base &= DMA_LIODN_BASE_MASK;
5021 -               } else {
5022 -                       /* FMDM_PLR MSB holds LIODN base for even ports */
5023 -                       liodn_base >>= DMA_LIODN_SHIFT;
5024 -                       liodn_base &= DMA_LIODN_BASE_MASK;
5025 -               }
5026 -               fman->liodn_base[i] = liodn_base;
5027 -       }
5028 -
5029 -       err = fman_reset(fman);
5030 -       if (err)
5031 -               return err;
5032 -
5033 -       if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
5034 -               resume(fman->fpm_regs);
5035 -               /* Wait until QMI is not in halt not busy state */
5036 -               count = 100;
5037 -               do {
5038 -                       udelay(1);
5039 -               } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
5040 -                        QMI_GS_HALT_NOT_BUSY) && --count);
5041 -               if (count == 0)
5042 -                       dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
5043 -                                __func__);
5044 -       }
5045 -
5046 -       if (clear_iram(fman) != 0)
5047 -               return -EINVAL;
5048 -
5049 -       cfg->exceptions = fman->state->exceptions;
5050 -
5051 -       /* Init DMA Registers */
5052 -
5053 -       err = dma_init(fman);
5054 -       if (err != 0) {
5055 -               free_init_resources(fman);
5056 -               return err;
5057 -       }
5058 -
5059 -       /* Init FPM Registers */
5060 -       fpm_init(fman->fpm_regs, fman->cfg);
5061 -
5062 -       /* define common resources */
5063 -       /* allocate MURAM for FIFO according to total size */
5064 -       fman->fifo_offset = fman_muram_alloc(fman->muram,
5065 -                                            fman->state->total_fifo_size);
5066 -       if (IS_ERR_VALUE(fman->fifo_offset)) {
5067 -               free_init_resources(fman);
5068 -               dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
5069 -                       __func__);
5070 -               return -ENOMEM;
5071 -       }
5072 -
5073 -       cfg->fifo_base_addr = fman->fifo_offset;
5074 -       cfg->total_fifo_size = fman->state->total_fifo_size;
5075 -       cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
5076 -       cfg->clk_freq = fman->state->fm_clk_freq;
5077 -
5078 -       /* Init BMI Registers */
5079 -       bmi_init(fman->bmi_regs, fman->cfg);
5080 -
5081 -       /* Init QMI Registers */
5082 -       qmi_init(fman->qmi_regs, fman->cfg);
5083 -
5084 -       err = enable(fman, cfg);
5085 -       if (err != 0)
5086 -               return err;
5087 -
5088 -       enable_time_stamp(fman);
5089 -
5090 -       kfree(fman->cfg);
5091 -       fman->cfg = NULL;
5092 -
5093 -       return 0;
5094 -}
5095 -
5096 -static int fman_set_exception(struct fman *fman,
5097 -                             enum fman_exceptions exception, bool enable)
5098 -{
5099 -       u32 bit_mask = 0;
5100 -
5101 -       if (!is_init_done(fman->cfg))
5102 -               return -EINVAL;
5103 -
5104 -       bit_mask = get_exception_flag(exception);
5105 -       if (bit_mask) {
5106 -               if (enable)
5107 -                       fman->state->exceptions |= bit_mask;
5108 -               else
5109 -                       fman->state->exceptions &= ~bit_mask;
5110 -       } else {
5111 -               dev_err(fman->dev, "%s: Undefined exception (%d)\n",
5112 -                       __func__, exception);
5113 -               return -EINVAL;
5114 -       }
5115 -
5116 -       return set_exception(fman, exception, enable);
5117 -}
5118 -
5119 -/**
5120 - * fman_register_intr
5121 - * @fman:      A Pointer to FMan device
5122 - * @mod:       Calling module
5123 - * @mod_id:    Module id (if more than 1 exists, '0' if not)
5124 - * @intr_type: Interrupt type (error/normal) selection.
5125 - * @f_isr:     The interrupt service routine.
5126 - * @h_src_arg: Argument to be passed to f_isr.
5127 - *
5128 - * Used to register an event handler to be processed by FMan
5129 - *
5130 - * Return: 0 on success; Error code otherwise.
5131 - */
5132 -void fman_register_intr(struct fman *fman, enum fman_event_modules module,
5133 -                       u8 mod_id, enum fman_intr_type intr_type,
5134 -                       void (*isr_cb)(void *src_arg), void *src_arg)
5135 -{
5136 -       int event = 0;
5137 -
5138 -       event = get_module_event(module, mod_id, intr_type);
5139 -       WARN_ON(event >= FMAN_EV_CNT);
5140 -
5141 -       /* register in local FM structure */
5142 -       fman->intr_mng[event].isr_cb = isr_cb;
5143 -       fman->intr_mng[event].src_handle = src_arg;
5144 -}
5145 -EXPORT_SYMBOL(fman_register_intr);
5146 -
5147 -/**
5148 - * fman_unregister_intr
5149 - * @fman:      A Pointer to FMan device
5150 - * @mod:       Calling module
5151 - * @mod_id:    Module id (if more than 1 exists, '0' if not)
5152 - * @intr_type: Interrupt type (error/normal) selection.
5153 - *
5154 - * Used to unregister an event handler to be processed by FMan
5155 - *
5156 - * Return: 0 on success; Error code otherwise.
5157 - */
5158 -void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
5159 -                         u8 mod_id, enum fman_intr_type intr_type)
5160 -{
5161 -       int event = 0;
5162 -
5163 -       event = get_module_event(module, mod_id, intr_type);
5164 -       WARN_ON(event >= FMAN_EV_CNT);
5165 -
5166 -       fman->intr_mng[event].isr_cb = NULL;
5167 -       fman->intr_mng[event].src_handle = NULL;
5168 -}
5169 -EXPORT_SYMBOL(fman_unregister_intr);
5170 -
5171 -/**
5172 - * fman_set_port_params
5173 - * @fman:              A Pointer to FMan device
5174 - * @port_params:       Port parameters
5175 - *
5176 - * Used by FMan Port to pass parameters to the FMan
5177 - *
5178 - * Return: 0 on success; Error code otherwise.
5179 - */
5180 -int fman_set_port_params(struct fman *fman,
5181 -                        struct fman_port_init_params *port_params)
5182 -{
5183 -       int err;
5184 -       unsigned long flags;
5185 -       u8 port_id = port_params->port_id, mac_id;
5186 -
5187 -       spin_lock_irqsave(&fman->spinlock, flags);
5188 -
5189 -       err = set_num_of_tasks(fman, port_params->port_id,
5190 -                              &port_params->num_of_tasks,
5191 -                              &port_params->num_of_extra_tasks);
5192 -       if (err)
5193 -               goto return_err;
5194 -
5195 -       /* TX Ports */
5196 -       if (port_params->port_type != FMAN_PORT_TYPE_RX) {
5197 -               u32 enq_th, deq_th, reg;
5198 -
5199 -               /* update qmi ENQ/DEQ threshold */
5200 -               fman->state->accumulated_num_of_deq_tnums +=
5201 -                       port_params->deq_pipeline_depth;
5202 -               enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
5203 -                         QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
5204 -               /* if enq_th is too big, we reduce it to the max value
5205 -                * that is still 0
5206 -                */
5207 -               if (enq_th >= (fman->state->qmi_max_num_of_tnums -
5208 -                   fman->state->accumulated_num_of_deq_tnums)) {
5209 -                       enq_th =
5210 -                       fman->state->qmi_max_num_of_tnums -
5211 -                       fman->state->accumulated_num_of_deq_tnums - 1;
5212 -
5213 -                       reg = ioread32be(&fman->qmi_regs->fmqm_gc);
5214 -                       reg &= ~QMI_CFG_ENQ_MASK;
5215 -                       reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
5216 -                       iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
5217 -               }
5218 -
5219 -               deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
5220 -                                   QMI_CFG_DEQ_MASK;
5221 -               /* if deq_th is too small, we enlarge it to the min
5222 -                * value that is still 0.
5223 -                * depTh may not be larger than 63
5224 -                * (fman->state->qmi_max_num_of_tnums-1).
5225 -                */
5226 -               if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
5227 -                   (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
5228 -                       deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
5229 -                       reg = ioread32be(&fman->qmi_regs->fmqm_gc);
5230 -                       reg &= ~QMI_CFG_DEQ_MASK;
5231 -                       reg |= deq_th;
5232 -                       iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
5233 -               }
5234 -       }
5235 -
5236 -       err = set_size_of_fifo(fman, port_params->port_id,
5237 -                              &port_params->size_of_fifo,
5238 -                              &port_params->extra_size_of_fifo);
5239 -       if (err)
5240 -               goto return_err;
5241 -
5242 -       err = set_num_of_open_dmas(fman, port_params->port_id,
5243 -                                  &port_params->num_of_open_dmas,
5244 -                                  &port_params->num_of_extra_open_dmas);
5245 -       if (err)
5246 -               goto return_err;
5247 -
5248 -       set_port_liodn(fman, port_id, fman->liodn_base[port_id],
5249 -                      fman->liodn_offset[port_id]);
5250 -
5251 -       if (fman->state->rev_info.major < 6)
5252 -               set_port_order_restoration(fman->fpm_regs, port_id);
5253 -
5254 -       mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
5255 -
5256 -       if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
5257 -               fman->state->port_mfl[mac_id] = port_params->max_frame_length;
5258 -       } else {
5259 -               dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
5260 -                        __func__, port_id, mac_id);
5261 -               err = -EINVAL;
5262 -               goto return_err;
5263 -       }
5264 -
5265 -       spin_unlock_irqrestore(&fman->spinlock, flags);
5266 -
5267 -       return 0;
5268 -
5269 -return_err:
5270 -       spin_unlock_irqrestore(&fman->spinlock, flags);
5271 -       return err;
5272 -}
5273 -EXPORT_SYMBOL(fman_set_port_params);
5274 -
5275 -/**
5276 - * fman_reset_mac
5277 - * @fman:      A Pointer to FMan device
5278 - * @mac_id:    MAC id to be reset
5279 - *
5280 - * Reset a specific MAC
5281 - *
5282 - * Return: 0 on success; Error code otherwise.
5283 - */
5284 -int fman_reset_mac(struct fman *fman, u8 mac_id)
5285 -{
5286 -       struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
5287 -       u32 msk, timeout = 100;
5288 -
5289 -       if (fman->state->rev_info.major >= 6) {
5290 -               dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n",
5291 -                       __func__);
5292 -               return -EINVAL;
5293 -       }
5294 -
5295 -       /* Get the relevant bit mask */
5296 -       switch (mac_id) {
5297 -       case 0:
5298 -               msk = FPM_RSTC_MAC0_RESET;
5299 -               break;
5300 -       case 1:
5301 -               msk = FPM_RSTC_MAC1_RESET;
5302 -               break;
5303 -       case 2:
5304 -               msk = FPM_RSTC_MAC2_RESET;
5305 -               break;
5306 -       case 3:
5307 -               msk = FPM_RSTC_MAC3_RESET;
5308 -               break;
5309 -       case 4:
5310 -               msk = FPM_RSTC_MAC4_RESET;
5311 -               break;
5312 -       case 5:
5313 -               msk = FPM_RSTC_MAC5_RESET;
5314 -               break;
5315 -       case 6:
5316 -               msk = FPM_RSTC_MAC6_RESET;
5317 -               break;
5318 -       case 7:
5319 -               msk = FPM_RSTC_MAC7_RESET;
5320 -               break;
5321 -       case 8:
5322 -               msk = FPM_RSTC_MAC8_RESET;
5323 -               break;
5324 -       case 9:
5325 -               msk = FPM_RSTC_MAC9_RESET;
5326 -               break;
5327 -       default:
5328 -               dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
5329 -                        __func__, mac_id);
5330 -               return -EINVAL;
5331 -       }
5332 -
5333 -       /* reset */
5334 -       iowrite32be(msk, &fpm_rg->fm_rstc);
5335 -       while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
5336 -               udelay(10);
5337 -
5338 -       if (!timeout)
5339 -               return -EIO;
5340 -
5341 -       return 0;
5342 -}
5343 -EXPORT_SYMBOL(fman_reset_mac);
5344 -
5345 -/**
5346 - * fman_set_mac_max_frame
5347 - * @fman:      A Pointer to FMan device
5348 - * @mac_id:    MAC id
5349 - * @mfl:       Maximum frame length
5350 - *
5351 - * Set maximum frame length of specific MAC in FMan driver
5352 - *
5353 - * Return: 0 on success; Error code otherwise.
5354 - */
5355 -int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
5356 -{
5357 -       /* if port is already initialized, check that MaxFrameLength is smaller
5358 -        * or equal to the port's max
5359 -        */
5360 -       if ((!fman->state->port_mfl[mac_id]) ||
5361 -           (mfl <= fman->state->port_mfl[mac_id])) {
5362 -               fman->state->mac_mfl[mac_id] = mfl;
5363 -       } else {
5364 -               dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
5365 -                        __func__);
5366 -               return -EINVAL;
5367 -       }
5368 -       return 0;
5369 -}
5370 -EXPORT_SYMBOL(fman_set_mac_max_frame);
5371 -
5372 -/**
5373 - * fman_get_clock_freq
5374 - * @fman:      A Pointer to FMan device
5375 - *
5376 - * Get FMan clock frequency
5377 - *
5378 - * Return: FMan clock frequency
5379 - */
5380 -u16 fman_get_clock_freq(struct fman *fman)
5381 -{
5382 -       return fman->state->fm_clk_freq;
5383 -}
5384 -
5385 -/**
5386 - * fman_get_bmi_max_fifo_size
5387 - * @fman:      A Pointer to FMan device
5388 - *
5389 - * Get FMan maximum FIFO size
5390 - *
5391 - * Return: FMan Maximum FIFO size
5392 - */
5393 -u32 fman_get_bmi_max_fifo_size(struct fman *fman)
5394 -{
5395 -       return fman->state->bmi_max_fifo_size;
5396 -}
5397 -EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
5398 -
5399 -/**
5400 - * fman_get_revision
5401 - * @fman               - Pointer to the FMan module
5402 - * @rev_info           - A structure of revision information parameters.
5403 - *
5404 - * Returns the FM revision
5405 - *
5406 - * Allowed only following fman_init().
5407 - *
5408 - * Return: 0 on success; Error code otherwise.
5409 - */
5410 -void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
5411 -{
5412 -       u32 tmp;
5413 -
5414 -       tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
5415 -       rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
5416 -                               FPM_REV1_MAJOR_SHIFT);
5417 -       rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
5418 -}
5419 -EXPORT_SYMBOL(fman_get_revision);
5420 -
5421 -/**
5422 - * fman_get_qman_channel_id
5423 - * @fman:      A Pointer to FMan device
5424 - * @port_id:   Port id
5425 - *
5426 - * Get QMan channel ID associated to the Port id
5427 - *
5428 - * Return: QMan channel ID
5429 - */
5430 -u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
5431 -{
5432 -       int i;
5433 -
5434 -       if (fman->state->rev_info.major >= 6) {
5435 -               u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
5436 -                                 0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
5437 -               for (i = 0; i < fman->state->num_of_qman_channels; i++) {
5438 -                       if (port_ids[i] == port_id)
5439 -                               break;
5440 -               }
5441 -       } else {
5442 -               u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
5443 -                                 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
5444 -               for (i = 0; i < fman->state->num_of_qman_channels; i++) {
5445 -                       if (port_ids[i] == port_id)
5446 -                               break;
5447 -               }
5448 -       }
5449 -
5450 -       if (i == fman->state->num_of_qman_channels)
5451 -               return 0;
5452 -
5453 -       return fman->state->qman_channel_base + i;
5454 -}
5455 -EXPORT_SYMBOL(fman_get_qman_channel_id);
5456 -
5457 -/**
5458 - * fman_get_mem_region
5459 - * @fman:      A Pointer to FMan device
5460 - *
5461 - * Get FMan memory region
5462 - *
5463 - * Return: A structure with FMan memory region information
5464 - */
5465 -struct resource *fman_get_mem_region(struct fman *fman)
5466 -{
5467 -       return fman->state->res;
5468 -}
5469 -EXPORT_SYMBOL(fman_get_mem_region);
5470 -
5471 -/* Bootargs defines */
5472 -/* Extra headroom for RX buffers - Default, min and max */
5473 -#define FSL_FM_RX_EXTRA_HEADROOM       64
5474 -#define FSL_FM_RX_EXTRA_HEADROOM_MIN   16
5475 -#define FSL_FM_RX_EXTRA_HEADROOM_MAX   384
5476 -
5477 -/* Maximum frame length */
5478 -#define FSL_FM_MAX_FRAME_SIZE                  1522
5479 -#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE         9600
5480 -#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE         64
5481 -
5482 -/* Extra headroom for Rx buffers.
5483 - * FMan is instructed to allocate, on the Rx path, this amount of
5484 - * space at the beginning of a data buffer, beside the DPA private
5485 - * data area and the IC fields.
5486 - * Does not impact Tx buffer layout.
5487 - * Configurable from bootargs. 64 by default, it's needed on
5488 - * particular forwarding scenarios that add extra headers to the
5489 - * forwarded frame.
5490 - */
5491 -static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
5492 -module_param(fsl_fm_rx_extra_headroom, int, 0);
5493 -MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
5494 -
5495 -/* Max frame size, across all interfaces.
5496 - * Configurable from bootargs, to avoid allocating oversized (socket)
5497 - * buffers when not using jumbo frames.
5498 - * Must be large enough to accommodate the network MTU, but small enough
5499 - * to avoid wasting skb memory.
5500 - *
5501 - * Could be overridden once, at boot-time, via the
5502 - * fm_set_max_frm() callback.
5503 - */
5504 -static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
5505 -module_param(fsl_fm_max_frm, int, 0);
5506 -MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
5507 -
5508 -/**
5509 - * fman_get_max_frm
5510 - *
5511 - * Return: Max frame length configured in the FM driver
5512 - */
5513 -u16 fman_get_max_frm(void)
5514 -{
5515 -       static bool fm_check_mfl;
5516 -
5517 -       if (!fm_check_mfl) {
5518 -               if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
5519 -                   fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
5520 -                       pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
5521 -                               fsl_fm_max_frm,
5522 -                               FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
5523 -                               FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
5524 -                               FSL_FM_MAX_FRAME_SIZE);
5525 -                       fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
5526 -               }
5527 -               fm_check_mfl = true;
5528 -       }
5529 -
5530 -       return fsl_fm_max_frm;
5531 -}
5532 -EXPORT_SYMBOL(fman_get_max_frm);
5533 -
5534 -/**
5535 - * fman_get_rx_extra_headroom
5536 - *
5537 - * Return: Extra headroom size configured in the FM driver
5538 - */
5539 -int fman_get_rx_extra_headroom(void)
5540 -{
5541 -       static bool fm_check_rx_extra_headroom;
5542 -
5543 -       if (!fm_check_rx_extra_headroom) {
5544 -               if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
5545 -                   fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
5546 -                       pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
5547 -                               fsl_fm_rx_extra_headroom,
5548 -                               FSL_FM_RX_EXTRA_HEADROOM_MIN,
5549 -                               FSL_FM_RX_EXTRA_HEADROOM_MAX,
5550 -                               FSL_FM_RX_EXTRA_HEADROOM);
5551 -                       fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
5552 -               }
5553 -
5554 -               fm_check_rx_extra_headroom = true;
5555 -               fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
5556 -       }
5557 -
5558 -       return fsl_fm_rx_extra_headroom;
5559 -}
5560 -EXPORT_SYMBOL(fman_get_rx_extra_headroom);
5561 -
5562 -/**
5563 - * fman_bind
5564 - * @dev:       FMan OF device pointer
5565 - *
5566 - * Bind to a specific FMan device.
5567 - *
5568 - * Allowed only after the port was created.
5569 - *
5570 - * Return: A pointer to the FMan device
5571 - */
5572 -struct fman *fman_bind(struct device *fm_dev)
5573 -{
5574 -       return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
5575 -}
5576 -EXPORT_SYMBOL(fman_bind);
5577 -
5578 -static irqreturn_t fman_err_irq(int irq, void *handle)
5579 -{
5580 -       struct fman *fman = (struct fman *)handle;
5581 -       u32 pending;
5582 -       struct fman_fpm_regs __iomem *fpm_rg;
5583 -       irqreturn_t single_ret, ret = IRQ_NONE;
5584 -
5585 -       if (!is_init_done(fman->cfg))
5586 -               return IRQ_NONE;
5587 -
5588 -       fpm_rg = fman->fpm_regs;
5589 -
5590 -       /* error interrupts */
5591 -       pending = ioread32be(&fpm_rg->fm_epi);
5592 -       if (!pending)
5593 -               return IRQ_NONE;
5594 -
5595 -       if (pending & ERR_INTR_EN_BMI) {
5596 -               single_ret = bmi_err_event(fman);
5597 -               if (single_ret == IRQ_HANDLED)
5598 -                       ret = IRQ_HANDLED;
5599 -       }
5600 -       if (pending & ERR_INTR_EN_QMI) {
5601 -               single_ret = qmi_err_event(fman);
5602 -               if (single_ret == IRQ_HANDLED)
5603 -                       ret = IRQ_HANDLED;
5604 -       }
5605 -       if (pending & ERR_INTR_EN_FPM) {
5606 -               single_ret = fpm_err_event(fman);
5607 -               if (single_ret == IRQ_HANDLED)
5608 -                       ret = IRQ_HANDLED;
5609 -       }
5610 -       if (pending & ERR_INTR_EN_DMA) {
5611 -               single_ret = dma_err_event(fman);
5612 -               if (single_ret == IRQ_HANDLED)
5613 -                       ret = IRQ_HANDLED;
5614 -       }
5615 -       if (pending & ERR_INTR_EN_MURAM) {
5616 -               single_ret = muram_err_intr(fman);
5617 -               if (single_ret == IRQ_HANDLED)
5618 -                       ret = IRQ_HANDLED;
5619 -       }
5620 -
5621 -       /* MAC error interrupts */
5622 -       if (pending & ERR_INTR_EN_MAC0) {
5623 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
5624 -               if (single_ret == IRQ_HANDLED)
5625 -                       ret = IRQ_HANDLED;
5626 -       }
5627 -       if (pending & ERR_INTR_EN_MAC1) {
5628 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
5629 -               if (single_ret == IRQ_HANDLED)
5630 -                       ret = IRQ_HANDLED;
5631 -       }
5632 -       if (pending & ERR_INTR_EN_MAC2) {
5633 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
5634 -               if (single_ret == IRQ_HANDLED)
5635 -                       ret = IRQ_HANDLED;
5636 -       }
5637 -       if (pending & ERR_INTR_EN_MAC3) {
5638 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
5639 -               if (single_ret == IRQ_HANDLED)
5640 -                       ret = IRQ_HANDLED;
5641 -       }
5642 -       if (pending & ERR_INTR_EN_MAC4) {
5643 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
5644 -               if (single_ret == IRQ_HANDLED)
5645 -                       ret = IRQ_HANDLED;
5646 -       }
5647 -       if (pending & ERR_INTR_EN_MAC5) {
5648 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
5649 -               if (single_ret == IRQ_HANDLED)
5650 -                       ret = IRQ_HANDLED;
5651 -       }
5652 -       if (pending & ERR_INTR_EN_MAC6) {
5653 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
5654 -               if (single_ret == IRQ_HANDLED)
5655 -                       ret = IRQ_HANDLED;
5656 -       }
5657 -       if (pending & ERR_INTR_EN_MAC7) {
5658 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
5659 -               if (single_ret == IRQ_HANDLED)
5660 -                       ret = IRQ_HANDLED;
5661 -       }
5662 -       if (pending & ERR_INTR_EN_MAC8) {
5663 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
5664 -               if (single_ret == IRQ_HANDLED)
5665 -                       ret = IRQ_HANDLED;
5666 -       }
5667 -       if (pending & ERR_INTR_EN_MAC9) {
5668 -               single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
5669 -               if (single_ret == IRQ_HANDLED)
5670 -                       ret = IRQ_HANDLED;
5671 -       }
5672 -
5673 -       return ret;
5674 -}
5675 -
5676 -static irqreturn_t fman_irq(int irq, void *handle)
5677 -{
5678 -       struct fman *fman = (struct fman *)handle;
5679 -       u32 pending;
5680 -       struct fman_fpm_regs __iomem *fpm_rg;
5681 -       irqreturn_t single_ret, ret = IRQ_NONE;
5682 -
5683 -       if (!is_init_done(fman->cfg))
5684 -               return IRQ_NONE;
5685 -
5686 -       fpm_rg = fman->fpm_regs;
5687 -
5688 -       /* normal interrupts */
5689 -       pending = ioread32be(&fpm_rg->fm_npi);
5690 -       if (!pending)
5691 -               return IRQ_NONE;
5692 -
5693 -       if (pending & INTR_EN_QMI) {
5694 -               single_ret = qmi_event(fman);
5695 -               if (single_ret == IRQ_HANDLED)
5696 -                       ret = IRQ_HANDLED;
5697 -       }
5698 -
5699 -       /* MAC interrupts */
5700 -       if (pending & INTR_EN_MAC0) {
5701 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
5702 -               if (single_ret == IRQ_HANDLED)
5703 -                       ret = IRQ_HANDLED;
5704 -       }
5705 -       if (pending & INTR_EN_MAC1) {
5706 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
5707 -               if (single_ret == IRQ_HANDLED)
5708 -                       ret = IRQ_HANDLED;
5709 -       }
5710 -       if (pending & INTR_EN_MAC2) {
5711 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
5712 -               if (single_ret == IRQ_HANDLED)
5713 -                       ret = IRQ_HANDLED;
5714 -       }
5715 -       if (pending & INTR_EN_MAC3) {
5716 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
5717 -               if (single_ret == IRQ_HANDLED)
5718 -                       ret = IRQ_HANDLED;
5719 -       }
5720 -       if (pending & INTR_EN_MAC4) {
5721 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
5722 -               if (single_ret == IRQ_HANDLED)
5723 -                       ret = IRQ_HANDLED;
5724 -       }
5725 -       if (pending & INTR_EN_MAC5) {
5726 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
5727 -               if (single_ret == IRQ_HANDLED)
5728 -                       ret = IRQ_HANDLED;
5729 -       }
5730 -       if (pending & INTR_EN_MAC6) {
5731 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
5732 -               if (single_ret == IRQ_HANDLED)
5733 -                       ret = IRQ_HANDLED;
5734 -       }
5735 -       if (pending & INTR_EN_MAC7) {
5736 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
5737 -               if (single_ret == IRQ_HANDLED)
5738 -                       ret = IRQ_HANDLED;
5739 -       }
5740 -       if (pending & INTR_EN_MAC8) {
5741 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
5742 -               if (single_ret == IRQ_HANDLED)
5743 -                       ret = IRQ_HANDLED;
5744 -       }
5745 -       if (pending & INTR_EN_MAC9) {
5746 -               single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
5747 -               if (single_ret == IRQ_HANDLED)
5748 -                       ret = IRQ_HANDLED;
5749 -       }
5750 -
5751 -       return ret;
5752 -}
5753 -
5754 -static const struct of_device_id fman_muram_match[] = {
5755 -       {
5756 -               .compatible = "fsl,fman-muram"},
5757 -       {}
5758 -};
5759 -MODULE_DEVICE_TABLE(of, fman_muram_match);
5760 -
5761 -static struct fman *read_dts_node(struct platform_device *of_dev)
5762 -{
5763 -       struct fman *fman;
5764 -       struct device_node *fm_node, *muram_node;
5765 -       struct resource *res;
5766 -       u32 val, range[2];
5767 -       int err, irq;
5768 -       struct clk *clk;
5769 -       u32 clk_rate;
5770 -       phys_addr_t phys_base_addr;
5771 -       resource_size_t mem_size;
5772 -
5773 -       fman = kzalloc(sizeof(*fman), GFP_KERNEL);
5774 -       if (!fman)
5775 -               return NULL;
5776 -
5777 -       fm_node = of_node_get(of_dev->dev.of_node);
5778 -
5779 -       err = of_property_read_u32(fm_node, "cell-index", &val);
5780 -       if (err) {
5781 -               dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
5782 -                       __func__, fm_node->full_name);
5783 -               goto fman_node_put;
5784 -       }
5785 -       fman->dts_params.id = (u8)val;
5786 -
5787 -       /* Get the FM interrupt */
5788 -       res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
5789 -       if (!res) {
5790 -               dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
5791 -                       __func__);
5792 -               goto fman_node_put;
5793 -       }
5794 -       irq = res->start;
5795 -
5796 -       /* Get the FM error interrupt */
5797 -       res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
5798 -       if (!res) {
5799 -               dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
5800 -                       __func__);
5801 -               goto fman_node_put;
5802 -       }
5803 -       fman->dts_params.err_irq = res->start;
5804 -
5805 -       /* Get the FM address */
5806 -       res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
5807 -       if (!res) {
5808 -               dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
5809 -                       __func__);
5810 -               goto fman_node_put;
5811 -       }
5812 -
5813 -       phys_base_addr = res->start;
5814 -       mem_size = resource_size(res);
5815 -
5816 -       clk = of_clk_get(fm_node, 0);
5817 -       if (IS_ERR(clk)) {
5818 -               dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
5819 -                       __func__, fman->dts_params.id);
5820 -               goto fman_node_put;
5821 -       }
5822 -
5823 -       clk_rate = clk_get_rate(clk);
5824 -       if (!clk_rate) {
5825 -               dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
5826 -                       __func__, fman->dts_params.id);
5827 -               goto fman_node_put;
5828 -       }
5829 -       /* Rounding to MHz */
5830 -       fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
5831 -
5832 -       err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
5833 -                                        &range[0], 2);
5834 -       if (err) {
5835 -               dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
5836 -                       __func__, fm_node->full_name);
5837 -               goto fman_node_put;
5838 -       }
5839 -       fman->dts_params.qman_channel_base = range[0];
5840 -       fman->dts_params.num_of_qman_channels = range[1];
5841 -
5842 -       /* Get the MURAM base address and size */
5843 -       muram_node = of_find_matching_node(fm_node, fman_muram_match);
5844 -       if (!muram_node) {
5845 -               dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
5846 -                       __func__);
5847 -               goto fman_node_put;
5848 -       }
5849 -
5850 -       err = of_address_to_resource(muram_node, 0,
5851 -                                    &fman->dts_params.muram_res);
5852 -       if (err) {
5853 -               of_node_put(muram_node);
5854 -               dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
5855 -                       __func__, err);
5856 -               goto fman_node_put;
5857 -       }
5858 -
5859 -       of_node_put(muram_node);
5860 -       of_node_put(fm_node);
5861 -
5862 -       err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
5863 -       if (err < 0) {
5864 -               dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
5865 -                       __func__, irq, err);
5866 -               goto fman_free;
5867 -       }
5868 -
5869 -       if (fman->dts_params.err_irq != 0) {
5870 -               err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
5871 -                                      fman_err_irq, IRQF_SHARED,
5872 -                                      "fman-err", fman);
5873 -               if (err < 0) {
5874 -                       dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
5875 -                               __func__, fman->dts_params.err_irq, err);
5876 -                       goto fman_free;
5877 -               }
5878 -       }
5879 -
5880 -       fman->dts_params.res =
5881 -               devm_request_mem_region(&of_dev->dev, phys_base_addr,
5882 -                                       mem_size, "fman");
5883 -       if (!fman->dts_params.res) {
5884 -               dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
5885 -                       __func__);
5886 -               goto fman_free;
5887 -       }
5888 -
5889 -       fman->dts_params.base_addr =
5890 -               devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
5891 -       if (!fman->dts_params.base_addr) {
5892 -               dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
5893 -               goto fman_free;
5894 -       }
5895 -
5896 -       fman->dev = &of_dev->dev;
5897 -
5898 -       return fman;
5899 -
5900 -fman_node_put:
5901 -       of_node_put(fm_node);
5902 -fman_free:
5903 -       kfree(fman);
5904 -       return NULL;
5905 -}
5906 -
5907 -static int fman_probe(struct platform_device *of_dev)
5908 -{
5909 -       struct fman *fman;
5910 -       struct device *dev;
5911 -       int err;
5912 -
5913 -       dev = &of_dev->dev;
5914 -
5915 -       fman = read_dts_node(of_dev);
5916 -       if (!fman)
5917 -               return -EIO;
5918 -
5919 -       err = fman_config(fman);
5920 -       if (err) {
5921 -               dev_err(dev, "%s: FMan config failed\n", __func__);
5922 -               return -EINVAL;
5923 -       }
5924 -
5925 -       if (fman_init(fman) != 0) {
5926 -               dev_err(dev, "%s: FMan init failed\n", __func__);
5927 -               return -EINVAL;
5928 -       }
5929 -
5930 -       if (fman->dts_params.err_irq == 0) {
5931 -               fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
5932 -               fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
5933 -               fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
5934 -               fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
5935 -               fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
5936 -               fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
5937 -               fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
5938 -               fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
5939 -               fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
5940 -               fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
5941 -               fman_set_exception(fman,
5942 -                                  FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
5943 -               fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
5944 -               fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
5945 -                                  false);
5946 -               fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
5947 -               fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
5948 -       }
5949 -
5950 -       dev_set_drvdata(dev, fman);
5951 -
5952 -       dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
5953 -
5954 -       return 0;
5955 -}
5956 -
5957 -static const struct of_device_id fman_match[] = {
5958 -       {
5959 -               .compatible = "fsl,fman"},
5960 -       {}
5961 -};
5962 -
5963 -MODULE_DEVICE_TABLE(of, fman_match);
5964 -
5965 -static struct platform_driver fman_driver = {
5966 -       .driver = {
5967 -               .name = "fsl-fman",
5968 -               .of_match_table = fman_match,
5969 -       },
5970 -       .probe = fman_probe,
5971 -};
5972 -
5973 -static int __init fman_load(void)
5974 -{
5975 -       int err;
5976 -
5977 -       pr_debug("FSL DPAA FMan driver\n");
5978 -
5979 -       err = platform_driver_register(&fman_driver);
5980 -       if (err < 0)
5981 -               pr_err("Error, platform_driver_register() = %d\n", err);
5982 -
5983 -       return err;
5984 -}
5985 -module_init(fman_load);
5986 -
5987 -static void __exit fman_unload(void)
5988 -{
5989 -       platform_driver_unregister(&fman_driver);
5990 -}
5991 -module_exit(fman_unload);
5992 -
5993 -MODULE_LICENSE("Dual BSD/GPL");
5994 -MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
5995 diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
5996 deleted file mode 100644
5997 index 57aae8d..0000000
5998 --- a/drivers/net/ethernet/freescale/fman/fman.h
5999 +++ /dev/null
6000 @@ -1,325 +0,0 @@
6001 -/*
6002 - * Copyright 2008-2015 Freescale Semiconductor Inc.
6003 - *
6004 - * Redistribution and use in source and binary forms, with or without
6005 - * modification, are permitted provided that the following conditions are met:
6006 - *     * Redistributions of source code must retain the above copyright
6007 - *       notice, this list of conditions and the following disclaimer.
6008 - *     * Redistributions in binary form must reproduce the above copyright
6009 - *       notice, this list of conditions and the following disclaimer in the
6010 - *       documentation and/or other materials provided with the distribution.
6011 - *     * Neither the name of Freescale Semiconductor nor the
6012 - *       names of its contributors may be used to endorse or promote products
6013 - *       derived from this software without specific prior written permission.
6014 - *
6015 - *
6016 - * ALTERNATIVELY, this software may be distributed under the terms of the
6017 - * GNU General Public License ("GPL") as published by the Free Software
6018 - * Foundation, either version 2 of that License or (at your option) any
6019 - * later version.
6020 - *
6021 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
6022 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
6023 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
6024 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
6025 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
6026 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
6027 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
6028 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
6029 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6030 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6031 - */
6032 -
6033 -#ifndef __FM_H
6034 -#define __FM_H
6035 -
6036 -#include <linux/io.h>
6037 -
6038 -/* FM Frame descriptor macros  */
6039 -/* Frame queue Context Override */
6040 -#define FM_FD_CMD_FCO                   0x80000000
6041 -#define FM_FD_CMD_RPD                   0x40000000  /* Read Prepended Data */
6042 -#define FM_FD_CMD_DTC                   0x10000000  /* Do L4 Checksum */
6043 -
6044 -/* TX-Port: Unsupported Format */
6045 -#define FM_FD_ERR_UNSUPPORTED_FORMAT    0x04000000
6046 -/* TX Port: Length Error */
6047 -#define FM_FD_ERR_LENGTH                0x02000000
6048 -#define FM_FD_ERR_DMA                   0x01000000  /* DMA Data error */
6049 -
6050 -/* IPR frame (not error) */
6051 -#define FM_FD_IPR                       0x00000001
6052 -/* IPR non-consistent-sp */
6053 -#define FM_FD_ERR_IPR_NCSP              (0x00100000 | FM_FD_IPR)
6054 -/* IPR error */
6055 -#define FM_FD_ERR_IPR                   (0x00200000 | FM_FD_IPR)
6056 -/* IPR timeout */
6057 -#define FM_FD_ERR_IPR_TO                (0x00300000 | FM_FD_IPR)
6058 -/* TX Port: Length Error */
6059 -#define FM_FD_ERR_IPRE                  (FM_FD_ERR_IPR & ~FM_FD_IPR)
6060 -
6061 -/* Rx FIFO overflow, FCS error, code error, running disparity error
6062 - * (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
6063 - * PHY error control character detected.
6064 - */
6065 -#define FM_FD_ERR_PHYSICAL              0x00080000
6066 -/* Frame too long OR Frame size exceeds max_length_frame  */
6067 -#define FM_FD_ERR_SIZE                  0x00040000
6068 -/* classification discard */
6069 -#define FM_FD_ERR_CLS_DISCARD           0x00020000
6070 -/* Extract Out of Frame */
6071 -#define FM_FD_ERR_EXTRACTION            0x00008000
6072 -/* No Scheme Selected */
6073 -#define FM_FD_ERR_NO_SCHEME             0x00004000
6074 -/* Keysize Overflow */
6075 -#define FM_FD_ERR_KEYSIZE_OVERFLOW      0x00002000
6076 -/* Frame color is red */
6077 -#define FM_FD_ERR_COLOR_RED             0x00000800
6078 -/* Frame color is yellow */
6079 -#define FM_FD_ERR_COLOR_YELLOW          0x00000400
6080 -/* Parser Time out Exceed */
6081 -#define FM_FD_ERR_PRS_TIMEOUT           0x00000080
6082 -/* Invalid Soft Parser instruction */
6083 -#define FM_FD_ERR_PRS_ILL_INSTRUCT      0x00000040
6084 -/* Header error was identified during parsing */
6085 -#define FM_FD_ERR_PRS_HDR_ERR           0x00000020
6086 -/* Frame parsed beyind 256 first bytes */
6087 -#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED  0x00000008
6088 -
6089 -/* non Frame-Manager error */
6090 -#define FM_FD_RX_STATUS_ERR_NON_FM      0x00400000
6091 -
6092 -/* FMan driver defines */
6093 -#define FMAN_BMI_FIFO_UNITS            0x100
6094 -#define OFFSET_UNITS                   16
6095 -
6096 -/* BMan defines */
6097 -#define BM_MAX_NUM_OF_POOLS            64 /* Buffers pools */
6098 -#define FMAN_PORT_MAX_EXT_POOLS_NUM    8  /* External BM pools per Rx port */
6099 -
6100 -struct fman; /* FMan data */
6101 -
6102 -/* Enum for defining port types */
6103 -enum fman_port_type {
6104 -       FMAN_PORT_TYPE_TX = 0,  /* TX Port */
6105 -       FMAN_PORT_TYPE_RX,      /* RX Port */
6106 -};
6107 -
6108 -struct fman_rev_info {
6109 -       u8 major;                       /* Major revision */
6110 -       u8 minor;                       /* Minor revision */
6111 -};
6112 -
6113 -enum fman_exceptions {
6114 -       FMAN_EX_DMA_BUS_ERROR = 0,      /* DMA bus error. */
6115 -       FMAN_EX_DMA_READ_ECC,           /* Read Buffer ECC error */
6116 -       FMAN_EX_DMA_SYSTEM_WRITE_ECC,   /* Write Buffer ECC err on sys side */
6117 -       FMAN_EX_DMA_FM_WRITE_ECC,       /* Write Buffer ECC error on FM side */
6118 -       FMAN_EX_DMA_SINGLE_PORT_ECC,    /* Single Port ECC error on FM side */
6119 -       FMAN_EX_FPM_STALL_ON_TASKS,     /* Stall of tasks on FPM */
6120 -       FMAN_EX_FPM_SINGLE_ECC,         /* Single ECC on FPM. */
6121 -       FMAN_EX_FPM_DOUBLE_ECC,         /* Double ECC error on FPM ram access */
6122 -       FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. */
6123 -       FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */
6124 -       FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */
6125 -       FMAN_EX_BMI_LIST_RAM_ECC,       /* Linked List RAM ECC error */
6126 -       FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */
6127 -       FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */
6128 -       FMAN_EX_BMI_DISPATCH_RAM_ECC,   /* Dispatch RAM ECC Error Enable */
6129 -       FMAN_EX_IRAM_ECC,               /* Double bit ECC occurred on IRAM */
6130 -       FMAN_EX_MURAM_ECC               /* Double bit ECC occurred on MURAM */
6131 -};
6132 -
6133 -/* Parse results memory layout */
6134 -struct fman_prs_result {
6135 -       u8 lpid;                /* Logical port id */
6136 -       u8 shimr;               /* Shim header result  */
6137 -       u16 l2r;                /* Layer 2 result */
6138 -       u16 l3r;                /* Layer 3 result */
6139 -       u8 l4r;         /* Layer 4 result */
6140 -       u8 cplan;               /* Classification plan id */
6141 -       u16 nxthdr;             /* Next Header  */
6142 -       u16 cksum;              /* Running-sum */
6143 -       /* Flags&fragment-offset field of the last IP-header */
6144 -       u16 flags_frag_off;
6145 -       /* Routing type field of a IPV6 routing extension header */
6146 -       u8 route_type;
6147 -       /* Routing Extension Header Present; last bit is IP valid */
6148 -       u8 rhp_ip_valid;
6149 -       u8 shim_off[2];         /* Shim offset */
6150 -       u8 ip_pid_off;          /* IP PID (last IP-proto) offset */
6151 -       u8 eth_off;             /* ETH offset */
6152 -       u8 llc_snap_off;        /* LLC_SNAP offset */
6153 -       u8 vlan_off[2];         /* VLAN offset */
6154 -       u8 etype_off;           /* ETYPE offset */
6155 -       u8 pppoe_off;           /* PPP offset */
6156 -       u8 mpls_off[2];         /* MPLS offset */
6157 -       u8 ip_off[2];           /* IP offset */
6158 -       u8 gre_off;             /* GRE offset */
6159 -       u8 l4_off;              /* Layer 4 offset */
6160 -       u8 nxthdr_off;          /* Parser end point */
6161 -};
6162 -
6163 -/* A structure for defining buffer prefix area content. */
6164 -struct fman_buffer_prefix_content {
6165 -       /* Number of bytes to be left at the beginning of the external
6166 -        * buffer; Note that the private-area will start from the base
6167 -        * of the buffer address.
6168 -        */
6169 -       u16 priv_data_size;
6170 -       /* true to pass the parse result to/from the FM;
6171 -        * User may use FM_PORT_GetBufferPrsResult() in
6172 -        * order to get the parser-result from a buffer.
6173 -        */
6174 -       bool pass_prs_result;
6175 -       /* true to pass the timeStamp to/from the FM User */
6176 -       bool pass_time_stamp;
6177 -       /* true to pass the KG hash result to/from the FM User may
6178 -        * use FM_PORT_GetBufferHashResult() in order to get the
6179 -        * parser-result from a buffer.
6180 -        */
6181 -       bool pass_hash_result;
6182 -       /* Add all other Internal-Context information: AD,
6183 -        * hash-result, key, etc.
6184 -        */
6185 -       u16 data_align;
6186 -};
6187 -
6188 -/* A structure of information about each of the external
6189 - * buffer pools used by a port or storage-profile.
6190 - */
6191 -struct fman_ext_pool_params {
6192 -       u8 id;              /* External buffer pool id */
6193 -       u16 size;                   /* External buffer pool buffer size */
6194 -};
6195 -
6196 -/* A structure for informing the driver about the external
6197 - * buffer pools allocated in the BM and used by a port or a
6198 - * storage-profile.
6199 - */
6200 -struct fman_ext_pools {
6201 -       u8 num_of_pools_used; /* Number of pools use by this port */
6202 -       struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM];
6203 -                                       /* Parameters for each port */
6204 -};
6205 -
6206 -/* A structure for defining BM pool depletion criteria */
6207 -struct fman_buf_pool_depletion {
6208 -       /* select mode in which pause frames will be sent after a
6209 -        * number of pools (all together!) are depleted
6210 -        */
6211 -       bool pools_grp_mode_enable;
6212 -       /* the number of depleted pools that will invoke pause
6213 -        * frames transmission.
6214 -        */
6215 -       u8 num_of_pools;
6216 -       /* For each pool, true if it should be considered for
6217 -        * depletion (Note - this pool must be used by this port!).
6218 -        */
6219 -       bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
6220 -       /* select mode in which pause frames will be sent
6221 -        * after a single-pool is depleted;
6222 -        */
6223 -       bool single_pool_mode_enable;
6224 -       /* For each pool, true if it should be considered
6225 -        * for depletion (Note - this pool must be used by this port!)
6226 -        */
6227 -       bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
6228 -};
6229 -
6230 -/* Enum for inter-module interrupts registration */
6231 -enum fman_event_modules {
6232 -       FMAN_MOD_MAC = 0,               /* MAC event */
6233 -       FMAN_MOD_FMAN_CTRL,     /* FMAN Controller */
6234 -       FMAN_MOD_DUMMY_LAST
6235 -};
6236 -
6237 -/* Enum for interrupts types */
6238 -enum fman_intr_type {
6239 -       FMAN_INTR_TYPE_ERR,
6240 -       FMAN_INTR_TYPE_NORMAL
6241 -};
6242 -
6243 -/* Enum for inter-module interrupts registration */
6244 -enum fman_inter_module_event {
6245 -       FMAN_EV_ERR_MAC0 = 0,   /* MAC 0 error event */
6246 -       FMAN_EV_ERR_MAC1,               /* MAC 1 error event */
6247 -       FMAN_EV_ERR_MAC2,               /* MAC 2 error event */
6248 -       FMAN_EV_ERR_MAC3,               /* MAC 3 error event */
6249 -       FMAN_EV_ERR_MAC4,               /* MAC 4 error event */
6250 -       FMAN_EV_ERR_MAC5,               /* MAC 5 error event */
6251 -       FMAN_EV_ERR_MAC6,               /* MAC 6 error event */
6252 -       FMAN_EV_ERR_MAC7,               /* MAC 7 error event */
6253 -       FMAN_EV_ERR_MAC8,               /* MAC 8 error event */
6254 -       FMAN_EV_ERR_MAC9,               /* MAC 9 error event */
6255 -       FMAN_EV_MAC0,           /* MAC 0 event (Magic packet detection) */
6256 -       FMAN_EV_MAC1,           /* MAC 1 event (Magic packet detection) */
6257 -       FMAN_EV_MAC2,           /* MAC 2 (Magic packet detection) */
6258 -       FMAN_EV_MAC3,           /* MAC 3 (Magic packet detection) */
6259 -       FMAN_EV_MAC4,           /* MAC 4 (Magic packet detection) */
6260 -       FMAN_EV_MAC5,           /* MAC 5 (Magic packet detection) */
6261 -       FMAN_EV_MAC6,           /* MAC 6 (Magic packet detection) */
6262 -       FMAN_EV_MAC7,           /* MAC 7 (Magic packet detection) */
6263 -       FMAN_EV_MAC8,           /* MAC 8 event (Magic packet detection) */
6264 -       FMAN_EV_MAC9,           /* MAC 9 event (Magic packet detection) */
6265 -       FMAN_EV_FMAN_CTRL_0,    /* Fman controller event 0 */
6266 -       FMAN_EV_FMAN_CTRL_1,    /* Fman controller event 1 */
6267 -       FMAN_EV_FMAN_CTRL_2,    /* Fman controller event 2 */
6268 -       FMAN_EV_FMAN_CTRL_3,    /* Fman controller event 3 */
6269 -       FMAN_EV_CNT
6270 -};
6271 -
6272 -struct fman_intr_src {
6273 -       void (*isr_cb)(void *src_arg);
6274 -       void *src_handle;
6275 -};
6276 -
6277 -/* Structure for port-FM communication during fman_port_init. */
6278 -struct fman_port_init_params {
6279 -       u8 port_id;                     /* port Id */
6280 -       enum fman_port_type port_type;  /* Port type */
6281 -       u16 port_speed;                 /* Port speed */
6282 -       u16 liodn_offset;               /* Port's requested resource */
6283 -       u8 num_of_tasks;                /* Port's requested resource */
6284 -       u8 num_of_extra_tasks;          /* Port's requested resource */
6285 -       u8 num_of_open_dmas;            /* Port's requested resource */
6286 -       u8 num_of_extra_open_dmas;      /* Port's requested resource */
6287 -       u32 size_of_fifo;               /* Port's requested resource */
6288 -       u32 extra_size_of_fifo;         /* Port's requested resource */
6289 -       u8 deq_pipeline_depth;          /* Port's requested resource */
6290 -       u16 max_frame_length;           /* Port's max frame length. */
6291 -       u16 liodn_base;
6292 -       /* LIODN base for this port, to be used together with LIODN offset. */
6293 -};
6294 -
6295 -void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
6296 -
6297 -void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
6298 -                       u8 mod_id, enum fman_intr_type intr_type,
6299 -                       void (*f_isr)(void *h_src_arg), void *h_src_arg);
6300 -
6301 -void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
6302 -                         u8 mod_id, enum fman_intr_type intr_type);
6303 -
6304 -int fman_set_port_params(struct fman *fman,
6305 -                        struct fman_port_init_params *port_params);
6306 -
6307 -int fman_reset_mac(struct fman *fman, u8 mac_id);
6308 -
6309 -u16 fman_get_clock_freq(struct fman *fman);
6310 -
6311 -u32 fman_get_bmi_max_fifo_size(struct fman *fman);
6312 -
6313 -int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
6314 -
6315 -u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
6316 -
6317 -struct resource *fman_get_mem_region(struct fman *fman);
6318 -
6319 -u16 fman_get_max_frm(void);
6320 -
6321 -int fman_get_rx_extra_headroom(void);
6322 -
6323 -struct fman *fman_bind(struct device *dev);
6324 -
6325 -#endif /* __FM_H */
6326 diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
6327 deleted file mode 100644
6328 index c88918c..0000000
6329 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
6330 +++ /dev/null
6331 @@ -1,1451 +0,0 @@
6332 -/*
6333 - * Copyright 2008-2015 Freescale Semiconductor Inc.
6334 - *
6335 - * Redistribution and use in source and binary forms, with or without
6336 - * modification, are permitted provided that the following conditions are met:
6337 - *     * Redistributions of source code must retain the above copyright
6338 - *       notice, this list of conditions and the following disclaimer.
6339 - *     * Redistributions in binary form must reproduce the above copyright
6340 - *       notice, this list of conditions and the following disclaimer in the
6341 - *       documentation and/or other materials provided with the distribution.
6342 - *     * Neither the name of Freescale Semiconductor nor the
6343 - *       names of its contributors may be used to endorse or promote products
6344 - *       derived from this software without specific prior written permission.
6345 - *
6346 - *
6347 - * ALTERNATIVELY, this software may be distributed under the terms of the
6348 - * GNU General Public License ("GPL") as published by the Free Software
6349 - * Foundation, either version 2 of that License or (at your option) any
6350 - * later version.
6351 - *
6352 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
6353 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
6354 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
6355 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
6356 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
6357 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
6358 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
6359 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
6360 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
6361 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6362 - */
6363 -
6364 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6365 -
6366 -#include "fman_dtsec.h"
6367 -#include "fman.h"
6368 -
6369 -#include <linux/slab.h>
6370 -#include <linux/bitrev.h>
6371 -#include <linux/io.h>
6372 -#include <linux/delay.h>
6373 -#include <linux/phy.h>
6374 -#include <linux/crc32.h>
6375 -#include <linux/of_mdio.h>
6376 -#include <linux/mii.h>
6377 -
6378 -/* TBI register addresses */
6379 -#define MII_TBICON             0x11
6380 -
6381 -/* TBICON register bit fields */
6382 -#define TBICON_SOFT_RESET      0x8000  /* Soft reset */
6383 -#define TBICON_DISABLE_RX_DIS  0x2000  /* Disable receive disparity */
6384 -#define TBICON_DISABLE_TX_DIS  0x1000  /* Disable transmit disparity */
6385 -#define TBICON_AN_SENSE                0x0100  /* Auto-negotiation sense enable */
6386 -#define TBICON_CLK_SELECT      0x0020  /* Clock select */
6387 -#define TBICON_MI_MODE         0x0010  /* GMII mode (TBI if not set) */
6388 -
6389 -#define TBIANA_SGMII           0x4001
6390 -#define TBIANA_1000X           0x01a0
6391 -
6392 -/* Interrupt Mask Register (IMASK) */
6393 -#define DTSEC_IMASK_BREN       0x80000000
6394 -#define DTSEC_IMASK_RXCEN      0x40000000
6395 -#define DTSEC_IMASK_MSROEN     0x04000000
6396 -#define DTSEC_IMASK_GTSCEN     0x02000000
6397 -#define DTSEC_IMASK_BTEN       0x01000000
6398 -#define DTSEC_IMASK_TXCEN      0x00800000
6399 -#define DTSEC_IMASK_TXEEN      0x00400000
6400 -#define DTSEC_IMASK_LCEN       0x00040000
6401 -#define DTSEC_IMASK_CRLEN      0x00020000
6402 -#define DTSEC_IMASK_XFUNEN     0x00010000
6403 -#define DTSEC_IMASK_ABRTEN     0x00008000
6404 -#define DTSEC_IMASK_IFERREN    0x00004000
6405 -#define DTSEC_IMASK_MAGEN      0x00000800
6406 -#define DTSEC_IMASK_MMRDEN     0x00000400
6407 -#define DTSEC_IMASK_MMWREN     0x00000200
6408 -#define DTSEC_IMASK_GRSCEN     0x00000100
6409 -#define DTSEC_IMASK_TDPEEN     0x00000002
6410 -#define DTSEC_IMASK_RDPEEN     0x00000001
6411 -
6412 -#define DTSEC_EVENTS_MASK              \
6413 -        ((u32)(DTSEC_IMASK_BREN    |   \
6414 -               DTSEC_IMASK_RXCEN   |   \
6415 -               DTSEC_IMASK_BTEN    |   \
6416 -               DTSEC_IMASK_TXCEN   |   \
6417 -               DTSEC_IMASK_TXEEN   |   \
6418 -               DTSEC_IMASK_ABRTEN  |   \
6419 -               DTSEC_IMASK_LCEN    |   \
6420 -               DTSEC_IMASK_CRLEN   |   \
6421 -               DTSEC_IMASK_XFUNEN  |   \
6422 -               DTSEC_IMASK_IFERREN |   \
6423 -               DTSEC_IMASK_MAGEN   |   \
6424 -               DTSEC_IMASK_TDPEEN  |   \
6425 -               DTSEC_IMASK_RDPEEN))
6426 -
6427 -/* dtsec timestamp event bits */
6428 -#define TMR_PEMASK_TSREEN      0x00010000
6429 -#define TMR_PEVENT_TSRE                0x00010000
6430 -
6431 -/* Group address bit indication */
6432 -#define MAC_GROUP_ADDRESS      0x0000010000000000ULL
6433 -
6434 -/* Defaults */
6435 -#define DEFAULT_HALFDUP_RETRANSMIT             0xf
6436 -#define DEFAULT_HALFDUP_COLL_WINDOW            0x37
6437 -#define DEFAULT_TX_PAUSE_TIME                  0xf000
6438 -#define DEFAULT_RX_PREPEND                     0
6439 -#define DEFAULT_PREAMBLE_LEN                   7
6440 -#define DEFAULT_TX_PAUSE_TIME_EXTD             0
6441 -#define DEFAULT_NON_BACK_TO_BACK_IPG1          0x40
6442 -#define DEFAULT_NON_BACK_TO_BACK_IPG2          0x60
6443 -#define DEFAULT_MIN_IFG_ENFORCEMENT            0x50
6444 -#define DEFAULT_BACK_TO_BACK_IPG               0x60
6445 -#define DEFAULT_MAXIMUM_FRAME                  0x600
6446 -
6447 -/* register related defines (bits, field offsets..) */
6448 -#define DTSEC_ID2_INT_REDUCED_OFF      0x00010000
6449 -
6450 -#define DTSEC_ECNTRL_GMIIM             0x00000040
6451 -#define DTSEC_ECNTRL_TBIM              0x00000020
6452 -#define DTSEC_ECNTRL_SGMIIM            0x00000002
6453 -#define DTSEC_ECNTRL_RPM               0x00000010
6454 -#define DTSEC_ECNTRL_R100M             0x00000008
6455 -#define DTSEC_ECNTRL_QSGMIIM           0x00000001
6456 -
6457 -#define DTSEC_TCTRL_GTS                        0x00000020
6458 -
6459 -#define RCTRL_PAL_MASK                 0x001f0000
6460 -#define RCTRL_PAL_SHIFT                        16
6461 -#define RCTRL_GHTX                     0x00000400
6462 -#define RCTRL_GRS                      0x00000020
6463 -#define RCTRL_MPROM                    0x00000008
6464 -#define RCTRL_RSF                      0x00000004
6465 -#define RCTRL_UPROM                    0x00000001
6466 -
6467 -#define MACCFG1_SOFT_RESET             0x80000000
6468 -#define MACCFG1_RX_FLOW                        0x00000020
6469 -#define MACCFG1_TX_FLOW                        0x00000010
6470 -#define MACCFG1_TX_EN                  0x00000001
6471 -#define MACCFG1_RX_EN                  0x00000004
6472 -
6473 -#define MACCFG2_NIBBLE_MODE            0x00000100
6474 -#define MACCFG2_BYTE_MODE              0x00000200
6475 -#define MACCFG2_PAD_CRC_EN             0x00000004
6476 -#define MACCFG2_FULL_DUPLEX            0x00000001
6477 -#define MACCFG2_PREAMBLE_LENGTH_MASK   0x0000f000
6478 -#define MACCFG2_PREAMBLE_LENGTH_SHIFT  12
6479 -
6480 -#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT    24
6481 -#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT    16
6482 -#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT       8
6483 -
6484 -#define IPGIFG_NON_BACK_TO_BACK_IPG_1  0x7F000000
6485 -#define IPGIFG_NON_BACK_TO_BACK_IPG_2  0x007F0000
6486 -#define IPGIFG_MIN_IFG_ENFORCEMENT     0x0000FF00
6487 -#define IPGIFG_BACK_TO_BACK_IPG        0x0000007F
6488 -
6489 -#define HAFDUP_EXCESS_DEFER                    0x00010000
6490 -#define HAFDUP_COLLISION_WINDOW                0x000003ff
6491 -#define HAFDUP_RETRANSMISSION_MAX_SHIFT        12
6492 -#define HAFDUP_RETRANSMISSION_MAX              0x0000f000
6493 -
6494 -#define NUM_OF_HASH_REGS       8       /* Number of hash table registers */
6495 -
6496 -#define PTV_PTE_MASK           0xffff0000
6497 -#define PTV_PT_MASK            0x0000ffff
6498 -#define PTV_PTE_SHIFT          16
6499 -
6500 -#define MAX_PACKET_ALIGNMENT           31
6501 -#define MAX_INTER_PACKET_GAP           0x7f
6502 -#define MAX_RETRANSMISSION             0x0f
6503 -#define MAX_COLLISION_WINDOW           0x03ff
6504 -
6505 -/* Hash table size (32 bits*8 regs) */
6506 -#define DTSEC_HASH_TABLE_SIZE          256
6507 -/* Extended Hash table size (32 bits*16 regs) */
6508 -#define EXTENDED_HASH_TABLE_SIZE       512
6509 -
6510 -/* dTSEC Memory Map registers */
6511 -struct dtsec_regs {
6512 -       /* dTSEC General Control and Status Registers */
6513 -       u32 tsec_id;            /* 0x000 ETSEC_ID register */
6514 -       u32 tsec_id2;           /* 0x004 ETSEC_ID2 register */
6515 -       u32 ievent;             /* 0x008 Interrupt event register */
6516 -       u32 imask;              /* 0x00C Interrupt mask register */
6517 -       u32 reserved0010[1];
6518 -       u32 ecntrl;             /* 0x014 E control register */
6519 -       u32 ptv;                /* 0x018 Pause time value register */
6520 -       u32 tbipa;              /* 0x01C TBI PHY address register */
6521 -       u32 tmr_ctrl;           /* 0x020 Time-stamp Control register */
6522 -       u32 tmr_pevent;         /* 0x024 Time-stamp event register */
6523 -       u32 tmr_pemask;         /* 0x028 Timer event mask register */
6524 -       u32 reserved002c[5];
6525 -       u32 tctrl;              /* 0x040 Transmit control register */
6526 -       u32 reserved0044[3];
6527 -       u32 rctrl;              /* 0x050 Receive control register */
6528 -       u32 reserved0054[11];
6529 -       u32 igaddr[8];          /* 0x080-0x09C Individual/group address */
6530 -       u32 gaddr[8];           /* 0x0A0-0x0BC Group address registers 0-7 */
6531 -       u32 reserved00c0[16];
6532 -       u32 maccfg1;            /* 0x100 MAC configuration #1 */
6533 -       u32 maccfg2;            /* 0x104 MAC configuration #2 */
6534 -       u32 ipgifg;             /* 0x108 IPG/IFG */
6535 -       u32 hafdup;             /* 0x10C Half-duplex */
6536 -       u32 maxfrm;             /* 0x110 Maximum frame */
6537 -       u32 reserved0114[10];
6538 -       u32 ifstat;             /* 0x13C Interface status */
6539 -       u32 macstnaddr1;        /* 0x140 Station Address,part 1 */
6540 -       u32 macstnaddr2;        /* 0x144 Station Address,part 2 */
6541 -       struct {
6542 -               u32 exact_match1;       /* octets 1-4 */
6543 -               u32 exact_match2;       /* octets 5-6 */
6544 -       } macaddr[15];          /* 0x148-0x1BC mac exact match addresses 1-15 */
6545 -       u32 reserved01c0[16];
6546 -       u32 tr64;       /* 0x200 Tx and Rx 64 byte frame counter */
6547 -       u32 tr127;      /* 0x204 Tx and Rx 65 to 127 byte frame counter */
6548 -       u32 tr255;      /* 0x208 Tx and Rx 128 to 255 byte frame counter */
6549 -       u32 tr511;      /* 0x20C Tx and Rx 256 to 511 byte frame counter */
6550 -       u32 tr1k;       /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
6551 -       u32 trmax;      /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
6552 -       u32 trmgv;
6553 -       /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
6554 -       u32 rbyt;       /* 0x21C receive byte counter */
6555 -       u32 rpkt;       /* 0x220 receive packet counter */
6556 -       u32 rfcs;       /* 0x224 receive FCS error counter */
6557 -       u32 rmca;       /* 0x228 RMCA Rx multicast packet counter */
6558 -       u32 rbca;       /* 0x22C Rx broadcast packet counter */
6559 -       u32 rxcf;       /* 0x230 Rx control frame packet counter */
6560 -       u32 rxpf;       /* 0x234 Rx pause frame packet counter */
6561 -       u32 rxuo;       /* 0x238 Rx unknown OP code counter */
6562 -       u32 raln;       /* 0x23C Rx alignment error counter */
6563 -       u32 rflr;       /* 0x240 Rx frame length error counter */
6564 -       u32 rcde;       /* 0x244 Rx code error counter */
6565 -       u32 rcse;       /* 0x248 Rx carrier sense error counter */
6566 -       u32 rund;       /* 0x24C Rx undersize packet counter */
6567 -       u32 rovr;       /* 0x250 Rx oversize packet counter */
6568 -       u32 rfrg;       /* 0x254 Rx fragments counter */
6569 -       u32 rjbr;       /* 0x258 Rx jabber counter */
6570 -       u32 rdrp;       /* 0x25C Rx drop */
6571 -       u32 tbyt;       /* 0x260 Tx byte counter */
6572 -       u32 tpkt;       /* 0x264 Tx packet counter */
6573 -       u32 tmca;       /* 0x268 Tx multicast packet counter */
6574 -       u32 tbca;       /* 0x26C Tx broadcast packet counter */
6575 -       u32 txpf;       /* 0x270 Tx pause control frame counter */
6576 -       u32 tdfr;       /* 0x274 Tx deferral packet counter */
6577 -       u32 tedf;       /* 0x278 Tx excessive deferral packet counter */
6578 -       u32 tscl;       /* 0x27C Tx single collision packet counter */
6579 -       u32 tmcl;       /* 0x280 Tx multiple collision packet counter */
6580 -       u32 tlcl;       /* 0x284 Tx late collision packet counter */
6581 -       u32 txcl;       /* 0x288 Tx excessive collision packet counter */
6582 -       u32 tncl;       /* 0x28C Tx total collision counter */
6583 -       u32 reserved0290[1];
6584 -       u32 tdrp;       /* 0x294 Tx drop frame counter */
6585 -       u32 tjbr;       /* 0x298 Tx jabber frame counter */
6586 -       u32 tfcs;       /* 0x29C Tx FCS error counter */
6587 -       u32 txcf;       /* 0x2A0 Tx control frame counter */
6588 -       u32 tovr;       /* 0x2A4 Tx oversize frame counter */
6589 -       u32 tund;       /* 0x2A8 Tx undersize frame counter */
6590 -       u32 tfrg;       /* 0x2AC Tx fragments frame counter */
6591 -       u32 car1;       /* 0x2B0 carry register one register* */
6592 -       u32 car2;       /* 0x2B4 carry register two register* */
6593 -       u32 cam1;       /* 0x2B8 carry register one mask register */
6594 -       u32 cam2;       /* 0x2BC carry register two mask register */
6595 -       u32 reserved02c0[848];
6596 -};
6597 -
6598 -/* struct dtsec_cfg - dTSEC configuration
6599 - * Transmit half-duplex flow control, under software control for 10/100-Mbps
6600 - * half-duplex media. If set, back pressure is applied to media by raising
6601 - * carrier.
6602 - * halfdup_retransmit:
6603 - * Number of retransmission attempts following a collision.
6604 - * If this is exceeded dTSEC aborts transmission due to excessive collisions.
6605 - * The standard specifies the attempt limit to be 15.
6606 - * halfdup_coll_window:
6607 - * The number of bytes of the frame during which collisions may occur.
6608 - * The default value of 55 corresponds to the frame byte at the end of the
6609 - * standard 512-bit slot time window. If collisions are detected after this
6610 - * byte, the late collision event is asserted and transmission of current
6611 - * frame is aborted.
6612 - * tx_pad_crc:
6613 - * Pad and append CRC. If set, the MAC pads all ransmitted short frames and
6614 - * appends a CRC to every frame regardless of padding requirement.
6615 - * tx_pause_time:
6616 - * Transmit pause time value. This pause value is used as part of the pause
6617 - * frame to be sent when a transmit pause frame is initiated.
6618 - * If set to 0 this disables transmission of pause frames.
6619 - * preamble_len:
6620 - * Length, in bytes, of the preamble field preceding each Ethernet
6621 - * start-of-frame delimiter byte. The default value of 0x7 should be used in
6622 - * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
6623 - * rx_prepend:
6624 - * Packet alignment padding length. The specified number of bytes (1-31)
6625 - * of zero padding are inserted before the start of each received frame.
6626 - * For Ethernet, where optional preamble extraction is enabled, the padding
6627 - * appears before the preamble, otherwise the padding precedes the
6628 - * layer 2 header.
6629 - *
6630 - * This structure contains basic dTSEC configuration and must be passed to
6631 - * init() function. A default set of configuration values can be
6632 - * obtained by calling set_dflts().
6633 - */
6634 -struct dtsec_cfg {
6635 -       u16 halfdup_retransmit;
6636 -       u16 halfdup_coll_window;
6637 -       bool tx_pad_crc;
6638 -       u16 tx_pause_time;
6639 -       bool ptp_tsu_en;
6640 -       bool ptp_exception_en;
6641 -       u32 preamble_len;
6642 -       u32 rx_prepend;
6643 -       u16 tx_pause_time_extd;
6644 -       u16 maximum_frame;
6645 -       u32 non_back_to_back_ipg1;
6646 -       u32 non_back_to_back_ipg2;
6647 -       u32 min_ifg_enforcement;
6648 -       u32 back_to_back_ipg;
6649 -};
6650 -
6651 -struct fman_mac {
6652 -       /* pointer to dTSEC memory mapped registers */
6653 -       struct dtsec_regs __iomem *regs;
6654 -       /* MAC address of device */
6655 -       u64 addr;
6656 -       /* Ethernet physical interface */
6657 -       phy_interface_t phy_if;
6658 -       u16 max_speed;
6659 -       void *dev_id; /* device cookie used by the exception cbs */
6660 -       fman_mac_exception_cb *exception_cb;
6661 -       fman_mac_exception_cb *event_cb;
6662 -       /* Number of individual addresses in registers for this station */
6663 -       u8 num_of_ind_addr_in_regs;
6664 -       /* pointer to driver's global address hash table */
6665 -       struct eth_hash_t *multicast_addr_hash;
6666 -       /* pointer to driver's individual address hash table */
6667 -       struct eth_hash_t *unicast_addr_hash;
6668 -       u8 mac_id;
6669 -       u32 exceptions;
6670 -       bool ptp_tsu_enabled;
6671 -       bool en_tsu_err_exeption;
6672 -       struct dtsec_cfg *dtsec_drv_param;
6673 -       void *fm;
6674 -       struct fman_rev_info fm_rev_info;
6675 -       bool basex_if;
6676 -       struct phy_device *tbiphy;
6677 -};
6678 -
6679 -static void set_dflts(struct dtsec_cfg *cfg)
6680 -{
6681 -       cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
6682 -       cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
6683 -       cfg->tx_pad_crc = true;
6684 -       cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
6685 -       /* PHY address 0 is reserved (DPAA RM) */
6686 -       cfg->rx_prepend = DEFAULT_RX_PREPEND;
6687 -       cfg->ptp_tsu_en = true;
6688 -       cfg->ptp_exception_en = true;
6689 -       cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
6690 -       cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
6691 -       cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
6692 -       cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
6693 -       cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
6694 -       cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
6695 -       cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
6696 -}
6697 -
6698 -static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
6699 -               phy_interface_t iface, u16 iface_speed, u8 *macaddr,
6700 -               u32 exception_mask, u8 tbi_addr)
6701 -{
6702 -       bool is_rgmii, is_sgmii, is_qsgmii;
6703 -       int i;
6704 -       u32 tmp;
6705 -
6706 -       /* Soft reset */
6707 -       iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
6708 -       iowrite32be(0, &regs->maccfg1);
6709 -
6710 -       /* dtsec_id2 */
6711 -       tmp = ioread32be(&regs->tsec_id2);
6712 -
6713 -       /* check RGMII support */
6714 -       if (iface == PHY_INTERFACE_MODE_RGMII ||
6715 -           iface == PHY_INTERFACE_MODE_RMII)
6716 -               if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
6717 -                       return -EINVAL;
6718 -
6719 -       if (iface == PHY_INTERFACE_MODE_SGMII ||
6720 -           iface == PHY_INTERFACE_MODE_MII)
6721 -               if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
6722 -                       return -EINVAL;
6723 -
6724 -       is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
6725 -       is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
6726 -       is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
6727 -
6728 -       tmp = 0;
6729 -       if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
6730 -               tmp |= DTSEC_ECNTRL_GMIIM;
6731 -       if (is_sgmii)
6732 -               tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
6733 -       if (is_qsgmii)
6734 -               tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
6735 -                       DTSEC_ECNTRL_QSGMIIM);
6736 -       if (is_rgmii)
6737 -               tmp |= DTSEC_ECNTRL_RPM;
6738 -       if (iface_speed == SPEED_100)
6739 -               tmp |= DTSEC_ECNTRL_R100M;
6740 -
6741 -       iowrite32be(tmp, &regs->ecntrl);
6742 -
6743 -       tmp = 0;
6744 -
6745 -       if (cfg->tx_pause_time)
6746 -               tmp |= cfg->tx_pause_time;
6747 -       if (cfg->tx_pause_time_extd)
6748 -               tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
6749 -       iowrite32be(tmp, &regs->ptv);
6750 -
6751 -       tmp = 0;
6752 -       tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
6753 -       /* Accept short frames */
6754 -       tmp |= RCTRL_RSF;
6755 -
6756 -       iowrite32be(tmp, &regs->rctrl);
6757 -
6758 -       /* Assign a Phy Address to the TBI (TBIPA).
6759 -        * Done also in cases where TBI is not selected to avoid conflict with
6760 -        * the external PHY's Physical address
6761 -        */
6762 -       iowrite32be(tbi_addr, &regs->tbipa);
6763 -
6764 -       iowrite32be(0, &regs->tmr_ctrl);
6765 -
6766 -       if (cfg->ptp_tsu_en) {
6767 -               tmp = 0;
6768 -               tmp |= TMR_PEVENT_TSRE;
6769 -               iowrite32be(tmp, &regs->tmr_pevent);
6770 -
6771 -               if (cfg->ptp_exception_en) {
6772 -                       tmp = 0;
6773 -                       tmp |= TMR_PEMASK_TSREEN;
6774 -                       iowrite32be(tmp, &regs->tmr_pemask);
6775 -               }
6776 -       }
6777 -
6778 -       tmp = 0;
6779 -       tmp |= MACCFG1_RX_FLOW;
6780 -       tmp |= MACCFG1_TX_FLOW;
6781 -       iowrite32be(tmp, &regs->maccfg1);
6782 -
6783 -       tmp = 0;
6784 -
6785 -       if (iface_speed < SPEED_1000)
6786 -               tmp |= MACCFG2_NIBBLE_MODE;
6787 -       else if (iface_speed == SPEED_1000)
6788 -               tmp |= MACCFG2_BYTE_MODE;
6789 -
6790 -       tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
6791 -               MACCFG2_PREAMBLE_LENGTH_MASK;
6792 -       if (cfg->tx_pad_crc)
6793 -               tmp |= MACCFG2_PAD_CRC_EN;
6794 -       /* Full Duplex */
6795 -       tmp |= MACCFG2_FULL_DUPLEX;
6796 -       iowrite32be(tmp, &regs->maccfg2);
6797 -
6798 -       tmp = (((cfg->non_back_to_back_ipg1 <<
6799 -                IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
6800 -               & IPGIFG_NON_BACK_TO_BACK_IPG_1)
6801 -              | ((cfg->non_back_to_back_ipg2 <<
6802 -                  IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
6803 -                & IPGIFG_NON_BACK_TO_BACK_IPG_2)
6804 -              | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
6805 -                & IPGIFG_MIN_IFG_ENFORCEMENT)
6806 -              | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
6807 -       iowrite32be(tmp, &regs->ipgifg);
6808 -
6809 -       tmp = 0;
6810 -       tmp |= HAFDUP_EXCESS_DEFER;
6811 -       tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
6812 -               & HAFDUP_RETRANSMISSION_MAX);
6813 -       tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
6814 -
6815 -       iowrite32be(tmp, &regs->hafdup);
6816 -
6817 -       /* Initialize Maximum frame length */
6818 -       iowrite32be(cfg->maximum_frame, &regs->maxfrm);
6819 -
6820 -       iowrite32be(0xffffffff, &regs->cam1);
6821 -       iowrite32be(0xffffffff, &regs->cam2);
6822 -
6823 -       iowrite32be(exception_mask, &regs->imask);
6824 -
6825 -       iowrite32be(0xffffffff, &regs->ievent);
6826 -
6827 -       tmp = (u32)((macaddr[5] << 24) |
6828 -                   (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
6829 -       iowrite32be(tmp, &regs->macstnaddr1);
6830 -
6831 -       tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
6832 -       iowrite32be(tmp, &regs->macstnaddr2);
6833 -
6834 -       /* HASH */
6835 -       for (i = 0; i < NUM_OF_HASH_REGS; i++) {
6836 -               /* Initialize IADDRx */
6837 -               iowrite32be(0, &regs->igaddr[i]);
6838 -               /* Initialize GADDRx */
6839 -               iowrite32be(0, &regs->gaddr[i]);
6840 -       }
6841 -
6842 -       return 0;
6843 -}
6844 -
6845 -static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
6846 -{
6847 -       u32 tmp;
6848 -
6849 -       tmp = (u32)((adr[5] << 24) |
6850 -                   (adr[4] << 16) | (adr[3] << 8) | adr[2]);
6851 -       iowrite32be(tmp, &regs->macstnaddr1);
6852 -
6853 -       tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
6854 -       iowrite32be(tmp, &regs->macstnaddr2);
6855 -}
6856 -
6857 -static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
6858 -                      bool enable)
6859 -{
6860 -       int reg_idx = (bucket >> 5) & 0xf;
6861 -       int bit_idx = bucket & 0x1f;
6862 -       u32 bit_mask = 0x80000000 >> bit_idx;
6863 -       u32 __iomem *reg;
6864 -
6865 -       if (reg_idx > 7)
6866 -               reg = &regs->gaddr[reg_idx - 8];
6867 -       else
6868 -               reg = &regs->igaddr[reg_idx];
6869 -
6870 -       if (enable)
6871 -               iowrite32be(ioread32be(reg) | bit_mask, reg);
6872 -       else
6873 -               iowrite32be(ioread32be(reg) & (~bit_mask), reg);
6874 -}
6875 -
6876 -static int check_init_parameters(struct fman_mac *dtsec)
6877 -{
6878 -       if (dtsec->max_speed >= SPEED_10000) {
6879 -               pr_err("1G MAC driver supports 1G or lower speeds\n");
6880 -               return -EINVAL;
6881 -       }
6882 -       if (dtsec->addr == 0) {
6883 -               pr_err("Ethernet MAC Must have a valid MAC Address\n");
6884 -               return -EINVAL;
6885 -       }
6886 -       if ((dtsec->dtsec_drv_param)->rx_prepend >
6887 -           MAX_PACKET_ALIGNMENT) {
6888 -               pr_err("packetAlignmentPadding can't be > than %d\n",
6889 -                      MAX_PACKET_ALIGNMENT);
6890 -               return -EINVAL;
6891 -       }
6892 -       if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
6893 -            MAX_INTER_PACKET_GAP) ||
6894 -           ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
6895 -            MAX_INTER_PACKET_GAP) ||
6896 -            ((dtsec->dtsec_drv_param)->back_to_back_ipg >
6897 -             MAX_INTER_PACKET_GAP)) {
6898 -               pr_err("Inter packet gap can't be greater than %d\n",
6899 -                      MAX_INTER_PACKET_GAP);
6900 -               return -EINVAL;
6901 -       }
6902 -       if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
6903 -           MAX_RETRANSMISSION) {
6904 -               pr_err("maxRetransmission can't be greater than %d\n",
6905 -                      MAX_RETRANSMISSION);
6906 -               return -EINVAL;
6907 -       }
6908 -       if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
6909 -           MAX_COLLISION_WINDOW) {
6910 -               pr_err("collisionWindow can't be greater than %d\n",
6911 -                      MAX_COLLISION_WINDOW);
6912 -               return -EINVAL;
6913 -       /* If Auto negotiation process is disabled, need to set up the PHY
6914 -        * using the MII Management Interface
6915 -        */
6916 -       }
6917 -       if (!dtsec->exception_cb) {
6918 -               pr_err("uninitialized exception_cb\n");
6919 -               return -EINVAL;
6920 -       }
6921 -       if (!dtsec->event_cb) {
6922 -               pr_err("uninitialized event_cb\n");
6923 -               return -EINVAL;
6924 -       }
6925 -
6926 -       return 0;
6927 -}
6928 -
6929 -static int get_exception_flag(enum fman_mac_exceptions exception)
6930 -{
6931 -       u32 bit_mask;
6932 -
6933 -       switch (exception) {
6934 -       case FM_MAC_EX_1G_BAB_RX:
6935 -               bit_mask = DTSEC_IMASK_BREN;
6936 -               break;
6937 -       case FM_MAC_EX_1G_RX_CTL:
6938 -               bit_mask = DTSEC_IMASK_RXCEN;
6939 -               break;
6940 -       case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
6941 -               bit_mask = DTSEC_IMASK_GTSCEN;
6942 -               break;
6943 -       case FM_MAC_EX_1G_BAB_TX:
6944 -               bit_mask = DTSEC_IMASK_BTEN;
6945 -               break;
6946 -       case FM_MAC_EX_1G_TX_CTL:
6947 -               bit_mask = DTSEC_IMASK_TXCEN;
6948 -               break;
6949 -       case FM_MAC_EX_1G_TX_ERR:
6950 -               bit_mask = DTSEC_IMASK_TXEEN;
6951 -               break;
6952 -       case FM_MAC_EX_1G_LATE_COL:
6953 -               bit_mask = DTSEC_IMASK_LCEN;
6954 -               break;
6955 -       case FM_MAC_EX_1G_COL_RET_LMT:
6956 -               bit_mask = DTSEC_IMASK_CRLEN;
6957 -               break;
6958 -       case FM_MAC_EX_1G_TX_FIFO_UNDRN:
6959 -               bit_mask = DTSEC_IMASK_XFUNEN;
6960 -               break;
6961 -       case FM_MAC_EX_1G_MAG_PCKT:
6962 -               bit_mask = DTSEC_IMASK_MAGEN;
6963 -               break;
6964 -       case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
6965 -               bit_mask = DTSEC_IMASK_MMRDEN;
6966 -               break;
6967 -       case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
6968 -               bit_mask = DTSEC_IMASK_MMWREN;
6969 -               break;
6970 -       case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
6971 -               bit_mask = DTSEC_IMASK_GRSCEN;
6972 -               break;
6973 -       case FM_MAC_EX_1G_DATA_ERR:
6974 -               bit_mask = DTSEC_IMASK_TDPEEN;
6975 -               break;
6976 -       case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
6977 -               bit_mask = DTSEC_IMASK_MSROEN;
6978 -               break;
6979 -       default:
6980 -               bit_mask = 0;
6981 -               break;
6982 -       }
6983 -
6984 -       return bit_mask;
6985 -}
6986 -
6987 -static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
6988 -{
6989 -       /* Checks if dTSEC driver parameters were initialized */
6990 -       if (!dtsec_drv_params)
6991 -               return true;
6992 -
6993 -       return false;
6994 -}
6995 -
6996 -static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
6997 -{
6998 -       struct dtsec_regs __iomem *regs = dtsec->regs;
6999 -
7000 -       if (is_init_done(dtsec->dtsec_drv_param))
7001 -               return 0;
7002 -
7003 -       return (u16)ioread32be(&regs->maxfrm);
7004 -}
7005 -
7006 -static void dtsec_isr(void *handle)
7007 -{
7008 -       struct fman_mac *dtsec = (struct fman_mac *)handle;
7009 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7010 -       u32 event;
7011 -
7012 -       /* do not handle MDIO events */
7013 -       event = ioread32be(&regs->ievent) &
7014 -               (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
7015 -
7016 -       event &= ioread32be(&regs->imask);
7017 -
7018 -       iowrite32be(event, &regs->ievent);
7019 -
7020 -       if (event & DTSEC_IMASK_BREN)
7021 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
7022 -       if (event & DTSEC_IMASK_RXCEN)
7023 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
7024 -       if (event & DTSEC_IMASK_GTSCEN)
7025 -               dtsec->exception_cb(dtsec->dev_id,
7026 -                                   FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
7027 -       if (event & DTSEC_IMASK_BTEN)
7028 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
7029 -       if (event & DTSEC_IMASK_TXCEN)
7030 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
7031 -       if (event & DTSEC_IMASK_TXEEN)
7032 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
7033 -       if (event & DTSEC_IMASK_LCEN)
7034 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
7035 -       if (event & DTSEC_IMASK_CRLEN)
7036 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
7037 -       if (event & DTSEC_IMASK_XFUNEN) {
7038 -               /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
7039 -               if (dtsec->fm_rev_info.major == 2) {
7040 -                       u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
7041 -                       /* a. Write 0x00E0_0C00 to DTSEC_ID
7042 -                        *      This is a read only register
7043 -                        * b. Read and save the value of TPKT
7044 -                        */
7045 -                       tpkt1 = ioread32be(&regs->tpkt);
7046 -
7047 -                       /* c. Read the register at dTSEC address offset 0x32C */
7048 -                       tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
7049 -
7050 -                       /* d. Compare bits [9:15] to bits [25:31] of the
7051 -                        * register at address offset 0x32C.
7052 -                        */
7053 -                       if ((tmp_reg1 & 0x007F0000) !=
7054 -                               (tmp_reg1 & 0x0000007F)) {
7055 -                               /* If they are not equal, save the value of
7056 -                                * this register and wait for at least
7057 -                                * MAXFRM*16 ns
7058 -                                */
7059 -                               usleep_range((u32)(min
7060 -                                       (dtsec_get_max_frame_length(dtsec) *
7061 -                                       16 / 1000, 1)), (u32)
7062 -                                       (min(dtsec_get_max_frame_length
7063 -                                       (dtsec) * 16 / 1000, 1) + 1));
7064 -                       }
7065 -
7066 -                       /* e. Read and save TPKT again and read the register
7067 -                        * at dTSEC address offset 0x32C again
7068 -                        */
7069 -                       tpkt2 = ioread32be(&regs->tpkt);
7070 -                       tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
7071 -
7072 -                       /* f. Compare the value of TPKT saved in step b to
7073 -                        * value read in step e. Also compare bits [9:15] of
7074 -                        * the register at offset 0x32C saved in step d to the
7075 -                        * value of bits [9:15] saved in step e. If the two
7076 -                        * registers values are unchanged, then the transmit
7077 -                        * portion of the dTSEC controller is locked up and
7078 -                        * the user should proceed to the recover sequence.
7079 -                        */
7080 -                       if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
7081 -                               (tmp_reg2 & 0x007F0000))) {
7082 -                               /* recover sequence */
7083 -
7084 -                               /* a.Write a 1 to RCTRL[GRS] */
7085 -
7086 -                               iowrite32be(ioread32be(&regs->rctrl) |
7087 -                                           RCTRL_GRS, &regs->rctrl);
7088 -
7089 -                               /* b.Wait until IEVENT[GRSC]=1, or at least
7090 -                                * 100 us has elapsed.
7091 -                                */
7092 -                               for (i = 0; i < 100; i++) {
7093 -                                       if (ioread32be(&regs->ievent) &
7094 -                                           DTSEC_IMASK_GRSCEN)
7095 -                                               break;
7096 -                                       udelay(1);
7097 -                               }
7098 -                               if (ioread32be(&regs->ievent) &
7099 -                                   DTSEC_IMASK_GRSCEN)
7100 -                                       iowrite32be(DTSEC_IMASK_GRSCEN,
7101 -                                                   &regs->ievent);
7102 -                               else
7103 -                                       pr_debug("Rx lockup due to Tx lockup\n");
7104 -
7105 -                               /* c.Write a 1 to bit n of FM_RSTC
7106 -                                * (offset 0x0CC of FPM)
7107 -                                */
7108 -                               fman_reset_mac(dtsec->fm, dtsec->mac_id);
7109 -
7110 -                               /* d.Wait 4 Tx clocks (32 ns) */
7111 -                               udelay(1);
7112 -
7113 -                               /* e.Write a 0 to bit n of FM_RSTC. */
7114 -                               /* cleared by FMAN
7115 -                                */
7116 -                       }
7117 -               }
7118 -
7119 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
7120 -       }
7121 -       if (event & DTSEC_IMASK_MAGEN)
7122 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
7123 -       if (event & DTSEC_IMASK_GRSCEN)
7124 -               dtsec->exception_cb(dtsec->dev_id,
7125 -                                   FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
7126 -       if (event & DTSEC_IMASK_TDPEEN)
7127 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
7128 -       if (event & DTSEC_IMASK_RDPEEN)
7129 -               dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
7130 -
7131 -       /* masked interrupts */
7132 -       WARN_ON(event & DTSEC_IMASK_ABRTEN);
7133 -       WARN_ON(event & DTSEC_IMASK_IFERREN);
7134 -}
7135 -
7136 -static void dtsec_1588_isr(void *handle)
7137 -{
7138 -       struct fman_mac *dtsec = (struct fman_mac *)handle;
7139 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7140 -       u32 event;
7141 -
7142 -       if (dtsec->ptp_tsu_enabled) {
7143 -               event = ioread32be(&regs->tmr_pevent);
7144 -               event &= ioread32be(&regs->tmr_pemask);
7145 -
7146 -               if (event) {
7147 -                       iowrite32be(event, &regs->tmr_pevent);
7148 -                       WARN_ON(event & TMR_PEVENT_TSRE);
7149 -                       dtsec->exception_cb(dtsec->dev_id,
7150 -                                           FM_MAC_EX_1G_1588_TS_RX_ERR);
7151 -               }
7152 -       }
7153 -}
7154 -
7155 -static void free_init_resources(struct fman_mac *dtsec)
7156 -{
7157 -       fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
7158 -                            FMAN_INTR_TYPE_ERR);
7159 -       fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
7160 -                            FMAN_INTR_TYPE_NORMAL);
7161 -
7162 -       /* release the driver's group hash table */
7163 -       free_hash_table(dtsec->multicast_addr_hash);
7164 -       dtsec->multicast_addr_hash = NULL;
7165 -
7166 -       /* release the driver's individual hash table */
7167 -       free_hash_table(dtsec->unicast_addr_hash);
7168 -       dtsec->unicast_addr_hash = NULL;
7169 -}
7170 -
7171 -int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
7172 -{
7173 -       if (is_init_done(dtsec->dtsec_drv_param))
7174 -               return -EINVAL;
7175 -
7176 -       dtsec->dtsec_drv_param->maximum_frame = new_val;
7177 -
7178 -       return 0;
7179 -}
7180 -
7181 -int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
7182 -{
7183 -       if (is_init_done(dtsec->dtsec_drv_param))
7184 -               return -EINVAL;
7185 -
7186 -       dtsec->dtsec_drv_param->tx_pad_crc = new_val;
7187 -
7188 -       return 0;
7189 -}
7190 -
7191 -int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
7192 -{
7193 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7194 -       u32 tmp;
7195 -
7196 -       if (!is_init_done(dtsec->dtsec_drv_param))
7197 -               return -EINVAL;
7198 -
7199 -       /* Enable */
7200 -       tmp = ioread32be(&regs->maccfg1);
7201 -       if (mode & COMM_MODE_RX)
7202 -               tmp |= MACCFG1_RX_EN;
7203 -       if (mode & COMM_MODE_TX)
7204 -               tmp |= MACCFG1_TX_EN;
7205 -
7206 -       iowrite32be(tmp, &regs->maccfg1);
7207 -
7208 -       /* Graceful start - clear the graceful receive stop bit */
7209 -       if (mode & COMM_MODE_TX)
7210 -               iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS,
7211 -                           &regs->tctrl);
7212 -       if (mode & COMM_MODE_RX)
7213 -               iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS,
7214 -                           &regs->rctrl);
7215 -
7216 -       return 0;
7217 -}
7218 -
7219 -int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
7220 -{
7221 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7222 -       u32 tmp;
7223 -
7224 -       if (!is_init_done(dtsec->dtsec_drv_param))
7225 -               return -EINVAL;
7226 -
7227 -       /* Gracefull stop - Assert the graceful transmit stop bit */
7228 -       if (mode & COMM_MODE_RX) {
7229 -               tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
7230 -               iowrite32be(tmp, &regs->rctrl);
7231 -
7232 -               if (dtsec->fm_rev_info.major == 2)
7233 -                       usleep_range(100, 200);
7234 -               else
7235 -                       udelay(10);
7236 -       }
7237 -
7238 -       if (mode & COMM_MODE_TX) {
7239 -               if (dtsec->fm_rev_info.major == 2)
7240 -                       pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
7241 -               else
7242 -                       pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
7243 -       }
7244 -
7245 -       tmp = ioread32be(&regs->maccfg1);
7246 -       if (mode & COMM_MODE_RX)
7247 -               tmp &= ~MACCFG1_RX_EN;
7248 -       if (mode & COMM_MODE_TX)
7249 -               tmp &= ~MACCFG1_TX_EN;
7250 -
7251 -       iowrite32be(tmp, &regs->maccfg1);
7252 -
7253 -       return 0;
7254 -}
7255 -
7256 -int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
7257 -                             u8 __maybe_unused priority,
7258 -                             u16 pause_time, u16 __maybe_unused thresh_time)
7259 -{
7260 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7261 -       u32 ptv = 0;
7262 -
7263 -       if (!is_init_done(dtsec->dtsec_drv_param))
7264 -               return -EINVAL;
7265 -
7266 -       if (pause_time) {
7267 -               /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
7268 -               if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
7269 -                       pr_warn("pause-time: %d illegal.Should be > 320\n",
7270 -                               pause_time);
7271 -                       return -EINVAL;
7272 -               }
7273 -
7274 -               ptv = ioread32be(&regs->ptv);
7275 -               ptv &= PTV_PTE_MASK;
7276 -               ptv |= pause_time & PTV_PT_MASK;
7277 -               iowrite32be(ptv, &regs->ptv);
7278 -
7279 -               /* trigger the transmission of a flow-control pause frame */
7280 -               iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
7281 -                           &regs->maccfg1);
7282 -       } else
7283 -               iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
7284 -                           &regs->maccfg1);
7285 -
7286 -       return 0;
7287 -}
7288 -
7289 -int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
7290 -{
7291 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7292 -       u32 tmp;
7293 -
7294 -       if (!is_init_done(dtsec->dtsec_drv_param))
7295 -               return -EINVAL;
7296 -
7297 -       tmp = ioread32be(&regs->maccfg1);
7298 -       if (en)
7299 -               tmp |= MACCFG1_RX_FLOW;
7300 -       else
7301 -               tmp &= ~MACCFG1_RX_FLOW;
7302 -       iowrite32be(tmp, &regs->maccfg1);
7303 -
7304 -       return 0;
7305 -}
7306 -
7307 -int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
7308 -{
7309 -       if (!is_init_done(dtsec->dtsec_drv_param))
7310 -               return -EINVAL;
7311 -
7312 -       /* Initialize MAC Station Address registers (1 & 2)
7313 -        * Station address have to be swapped (big endian to little endian
7314 -        */
7315 -       dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
7316 -       set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
7317 -
7318 -       return 0;
7319 -}
7320 -
7321 -int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
7322 -{
7323 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7324 -       struct eth_hash_entry *hash_entry;
7325 -       u64 addr;
7326 -       s32 bucket;
7327 -       u32 crc = 0xFFFFFFFF;
7328 -       bool mcast, ghtx;
7329 -
7330 -       if (!is_init_done(dtsec->dtsec_drv_param))
7331 -               return -EINVAL;
7332 -
7333 -       addr = ENET_ADDR_TO_UINT64(*eth_addr);
7334 -
7335 -       ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
7336 -       mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
7337 -
7338 -       /* Cannot handle unicast mac addr when GHTX is on */
7339 -       if (ghtx && !mcast) {
7340 -               pr_err("Could not compute hash bucket\n");
7341 -               return -EINVAL;
7342 -       }
7343 -       crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
7344 -       crc = bitrev32(crc);
7345 -
7346 -       /* considering the 9 highest order bits in crc H[8:0]:
7347 -        *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
7348 -        *and H[5:1] (next 5 bits) identify the hash bit
7349 -        *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register
7350 -        *and H[4:0] (next 5 bits) identify the hash bit.
7351 -        *
7352 -        *In bucket index output the low 5 bits identify the hash register
7353 -        *bit, while the higher 4 bits identify the hash register
7354 -        */
7355 -
7356 -       if (ghtx) {
7357 -               bucket = (s32)((crc >> 23) & 0x1ff);
7358 -       } else {
7359 -               bucket = (s32)((crc >> 24) & 0xff);
7360 -               /* if !ghtx and mcast the bit must be set in gaddr instead of
7361 -                *igaddr.
7362 -                */
7363 -               if (mcast)
7364 -                       bucket += 0x100;
7365 -       }
7366 -
7367 -       set_bucket(dtsec->regs, bucket, true);
7368 -
7369 -       /* Create element to be added to the driver hash table */
7370 -       hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
7371 -       if (!hash_entry)
7372 -               return -ENOMEM;
7373 -       hash_entry->addr = addr;
7374 -       INIT_LIST_HEAD(&hash_entry->node);
7375 -
7376 -       if (addr & MAC_GROUP_ADDRESS)
7377 -               /* Group Address */
7378 -               list_add_tail(&hash_entry->node,
7379 -                             &dtsec->multicast_addr_hash->lsts[bucket]);
7380 -       else
7381 -               list_add_tail(&hash_entry->node,
7382 -                             &dtsec->unicast_addr_hash->lsts[bucket]);
7383 -
7384 -       return 0;
7385 -}
7386 -
7387 -int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
7388 -{
7389 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7390 -       struct list_head *pos;
7391 -       struct eth_hash_entry *hash_entry = NULL;
7392 -       u64 addr;
7393 -       s32 bucket;
7394 -       u32 crc = 0xFFFFFFFF;
7395 -       bool mcast, ghtx;
7396 -
7397 -       if (!is_init_done(dtsec->dtsec_drv_param))
7398 -               return -EINVAL;
7399 -
7400 -       addr = ENET_ADDR_TO_UINT64(*eth_addr);
7401 -
7402 -       ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
7403 -       mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
7404 -
7405 -       /* Cannot handle unicast mac addr when GHTX is on */
7406 -       if (ghtx && !mcast) {
7407 -               pr_err("Could not compute hash bucket\n");
7408 -               return -EINVAL;
7409 -       }
7410 -       crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
7411 -       crc = bitrev32(crc);
7412 -
7413 -       if (ghtx) {
7414 -               bucket = (s32)((crc >> 23) & 0x1ff);
7415 -       } else {
7416 -               bucket = (s32)((crc >> 24) & 0xff);
7417 -               /* if !ghtx and mcast the bit must be set
7418 -                * in gaddr instead of igaddr.
7419 -                */
7420 -               if (mcast)
7421 -                       bucket += 0x100;
7422 -       }
7423 -
7424 -       if (addr & MAC_GROUP_ADDRESS) {
7425 -               /* Group Address */
7426 -               list_for_each(pos,
7427 -                             &dtsec->multicast_addr_hash->lsts[bucket]) {
7428 -                       hash_entry = ETH_HASH_ENTRY_OBJ(pos);
7429 -                       if (hash_entry->addr == addr) {
7430 -                               list_del_init(&hash_entry->node);
7431 -                               kfree(hash_entry);
7432 -                               break;
7433 -                       }
7434 -               }
7435 -               if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
7436 -                       set_bucket(dtsec->regs, bucket, false);
7437 -       } else {
7438 -               /* Individual Address */
7439 -               list_for_each(pos,
7440 -                             &dtsec->unicast_addr_hash->lsts[bucket]) {
7441 -                       hash_entry = ETH_HASH_ENTRY_OBJ(pos);
7442 -                       if (hash_entry->addr == addr) {
7443 -                               list_del_init(&hash_entry->node);
7444 -                               kfree(hash_entry);
7445 -                               break;
7446 -                       }
7447 -               }
7448 -               if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
7449 -                       set_bucket(dtsec->regs, bucket, false);
7450 -       }
7451 -
7452 -       /* address does not exist */
7453 -       WARN_ON(!hash_entry);
7454 -
7455 -       return 0;
7456 -}
7457 -
7458 -int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
7459 -{
7460 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7461 -       u32 tmp;
7462 -
7463 -       if (!is_init_done(dtsec->dtsec_drv_param))
7464 -               return -EINVAL;
7465 -
7466 -       /* Set unicast promiscuous */
7467 -       tmp = ioread32be(&regs->rctrl);
7468 -       if (new_val)
7469 -               tmp |= RCTRL_UPROM;
7470 -       else
7471 -               tmp &= ~RCTRL_UPROM;
7472 -
7473 -       iowrite32be(tmp, &regs->rctrl);
7474 -
7475 -       /* Set multicast promiscuous */
7476 -       tmp = ioread32be(&regs->rctrl);
7477 -       if (new_val)
7478 -               tmp |= RCTRL_MPROM;
7479 -       else
7480 -               tmp &= ~RCTRL_MPROM;
7481 -
7482 -       iowrite32be(tmp, &regs->rctrl);
7483 -
7484 -       return 0;
7485 -}
7486 -
7487 -int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
7488 -{
7489 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7490 -       u32 tmp;
7491 -
7492 -       if (!is_init_done(dtsec->dtsec_drv_param))
7493 -               return -EINVAL;
7494 -
7495 -       tmp = ioread32be(&regs->maccfg2);
7496 -
7497 -       /* Full Duplex */
7498 -       tmp |= MACCFG2_FULL_DUPLEX;
7499 -
7500 -       tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
7501 -       if (speed < SPEED_1000)
7502 -               tmp |= MACCFG2_NIBBLE_MODE;
7503 -       else if (speed == SPEED_1000)
7504 -               tmp |= MACCFG2_BYTE_MODE;
7505 -       iowrite32be(tmp, &regs->maccfg2);
7506 -
7507 -       tmp = ioread32be(&regs->ecntrl);
7508 -       if (speed == SPEED_100)
7509 -               tmp |= DTSEC_ECNTRL_R100M;
7510 -       else
7511 -               tmp &= ~DTSEC_ECNTRL_R100M;
7512 -       iowrite32be(tmp, &regs->ecntrl);
7513 -
7514 -       return 0;
7515 -}
7516 -
7517 -int dtsec_restart_autoneg(struct fman_mac *dtsec)
7518 -{
7519 -       u16 tmp_reg16;
7520 -
7521 -       if (!is_init_done(dtsec->dtsec_drv_param))
7522 -               return -EINVAL;
7523 -
7524 -       tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
7525 -
7526 -       tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
7527 -       tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
7528 -                     BMCR_FULLDPLX | BMCR_SPEED1000);
7529 -
7530 -       phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
7531 -
7532 -       return 0;
7533 -}
7534 -
7535 -int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
7536 -{
7537 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7538 -
7539 -       if (!is_init_done(dtsec->dtsec_drv_param))
7540 -               return -EINVAL;
7541 -
7542 -       *mac_version = ioread32be(&regs->tsec_id);
7543 -
7544 -       return 0;
7545 -}
7546 -
7547 -int dtsec_set_exception(struct fman_mac *dtsec,
7548 -                       enum fman_mac_exceptions exception, bool enable)
7549 -{
7550 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7551 -       u32 bit_mask = 0;
7552 -
7553 -       if (!is_init_done(dtsec->dtsec_drv_param))
7554 -               return -EINVAL;
7555 -
7556 -       if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
7557 -               bit_mask = get_exception_flag(exception);
7558 -               if (bit_mask) {
7559 -                       if (enable)
7560 -                               dtsec->exceptions |= bit_mask;
7561 -                       else
7562 -                               dtsec->exceptions &= ~bit_mask;
7563 -               } else {
7564 -                       pr_err("Undefined exception\n");
7565 -                       return -EINVAL;
7566 -               }
7567 -               if (enable)
7568 -                       iowrite32be(ioread32be(&regs->imask) | bit_mask,
7569 -                                   &regs->imask);
7570 -               else
7571 -                       iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
7572 -                                   &regs->imask);
7573 -       } else {
7574 -               if (!dtsec->ptp_tsu_enabled) {
7575 -                       pr_err("Exception valid for 1588 only\n");
7576 -                       return -EINVAL;
7577 -               }
7578 -               switch (exception) {
7579 -               case FM_MAC_EX_1G_1588_TS_RX_ERR:
7580 -                       if (enable) {
7581 -                               dtsec->en_tsu_err_exeption = true;
7582 -                               iowrite32be(ioread32be(&regs->tmr_pemask) |
7583 -                                           TMR_PEMASK_TSREEN,
7584 -                                           &regs->tmr_pemask);
7585 -                       } else {
7586 -                               dtsec->en_tsu_err_exeption = false;
7587 -                               iowrite32be(ioread32be(&regs->tmr_pemask) &
7588 -                                           ~TMR_PEMASK_TSREEN,
7589 -                                           &regs->tmr_pemask);
7590 -                       }
7591 -                       break;
7592 -               default:
7593 -                       pr_err("Undefined exception\n");
7594 -                       return -EINVAL;
7595 -               }
7596 -       }
7597 -
7598 -       return 0;
7599 -}
7600 -
7601 -int dtsec_init(struct fman_mac *dtsec)
7602 -{
7603 -       struct dtsec_regs __iomem *regs = dtsec->regs;
7604 -       struct dtsec_cfg *dtsec_drv_param;
7605 -       int err;
7606 -       u16 max_frm_ln;
7607 -       enet_addr_t eth_addr;
7608 -
7609 -       if (is_init_done(dtsec->dtsec_drv_param))
7610 -               return -EINVAL;
7611 -
7612 -       if (DEFAULT_RESET_ON_INIT &&
7613 -           (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
7614 -               pr_err("Can't reset MAC!\n");
7615 -               return -EINVAL;
7616 -       }
7617 -
7618 -       err = check_init_parameters(dtsec);
7619 -       if (err)
7620 -               return err;
7621 -
7622 -       dtsec_drv_param = dtsec->dtsec_drv_param;
7623 -
7624 -       MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
7625 -
7626 -       err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
7627 -                  dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
7628 -                  dtsec->tbiphy->mdio.addr);
7629 -       if (err) {
7630 -               free_init_resources(dtsec);
7631 -               pr_err("DTSEC version doesn't support this i/f mode\n");
7632 -               return err;
7633 -       }
7634 -
7635 -       if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
7636 -               u16 tmp_reg16;
7637 -
7638 -               /* Configure the TBI PHY Control Register */
7639 -               tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
7640 -               phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
7641 -
7642 -               tmp_reg16 = TBICON_CLK_SELECT;
7643 -               phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
7644 -
7645 -               tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
7646 -                            BMCR_FULLDPLX | BMCR_SPEED1000);
7647 -               phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
7648 -
7649 -               if (dtsec->basex_if)
7650 -                       tmp_reg16 = TBIANA_1000X;
7651 -               else
7652 -                       tmp_reg16 = TBIANA_SGMII;
7653 -               phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
7654 -
7655 -               tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
7656 -                            BMCR_FULLDPLX | BMCR_SPEED1000);
7657 -
7658 -               phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
7659 -       }
7660 -
7661 -       /* Max Frame Length */
7662 -       max_frm_ln = (u16)ioread32be(&regs->maxfrm);
7663 -       err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
7664 -       if (err) {
7665 -               pr_err("Setting max frame length failed\n");
7666 -               free_init_resources(dtsec);
7667 -               return -EINVAL;
7668 -       }
7669 -
7670 -       dtsec->multicast_addr_hash =
7671 -       alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
7672 -       if (!dtsec->multicast_addr_hash) {
7673 -               free_init_resources(dtsec);
7674 -               pr_err("MC hash table is failed\n");
7675 -               return -ENOMEM;
7676 -       }
7677 -
7678 -       dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
7679 -       if (!dtsec->unicast_addr_hash) {
7680 -               free_init_resources(dtsec);
7681 -               pr_err("UC hash table is failed\n");
7682 -               return -ENOMEM;
7683 -       }
7684 -
7685 -       /* register err intr handler for dtsec to FPM (err) */
7686 -       fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
7687 -                          FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
7688 -       /* register 1588 intr handler for TMR to FPM (normal) */
7689 -       fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
7690 -                          FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
7691 -
7692 -       kfree(dtsec_drv_param);
7693 -       dtsec->dtsec_drv_param = NULL;
7694 -
7695 -       return 0;
7696 -}
7697 -
7698 -int dtsec_free(struct fman_mac *dtsec)
7699 -{
7700 -       free_init_resources(dtsec);
7701 -
7702 -       kfree(dtsec->dtsec_drv_param);
7703 -       dtsec->dtsec_drv_param = NULL;
7704 -       kfree(dtsec);
7705 -
7706 -       return 0;
7707 -}
7708 -
7709 -struct fman_mac *dtsec_config(struct fman_mac_params *params)
7710 -{
7711 -       struct fman_mac *dtsec;
7712 -       struct dtsec_cfg *dtsec_drv_param;
7713 -       void __iomem *base_addr;
7714 -
7715 -       base_addr = params->base_addr;
7716 -
7717 -       /* allocate memory for the UCC GETH data structure. */
7718 -       dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
7719 -       if (!dtsec)
7720 -               return NULL;
7721 -
7722 -       /* allocate memory for the d_tsec driver parameters data structure. */
7723 -       dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
7724 -       if (!dtsec_drv_param)
7725 -               goto err_dtsec;
7726 -
7727 -       /* Plant parameter structure pointer */
7728 -       dtsec->dtsec_drv_param = dtsec_drv_param;
7729 -
7730 -       set_dflts(dtsec_drv_param);
7731 -
7732 -       dtsec->regs = base_addr;
7733 -       dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
7734 -       dtsec->max_speed = params->max_speed;
7735 -       dtsec->phy_if = params->phy_if;
7736 -       dtsec->mac_id = params->mac_id;
7737 -       dtsec->exceptions = (DTSEC_IMASK_BREN   |
7738 -                            DTSEC_IMASK_RXCEN  |
7739 -                            DTSEC_IMASK_BTEN   |
7740 -                            DTSEC_IMASK_TXCEN  |
7741 -                            DTSEC_IMASK_TXEEN  |
7742 -                            DTSEC_IMASK_ABRTEN |
7743 -                            DTSEC_IMASK_LCEN   |
7744 -                            DTSEC_IMASK_CRLEN  |
7745 -                            DTSEC_IMASK_XFUNEN |
7746 -                            DTSEC_IMASK_IFERREN |
7747 -                            DTSEC_IMASK_MAGEN  |
7748 -                            DTSEC_IMASK_TDPEEN |
7749 -                            DTSEC_IMASK_RDPEEN);
7750 -       dtsec->exception_cb = params->exception_cb;
7751 -       dtsec->event_cb = params->event_cb;
7752 -       dtsec->dev_id = params->dev_id;
7753 -       dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
7754 -       dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en;
7755 -
7756 -       dtsec->fm = params->fm;
7757 -       dtsec->basex_if = params->basex_if;
7758 -
7759 -       if (!params->internal_phy_node) {
7760 -               pr_err("TBI PHY node is not available\n");
7761 -               goto err_dtsec_drv_param;
7762 -       }
7763 -
7764 -       dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
7765 -       if (!dtsec->tbiphy) {
7766 -               pr_err("of_phy_find_device (TBI PHY) failed\n");
7767 -               goto err_dtsec_drv_param;
7768 -       }
7769 -
7770 -       put_device(&dtsec->tbiphy->mdio.dev);
7771 -
7772 -       /* Save FMan revision */
7773 -       fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
7774 -
7775 -       return dtsec;
7776 -
7777 -err_dtsec_drv_param:
7778 -       kfree(dtsec_drv_param);
7779 -err_dtsec:
7780 -       kfree(dtsec);
7781 -       return NULL;
7782 -}
7783 diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
7784 deleted file mode 100644
7785 index c4467c0..0000000
7786 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
7787 +++ /dev/null
7788 @@ -1,59 +0,0 @@
7789 -/*
7790 - * Copyright 2008-2015 Freescale Semiconductor Inc.
7791 - *
7792 - * Redistribution and use in source and binary forms, with or without
7793 - * modification, are permitted provided that the following conditions are met:
7794 - *     * Redistributions of source code must retain the above copyright
7795 - *       notice, this list of conditions and the following disclaimer.
7796 - *     * Redistributions in binary form must reproduce the above copyright
7797 - *       notice, this list of conditions and the following disclaimer in the
7798 - *       documentation and/or other materials provided with the distribution.
7799 - *     * Neither the name of Freescale Semiconductor nor the
7800 - *       names of its contributors may be used to endorse or promote products
7801 - *       derived from this software without specific prior written permission.
7802 - *
7803 - *
7804 - * ALTERNATIVELY, this software may be distributed under the terms of the
7805 - * GNU General Public License ("GPL") as published by the Free Software
7806 - * Foundation, either version 2 of that License or (at your option) any
7807 - * later version.
7808 - *
7809 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7810 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7811 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7812 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7813 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7814 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7815 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7816 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7817 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7818 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7819 - */
7820 -
7821 -#ifndef __DTSEC_H
7822 -#define __DTSEC_H
7823 -
7824 -#include "fman_mac.h"
7825 -
7826 -struct fman_mac *dtsec_config(struct fman_mac_params *params);
7827 -int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
7828 -int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
7829 -int dtsec_adjust_link(struct fman_mac *dtsec,
7830 -                     u16 speed);
7831 -int dtsec_restart_autoneg(struct fman_mac *dtsec);
7832 -int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
7833 -int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
7834 -int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
7835 -int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
7836 -int dtsec_init(struct fman_mac *dtsec);
7837 -int dtsec_free(struct fman_mac *dtsec);
7838 -int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
7839 -int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
7840 -                             u16 pause_time, u16 thresh_time);
7841 -int dtsec_set_exception(struct fman_mac *dtsec,
7842 -                       enum fman_mac_exceptions exception, bool enable);
7843 -int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
7844 -int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
7845 -int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
7846 -
7847 -#endif /* __DTSEC_H */
7848 diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
7849 deleted file mode 100644
7850 index dd6d052..0000000
7851 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h
7852 +++ /dev/null
7853 @@ -1,274 +0,0 @@
7854 -/*
7855 - * Copyright 2008-2015 Freescale Semiconductor Inc.
7856 - *
7857 - * Redistribution and use in source and binary forms, with or without
7858 - * modification, are permitted provided that the following conditions are met:
7859 - *     * Redistributions of source code must retain the above copyright
7860 - *       notice, this list of conditions and the following disclaimer.
7861 - *     * Redistributions in binary form must reproduce the above copyright
7862 - *       notice, this list of conditions and the following disclaimer in the
7863 - *       documentation and/or other materials provided with the distribution.
7864 - *     * Neither the name of Freescale Semiconductor nor the
7865 - *       names of its contributors may be used to endorse or promote products
7866 - *       derived from this software without specific prior written permission.
7867 - *
7868 - *
7869 - * ALTERNATIVELY, this software may be distributed under the terms of the
7870 - * GNU General Public License ("GPL") as published by the Free Software
7871 - * Foundation, either version 2 of that License or (at your option) any
7872 - * later version.
7873 - *
7874 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
7875 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
7876 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
7877 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
7878 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
7879 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
7880 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
7881 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7882 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
7883 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7884 - */
7885 -
7886 -/* FM MAC ... */
7887 -#ifndef __FM_MAC_H
7888 -#define __FM_MAC_H
7889 -
7890 -#include "fman.h"
7891 -
7892 -#include <linux/slab.h>
7893 -#include <linux/phy.h>
7894 -#include <linux/if_ether.h>
7895 -
7896 -struct fman_mac;
7897 -
7898 -/* Ethernet Address */
7899 -typedef u8 enet_addr_t[ETH_ALEN];
7900 -
7901 -#define ENET_ADDR_TO_UINT64(_enet_addr)                \
7902 -       (u64)(((u64)(_enet_addr)[0] << 40) |            \
7903 -             ((u64)(_enet_addr)[1] << 32) |            \
7904 -             ((u64)(_enet_addr)[2] << 24) |            \
7905 -             ((u64)(_enet_addr)[3] << 16) |            \
7906 -             ((u64)(_enet_addr)[4] << 8) |             \
7907 -             ((u64)(_enet_addr)[5]))
7908 -
7909 -#define MAKE_ENET_ADDR_FROM_UINT64(_addr64, _enet_addr) \
7910 -       do { \
7911 -               int i; \
7912 -               for (i = 0; i < ETH_ALEN; i++) \
7913 -                       (_enet_addr)[i] = \
7914 -                       (u8)((_addr64) >> ((5 - i) * 8)); \
7915 -       } while (0)
7916 -
7917 -/* defaults */
7918 -#define DEFAULT_RESET_ON_INIT                 false
7919 -
7920 -/* PFC defines */
7921 -#define FSL_FM_PAUSE_TIME_ENABLE       0xf000
7922 -#define FSL_FM_PAUSE_TIME_DISABLE      0
7923 -#define FSL_FM_PAUSE_THRESH_DEFAULT    0
7924 -
7925 -#define FM_MAC_NO_PFC   0xff
7926 -
7927 -/* HASH defines */
7928 -#define ETH_HASH_ENTRY_OBJ(ptr)        \
7929 -       hlist_entry_safe(ptr, struct eth_hash_entry, node)
7930 -
7931 -/* Enumeration (bit flags) of communication modes (Transmit,
7932 - * receive or both).
7933 - */
7934 -enum comm_mode {
7935 -       COMM_MODE_NONE = 0,     /* No transmit/receive communication */
7936 -       COMM_MODE_RX = 1,       /* Only receive communication */
7937 -       COMM_MODE_TX = 2,       /* Only transmit communication */
7938 -       COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
7939 -};
7940 -
7941 -/* FM MAC Exceptions */
7942 -enum fman_mac_exceptions {
7943 -       FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
7944 -       /* 10GEC MDIO scan event interrupt */
7945 -       , FM_MAC_EX_10G_MDIO_CMD_CMPL
7946 -       /* 10GEC MDIO command completion interrupt */
7947 -       , FM_MAC_EX_10G_REM_FAULT
7948 -       /* 10GEC, mEMAC Remote fault interrupt */
7949 -       , FM_MAC_EX_10G_LOC_FAULT
7950 -       /* 10GEC, mEMAC Local fault interrupt */
7951 -       , FM_MAC_EX_10G_TX_ECC_ER
7952 -       /* 10GEC, mEMAC Transmit frame ECC error interrupt */
7953 -       , FM_MAC_EX_10G_TX_FIFO_UNFL
7954 -       /* 10GEC, mEMAC Transmit FIFO underflow interrupt */
7955 -       , FM_MAC_EX_10G_TX_FIFO_OVFL
7956 -       /* 10GEC, mEMAC Transmit FIFO overflow interrupt */
7957 -       , FM_MAC_EX_10G_TX_ER
7958 -       /* 10GEC Transmit frame error interrupt */
7959 -       , FM_MAC_EX_10G_RX_FIFO_OVFL
7960 -       /* 10GEC, mEMAC Receive FIFO overflow interrupt */
7961 -       , FM_MAC_EX_10G_RX_ECC_ER
7962 -       /* 10GEC, mEMAC Receive frame ECC error interrupt */
7963 -       , FM_MAC_EX_10G_RX_JAB_FRM
7964 -       /* 10GEC Receive jabber frame interrupt */
7965 -       , FM_MAC_EX_10G_RX_OVRSZ_FRM
7966 -       /* 10GEC Receive oversized frame interrupt */
7967 -       , FM_MAC_EX_10G_RX_RUNT_FRM
7968 -       /* 10GEC Receive runt frame interrupt */
7969 -       , FM_MAC_EX_10G_RX_FRAG_FRM
7970 -       /* 10GEC Receive fragment frame interrupt */
7971 -       , FM_MAC_EX_10G_RX_LEN_ER
7972 -       /* 10GEC Receive payload length error interrupt */
7973 -       , FM_MAC_EX_10G_RX_CRC_ER
7974 -       /* 10GEC Receive CRC error interrupt */
7975 -       , FM_MAC_EX_10G_RX_ALIGN_ER
7976 -       /* 10GEC Receive alignment error interrupt */
7977 -       , FM_MAC_EX_1G_BAB_RX
7978 -       /* dTSEC Babbling receive error */
7979 -       , FM_MAC_EX_1G_RX_CTL
7980 -       /* dTSEC Receive control (pause frame) interrupt */
7981 -       , FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET
7982 -       /* dTSEC Graceful transmit stop complete */
7983 -       , FM_MAC_EX_1G_BAB_TX
7984 -       /* dTSEC Babbling transmit error */
7985 -       , FM_MAC_EX_1G_TX_CTL
7986 -       /* dTSEC Transmit control (pause frame) interrupt */
7987 -       , FM_MAC_EX_1G_TX_ERR
7988 -       /* dTSEC Transmit error */
7989 -       , FM_MAC_EX_1G_LATE_COL
7990 -       /* dTSEC Late collision */
7991 -       , FM_MAC_EX_1G_COL_RET_LMT
7992 -       /* dTSEC Collision retry limit */
7993 -       , FM_MAC_EX_1G_TX_FIFO_UNDRN
7994 -       /* dTSEC Transmit FIFO underrun */
7995 -       , FM_MAC_EX_1G_MAG_PCKT
7996 -       /* dTSEC Magic Packet detection */
7997 -       , FM_MAC_EX_1G_MII_MNG_RD_COMPLET
7998 -       /* dTSEC MII management read completion */
7999 -       , FM_MAC_EX_1G_MII_MNG_WR_COMPLET
8000 -       /* dTSEC MII management write completion */
8001 -       , FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET
8002 -       /* dTSEC Graceful receive stop complete */
8003 -       , FM_MAC_EX_1G_DATA_ERR
8004 -       /* dTSEC Internal data error on transmit */
8005 -       , FM_MAC_1G_RX_DATA_ERR
8006 -       /* dTSEC Internal data error on receive */
8007 -       , FM_MAC_EX_1G_1588_TS_RX_ERR
8008 -       /* dTSEC Time-Stamp Receive Error */
8009 -       , FM_MAC_EX_1G_RX_MIB_CNT_OVFL
8010 -       /* dTSEC MIB counter overflow */
8011 -       , FM_MAC_EX_TS_FIFO_ECC_ERR
8012 -       /* mEMAC Time-stamp FIFO ECC error interrupt;
8013 -        * not supported on T4240/B4860 rev1 chips
8014 -        */
8015 -       , FM_MAC_EX_MAGIC_PACKET_INDICATION = FM_MAC_EX_1G_MAG_PCKT
8016 -       /* mEMAC Magic Packet Indication Interrupt */
8017 -};
8018 -
8019 -struct eth_hash_entry {
8020 -       u64 addr;               /* Ethernet Address  */
8021 -       struct list_head node;
8022 -};
8023 -
8024 -typedef void (fman_mac_exception_cb)(void *dev_id,
8025 -                                   enum fman_mac_exceptions exceptions);
8026 -
8027 -/* FMan MAC config input */
8028 -struct fman_mac_params {
8029 -       /* Base of memory mapped FM MAC registers */
8030 -       void __iomem *base_addr;
8031 -       /* MAC address of device; First octet is sent first */
8032 -       enet_addr_t addr;
8033 -       /* MAC ID; numbering of dTSEC and 1G-mEMAC:
8034 -        * 0 - FM_MAX_NUM_OF_1G_MACS;
8035 -        * numbering of 10G-MAC (TGEC) and 10G-mEMAC:
8036 -        * 0 - FM_MAX_NUM_OF_10G_MACS
8037 -        */
8038 -       u8 mac_id;
8039 -       /* PHY interface */
8040 -       phy_interface_t  phy_if;
8041 -       /* Note that the speed should indicate the maximum rate that
8042 -        * this MAC should support rather than the actual speed;
8043 -        */
8044 -       u16 max_speed;
8045 -       /* A handle to the FM object this port related to */
8046 -       void *fm;
8047 -       void *dev_id; /* device cookie used by the exception cbs */
8048 -       fman_mac_exception_cb *event_cb;    /* MDIO Events Callback Routine */
8049 -       fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
8050 -       /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
8051 -        * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
8052 -        * to interface between MAC and phy/backplane, SGMII phy can still
8053 -        * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
8054 -       */
8055 -       bool basex_if;
8056 -       /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
8057 -       struct device_node *internal_phy_node;
8058 -};
8059 -
8060 -struct eth_hash_t {
8061 -       u16 size;
8062 -       struct list_head *lsts;
8063 -};
8064 -
8065 -static inline struct eth_hash_entry
8066 -*dequeue_addr_from_hash_entry(struct list_head *addr_lst)
8067 -{
8068 -       struct eth_hash_entry *hash_entry = NULL;
8069 -
8070 -       if (!list_empty(addr_lst)) {
8071 -               hash_entry = ETH_HASH_ENTRY_OBJ(addr_lst->next);
8072 -               list_del_init(&hash_entry->node);
8073 -       }
8074 -       return hash_entry;
8075 -}
8076 -
8077 -static inline void free_hash_table(struct eth_hash_t *hash)
8078 -{
8079 -       struct eth_hash_entry *hash_entry;
8080 -       int i = 0;
8081 -
8082 -       if (hash) {
8083 -               if (hash->lsts) {
8084 -                       for (i = 0; i < hash->size; i++) {
8085 -                               hash_entry =
8086 -                               dequeue_addr_from_hash_entry(&hash->lsts[i]);
8087 -                               while (hash_entry) {
8088 -                                       kfree(hash_entry);
8089 -                                       hash_entry =
8090 -                                       dequeue_addr_from_hash_entry(&hash->
8091 -                                                                    lsts[i]);
8092 -                               }
8093 -                       }
8094 -
8095 -                       kfree(hash->lsts);
8096 -               }
8097 -
8098 -               kfree(hash);
8099 -       }
8100 -}
8101 -
8102 -static inline struct eth_hash_t *alloc_hash_table(u16 size)
8103 -{
8104 -       u32 i;
8105 -       struct eth_hash_t *hash;
8106 -
8107 -       /* Allocate address hash table */
8108 -       hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL);
8109 -       if (!hash)
8110 -               return NULL;
8111 -
8112 -       hash->size = size;
8113 -
8114 -       hash->lsts = kmalloc_array(hash->size, sizeof(struct list_head),
8115 -                                  GFP_KERNEL);
8116 -       if (!hash->lsts) {
8117 -               kfree(hash);
8118 -               return NULL;
8119 -       }
8120 -
8121 -       for (i = 0; i < hash->size; i++)
8122 -               INIT_LIST_HEAD(&hash->lsts[i]);
8123 -
8124 -       return hash;
8125 -}
8126 -
8127 -#endif /* __FM_MAC_H */
8128 diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
8129 deleted file mode 100644
8130 index 71a5ded..0000000
8131 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c
8132 +++ /dev/null
8133 @@ -1,1177 +0,0 @@
8134 -/*
8135 - * Copyright 2008-2015 Freescale Semiconductor Inc.
8136 - *
8137 - * Redistribution and use in source and binary forms, with or without
8138 - * modification, are permitted provided that the following conditions are met:
8139 - *     * Redistributions of source code must retain the above copyright
8140 - *       notice, this list of conditions and the following disclaimer.
8141 - *     * Redistributions in binary form must reproduce the above copyright
8142 - *       notice, this list of conditions and the following disclaimer in the
8143 - *       documentation and/or other materials provided with the distribution.
8144 - *     * Neither the name of Freescale Semiconductor nor the
8145 - *       names of its contributors may be used to endorse or promote products
8146 - *       derived from this software without specific prior written permission.
8147 - *
8148 - *
8149 - * ALTERNATIVELY, this software may be distributed under the terms of the
8150 - * GNU General Public License ("GPL") as published by the Free Software
8151 - * Foundation, either version 2 of that License or (at your option) any
8152 - * later version.
8153 - *
8154 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
8155 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
8156 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
8157 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
8158 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
8159 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
8160 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
8161 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
8162 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
8163 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8164 - */
8165 -
8166 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8167 -
8168 -#include "fman_memac.h"
8169 -#include "fman.h"
8170 -
8171 -#include <linux/slab.h>
8172 -#include <linux/io.h>
8173 -#include <linux/phy.h>
8174 -#include <linux/of_mdio.h>
8175 -
8176 -/* PCS registers */
8177 -#define MDIO_SGMII_CR                  0x00
8178 -#define MDIO_SGMII_DEV_ABIL_SGMII      0x04
8179 -#define MDIO_SGMII_LINK_TMR_L          0x12
8180 -#define MDIO_SGMII_LINK_TMR_H          0x13
8181 -#define MDIO_SGMII_IF_MODE             0x14
8182 -
8183 -/* SGMII Control defines */
8184 -#define SGMII_CR_AN_EN                 0x1000
8185 -#define SGMII_CR_RESTART_AN            0x0200
8186 -#define SGMII_CR_FD                    0x0100
8187 -#define SGMII_CR_SPEED_SEL1_1G         0x0040
8188 -#define SGMII_CR_DEF_VAL               (SGMII_CR_AN_EN | SGMII_CR_FD | \
8189 -                                        SGMII_CR_SPEED_SEL1_1G)
8190 -
8191 -/* SGMII Device Ability for SGMII defines */
8192 -#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
8193 -#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
8194 -
8195 -/* Link timer define */
8196 -#define LINK_TMR_L                     0xa120
8197 -#define LINK_TMR_H                     0x0007
8198 -#define LINK_TMR_L_BASEX               0xaf08
8199 -#define LINK_TMR_H_BASEX               0x002f
8200 -
8201 -/* SGMII IF Mode defines */
8202 -#define IF_MODE_USE_SGMII_AN           0x0002
8203 -#define IF_MODE_SGMII_EN               0x0001
8204 -#define IF_MODE_SGMII_SPEED_100M       0x0004
8205 -#define IF_MODE_SGMII_SPEED_1G         0x0008
8206 -#define IF_MODE_SGMII_DUPLEX_HALF      0x0010
8207 -
8208 -/* Num of additional exact match MAC adr regs */
8209 -#define MEMAC_NUM_OF_PADDRS 7
8210 -
8211 -/* Control and Configuration Register (COMMAND_CONFIG) */
8212 -#define CMD_CFG_REG_LOWP_RXETY 0x01000000 /* 07 Rx low power indication */
8213 -#define CMD_CFG_TX_LOWP_ENA    0x00800000 /* 08 Tx Low Power Idle Enable */
8214 -#define CMD_CFG_PFC_MODE       0x00080000 /* 12 Enable PFC */
8215 -#define CMD_CFG_NO_LEN_CHK     0x00020000 /* 14 Payload length check disable */
8216 -#define CMD_CFG_SW_RESET       0x00001000 /* 19 S/W Reset, self clearing bit */
8217 -#define CMD_CFG_TX_PAD_EN      0x00000800 /* 20 Enable Tx padding of frames */
8218 -#define CMD_CFG_PAUSE_IGNORE   0x00000100 /* 23 Ignore Pause frame quanta */
8219 -#define CMD_CFG_CRC_FWD                0x00000040 /* 25 Terminate/frwd CRC of frames */
8220 -#define CMD_CFG_PAD_EN         0x00000020 /* 26 Frame padding removal */
8221 -#define CMD_CFG_PROMIS_EN      0x00000010 /* 27 Promiscuous operation enable */
8222 -#define CMD_CFG_RX_EN          0x00000002 /* 30 MAC receive path enable */
8223 -#define CMD_CFG_TX_EN          0x00000001 /* 31 MAC transmit path enable */
8224 -
8225 -/* Transmit FIFO Sections Register (TX_FIFO_SECTIONS) */
8226 -#define TX_FIFO_SECTIONS_TX_EMPTY_MASK                 0xFFFF0000
8227 -#define TX_FIFO_SECTIONS_TX_AVAIL_MASK                 0x0000FFFF
8228 -#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G          0x00400000
8229 -#define TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G           0x00100000
8230 -#define TX_FIFO_SECTIONS_TX_AVAIL_10G                  0x00000019
8231 -#define TX_FIFO_SECTIONS_TX_AVAIL_1G                   0x00000020
8232 -#define TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G             0x00000060
8233 -
8234 -#define GET_TX_EMPTY_DEFAULT_VALUE(_val)                               \
8235 -do {                                                                   \
8236 -       _val &= ~TX_FIFO_SECTIONS_TX_EMPTY_MASK;                        \
8237 -       ((_val == TX_FIFO_SECTIONS_TX_AVAIL_10G) ?                      \
8238 -                       (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G) :\
8239 -                       (_val |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G));\
8240 -} while (0)
8241 -
8242 -/* Interface Mode Register (IF_MODE) */
8243 -
8244 -#define IF_MODE_MASK           0x00000003 /* 30-31 Mask on i/f mode bits */
8245 -#define IF_MODE_XGMII          0x00000000 /* 30-31 XGMII (10G) interface */
8246 -#define IF_MODE_GMII           0x00000002 /* 30-31 GMII (1G) interface */
8247 -#define IF_MODE_RGMII          0x00000004
8248 -#define IF_MODE_RGMII_AUTO     0x00008000
8249 -#define IF_MODE_RGMII_1000     0x00004000 /* 10 - 1000Mbps RGMII */
8250 -#define IF_MODE_RGMII_100      0x00000000 /* 00 - 100Mbps RGMII */
8251 -#define IF_MODE_RGMII_10       0x00002000 /* 01 - 10Mbps RGMII */
8252 -#define IF_MODE_RGMII_SP_MASK  0x00006000 /* Setsp mask bits */
8253 -#define IF_MODE_RGMII_FD       0x00001000 /* Full duplex RGMII */
8254 -#define IF_MODE_HD             0x00000040 /* Half duplex operation */
8255 -
8256 -/* Hash table Control Register (HASHTABLE_CTRL) */
8257 -#define HASH_CTRL_MCAST_EN     0x00000100
8258 -/* 26-31 Hash table address code */
8259 -#define HASH_CTRL_ADDR_MASK    0x0000003F
8260 -/* MAC mcast indication */
8261 -#define GROUP_ADDRESS          0x0000010000000000LL
8262 -#define HASH_TABLE_SIZE                64      /* Hash tbl size */
8263 -
8264 -/* Interrupt Mask Register (IMASK) */
8265 -#define MEMAC_IMASK_MGI                0x40000000 /* 1 Magic pkt detect indication */
8266 -#define MEMAC_IMASK_TSECC_ER   0x20000000 /* 2 Timestamp FIFO ECC error evnt */
8267 -#define MEMAC_IMASK_TECC_ER    0x02000000 /* 6 Transmit frame ECC error evnt */
8268 -#define MEMAC_IMASK_RECC_ER    0x01000000 /* 7 Receive frame ECC error evnt */
8269 -
8270 -#define MEMAC_ALL_ERRS_IMASK                                   \
8271 -               ((u32)(MEMAC_IMASK_TSECC_ER     |       \
8272 -                      MEMAC_IMASK_TECC_ER              |       \
8273 -                      MEMAC_IMASK_RECC_ER              |       \
8274 -                      MEMAC_IMASK_MGI))
8275 -
8276 -#define MEMAC_IEVNT_PCS                        0x80000000 /* PCS (XG). Link sync (G) */
8277 -#define MEMAC_IEVNT_AN                 0x40000000 /* Auto-negotiation */
8278 -#define MEMAC_IEVNT_LT                 0x20000000 /* Link Training/New page */
8279 -#define MEMAC_IEVNT_MGI                        0x00004000 /* Magic pkt detection */
8280 -#define MEMAC_IEVNT_TS_ECC_ER          0x00002000 /* Timestamp FIFO ECC error*/
8281 -#define MEMAC_IEVNT_RX_FIFO_OVFL       0x00001000 /* Rx FIFO overflow */
8282 -#define MEMAC_IEVNT_TX_FIFO_UNFL       0x00000800 /* Tx FIFO underflow */
8283 -#define MEMAC_IEVNT_TX_FIFO_OVFL       0x00000400 /* Tx FIFO overflow */
8284 -#define MEMAC_IEVNT_TX_ECC_ER          0x00000200 /* Tx frame ECC error */
8285 -#define MEMAC_IEVNT_RX_ECC_ER          0x00000100 /* Rx frame ECC error */
8286 -#define MEMAC_IEVNT_LI_FAULT           0x00000080 /* Link Interruption flt */
8287 -#define MEMAC_IEVNT_RX_EMPTY           0x00000040 /* Rx FIFO empty */
8288 -#define MEMAC_IEVNT_TX_EMPTY           0x00000020 /* Tx FIFO empty */
8289 -#define MEMAC_IEVNT_RX_LOWP            0x00000010 /* Low Power Idle */
8290 -#define MEMAC_IEVNT_PHY_LOS            0x00000004 /* Phy loss of signal */
8291 -#define MEMAC_IEVNT_REM_FAULT          0x00000002 /* Remote fault (XGMII) */
8292 -#define MEMAC_IEVNT_LOC_FAULT          0x00000001 /* Local fault (XGMII) */
8293 -
8294 -#define DEFAULT_PAUSE_QUANTA   0xf000
8295 -#define DEFAULT_FRAME_LENGTH   0x600
8296 -#define DEFAULT_TX_IPG_LENGTH  12
8297 -
8298 -#define CLXY_PAUSE_QUANTA_CLX_PQNT     0x0000FFFF
8299 -#define CLXY_PAUSE_QUANTA_CLY_PQNT     0xFFFF0000
8300 -#define CLXY_PAUSE_THRESH_CLX_QTH      0x0000FFFF
8301 -#define CLXY_PAUSE_THRESH_CLY_QTH      0xFFFF0000
8302 -
8303 -struct mac_addr {
8304 -       /* Lower 32 bits of 48-bit MAC address */
8305 -       u32 mac_addr_l;
8306 -       /* Upper 16 bits of 48-bit MAC address */
8307 -       u32 mac_addr_u;
8308 -};
8309 -
8310 -/* memory map */
8311 -struct memac_regs {
8312 -       u32 res0000[2];                 /* General Control and Status */
8313 -       u32 command_config;             /* 0x008 Ctrl and cfg */
8314 -       struct mac_addr mac_addr0;      /* 0x00C-0x010 MAC_ADDR_0...1 */
8315 -       u32 maxfrm;                     /* 0x014 Max frame length */
8316 -       u32 res0018[1];
8317 -       u32 rx_fifo_sections;           /* Receive FIFO configuration reg */
8318 -       u32 tx_fifo_sections;           /* Transmit FIFO configuration reg */
8319 -       u32 res0024[2];
8320 -       u32 hashtable_ctrl;             /* 0x02C Hash table control */
8321 -       u32 res0030[4];
8322 -       u32 ievent;                     /* 0x040 Interrupt event */
8323 -       u32 tx_ipg_length;              /* 0x044 Transmitter inter-packet-gap */
8324 -       u32 res0048;
8325 -       u32 imask;                      /* 0x04C Interrupt mask */
8326 -       u32 res0050;
8327 -       u32 pause_quanta[4];            /* 0x054 Pause quanta */
8328 -       u32 pause_thresh[4];            /* 0x064 Pause quanta threshold */
8329 -       u32 rx_pause_status;            /* 0x074 Receive pause status */
8330 -       u32 res0078[2];
8331 -       struct mac_addr mac_addr[MEMAC_NUM_OF_PADDRS];/* 0x80-0x0B4 mac padr */
8332 -       u32 lpwake_timer;               /* 0x0B8 Low Power Wakeup Timer */
8333 -       u32 sleep_timer;                /* 0x0BC Transmit EEE Low Power Timer */
8334 -       u32 res00c0[8];
8335 -       u32 statn_config;               /* 0x0E0 Statistics configuration */
8336 -       u32 res00e4[7];
8337 -       /* Rx Statistics Counter */
8338 -       u32 reoct_l;
8339 -       u32 reoct_u;
8340 -       u32 roct_l;
8341 -       u32 roct_u;
8342 -       u32 raln_l;
8343 -       u32 raln_u;
8344 -       u32 rxpf_l;
8345 -       u32 rxpf_u;
8346 -       u32 rfrm_l;
8347 -       u32 rfrm_u;
8348 -       u32 rfcs_l;
8349 -       u32 rfcs_u;
8350 -       u32 rvlan_l;
8351 -       u32 rvlan_u;
8352 -       u32 rerr_l;
8353 -       u32 rerr_u;
8354 -       u32 ruca_l;
8355 -       u32 ruca_u;
8356 -       u32 rmca_l;
8357 -       u32 rmca_u;
8358 -       u32 rbca_l;
8359 -       u32 rbca_u;
8360 -       u32 rdrp_l;
8361 -       u32 rdrp_u;
8362 -       u32 rpkt_l;
8363 -       u32 rpkt_u;
8364 -       u32 rund_l;
8365 -       u32 rund_u;
8366 -       u32 r64_l;
8367 -       u32 r64_u;
8368 -       u32 r127_l;
8369 -       u32 r127_u;
8370 -       u32 r255_l;
8371 -       u32 r255_u;
8372 -       u32 r511_l;
8373 -       u32 r511_u;
8374 -       u32 r1023_l;
8375 -       u32 r1023_u;
8376 -       u32 r1518_l;
8377 -       u32 r1518_u;
8378 -       u32 r1519x_l;
8379 -       u32 r1519x_u;
8380 -       u32 rovr_l;
8381 -       u32 rovr_u;
8382 -       u32 rjbr_l;
8383 -       u32 rjbr_u;
8384 -       u32 rfrg_l;
8385 -       u32 rfrg_u;
8386 -       u32 rcnp_l;
8387 -       u32 rcnp_u;
8388 -       u32 rdrntp_l;
8389 -       u32 rdrntp_u;
8390 -       u32 res01d0[12];
8391 -       /* Tx Statistics Counter */
8392 -       u32 teoct_l;
8393 -       u32 teoct_u;
8394 -       u32 toct_l;
8395 -       u32 toct_u;
8396 -       u32 res0210[2];
8397 -       u32 txpf_l;
8398 -       u32 txpf_u;
8399 -       u32 tfrm_l;
8400 -       u32 tfrm_u;
8401 -       u32 tfcs_l;
8402 -       u32 tfcs_u;
8403 -       u32 tvlan_l;
8404 -       u32 tvlan_u;
8405 -       u32 terr_l;
8406 -       u32 terr_u;
8407 -       u32 tuca_l;
8408 -       u32 tuca_u;
8409 -       u32 tmca_l;
8410 -       u32 tmca_u;
8411 -       u32 tbca_l;
8412 -       u32 tbca_u;
8413 -       u32 res0258[2];
8414 -       u32 tpkt_l;
8415 -       u32 tpkt_u;
8416 -       u32 tund_l;
8417 -       u32 tund_u;
8418 -       u32 t64_l;
8419 -       u32 t64_u;
8420 -       u32 t127_l;
8421 -       u32 t127_u;
8422 -       u32 t255_l;
8423 -       u32 t255_u;
8424 -       u32 t511_l;
8425 -       u32 t511_u;
8426 -       u32 t1023_l;
8427 -       u32 t1023_u;
8428 -       u32 t1518_l;
8429 -       u32 t1518_u;
8430 -       u32 t1519x_l;
8431 -       u32 t1519x_u;
8432 -       u32 res02a8[6];
8433 -       u32 tcnp_l;
8434 -       u32 tcnp_u;
8435 -       u32 res02c8[14];
8436 -       /* Line Interface Control */
8437 -       u32 if_mode;            /* 0x300 Interface Mode Control */
8438 -       u32 if_status;          /* 0x304 Interface Status */
8439 -       u32 res0308[14];
8440 -       /* HiGig/2 */
8441 -       u32 hg_config;          /* 0x340 Control and cfg */
8442 -       u32 res0344[3];
8443 -       u32 hg_pause_quanta;    /* 0x350 Pause quanta */
8444 -       u32 res0354[3];
8445 -       u32 hg_pause_thresh;    /* 0x360 Pause quanta threshold */
8446 -       u32 res0364[3];
8447 -       u32 hgrx_pause_status;  /* 0x370 Receive pause status */
8448 -       u32 hg_fifos_status;    /* 0x374 fifos status */
8449 -       u32 rhm;                /* 0x378 rx messages counter */
8450 -       u32 thm;                /* 0x37C tx messages counter */
8451 -};
8452 -
8453 -struct memac_cfg {
8454 -       bool reset_on_init;
8455 -       bool pause_ignore;
8456 -       bool promiscuous_mode_enable;
8457 -       struct fixed_phy_status *fixed_link;
8458 -       u16 max_frame_length;
8459 -       u16 pause_quanta;
8460 -       u32 tx_ipg_length;
8461 -};
8462 -
8463 -struct fman_mac {
8464 -       /* Pointer to MAC memory mapped registers */
8465 -       struct memac_regs __iomem *regs;
8466 -       /* MAC address of device */
8467 -       u64 addr;
8468 -       /* Ethernet physical interface */
8469 -       phy_interface_t phy_if;
8470 -       u16 max_speed;
8471 -       void *dev_id; /* device cookie used by the exception cbs */
8472 -       fman_mac_exception_cb *exception_cb;
8473 -       fman_mac_exception_cb *event_cb;
8474 -       /* Pointer to driver's global address hash table  */
8475 -       struct eth_hash_t *multicast_addr_hash;
8476 -       /* Pointer to driver's individual address hash table  */
8477 -       struct eth_hash_t *unicast_addr_hash;
8478 -       u8 mac_id;
8479 -       u32 exceptions;
8480 -       struct memac_cfg *memac_drv_param;
8481 -       void *fm;
8482 -       struct fman_rev_info fm_rev_info;
8483 -       bool basex_if;
8484 -       struct phy_device *pcsphy;
8485 -};
8486 -
8487 -static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
8488 -                             u8 paddr_num)
8489 -{
8490 -       u32 tmp0, tmp1;
8491 -
8492 -       tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
8493 -       tmp1 = (u32)(adr[4] | adr[5] << 8);
8494 -
8495 -       if (paddr_num == 0) {
8496 -               iowrite32be(tmp0, &regs->mac_addr0.mac_addr_l);
8497 -               iowrite32be(tmp1, &regs->mac_addr0.mac_addr_u);
8498 -       } else {
8499 -               iowrite32be(tmp0, &regs->mac_addr[paddr_num - 1].mac_addr_l);
8500 -               iowrite32be(tmp1, &regs->mac_addr[paddr_num - 1].mac_addr_u);
8501 -       }
8502 -}
8503 -
8504 -static int reset(struct memac_regs __iomem *regs)
8505 -{
8506 -       u32 tmp;
8507 -       int count;
8508 -
8509 -       tmp = ioread32be(&regs->command_config);
8510 -
8511 -       tmp |= CMD_CFG_SW_RESET;
8512 -
8513 -       iowrite32be(tmp, &regs->command_config);
8514 -
8515 -       count = 100;
8516 -       do {
8517 -               udelay(1);
8518 -       } while ((ioread32be(&regs->command_config) & CMD_CFG_SW_RESET) &&
8519 -                --count);
8520 -
8521 -       if (count == 0)
8522 -               return -EBUSY;
8523 -
8524 -       return 0;
8525 -}
8526 -
8527 -static void set_exception(struct memac_regs __iomem *regs, u32 val,
8528 -                         bool enable)
8529 -{
8530 -       u32 tmp;
8531 -
8532 -       tmp = ioread32be(&regs->imask);
8533 -       if (enable)
8534 -               tmp |= val;
8535 -       else
8536 -               tmp &= ~val;
8537 -
8538 -       iowrite32be(tmp, &regs->imask);
8539 -}
8540 -
8541 -static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
8542 -               phy_interface_t phy_if, u16 speed, bool slow_10g_if,
8543 -               u32 exceptions)
8544 -{
8545 -       u32 tmp;
8546 -
8547 -       /* Config */
8548 -       tmp = 0;
8549 -       if (cfg->promiscuous_mode_enable)
8550 -               tmp |= CMD_CFG_PROMIS_EN;
8551 -       if (cfg->pause_ignore)
8552 -               tmp |= CMD_CFG_PAUSE_IGNORE;
8553 -
8554 -       /* Payload length check disable */
8555 -       tmp |= CMD_CFG_NO_LEN_CHK;
8556 -       /* Enable padding of frames in transmit direction */
8557 -       tmp |= CMD_CFG_TX_PAD_EN;
8558 -
8559 -       tmp |= CMD_CFG_CRC_FWD;
8560 -
8561 -       iowrite32be(tmp, &regs->command_config);
8562 -
8563 -       /* Max Frame Length */
8564 -       iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
8565 -
8566 -       /* Pause Time */
8567 -       iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
8568 -       iowrite32be((u32)0, &regs->pause_thresh[0]);
8569 -
8570 -       /* IF_MODE */
8571 -       tmp = 0;
8572 -       switch (phy_if) {
8573 -       case PHY_INTERFACE_MODE_XGMII:
8574 -               tmp |= IF_MODE_XGMII;
8575 -               break;
8576 -       default:
8577 -               tmp |= IF_MODE_GMII;
8578 -               if (phy_if == PHY_INTERFACE_MODE_RGMII)
8579 -                       tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
8580 -       }
8581 -       iowrite32be(tmp, &regs->if_mode);
8582 -
8583 -       /* TX_FIFO_SECTIONS */
8584 -       tmp = 0;
8585 -       if (phy_if == PHY_INTERFACE_MODE_XGMII) {
8586 -               if (slow_10g_if) {
8587 -                       tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
8588 -                               TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
8589 -               } else {
8590 -                       tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
8591 -                               TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
8592 -               }
8593 -       } else {
8594 -               tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
8595 -                       TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
8596 -       }
8597 -       iowrite32be(tmp, &regs->tx_fifo_sections);
8598 -
8599 -       /* clear all pending events and set-up interrupts */
8600 -       iowrite32be(0xffffffff, &regs->ievent);
8601 -       set_exception(regs, exceptions, true);
8602 -
8603 -       return 0;
8604 -}
8605 -
8606 -static void set_dflts(struct memac_cfg *cfg)
8607 -{
8608 -       cfg->reset_on_init = false;
8609 -       cfg->promiscuous_mode_enable = false;
8610 -       cfg->pause_ignore = false;
8611 -       cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
8612 -       cfg->max_frame_length = DEFAULT_FRAME_LENGTH;
8613 -       cfg->pause_quanta = DEFAULT_PAUSE_QUANTA;
8614 -}
8615 -
8616 -static u32 get_mac_addr_hash_code(u64 eth_addr)
8617 -{
8618 -       u64 mask1, mask2;
8619 -       u32 xor_val = 0;
8620 -       u8 i, j;
8621 -
8622 -       for (i = 0; i < 6; i++) {
8623 -               mask1 = eth_addr & (u64)0x01;
8624 -               eth_addr >>= 1;
8625 -
8626 -               for (j = 0; j < 7; j++) {
8627 -                       mask2 = eth_addr & (u64)0x01;
8628 -                       mask1 ^= mask2;
8629 -                       eth_addr >>= 1;
8630 -               }
8631 -
8632 -               xor_val |= (mask1 << (5 - i));
8633 -       }
8634 -
8635 -       return xor_val;
8636 -}
8637 -
8638 -static void setup_sgmii_internal_phy(struct fman_mac *memac,
8639 -                                    struct fixed_phy_status *fixed_link)
8640 -{
8641 -       u16 tmp_reg16;
8642 -
8643 -       if (WARN_ON(!memac->pcsphy))
8644 -               return;
8645 -
8646 -       /* SGMII mode */
8647 -       tmp_reg16 = IF_MODE_SGMII_EN;
8648 -       if (!fixed_link)
8649 -               /* AN enable */
8650 -               tmp_reg16 |= IF_MODE_USE_SGMII_AN;
8651 -       else {
8652 -               switch (fixed_link->speed) {
8653 -               case 10:
8654 -                       /* For 10M: IF_MODE[SPEED_10M] = 0 */
8655 -               break;
8656 -               case 100:
8657 -                       tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
8658 -               break;
8659 -               case 1000: /* fallthrough */
8660 -               default:
8661 -                       tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
8662 -               break;
8663 -               }
8664 -               if (!fixed_link->duplex)
8665 -                       tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
8666 -       }
8667 -       phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
8668 -
8669 -       /* Device ability according to SGMII specification */
8670 -       tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
8671 -       phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
8672 -
8673 -       /* Adjust link timer for SGMII  -
8674 -        * According to Cisco SGMII specification the timer should be 1.6 ms.
8675 -        * The link_timer register is configured in units of the clock.
8676 -        * - When running as 1G SGMII, Serdes clock is 125 MHz, so
8677 -        * unit = 1 / (125*10^6 Hz) = 8 ns.
8678 -        * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
8679 -        * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
8680 -        * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
8681 -        * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
8682 -        * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
8683 -        * we always set up here a value of 2.5 SGMII.
8684 -        */
8685 -       phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
8686 -       phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
8687 -
8688 -       if (!fixed_link)
8689 -               /* Restart AN */
8690 -               tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
8691 -       else
8692 -               /* AN disabled */
8693 -               tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
8694 -       phy_write(memac->pcsphy, 0x0, tmp_reg16);
8695 -}
8696 -
8697 -static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
8698 -{
8699 -       u16 tmp_reg16;
8700 -
8701 -       /* AN Device capability  */
8702 -       tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
8703 -       phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
8704 -
8705 -       /* Adjust link timer for SGMII  -
8706 -        * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
8707 -        * The link_timer register is configured in units of the clock.
8708 -        * - When running as 1G SGMII, Serdes clock is 125 MHz, so
8709 -        * unit = 1 / (125*10^6 Hz) = 8 ns.
8710 -        * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
8711 -        * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
8712 -        * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
8713 -        * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
8714 -        * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
8715 -        * we always set up here a value of 2.5 SGMII.
8716 -        */
8717 -       phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
8718 -       phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
8719 -
8720 -       /* Restart AN */
8721 -       tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
8722 -       phy_write(memac->pcsphy, 0x0, tmp_reg16);
8723 -}
8724 -
8725 -static int check_init_parameters(struct fman_mac *memac)
8726 -{
8727 -       if (memac->addr == 0) {
8728 -               pr_err("Ethernet MAC must have a valid MAC address\n");
8729 -               return -EINVAL;
8730 -       }
8731 -       if (!memac->exception_cb) {
8732 -               pr_err("Uninitialized exception handler\n");
8733 -               return -EINVAL;
8734 -       }
8735 -       if (!memac->event_cb) {
8736 -               pr_warn("Uninitialize event handler\n");
8737 -               return -EINVAL;
8738 -       }
8739 -
8740 -       return 0;
8741 -}
8742 -
8743 -static int get_exception_flag(enum fman_mac_exceptions exception)
8744 -{
8745 -       u32 bit_mask;
8746 -
8747 -       switch (exception) {
8748 -       case FM_MAC_EX_10G_TX_ECC_ER:
8749 -               bit_mask = MEMAC_IMASK_TECC_ER;
8750 -               break;
8751 -       case FM_MAC_EX_10G_RX_ECC_ER:
8752 -               bit_mask = MEMAC_IMASK_RECC_ER;
8753 -               break;
8754 -       case FM_MAC_EX_TS_FIFO_ECC_ERR:
8755 -               bit_mask = MEMAC_IMASK_TSECC_ER;
8756 -               break;
8757 -       case FM_MAC_EX_MAGIC_PACKET_INDICATION:
8758 -               bit_mask = MEMAC_IMASK_MGI;
8759 -               break;
8760 -       default:
8761 -               bit_mask = 0;
8762 -               break;
8763 -       }
8764 -
8765 -       return bit_mask;
8766 -}
8767 -
8768 -static void memac_err_exception(void *handle)
8769 -{
8770 -       struct fman_mac *memac = (struct fman_mac *)handle;
8771 -       struct memac_regs __iomem *regs = memac->regs;
8772 -       u32 event, imask;
8773 -
8774 -       event = ioread32be(&regs->ievent);
8775 -       imask = ioread32be(&regs->imask);
8776 -
8777 -       /* Imask include both error and notification/event bits.
8778 -        * Leaving only error bits enabled by imask.
8779 -        * The imask error bits are shifted by 16 bits offset from
8780 -        * their corresponding location in the ievent - hence the >> 16
8781 -        */
8782 -       event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
8783 -
8784 -       iowrite32be(event, &regs->ievent);
8785 -
8786 -       if (event & MEMAC_IEVNT_TS_ECC_ER)
8787 -               memac->exception_cb(memac->dev_id, FM_MAC_EX_TS_FIFO_ECC_ERR);
8788 -       if (event & MEMAC_IEVNT_TX_ECC_ER)
8789 -               memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
8790 -       if (event & MEMAC_IEVNT_RX_ECC_ER)
8791 -               memac->exception_cb(memac->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
8792 -}
8793 -
8794 -static void memac_exception(void *handle)
8795 -{
8796 -       struct fman_mac *memac = (struct fman_mac *)handle;
8797 -       struct memac_regs __iomem *regs = memac->regs;
8798 -       u32 event, imask;
8799 -
8800 -       event = ioread32be(&regs->ievent);
8801 -       imask = ioread32be(&regs->imask);
8802 -
8803 -       /* Imask include both error and notification/event bits.
8804 -        * Leaving only error bits enabled by imask.
8805 -        * The imask error bits are shifted by 16 bits offset from
8806 -        * their corresponding location in the ievent - hence the >> 16
8807 -        */
8808 -       event &= ((imask & MEMAC_ALL_ERRS_IMASK) >> 16);
8809 -
8810 -       iowrite32be(event, &regs->ievent);
8811 -
8812 -       if (event & MEMAC_IEVNT_MGI)
8813 -               memac->exception_cb(memac->dev_id,
8814 -                                   FM_MAC_EX_MAGIC_PACKET_INDICATION);
8815 -}
8816 -
8817 -static void free_init_resources(struct fman_mac *memac)
8818 -{
8819 -       fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
8820 -                            FMAN_INTR_TYPE_ERR);
8821 -
8822 -       fman_unregister_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
8823 -                            FMAN_INTR_TYPE_NORMAL);
8824 -
8825 -       /* release the driver's group hash table */
8826 -       free_hash_table(memac->multicast_addr_hash);
8827 -       memac->multicast_addr_hash = NULL;
8828 -
8829 -       /* release the driver's individual hash table */
8830 -       free_hash_table(memac->unicast_addr_hash);
8831 -       memac->unicast_addr_hash = NULL;
8832 -}
8833 -
8834 -static bool is_init_done(struct memac_cfg *memac_drv_params)
8835 -{
8836 -       /* Checks if mEMAC driver parameters were initialized */
8837 -       if (!memac_drv_params)
8838 -               return true;
8839 -
8840 -       return false;
8841 -}
8842 -
8843 -int memac_enable(struct fman_mac *memac, enum comm_mode mode)
8844 -{
8845 -       struct memac_regs __iomem *regs = memac->regs;
8846 -       u32 tmp;
8847 -
8848 -       if (!is_init_done(memac->memac_drv_param))
8849 -               return -EINVAL;
8850 -
8851 -       tmp = ioread32be(&regs->command_config);
8852 -       if (mode & COMM_MODE_RX)
8853 -               tmp |= CMD_CFG_RX_EN;
8854 -       if (mode & COMM_MODE_TX)
8855 -               tmp |= CMD_CFG_TX_EN;
8856 -
8857 -       iowrite32be(tmp, &regs->command_config);
8858 -
8859 -       return 0;
8860 -}
8861 -
8862 -int memac_disable(struct fman_mac *memac, enum comm_mode mode)
8863 -{
8864 -       struct memac_regs __iomem *regs = memac->regs;
8865 -       u32 tmp;
8866 -
8867 -       if (!is_init_done(memac->memac_drv_param))
8868 -               return -EINVAL;
8869 -
8870 -       tmp = ioread32be(&regs->command_config);
8871 -       if (mode & COMM_MODE_RX)
8872 -               tmp &= ~CMD_CFG_RX_EN;
8873 -       if (mode & COMM_MODE_TX)
8874 -               tmp &= ~CMD_CFG_TX_EN;
8875 -
8876 -       iowrite32be(tmp, &regs->command_config);
8877 -
8878 -       return 0;
8879 -}
8880 -
8881 -int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
8882 -{
8883 -       struct memac_regs __iomem *regs = memac->regs;
8884 -       u32 tmp;
8885 -
8886 -       if (!is_init_done(memac->memac_drv_param))
8887 -               return -EINVAL;
8888 -
8889 -       tmp = ioread32be(&regs->command_config);
8890 -       if (new_val)
8891 -               tmp |= CMD_CFG_PROMIS_EN;
8892 -       else
8893 -               tmp &= ~CMD_CFG_PROMIS_EN;
8894 -
8895 -       iowrite32be(tmp, &regs->command_config);
8896 -
8897 -       return 0;
8898 -}
8899 -
8900 -int memac_adjust_link(struct fman_mac *memac, u16 speed)
8901 -{
8902 -       struct memac_regs __iomem *regs = memac->regs;
8903 -       u32 tmp;
8904 -
8905 -       if (!is_init_done(memac->memac_drv_param))
8906 -               return -EINVAL;
8907 -
8908 -       tmp = ioread32be(&regs->if_mode);
8909 -
8910 -       /* Set full duplex */
8911 -       tmp &= ~IF_MODE_HD;
8912 -
8913 -       if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
8914 -               /* Configure RGMII in manual mode */
8915 -               tmp &= ~IF_MODE_RGMII_AUTO;
8916 -               tmp &= ~IF_MODE_RGMII_SP_MASK;
8917 -               /* Full duplex */
8918 -               tmp |= IF_MODE_RGMII_FD;
8919 -
8920 -               switch (speed) {
8921 -               case SPEED_1000:
8922 -                       tmp |= IF_MODE_RGMII_1000;
8923 -                       break;
8924 -               case SPEED_100:
8925 -                       tmp |= IF_MODE_RGMII_100;
8926 -                       break;
8927 -               case SPEED_10:
8928 -                       tmp |= IF_MODE_RGMII_10;
8929 -                       break;
8930 -               default:
8931 -                       break;
8932 -               }
8933 -       }
8934 -
8935 -       iowrite32be(tmp, &regs->if_mode);
8936 -
8937 -       return 0;
8938 -}
8939 -
8940 -int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
8941 -{
8942 -       if (is_init_done(memac->memac_drv_param))
8943 -               return -EINVAL;
8944 -
8945 -       memac->memac_drv_param->max_frame_length = new_val;
8946 -
8947 -       return 0;
8948 -}
8949 -
8950 -int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
8951 -{
8952 -       if (is_init_done(memac->memac_drv_param))
8953 -               return -EINVAL;
8954 -
8955 -       memac->memac_drv_param->reset_on_init = enable;
8956 -
8957 -       return 0;
8958 -}
8959 -
8960 -int memac_cfg_fixed_link(struct fman_mac *memac,
8961 -                        struct fixed_phy_status *fixed_link)
8962 -{
8963 -       if (is_init_done(memac->memac_drv_param))
8964 -               return -EINVAL;
8965 -
8966 -       memac->memac_drv_param->fixed_link = fixed_link;
8967 -
8968 -       return 0;
8969 -}
8970 -
8971 -int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
8972 -                             u16 pause_time, u16 thresh_time)
8973 -{
8974 -       struct memac_regs __iomem *regs = memac->regs;
8975 -       u32 tmp;
8976 -
8977 -       if (!is_init_done(memac->memac_drv_param))
8978 -               return -EINVAL;
8979 -
8980 -       tmp = ioread32be(&regs->tx_fifo_sections);
8981 -
8982 -       GET_TX_EMPTY_DEFAULT_VALUE(tmp);
8983 -       iowrite32be(tmp, &regs->tx_fifo_sections);
8984 -
8985 -       tmp = ioread32be(&regs->command_config);
8986 -       tmp &= ~CMD_CFG_PFC_MODE;
8987 -       priority = 0;
8988 -
8989 -       iowrite32be(tmp, &regs->command_config);
8990 -
8991 -       tmp = ioread32be(&regs->pause_quanta[priority / 2]);
8992 -       if (priority % 2)
8993 -               tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
8994 -       else
8995 -               tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
8996 -       tmp |= ((u32)pause_time << (16 * (priority % 2)));
8997 -       iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
8998 -
8999 -       tmp = ioread32be(&regs->pause_thresh[priority / 2]);
9000 -       if (priority % 2)
9001 -               tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
9002 -       else
9003 -               tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
9004 -       tmp |= ((u32)thresh_time << (16 * (priority % 2)));
9005 -       iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
9006 -
9007 -       return 0;
9008 -}
9009 -
9010 -int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
9011 -{
9012 -       struct memac_regs __iomem *regs = memac->regs;
9013 -       u32 tmp;
9014 -
9015 -       if (!is_init_done(memac->memac_drv_param))
9016 -               return -EINVAL;
9017 -
9018 -       tmp = ioread32be(&regs->command_config);
9019 -       if (en)
9020 -               tmp &= ~CMD_CFG_PAUSE_IGNORE;
9021 -       else
9022 -               tmp |= CMD_CFG_PAUSE_IGNORE;
9023 -
9024 -       iowrite32be(tmp, &regs->command_config);
9025 -
9026 -       return 0;
9027 -}
9028 -
9029 -int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
9030 -{
9031 -       if (!is_init_done(memac->memac_drv_param))
9032 -               return -EINVAL;
9033 -
9034 -       add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
9035 -
9036 -       return 0;
9037 -}
9038 -
9039 -int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
9040 -{
9041 -       struct memac_regs __iomem *regs = memac->regs;
9042 -       struct eth_hash_entry *hash_entry;
9043 -       u32 hash;
9044 -       u64 addr;
9045 -
9046 -       if (!is_init_done(memac->memac_drv_param))
9047 -               return -EINVAL;
9048 -
9049 -       addr = ENET_ADDR_TO_UINT64(*eth_addr);
9050 -
9051 -       if (!(addr & GROUP_ADDRESS)) {
9052 -               /* Unicast addresses not supported in hash */
9053 -               pr_err("Unicast Address\n");
9054 -               return -EINVAL;
9055 -       }
9056 -       hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
9057 -
9058 -       /* Create element to be added to the driver hash table */
9059 -       hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
9060 -       if (!hash_entry)
9061 -               return -ENOMEM;
9062 -       hash_entry->addr = addr;
9063 -       INIT_LIST_HEAD(&hash_entry->node);
9064 -
9065 -       list_add_tail(&hash_entry->node,
9066 -                     &memac->multicast_addr_hash->lsts[hash]);
9067 -       iowrite32be(hash | HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
9068 -
9069 -       return 0;
9070 -}
9071 -
9072 -int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
9073 -{
9074 -       struct memac_regs __iomem *regs = memac->regs;
9075 -       struct eth_hash_entry *hash_entry = NULL;
9076 -       struct list_head *pos;
9077 -       u32 hash;
9078 -       u64 addr;
9079 -
9080 -       if (!is_init_done(memac->memac_drv_param))
9081 -               return -EINVAL;
9082 -
9083 -       addr = ENET_ADDR_TO_UINT64(*eth_addr);
9084 -
9085 -       hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
9086 -
9087 -       list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
9088 -               hash_entry = ETH_HASH_ENTRY_OBJ(pos);
9089 -               if (hash_entry->addr == addr) {
9090 -                       list_del_init(&hash_entry->node);
9091 -                       kfree(hash_entry);
9092 -                       break;
9093 -               }
9094 -       }
9095 -       if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
9096 -               iowrite32be(hash & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
9097 -
9098 -       return 0;
9099 -}
9100 -
9101 -int memac_set_exception(struct fman_mac *memac,
9102 -                       enum fman_mac_exceptions exception, bool enable)
9103 -{
9104 -       u32 bit_mask = 0;
9105 -
9106 -       if (!is_init_done(memac->memac_drv_param))
9107 -               return -EINVAL;
9108 -
9109 -       bit_mask = get_exception_flag(exception);
9110 -       if (bit_mask) {
9111 -               if (enable)
9112 -                       memac->exceptions |= bit_mask;
9113 -               else
9114 -                       memac->exceptions &= ~bit_mask;
9115 -       } else {
9116 -               pr_err("Undefined exception\n");
9117 -               return -EINVAL;
9118 -       }
9119 -       set_exception(memac->regs, bit_mask, enable);
9120 -
9121 -       return 0;
9122 -}
9123 -
9124 -int memac_init(struct fman_mac *memac)
9125 -{
9126 -       struct memac_cfg *memac_drv_param;
9127 -       u8 i;
9128 -       enet_addr_t eth_addr;
9129 -       bool slow_10g_if = false;
9130 -       struct fixed_phy_status *fixed_link;
9131 -       int err;
9132 -       u32 reg32 = 0;
9133 -
9134 -       if (is_init_done(memac->memac_drv_param))
9135 -               return -EINVAL;
9136 -
9137 -       err = check_init_parameters(memac);
9138 -       if (err)
9139 -               return err;
9140 -
9141 -       memac_drv_param = memac->memac_drv_param;
9142 -
9143 -       if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
9144 -               slow_10g_if = true;
9145 -
9146 -       /* First, reset the MAC if desired. */
9147 -       if (memac_drv_param->reset_on_init) {
9148 -               err = reset(memac->regs);
9149 -               if (err) {
9150 -                       pr_err("mEMAC reset failed\n");
9151 -                       return err;
9152 -               }
9153 -       }
9154 -
9155 -       /* MAC Address */
9156 -       MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
9157 -       add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
9158 -
9159 -       fixed_link = memac_drv_param->fixed_link;
9160 -
9161 -       init(memac->regs, memac->memac_drv_param, memac->phy_if,
9162 -            memac->max_speed, slow_10g_if, memac->exceptions);
9163 -
9164 -       /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
9165 -        * Exists only in FMan 6.0 and 6.3.
9166 -        */
9167 -       if ((memac->fm_rev_info.major == 6) &&
9168 -           ((memac->fm_rev_info.minor == 0) ||
9169 -           (memac->fm_rev_info.minor == 3))) {
9170 -               /* MAC strips CRC from received frames - this workaround
9171 -                * should decrease the likelihood of bug appearance
9172 -                */
9173 -               reg32 = ioread32be(&memac->regs->command_config);
9174 -               reg32 &= ~CMD_CFG_CRC_FWD;
9175 -               iowrite32be(reg32, &memac->regs->command_config);
9176 -       }
9177 -
9178 -       if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
9179 -               /* Configure internal SGMII PHY */
9180 -               if (memac->basex_if)
9181 -                       setup_sgmii_internal_phy_base_x(memac);
9182 -               else
9183 -                       setup_sgmii_internal_phy(memac, fixed_link);
9184 -       } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
9185 -               /* Configure 4 internal SGMII PHYs */
9186 -               for (i = 0; i < 4; i++) {
9187 -                       u8 qsmgii_phy_addr, phy_addr;
9188 -                       /* QSGMII PHY address occupies 3 upper bits of 5-bit
9189 -                        * phy_address; the lower 2 bits are used to extend
9190 -                        * register address space and access each one of 4
9191 -                        * ports inside QSGMII.
9192 -                        */
9193 -                       phy_addr = memac->pcsphy->mdio.addr;
9194 -                       qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
9195 -                       memac->pcsphy->mdio.addr = qsmgii_phy_addr;
9196 -                       if (memac->basex_if)
9197 -                               setup_sgmii_internal_phy_base_x(memac);
9198 -                       else
9199 -                               setup_sgmii_internal_phy(memac, fixed_link);
9200 -
9201 -                       memac->pcsphy->mdio.addr = phy_addr;
9202 -               }
9203 -       }
9204 -
9205 -       /* Max Frame Length */
9206 -       err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
9207 -                                    memac_drv_param->max_frame_length);
9208 -       if (err) {
9209 -               pr_err("settings Mac max frame length is FAILED\n");
9210 -               return err;
9211 -       }
9212 -
9213 -       memac->multicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
9214 -       if (!memac->multicast_addr_hash) {
9215 -               free_init_resources(memac);
9216 -               pr_err("allocation hash table is FAILED\n");
9217 -               return -ENOMEM;
9218 -       }
9219 -
9220 -       memac->unicast_addr_hash = alloc_hash_table(HASH_TABLE_SIZE);
9221 -       if (!memac->unicast_addr_hash) {
9222 -               free_init_resources(memac);
9223 -               pr_err("allocation hash table is FAILED\n");
9224 -               return -ENOMEM;
9225 -       }
9226 -
9227 -       fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
9228 -                          FMAN_INTR_TYPE_ERR, memac_err_exception, memac);
9229 -
9230 -       fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
9231 -                          FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
9232 -
9233 -       kfree(memac_drv_param);
9234 -       memac->memac_drv_param = NULL;
9235 -
9236 -       return 0;
9237 -}
9238 -
9239 -int memac_free(struct fman_mac *memac)
9240 -{
9241 -       free_init_resources(memac);
9242 -
9243 -       if (memac->pcsphy)
9244 -               put_device(&memac->pcsphy->mdio.dev);
9245 -
9246 -       kfree(memac->memac_drv_param);
9247 -       kfree(memac);
9248 -
9249 -       return 0;
9250 -}
9251 -
9252 -struct fman_mac *memac_config(struct fman_mac_params *params)
9253 -{
9254 -       struct fman_mac *memac;
9255 -       struct memac_cfg *memac_drv_param;
9256 -       void __iomem *base_addr;
9257 -
9258 -       base_addr = params->base_addr;
9259 -       /* allocate memory for the m_emac data structure */
9260 -       memac = kzalloc(sizeof(*memac), GFP_KERNEL);
9261 -       if (!memac)
9262 -               return NULL;
9263 -
9264 -       /* allocate memory for the m_emac driver parameters data structure */
9265 -       memac_drv_param = kzalloc(sizeof(*memac_drv_param), GFP_KERNEL);
9266 -       if (!memac_drv_param) {
9267 -               memac_free(memac);
9268 -               return NULL;
9269 -       }
9270 -
9271 -       /* Plant parameter structure pointer */
9272 -       memac->memac_drv_param = memac_drv_param;
9273 -
9274 -       set_dflts(memac_drv_param);
9275 -
9276 -       memac->addr = ENET_ADDR_TO_UINT64(params->addr);
9277 -
9278 -       memac->regs = base_addr;
9279 -       memac->max_speed = params->max_speed;
9280 -       memac->phy_if = params->phy_if;
9281 -       memac->mac_id = params->mac_id;
9282 -       memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
9283 -                            MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
9284 -       memac->exception_cb = params->exception_cb;
9285 -       memac->event_cb = params->event_cb;
9286 -       memac->dev_id = params->dev_id;
9287 -       memac->fm = params->fm;
9288 -       memac->basex_if = params->basex_if;
9289 -
9290 -       /* Save FMan revision */
9291 -       fman_get_revision(memac->fm, &memac->fm_rev_info);
9292 -
9293 -       if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
9294 -           memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
9295 -               if (!params->internal_phy_node) {
9296 -                       pr_err("PCS PHY node is not available\n");
9297 -                       memac_free(memac);
9298 -                       return NULL;
9299 -               }
9300 -
9301 -               memac->pcsphy = of_phy_find_device(params->internal_phy_node);
9302 -               if (!memac->pcsphy) {
9303 -                       pr_err("of_phy_find_device (PCS PHY) failed\n");
9304 -                       memac_free(memac);
9305 -                       return NULL;
9306 -               }
9307 -       }
9308 -
9309 -       return memac;
9310 -}
9311 diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
9312 deleted file mode 100644
9313 index 173d8e0..0000000
9314 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h
9315 +++ /dev/null
9316 @@ -1,60 +0,0 @@
9317 -/*
9318 - * Copyright 2008-2015 Freescale Semiconductor Inc.
9319 - *
9320 - * Redistribution and use in source and binary forms, with or without
9321 - * modification, are permitted provided that the following conditions are met:
9322 - *     * Redistributions of source code must retain the above copyright
9323 - *       notice, this list of conditions and the following disclaimer.
9324 - *     * Redistributions in binary form must reproduce the above copyright
9325 - *       notice, this list of conditions and the following disclaimer in the
9326 - *       documentation and/or other materials provided with the distribution.
9327 - *     * Neither the name of Freescale Semiconductor nor the
9328 - *       names of its contributors may be used to endorse or promote products
9329 - *       derived from this software without specific prior written permission.
9330 - *
9331 - *
9332 - * ALTERNATIVELY, this software may be distributed under the terms of the
9333 - * GNU General Public License ("GPL") as published by the Free Software
9334 - * Foundation, either version 2 of that License or (at your option) any
9335 - * later version.
9336 - *
9337 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9338 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9339 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9340 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9341 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9342 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9343 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9344 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9345 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9346 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9347 - */
9348 -
9349 -#ifndef __MEMAC_H
9350 -#define __MEMAC_H
9351 -
9352 -#include "fman_mac.h"
9353 -
9354 -#include <linux/netdevice.h>
9355 -
9356 -struct fman_mac *memac_config(struct fman_mac_params *params);
9357 -int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
9358 -int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
9359 -int memac_adjust_link(struct fman_mac *memac, u16 speed);
9360 -int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
9361 -int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
9362 -int memac_cfg_fixed_link(struct fman_mac *memac,
9363 -                        struct fixed_phy_status *fixed_link);
9364 -int memac_enable(struct fman_mac *memac, enum comm_mode mode);
9365 -int memac_disable(struct fman_mac *memac, enum comm_mode mode);
9366 -int memac_init(struct fman_mac *memac);
9367 -int memac_free(struct fman_mac *memac);
9368 -int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
9369 -int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
9370 -                             u16 pause_time, u16 thresh_time);
9371 -int memac_set_exception(struct fman_mac *memac,
9372 -                       enum fman_mac_exceptions exception, bool enable);
9373 -int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
9374 -int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
9375 -
9376 -#endif /* __MEMAC_H */
9377 diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
9378 deleted file mode 100644
9379 index 5ec94d2..0000000
9380 --- a/drivers/net/ethernet/freescale/fman/fman_muram.c
9381 +++ /dev/null
9382 @@ -1,159 +0,0 @@
9383 -/*
9384 - * Copyright 2008-2015 Freescale Semiconductor Inc.
9385 - *
9386 - * Redistribution and use in source and binary forms, with or without
9387 - * modification, are permitted provided that the following conditions are met:
9388 - *     * Redistributions of source code must retain the above copyright
9389 - *       notice, this list of conditions and the following disclaimer.
9390 - *     * Redistributions in binary form must reproduce the above copyright
9391 - *       notice, this list of conditions and the following disclaimer in the
9392 - *       documentation and/or other materials provided with the distribution.
9393 - *     * Neither the name of Freescale Semiconductor nor the
9394 - *       names of its contributors may be used to endorse or promote products
9395 - *       derived from this software without specific prior written permission.
9396 - *
9397 - *
9398 - * ALTERNATIVELY, this software may be distributed under the terms of the
9399 - * GNU General Public License ("GPL") as published by the Free Software
9400 - * Foundation, either version 2 of that License or (at your option) any
9401 - * later version.
9402 - *
9403 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9404 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9405 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9406 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9407 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9408 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9409 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9410 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9411 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9412 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9413 - */
9414 -
9415 -#include "fman_muram.h"
9416 -
9417 -#include <linux/io.h>
9418 -#include <linux/slab.h>
9419 -#include <linux/genalloc.h>
9420 -
9421 -struct muram_info {
9422 -       struct gen_pool *pool;
9423 -       void __iomem *vbase;
9424 -       size_t size;
9425 -       phys_addr_t pbase;
9426 -};
9427 -
9428 -static unsigned long fman_muram_vbase_to_offset(struct muram_info *muram,
9429 -                                               unsigned long vaddr)
9430 -{
9431 -       return vaddr - (unsigned long)muram->vbase;
9432 -}
9433 -
9434 -/**
9435 - * fman_muram_init
9436 - * @base:      Pointer to base of memory mapped FM-MURAM.
9437 - * @size:      Size of the FM-MURAM partition.
9438 - *
9439 - * Creates partition in the MURAM.
9440 - * The routine returns a pointer to the MURAM partition.
9441 - * This pointer must be passed as to all other FM-MURAM function calls.
9442 - * No actual initialization or configuration of FM_MURAM hardware is done by
9443 - * this routine.
9444 - *
9445 - * Return: pointer to FM-MURAM object, or NULL for Failure.
9446 - */
9447 -struct muram_info *fman_muram_init(phys_addr_t base, size_t size)
9448 -{
9449 -       struct muram_info *muram;
9450 -       void __iomem *vaddr;
9451 -       int ret;
9452 -
9453 -       muram = kzalloc(sizeof(*muram), GFP_KERNEL);
9454 -       if (!muram)
9455 -               return NULL;
9456 -
9457 -       muram->pool = gen_pool_create(ilog2(64), -1);
9458 -       if (!muram->pool) {
9459 -               pr_err("%s(): MURAM pool create failed\n", __func__);
9460 -               goto  muram_free;
9461 -       }
9462 -
9463 -       vaddr = ioremap(base, size);
9464 -       if (!vaddr) {
9465 -               pr_err("%s(): MURAM ioremap failed\n", __func__);
9466 -               goto pool_destroy;
9467 -       }
9468 -
9469 -       ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,
9470 -                               base, size, -1);
9471 -       if (ret < 0) {
9472 -               pr_err("%s(): MURAM pool add failed\n", __func__);
9473 -               iounmap(vaddr);
9474 -               goto pool_destroy;
9475 -       }
9476 -
9477 -       memset_io(vaddr, 0, (int)size);
9478 -
9479 -       muram->vbase = vaddr;
9480 -       muram->pbase = base;
9481 -       return muram;
9482 -
9483 -pool_destroy:
9484 -       gen_pool_destroy(muram->pool);
9485 -muram_free:
9486 -       kfree(muram);
9487 -       return NULL;
9488 -}
9489 -
9490 -/**
9491 - * fman_muram_offset_to_vbase
9492 - * @muram:     FM-MURAM module pointer.
9493 - * @offset:    the offset of the memory block
9494 - *
9495 - * Gives the address of the memory region from specific offset
9496 - *
9497 - * Return: The address of the memory block
9498 - */
9499 -unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
9500 -                                        unsigned long offset)
9501 -{
9502 -       return offset + (unsigned long)muram->vbase;
9503 -}
9504 -
9505 -/**
9506 - * fman_muram_alloc
9507 - * @muram:     FM-MURAM module pointer.
9508 - * @size:      Size of the memory to be allocated.
9509 - *
9510 - * Allocate some memory from FM-MURAM partition.
9511 - *
9512 - * Return: address of the allocated memory; NULL otherwise.
9513 - */
9514 -unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
9515 -{
9516 -       unsigned long vaddr;
9517 -
9518 -       vaddr = gen_pool_alloc(muram->pool, size);
9519 -       if (!vaddr)
9520 -               return -ENOMEM;
9521 -
9522 -       memset_io((void __iomem *)vaddr, 0, size);
9523 -
9524 -       return fman_muram_vbase_to_offset(muram, vaddr);
9525 -}
9526 -
9527 -/**
9528 - * fman_muram_free_mem
9529 - * muram:      FM-MURAM module pointer.
9530 - * offset:     offset of the memory region to be freed.
9531 - * size:       size of the memory to be freed.
9532 - *
9533 - * Free an allocated memory from FM-MURAM partition.
9534 - */
9535 -void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
9536 -                        size_t size)
9537 -{
9538 -       unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
9539 -
9540 -       gen_pool_free(muram->pool, addr, size);
9541 -}
9542 diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
9543 deleted file mode 100644
9544 index 453bf84..0000000
9545 --- a/drivers/net/ethernet/freescale/fman/fman_muram.h
9546 +++ /dev/null
9547 @@ -1,52 +0,0 @@
9548 -/*
9549 - * Copyright 2008-2015 Freescale Semiconductor Inc.
9550 - *
9551 - * Redistribution and use in source and binary forms, with or without
9552 - * modification, are permitted provided that the following conditions are met:
9553 - *     * Redistributions of source code must retain the above copyright
9554 - *       notice, this list of conditions and the following disclaimer.
9555 - *     * Redistributions in binary form must reproduce the above copyright
9556 - *       notice, this list of conditions and the following disclaimer in the
9557 - *       documentation and/or other materials provided with the distribution.
9558 - *     * Neither the name of Freescale Semiconductor nor the
9559 - *       names of its contributors may be used to endorse or promote products
9560 - *       derived from this software without specific prior written permission.
9561 - *
9562 - *
9563 - * ALTERNATIVELY, this software may be distributed under the terms of the
9564 - * GNU General Public License ("GPL") as published by the Free Software
9565 - * Foundation, either version 2 of that License or (at your option) any
9566 - * later version.
9567 - *
9568 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9569 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9570 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9571 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9572 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9573 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9574 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9575 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9576 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9577 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9578 - */
9579 -#ifndef __FM_MURAM_EXT
9580 -#define __FM_MURAM_EXT
9581 -
9582 -#include <linux/types.h>
9583 -
9584 -#define FM_MURAM_INVALID_ALLOCATION    -1
9585 -
9586 -/* Structure for FM MURAM information */
9587 -struct muram_info;
9588 -
9589 -struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
9590 -
9591 -unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
9592 -                                        unsigned long offset);
9593 -
9594 -unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
9595 -
9596 -void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
9597 -                        size_t size);
9598 -
9599 -#endif /* __FM_MURAM_EXT */
9600 diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
9601 deleted file mode 100644
9602 index 9f3bb50..0000000
9603 --- a/drivers/net/ethernet/freescale/fman/fman_port.c
9604 +++ /dev/null
9605 @@ -1,1791 +0,0 @@
9606 -/*
9607 - * Copyright 2008 - 2015 Freescale Semiconductor Inc.
9608 - *
9609 - * Redistribution and use in source and binary forms, with or without
9610 - * modification, are permitted provided that the following conditions are met:
9611 - *     * Redistributions of source code must retain the above copyright
9612 - *       notice, this list of conditions and the following disclaimer.
9613 - *     * Redistributions in binary form must reproduce the above copyright
9614 - *       notice, this list of conditions and the following disclaimer in the
9615 - *       documentation and/or other materials provided with the distribution.
9616 - *     * Neither the name of Freescale Semiconductor nor the
9617 - *       names of its contributors may be used to endorse or promote products
9618 - *       derived from this software without specific prior written permission.
9619 - *
9620 - *
9621 - * ALTERNATIVELY, this software may be distributed under the terms of the
9622 - * GNU General Public License ("GPL") as published by the Free Software
9623 - * Foundation, either version 2 of that License or (at your option) any
9624 - * later version.
9625 - *
9626 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
9627 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
9628 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
9629 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
9630 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
9631 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
9632 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
9633 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9634 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
9635 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9636 - */
9637 -
9638 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9639 -
9640 -#include "fman_port.h"
9641 -#include "fman.h"
9642 -#include "fman_sp.h"
9643 -
9644 -#include <linux/io.h>
9645 -#include <linux/slab.h>
9646 -#include <linux/module.h>
9647 -#include <linux/interrupt.h>
9648 -#include <linux/of_platform.h>
9649 -#include <linux/of_address.h>
9650 -#include <linux/delay.h>
9651 -#include <linux/libfdt_env.h>
9652 -
9653 -/* Queue ID */
9654 -#define DFLT_FQ_ID             0x00FFFFFF
9655 -
9656 -/* General defines */
9657 -#define PORT_BMI_FIFO_UNITS            0x100
9658 -
9659 -#define MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)  \
9660 -       min((u32)bmi_max_fifo_size, (u32)1024 * FMAN_BMI_FIFO_UNITS)
9661 -
9662 -#define PORT_CG_MAP_NUM                        8
9663 -#define PORT_PRS_RESULT_WORDS_NUM      8
9664 -#define PORT_IC_OFFSET_UNITS           0x10
9665 -
9666 -#define MIN_EXT_BUF_SIZE               64
9667 -
9668 -#define BMI_PORT_REGS_OFFSET                           0
9669 -#define QMI_PORT_REGS_OFFSET                           0x400
9670 -
9671 -/* Default values */
9672 -#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN             \
9673 -       DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN
9674 -
9675 -#define DFLT_PORT_CUT_BYTES_FROM_END           4
9676 -
9677 -#define DFLT_PORT_ERRORS_TO_DISCARD            FM_PORT_FRM_ERR_CLS_DISCARD
9678 -#define DFLT_PORT_MAX_FRAME_LENGTH             9600
9679 -
9680 -#define DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(bmi_max_fifo_size) \
9681 -       MAX_PORT_FIFO_SIZE(bmi_max_fifo_size)
9682 -
9683 -#define DFLT_PORT_RX_FIFO_THRESHOLD(major, bmi_max_fifo_size)  \
9684 -       (major == 6 ?                                           \
9685 -       MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) :         \
9686 -       (MAX_PORT_FIFO_SIZE(bmi_max_fifo_size) * 3 / 4))        \
9687 -
9688 -#define DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS               0
9689 -
9690 -/* QMI defines */
9691 -#define QMI_DEQ_CFG_SUBPORTAL_MASK             0x1f
9692 -
9693 -#define QMI_PORT_CFG_EN                                0x80000000
9694 -#define QMI_PORT_STATUS_DEQ_FD_BSY             0x20000000
9695 -
9696 -#define QMI_DEQ_CFG_PRI                                0x80000000
9697 -#define QMI_DEQ_CFG_TYPE1                      0x10000000
9698 -#define QMI_DEQ_CFG_TYPE2                      0x20000000
9699 -#define QMI_DEQ_CFG_TYPE3                      0x30000000
9700 -#define QMI_DEQ_CFG_PREFETCH_PARTIAL           0x01000000
9701 -#define QMI_DEQ_CFG_PREFETCH_FULL              0x03000000
9702 -#define QMI_DEQ_CFG_SP_MASK                    0xf
9703 -#define QMI_DEQ_CFG_SP_SHIFT                   20
9704 -
9705 -#define QMI_BYTE_COUNT_LEVEL_CONTROL(_type)    \
9706 -       (_type == FMAN_PORT_TYPE_TX ? 0x1400 : 0x400)
9707 -
9708 -/* BMI defins */
9709 -#define BMI_EBD_EN                             0x80000000
9710 -
9711 -#define BMI_PORT_CFG_EN                                0x80000000
9712 -
9713 -#define BMI_PORT_STATUS_BSY                    0x80000000
9714 -
9715 -#define BMI_DMA_ATTR_SWP_SHIFT                 FMAN_SP_DMA_ATTR_SWP_SHIFT
9716 -#define BMI_DMA_ATTR_WRITE_OPTIMIZE            FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE
9717 -
9718 -#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT        16
9719 -#define BMI_RX_FIFO_THRESHOLD_ETHE             0x80000000
9720 -
9721 -#define BMI_FRAME_END_CS_IGNORE_SHIFT          24
9722 -#define BMI_FRAME_END_CS_IGNORE_MASK           0x0000001f
9723 -
9724 -#define BMI_RX_FRAME_END_CUT_SHIFT             16
9725 -#define BMI_RX_FRAME_END_CUT_MASK              0x0000001f
9726 -
9727 -#define BMI_IC_TO_EXT_SHIFT                    FMAN_SP_IC_TO_EXT_SHIFT
9728 -#define BMI_IC_TO_EXT_MASK                     0x0000001f
9729 -#define BMI_IC_FROM_INT_SHIFT                  FMAN_SP_IC_FROM_INT_SHIFT
9730 -#define BMI_IC_FROM_INT_MASK                   0x0000000f
9731 -#define BMI_IC_SIZE_MASK                       0x0000001f
9732 -
9733 -#define BMI_INT_BUF_MARG_SHIFT                 28
9734 -#define BMI_INT_BUF_MARG_MASK                  0x0000000f
9735 -#define BMI_EXT_BUF_MARG_START_SHIFT           FMAN_SP_EXT_BUF_MARG_START_SHIFT
9736 -#define BMI_EXT_BUF_MARG_START_MASK            0x000001ff
9737 -#define BMI_EXT_BUF_MARG_END_MASK              0x000001ff
9738 -
9739 -#define BMI_CMD_MR_LEAC                                0x00200000
9740 -#define BMI_CMD_MR_SLEAC                       0x00100000
9741 -#define BMI_CMD_MR_MA                          0x00080000
9742 -#define BMI_CMD_MR_DEAS                                0x00040000
9743 -#define BMI_CMD_RX_MR_DEF                      (BMI_CMD_MR_LEAC | \
9744 -                                               BMI_CMD_MR_SLEAC | \
9745 -                                               BMI_CMD_MR_MA | \
9746 -                                               BMI_CMD_MR_DEAS)
9747 -#define BMI_CMD_TX_MR_DEF                      0
9748 -
9749 -#define BMI_CMD_ATTR_ORDER                     0x80000000
9750 -#define BMI_CMD_ATTR_SYNC                      0x02000000
9751 -#define BMI_CMD_ATTR_COLOR_SHIFT               26
9752 -
9753 -#define BMI_FIFO_PIPELINE_DEPTH_SHIFT          12
9754 -#define BMI_FIFO_PIPELINE_DEPTH_MASK           0x0000000f
9755 -#define BMI_NEXT_ENG_FD_BITS_SHIFT             24
9756 -
9757 -#define BMI_EXT_BUF_POOL_VALID                 FMAN_SP_EXT_BUF_POOL_VALID
9758 -#define BMI_EXT_BUF_POOL_EN_COUNTER            FMAN_SP_EXT_BUF_POOL_EN_COUNTER
9759 -#define BMI_EXT_BUF_POOL_BACKUP                FMAN_SP_EXT_BUF_POOL_BACKUP
9760 -#define BMI_EXT_BUF_POOL_ID_SHIFT              16
9761 -#define BMI_EXT_BUF_POOL_ID_MASK               0x003F0000
9762 -#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT        16
9763 -
9764 -#define BMI_TX_FIFO_MIN_FILL_SHIFT             16
9765 -
9766 -#define BMI_PRIORITY_ELEVATION_LEVEL ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
9767 -#define BMI_FIFO_THRESHOLD           ((0x3FF + 1) * PORT_BMI_FIFO_UNITS)
9768 -
9769 -#define BMI_DEQUEUE_PIPELINE_DEPTH(_type, _speed)              \
9770 -       ((_type == FMAN_PORT_TYPE_TX && _speed == 10000) ? 4 : 1)
9771 -
9772 -#define RX_ERRS_TO_ENQ                           \
9773 -       (FM_PORT_FRM_ERR_DMA                    | \
9774 -       FM_PORT_FRM_ERR_PHYSICAL                | \
9775 -       FM_PORT_FRM_ERR_SIZE                    | \
9776 -       FM_PORT_FRM_ERR_EXTRACTION              | \
9777 -       FM_PORT_FRM_ERR_NO_SCHEME               | \
9778 -       FM_PORT_FRM_ERR_PRS_TIMEOUT             | \
9779 -       FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT        | \
9780 -       FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED    | \
9781 -       FM_PORT_FRM_ERR_PRS_HDR_ERR             | \
9782 -       FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW        | \
9783 -       FM_PORT_FRM_ERR_IPRE)
9784 -
9785 -/* NIA defines */
9786 -#define NIA_ORDER_RESTOR                               0x00800000
9787 -#define NIA_ENG_BMI                                    0x00500000
9788 -#define NIA_ENG_QMI_ENQ                                        0x00540000
9789 -#define NIA_ENG_QMI_DEQ                                        0x00580000
9790 -
9791 -#define NIA_BMI_AC_ENQ_FRAME                           0x00000002
9792 -#define NIA_BMI_AC_TX_RELEASE                          0x000002C0
9793 -#define NIA_BMI_AC_RELEASE                             0x000000C0
9794 -#define NIA_BMI_AC_TX                                  0x00000274
9795 -#define NIA_BMI_AC_FETCH_ALL_FRAME                     0x0000020c
9796 -
9797 -/* Port IDs */
9798 -#define TX_10G_PORT_BASE               0x30
9799 -#define RX_10G_PORT_BASE               0x10
9800 -
9801 -/* BMI Rx port register map */
9802 -struct fman_port_rx_bmi_regs {
9803 -       u32 fmbm_rcfg;          /* Rx Configuration */
9804 -       u32 fmbm_rst;           /* Rx Status */
9805 -       u32 fmbm_rda;           /* Rx DMA attributes */
9806 -       u32 fmbm_rfp;           /* Rx FIFO Parameters */
9807 -       u32 fmbm_rfed;          /* Rx Frame End Data */
9808 -       u32 fmbm_ricp;          /* Rx Internal Context Parameters */
9809 -       u32 fmbm_rim;           /* Rx Internal Buffer Margins */
9810 -       u32 fmbm_rebm;          /* Rx External Buffer Margins */
9811 -       u32 fmbm_rfne;          /* Rx Frame Next Engine */
9812 -       u32 fmbm_rfca;          /* Rx Frame Command Attributes. */
9813 -       u32 fmbm_rfpne;         /* Rx Frame Parser Next Engine */
9814 -       u32 fmbm_rpso;          /* Rx Parse Start Offset */
9815 -       u32 fmbm_rpp;           /* Rx Policer Profile  */
9816 -       u32 fmbm_rccb;          /* Rx Coarse Classification Base */
9817 -       u32 fmbm_reth;          /* Rx Excessive Threshold */
9818 -       u32 reserved003c[1];    /* (0x03C 0x03F) */
9819 -       u32 fmbm_rprai[PORT_PRS_RESULT_WORDS_NUM];
9820 -       /* Rx Parse Results Array Init */
9821 -       u32 fmbm_rfqid;         /* Rx Frame Queue ID */
9822 -       u32 fmbm_refqid;        /* Rx Error Frame Queue ID */
9823 -       u32 fmbm_rfsdm;         /* Rx Frame Status Discard Mask */
9824 -       u32 fmbm_rfsem;         /* Rx Frame Status Error Mask */
9825 -       u32 fmbm_rfene;         /* Rx Frame Enqueue Next Engine */
9826 -       u32 reserved0074[0x2];  /* (0x074-0x07C)  */
9827 -       u32 fmbm_rcmne;         /* Rx Frame Continuous Mode Next Engine */
9828 -       u32 reserved0080[0x20]; /* (0x080 0x0FF)  */
9829 -       u32 fmbm_ebmpi[FMAN_PORT_MAX_EXT_POOLS_NUM];
9830 -       /* Buffer Manager pool Information- */
9831 -       u32 fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];     /* Allocate Counter- */
9832 -       u32 reserved0130[8];    /* 0x130/0x140 - 0x15F reserved - */
9833 -       u32 fmbm_rcgm[PORT_CG_MAP_NUM]; /* Congestion Group Map */
9834 -       u32 fmbm_mpd;           /* BM Pool Depletion  */
9835 -       u32 reserved0184[0x1F]; /* (0x184 0x1FF) */
9836 -       u32 fmbm_rstc;          /* Rx Statistics Counters */
9837 -       u32 fmbm_rfrc;          /* Rx Frame Counter */
9838 -       u32 fmbm_rfbc;          /* Rx Bad Frames Counter */
9839 -       u32 fmbm_rlfc;          /* Rx Large Frames Counter */
9840 -       u32 fmbm_rffc;          /* Rx Filter Frames Counter */
9841 -       u32 fmbm_rfdc;          /* Rx Frame Discard Counter */
9842 -       u32 fmbm_rfldec;                /* Rx Frames List DMA Error Counter */
9843 -       u32 fmbm_rodc;          /* Rx Out of Buffers Discard nntr */
9844 -       u32 fmbm_rbdc;          /* Rx Buffers Deallocate Counter */
9845 -       u32 fmbm_rpec;          /* RX Prepare to enqueue Counte */
9846 -       u32 reserved0224[0x16]; /* (0x224 0x27F) */
9847 -       u32 fmbm_rpc;           /* Rx Performance Counters */
9848 -       u32 fmbm_rpcp;          /* Rx Performance Count Parameters */
9849 -       u32 fmbm_rccn;          /* Rx Cycle Counter */
9850 -       u32 fmbm_rtuc;          /* Rx Tasks Utilization Counter */
9851 -       u32 fmbm_rrquc;         /* Rx Receive Queue Utilization cntr */
9852 -       u32 fmbm_rduc;          /* Rx DMA Utilization Counter */
9853 -       u32 fmbm_rfuc;          /* Rx FIFO Utilization Counter */
9854 -       u32 fmbm_rpac;          /* Rx Pause Activation Counter */
9855 -       u32 reserved02a0[0x18]; /* (0x2A0 0x2FF) */
9856 -       u32 fmbm_rdcfg[0x3];    /* Rx Debug Configuration */
9857 -       u32 fmbm_rgpr;          /* Rx General Purpose Register */
9858 -       u32 reserved0310[0x3a];
9859 -};
9860 -
9861 -/* BMI Tx port register map */
9862 -struct fman_port_tx_bmi_regs {
9863 -       u32 fmbm_tcfg;          /* Tx Configuration */
9864 -       u32 fmbm_tst;           /* Tx Status */
9865 -       u32 fmbm_tda;           /* Tx DMA attributes */
9866 -       u32 fmbm_tfp;           /* Tx FIFO Parameters */
9867 -       u32 fmbm_tfed;          /* Tx Frame End Data */
9868 -       u32 fmbm_ticp;          /* Tx Internal Context Parameters */
9869 -       u32 fmbm_tfdne;         /* Tx Frame Dequeue Next Engine. */
9870 -       u32 fmbm_tfca;          /* Tx Frame Command attribute. */
9871 -       u32 fmbm_tcfqid;        /* Tx Confirmation Frame Queue ID. */
9872 -       u32 fmbm_tefqid;        /* Tx Frame Error Queue ID */
9873 -       u32 fmbm_tfene;         /* Tx Frame Enqueue Next Engine */
9874 -       u32 fmbm_trlmts;        /* Tx Rate Limiter Scale */
9875 -       u32 fmbm_trlmt;         /* Tx Rate Limiter */
9876 -       u32 reserved0034[0x0e]; /* (0x034-0x6c) */
9877 -       u32 fmbm_tccb;          /* Tx Coarse Classification base */
9878 -       u32 fmbm_tfne;          /* Tx Frame Next Engine */
9879 -       u32 fmbm_tpfcm[0x02];
9880 -       /* Tx Priority based Flow Control (PFC) Mapping */
9881 -       u32 fmbm_tcmne;         /* Tx Frame Continuous Mode Next Engine */
9882 -       u32 reserved0080[0x60]; /* (0x080-0x200) */
9883 -       u32 fmbm_tstc;          /* Tx Statistics Counters */
9884 -       u32 fmbm_tfrc;          /* Tx Frame Counter */
9885 -       u32 fmbm_tfdc;          /* Tx Frames Discard Counter */
9886 -       u32 fmbm_tfledc;        /* Tx Frame len error discard cntr */
9887 -       u32 fmbm_tfufdc;        /* Tx Frame unsprt frmt discard cntr */
9888 -       u32 fmbm_tbdc;          /* Tx Buffers Deallocate Counter */
9889 -       u32 reserved0218[0x1A]; /* (0x218-0x280) */
9890 -       u32 fmbm_tpc;           /* Tx Performance Counters */
9891 -       u32 fmbm_tpcp;          /* Tx Performance Count Parameters */
9892 -       u32 fmbm_tccn;          /* Tx Cycle Counter */
9893 -       u32 fmbm_ttuc;          /* Tx Tasks Utilization Counter */
9894 -       u32 fmbm_ttcquc;        /* Tx Transmit conf Q util Counter */
9895 -       u32 fmbm_tduc;          /* Tx DMA Utilization Counter */
9896 -       u32 fmbm_tfuc;          /* Tx FIFO Utilization Counter */
9897 -       u32 reserved029c[16];   /* (0x29C-0x2FF) */
9898 -       u32 fmbm_tdcfg[0x3];    /* Tx Debug Configuration */
9899 -       u32 fmbm_tgpr;          /* Tx General Purpose Register */
9900 -       u32 reserved0310[0x3a]; /* (0x310-0x3FF) */
9901 -};
9902 -
9903 -/* BMI port register map */
9904 -union fman_port_bmi_regs {
9905 -       struct fman_port_rx_bmi_regs rx;
9906 -       struct fman_port_tx_bmi_regs tx;
9907 -};
9908 -
9909 -/* QMI port register map */
9910 -struct fman_port_qmi_regs {
9911 -       u32 fmqm_pnc;           /* PortID n Configuration Register */
9912 -       u32 fmqm_pns;           /* PortID n Status Register */
9913 -       u32 fmqm_pnts;          /* PortID n Task Status Register */
9914 -       u32 reserved00c[4];     /* 0xn00C - 0xn01B */
9915 -       u32 fmqm_pnen;          /* PortID n Enqueue NIA Register */
9916 -       u32 fmqm_pnetfc;                /* PortID n Enq Total Frame Counter */
9917 -       u32 reserved024[2];     /* 0xn024 - 0x02B */
9918 -       u32 fmqm_pndn;          /* PortID n Dequeue NIA Register */
9919 -       u32 fmqm_pndc;          /* PortID n Dequeue Config Register */
9920 -       u32 fmqm_pndtfc;                /* PortID n Dequeue tot Frame cntr */
9921 -       u32 fmqm_pndfdc;                /* PortID n Dequeue FQID Dflt Cntr */
9922 -       u32 fmqm_pndcc;         /* PortID n Dequeue Confirm Counter */
9923 -};
9924 -
9925 -/* QMI dequeue prefetch modes */
9926 -enum fman_port_deq_prefetch {
9927 -       FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
9928 -       FMAN_PORT_DEQ_PART_PREFETCH, /* Partial prefetch mode */
9929 -       FMAN_PORT_DEQ_FULL_PREFETCH /* Full prefetch mode */
9930 -};
9931 -
9932 -/* A structure for defining FM port resources */
9933 -struct fman_port_rsrc {
9934 -       u32 num; /* Committed required resource */
9935 -       u32 extra; /* Extra (not committed) required resource */
9936 -};
9937 -
9938 -enum fman_port_dma_swap {
9939 -       FMAN_PORT_DMA_NO_SWAP,  /* No swap, transfer data as is */
9940 -       FMAN_PORT_DMA_SWAP_LE,
9941 -       /* The transferred data should be swapped in PPC Little Endian mode */
9942 -       FMAN_PORT_DMA_SWAP_BE
9943 -       /* The transferred data should be swapped in Big Endian mode */
9944 -};
9945 -
9946 -/* Default port color */
9947 -enum fman_port_color {
9948 -       FMAN_PORT_COLOR_GREEN,  /* Default port color is green */
9949 -       FMAN_PORT_COLOR_YELLOW, /* Default port color is yellow */
9950 -       FMAN_PORT_COLOR_RED,            /* Default port color is red */
9951 -       FMAN_PORT_COLOR_OVERRIDE        /* Ignore color */
9952 -};
9953 -
9954 -/* QMI dequeue from the SP channel - types */
9955 -enum fman_port_deq_type {
9956 -       FMAN_PORT_DEQ_BY_PRI,
9957 -       /* Priority precedence and Intra-Class scheduling */
9958 -       FMAN_PORT_DEQ_ACTIVE_FQ,
9959 -       /* Active FQ precedence and Intra-Class scheduling */
9960 -       FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS
9961 -       /* Active FQ precedence and override Intra-Class scheduling */
9962 -};
9963 -
9964 -/* External buffer pools configuration */
9965 -struct fman_port_bpools {
9966 -       u8 count;                       /* Num of pools to set up */
9967 -       bool counters_enable;           /* Enable allocate counters */
9968 -       u8 grp_bp_depleted_num;
9969 -       /* Number of depleted pools - if reached the BMI indicates
9970 -        * the MAC to send a pause frame
9971 -        */
9972 -       struct {
9973 -               u8 bpid;                /* BM pool ID */
9974 -               u16 size;
9975 -               /* Pool's size - must be in ascending order */
9976 -               bool is_backup;
9977 -               /* If this is a backup pool */
9978 -               bool grp_bp_depleted;
9979 -               /* Consider this buffer in multiple pools depletion criteria */
9980 -               bool single_bp_depleted;
9981 -               /* Consider this buffer in single pool depletion criteria */
9982 -       } bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
9983 -};
9984 -
9985 -struct fman_port_cfg {
9986 -       u32 dflt_fqid;
9987 -       u32 err_fqid;
9988 -       u8 deq_sp;
9989 -       bool deq_high_priority;
9990 -       enum fman_port_deq_type deq_type;
9991 -       enum fman_port_deq_prefetch deq_prefetch_option;
9992 -       u16 deq_byte_cnt;
9993 -       u8 cheksum_last_bytes_ignore;
9994 -       u8 rx_cut_end_bytes;
9995 -       struct fman_buf_pool_depletion buf_pool_depletion;
9996 -       struct fman_ext_pools ext_buf_pools;
9997 -       u32 tx_fifo_min_level;
9998 -       u32 tx_fifo_low_comf_level;
9999 -       u32 rx_pri_elevation;
10000 -       u32 rx_fifo_thr;
10001 -       struct fman_sp_buf_margins buf_margins;
10002 -       u32 int_buf_start_margin;
10003 -       struct fman_sp_int_context_data_copy int_context;
10004 -       u32 discard_mask;
10005 -       u32 err_mask;
10006 -       struct fman_buffer_prefix_content buffer_prefix_content;
10007 -       bool dont_release_buf;
10008 -
10009 -       u8 rx_fd_bits;
10010 -       u32 tx_fifo_deq_pipeline_depth;
10011 -       bool errata_A006320;
10012 -       bool excessive_threshold_register;
10013 -       bool fmbm_tfne_has_features;
10014 -
10015 -       enum fman_port_dma_swap dma_swap_data;
10016 -       enum fman_port_color color;
10017 -};
10018 -
10019 -struct fman_port_rx_pools_params {
10020 -       u8 num_of_pools;
10021 -       u16 second_largest_buf_size;
10022 -       u16 largest_buf_size;
10023 -};
10024 -
10025 -struct fman_port_dts_params {
10026 -       void __iomem *base_addr;        /* FMan port virtual memory */
10027 -       enum fman_port_type type;       /* Port type */
10028 -       u16 speed;                      /* Port speed */
10029 -       u8 id;                          /* HW Port Id */
10030 -       u32 qman_channel_id;            /* QMan channel id (non RX only) */
10031 -       struct fman *fman;              /* FMan Handle */
10032 -};
10033 -
10034 -struct fman_port {
10035 -       void *fm;
10036 -       struct device *dev;
10037 -       struct fman_rev_info rev_info;
10038 -       u8 port_id;
10039 -       enum fman_port_type port_type;
10040 -       u16 port_speed;
10041 -
10042 -       union fman_port_bmi_regs __iomem *bmi_regs;
10043 -       struct fman_port_qmi_regs __iomem *qmi_regs;
10044 -
10045 -       struct fman_sp_buffer_offsets buffer_offsets;
10046 -
10047 -       u8 internal_buf_offset;
10048 -       struct fman_ext_pools ext_buf_pools;
10049 -
10050 -       u16 max_frame_length;
10051 -       struct fman_port_rsrc open_dmas;
10052 -       struct fman_port_rsrc tasks;
10053 -       struct fman_port_rsrc fifo_bufs;
10054 -       struct fman_port_rx_pools_params rx_pools_params;
10055 -
10056 -       struct fman_port_cfg *cfg;
10057 -       struct fman_port_dts_params dts_params;
10058 -
10059 -       u8 ext_pools_num;
10060 -       u32 max_port_fifo_size;
10061 -       u32 max_num_of_ext_pools;
10062 -       u32 max_num_of_sub_portals;
10063 -       u32 bm_max_num_of_pools;
10064 -};
10065 -
10066 -static int init_bmi_rx(struct fman_port *port)
10067 -{
10068 -       struct fman_port_rx_bmi_regs __iomem *regs = &port->bmi_regs->rx;
10069 -       struct fman_port_cfg *cfg = port->cfg;
10070 -       u32 tmp;
10071 -
10072 -       /* DMA attributes */
10073 -       tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
10074 -       /* Enable write optimization */
10075 -       tmp |= BMI_DMA_ATTR_WRITE_OPTIMIZE;
10076 -       iowrite32be(tmp, &regs->fmbm_rda);
10077 -
10078 -       /* Rx FIFO parameters */
10079 -       tmp = (cfg->rx_pri_elevation / PORT_BMI_FIFO_UNITS - 1) <<
10080 -               BMI_RX_FIFO_PRI_ELEVATION_SHIFT;
10081 -       tmp |= cfg->rx_fifo_thr / PORT_BMI_FIFO_UNITS - 1;
10082 -       iowrite32be(tmp, &regs->fmbm_rfp);
10083 -
10084 -       if (cfg->excessive_threshold_register)
10085 -               /* always allow access to the extra resources */
10086 -               iowrite32be(BMI_RX_FIFO_THRESHOLD_ETHE, &regs->fmbm_reth);
10087 -
10088 -       /* Frame end data */
10089 -       tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
10090 -               BMI_FRAME_END_CS_IGNORE_SHIFT;
10091 -       tmp |= (cfg->rx_cut_end_bytes & BMI_RX_FRAME_END_CUT_MASK) <<
10092 -               BMI_RX_FRAME_END_CUT_SHIFT;
10093 -       if (cfg->errata_A006320)
10094 -               tmp &= 0xffe0ffff;
10095 -       iowrite32be(tmp, &regs->fmbm_rfed);
10096 -
10097 -       /* Internal context parameters */
10098 -       tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
10099 -               BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
10100 -       tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
10101 -               BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
10102 -       tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
10103 -               BMI_IC_SIZE_MASK;
10104 -       iowrite32be(tmp, &regs->fmbm_ricp);
10105 -
10106 -       /* Internal buffer offset */
10107 -       tmp = ((cfg->int_buf_start_margin / PORT_IC_OFFSET_UNITS) &
10108 -               BMI_INT_BUF_MARG_MASK) << BMI_INT_BUF_MARG_SHIFT;
10109 -       iowrite32be(tmp, &regs->fmbm_rim);
10110 -
10111 -       /* External buffer margins */
10112 -       tmp = (cfg->buf_margins.start_margins & BMI_EXT_BUF_MARG_START_MASK) <<
10113 -               BMI_EXT_BUF_MARG_START_SHIFT;
10114 -       tmp |= cfg->buf_margins.end_margins & BMI_EXT_BUF_MARG_END_MASK;
10115 -       iowrite32be(tmp, &regs->fmbm_rebm);
10116 -
10117 -       /* Frame attributes */
10118 -       tmp = BMI_CMD_RX_MR_DEF;
10119 -       tmp |= BMI_CMD_ATTR_ORDER;
10120 -       tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
10121 -       /* Synchronization request */
10122 -       tmp |= BMI_CMD_ATTR_SYNC;
10123 -
10124 -       iowrite32be(tmp, &regs->fmbm_rfca);
10125 -
10126 -       /* NIA */
10127 -       tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
10128 -
10129 -       tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
10130 -       iowrite32be(tmp, &regs->fmbm_rfne);
10131 -
10132 -       /* Enqueue NIA */
10133 -       iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
10134 -
10135 -       /* Default/error queues */
10136 -       iowrite32be((cfg->dflt_fqid & DFLT_FQ_ID), &regs->fmbm_rfqid);
10137 -       iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_refqid);
10138 -
10139 -       /* Discard/error masks */
10140 -       iowrite32be(cfg->discard_mask, &regs->fmbm_rfsdm);
10141 -       iowrite32be(cfg->err_mask, &regs->fmbm_rfsem);
10142 -
10143 -       return 0;
10144 -}
10145 -
10146 -static int init_bmi_tx(struct fman_port *port)
10147 -{
10148 -       struct fman_port_tx_bmi_regs __iomem *regs = &port->bmi_regs->tx;
10149 -       struct fman_port_cfg *cfg = port->cfg;
10150 -       u32 tmp;
10151 -
10152 -       /* Tx Configuration register */
10153 -       tmp = 0;
10154 -       iowrite32be(tmp, &regs->fmbm_tcfg);
10155 -
10156 -       /* DMA attributes */
10157 -       tmp = (u32)cfg->dma_swap_data << BMI_DMA_ATTR_SWP_SHIFT;
10158 -       iowrite32be(tmp, &regs->fmbm_tda);
10159 -
10160 -       /* Tx FIFO parameters */
10161 -       tmp = (cfg->tx_fifo_min_level / PORT_BMI_FIFO_UNITS) <<
10162 -               BMI_TX_FIFO_MIN_FILL_SHIFT;
10163 -       tmp |= ((cfg->tx_fifo_deq_pipeline_depth - 1) &
10164 -               BMI_FIFO_PIPELINE_DEPTH_MASK) << BMI_FIFO_PIPELINE_DEPTH_SHIFT;
10165 -       tmp |= (cfg->tx_fifo_low_comf_level / PORT_BMI_FIFO_UNITS) - 1;
10166 -       iowrite32be(tmp, &regs->fmbm_tfp);
10167 -
10168 -       /* Frame end data */
10169 -       tmp = (cfg->cheksum_last_bytes_ignore & BMI_FRAME_END_CS_IGNORE_MASK) <<
10170 -               BMI_FRAME_END_CS_IGNORE_SHIFT;
10171 -       iowrite32be(tmp, &regs->fmbm_tfed);
10172 -
10173 -       /* Internal context parameters */
10174 -       tmp = ((cfg->int_context.ext_buf_offset / PORT_IC_OFFSET_UNITS) &
10175 -               BMI_IC_TO_EXT_MASK) << BMI_IC_TO_EXT_SHIFT;
10176 -       tmp |= ((cfg->int_context.int_context_offset / PORT_IC_OFFSET_UNITS) &
10177 -               BMI_IC_FROM_INT_MASK) << BMI_IC_FROM_INT_SHIFT;
10178 -       tmp |= (cfg->int_context.size / PORT_IC_OFFSET_UNITS) &
10179 -               BMI_IC_SIZE_MASK;
10180 -       iowrite32be(tmp, &regs->fmbm_ticp);
10181 -
10182 -       /* Frame attributes */
10183 -       tmp = BMI_CMD_TX_MR_DEF;
10184 -       tmp |= BMI_CMD_ATTR_ORDER;
10185 -       tmp |= (u32)cfg->color << BMI_CMD_ATTR_COLOR_SHIFT;
10186 -       iowrite32be(tmp, &regs->fmbm_tfca);
10187 -
10188 -       /* Dequeue NIA + enqueue NIA */
10189 -       iowrite32be(NIA_ENG_QMI_DEQ, &regs->fmbm_tfdne);
10190 -       iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_tfene);
10191 -       if (cfg->fmbm_tfne_has_features)
10192 -               iowrite32be(!cfg->dflt_fqid ?
10193 -                           BMI_EBD_EN | NIA_BMI_AC_FETCH_ALL_FRAME :
10194 -                           NIA_BMI_AC_FETCH_ALL_FRAME, &regs->fmbm_tfne);
10195 -       if (!cfg->dflt_fqid && cfg->dont_release_buf) {
10196 -               iowrite32be(DFLT_FQ_ID, &regs->fmbm_tcfqid);
10197 -               iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
10198 -                           &regs->fmbm_tfene);
10199 -               if (cfg->fmbm_tfne_has_features)
10200 -                       iowrite32be(ioread32be(&regs->fmbm_tfne) & ~BMI_EBD_EN,
10201 -                                   &regs->fmbm_tfne);
10202 -       }
10203 -
10204 -       /* Confirmation/error queues */
10205 -       if (cfg->dflt_fqid || !cfg->dont_release_buf)
10206 -               iowrite32be(cfg->dflt_fqid & DFLT_FQ_ID, &regs->fmbm_tcfqid);
10207 -       iowrite32be((cfg->err_fqid & DFLT_FQ_ID), &regs->fmbm_tefqid);
10208 -
10209 -       return 0;
10210 -}
10211 -
10212 -static int init_qmi(struct fman_port *port)
10213 -{
10214 -       struct fman_port_qmi_regs __iomem *regs = port->qmi_regs;
10215 -       struct fman_port_cfg *cfg = port->cfg;
10216 -       u32 tmp;
10217 -
10218 -       /* Rx port configuration */
10219 -       if (port->port_type == FMAN_PORT_TYPE_RX) {
10220 -               /* Enqueue NIA */
10221 -               iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_RELEASE, &regs->fmqm_pnen);
10222 -               return 0;
10223 -       }
10224 -
10225 -       /* Continue with Tx port configuration */
10226 -       if (port->port_type == FMAN_PORT_TYPE_TX) {
10227 -               /* Enqueue NIA */
10228 -               iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
10229 -                           &regs->fmqm_pnen);
10230 -               /* Dequeue NIA */
10231 -               iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX, &regs->fmqm_pndn);
10232 -       }
10233 -
10234 -       /* Dequeue Configuration register */
10235 -       tmp = 0;
10236 -       if (cfg->deq_high_priority)
10237 -               tmp |= QMI_DEQ_CFG_PRI;
10238 -
10239 -       switch (cfg->deq_type) {
10240 -       case FMAN_PORT_DEQ_BY_PRI:
10241 -               tmp |= QMI_DEQ_CFG_TYPE1;
10242 -               break;
10243 -       case FMAN_PORT_DEQ_ACTIVE_FQ:
10244 -               tmp |= QMI_DEQ_CFG_TYPE2;
10245 -               break;
10246 -       case FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS:
10247 -               tmp |= QMI_DEQ_CFG_TYPE3;
10248 -               break;
10249 -       default:
10250 -               return -EINVAL;
10251 -       }
10252 -
10253 -       switch (cfg->deq_prefetch_option) {
10254 -       case FMAN_PORT_DEQ_NO_PREFETCH:
10255 -               break;
10256 -       case FMAN_PORT_DEQ_PART_PREFETCH:
10257 -               tmp |= QMI_DEQ_CFG_PREFETCH_PARTIAL;
10258 -               break;
10259 -       case FMAN_PORT_DEQ_FULL_PREFETCH:
10260 -               tmp |= QMI_DEQ_CFG_PREFETCH_FULL;
10261 -               break;
10262 -       default:
10263 -               return -EINVAL;
10264 -       }
10265 -
10266 -       tmp |= (cfg->deq_sp & QMI_DEQ_CFG_SP_MASK) << QMI_DEQ_CFG_SP_SHIFT;
10267 -       tmp |= cfg->deq_byte_cnt;
10268 -       iowrite32be(tmp, &regs->fmqm_pndc);
10269 -
10270 -       return 0;
10271 -}
10272 -
10273 -static int init(struct fman_port *port)
10274 -{
10275 -       int err;
10276 -
10277 -       /* Init BMI registers */
10278 -       switch (port->port_type) {
10279 -       case FMAN_PORT_TYPE_RX:
10280 -               err = init_bmi_rx(port);
10281 -               break;
10282 -       case FMAN_PORT_TYPE_TX:
10283 -               err = init_bmi_tx(port);
10284 -               break;
10285 -       default:
10286 -               return -EINVAL;
10287 -       }
10288 -
10289 -       if (err)
10290 -               return err;
10291 -
10292 -       /* Init QMI registers */
10293 -       err = init_qmi(port);
10294 -       return err;
10295 -
10296 -       return 0;
10297 -}
10298 -
10299 -static int set_bpools(const struct fman_port *port,
10300 -                     const struct fman_port_bpools *bp)
10301 -{
10302 -       u32 __iomem *bp_reg, *bp_depl_reg;
10303 -       u32 tmp;
10304 -       u8 i, max_bp_num;
10305 -       bool grp_depl_used = false, rx_port;
10306 -
10307 -       switch (port->port_type) {
10308 -       case FMAN_PORT_TYPE_RX:
10309 -               max_bp_num = port->ext_pools_num;
10310 -               rx_port = true;
10311 -               bp_reg = port->bmi_regs->rx.fmbm_ebmpi;
10312 -               bp_depl_reg = &port->bmi_regs->rx.fmbm_mpd;
10313 -               break;
10314 -       default:
10315 -               return -EINVAL;
10316 -       }
10317 -
10318 -       if (rx_port) {
10319 -               /* Check buffers are provided in ascending order */
10320 -               for (i = 0; (i < (bp->count - 1) &&
10321 -                            (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
10322 -                       if (bp->bpool[i].size > bp->bpool[i + 1].size)
10323 -                               return -EINVAL;
10324 -               }
10325 -       }
10326 -
10327 -       /* Set up external buffers pools */
10328 -       for (i = 0; i < bp->count; i++) {
10329 -               tmp = BMI_EXT_BUF_POOL_VALID;
10330 -               tmp |= ((u32)bp->bpool[i].bpid <<
10331 -                       BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;
10332 -
10333 -               if (rx_port) {
10334 -                       if (bp->counters_enable)
10335 -                               tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;
10336 -
10337 -                       if (bp->bpool[i].is_backup)
10338 -                               tmp |= BMI_EXT_BUF_POOL_BACKUP;
10339 -
10340 -                       tmp |= (u32)bp->bpool[i].size;
10341 -               }
10342 -
10343 -               iowrite32be(tmp, &bp_reg[i]);
10344 -       }
10345 -
10346 -       /* Clear unused pools */
10347 -       for (i = bp->count; i < max_bp_num; i++)
10348 -               iowrite32be(0, &bp_reg[i]);
10349 -
10350 -       /* Pools depletion */
10351 -       tmp = 0;
10352 -       for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
10353 -               if (bp->bpool[i].grp_bp_depleted) {
10354 -                       grp_depl_used = true;
10355 -                       tmp |= 0x80000000 >> i;
10356 -               }
10357 -
10358 -               if (bp->bpool[i].single_bp_depleted)
10359 -                       tmp |= 0x80 >> i;
10360 -       }
10361 -
10362 -       if (grp_depl_used)
10363 -               tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
10364 -                   BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
10365 -
10366 -       iowrite32be(tmp, bp_depl_reg);
10367 -       return 0;
10368 -}
10369 -
10370 -static bool is_init_done(struct fman_port_cfg *cfg)
10371 -{
10372 -       /* Checks if FMan port driver parameters were initialized */
10373 -       if (!cfg)
10374 -               return true;
10375 -
10376 -       return false;
10377 -}
10378 -
10379 -static int verify_size_of_fifo(struct fman_port *port)
10380 -{
10381 -       u32 min_fifo_size_required = 0, opt_fifo_size_for_b2b = 0;
10382 -
10383 -       /* TX Ports */
10384 -       if (port->port_type == FMAN_PORT_TYPE_TX) {
10385 -               min_fifo_size_required = (u32)
10386 -                   (roundup(port->max_frame_length,
10387 -                            FMAN_BMI_FIFO_UNITS) + (3 * FMAN_BMI_FIFO_UNITS));
10388 -
10389 -               min_fifo_size_required +=
10390 -                   port->cfg->tx_fifo_deq_pipeline_depth *
10391 -                   FMAN_BMI_FIFO_UNITS;
10392 -
10393 -               opt_fifo_size_for_b2b = min_fifo_size_required;
10394 -
10395 -               /* Add some margin for back-to-back capability to improve
10396 -                * performance, allows the hardware to pipeline new frame dma
10397 -                * while the previous frame not yet transmitted.
10398 -                */
10399 -               if (port->port_speed == 10000)
10400 -                       opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
10401 -               else
10402 -                       opt_fifo_size_for_b2b += 2 * FMAN_BMI_FIFO_UNITS;
10403 -       }
10404 -
10405 -       /* RX Ports */
10406 -       else if (port->port_type == FMAN_PORT_TYPE_RX) {
10407 -               if (port->rev_info.major >= 6)
10408 -                       min_fifo_size_required = (u32)
10409 -                       (roundup(port->max_frame_length,
10410 -                                FMAN_BMI_FIFO_UNITS) +
10411 -                                (5 * FMAN_BMI_FIFO_UNITS));
10412 -                       /* 4 according to spec + 1 for FOF>0 */
10413 -               else
10414 -                       min_fifo_size_required = (u32)
10415 -                       (roundup(min(port->max_frame_length,
10416 -                                    port->rx_pools_params.largest_buf_size),
10417 -                                    FMAN_BMI_FIFO_UNITS) +
10418 -                                    (7 * FMAN_BMI_FIFO_UNITS));
10419 -
10420 -               opt_fifo_size_for_b2b = min_fifo_size_required;
10421 -
10422 -               /* Add some margin for back-to-back capability to improve
10423 -                * performance,allows the hardware to pipeline new frame dma
10424 -                * while the previous frame not yet transmitted.
10425 -                */
10426 -               if (port->port_speed == 10000)
10427 -                       opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
10428 -               else
10429 -                       opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
10430 -       }
10431 -
10432 -       WARN_ON(min_fifo_size_required <= 0);
10433 -       WARN_ON(opt_fifo_size_for_b2b < min_fifo_size_required);
10434 -
10435 -       /* Verify the size  */
10436 -       if (port->fifo_bufs.num < min_fifo_size_required)
10437 -               dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
10438 -                       __func__, min_fifo_size_required);
10439 -       else if (port->fifo_bufs.num < opt_fifo_size_for_b2b)
10440 -               dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n",
10441 -                       __func__, opt_fifo_size_for_b2b);
10442 -
10443 -       return 0;
10444 -}
10445 -
10446 -static int set_ext_buffer_pools(struct fman_port *port)
10447 -{
10448 -       struct fman_ext_pools *ext_buf_pools = &port->cfg->ext_buf_pools;
10449 -       struct fman_buf_pool_depletion *buf_pool_depletion =
10450 -       &port->cfg->buf_pool_depletion;
10451 -       u8 ordered_array[FMAN_PORT_MAX_EXT_POOLS_NUM];
10452 -       u16 sizes_array[BM_MAX_NUM_OF_POOLS];
10453 -       int i = 0, j = 0, err;
10454 -       struct fman_port_bpools bpools;
10455 -
10456 -       memset(&ordered_array, 0, sizeof(u8) * FMAN_PORT_MAX_EXT_POOLS_NUM);
10457 -       memset(&sizes_array, 0, sizeof(u16) * BM_MAX_NUM_OF_POOLS);
10458 -       memcpy(&port->ext_buf_pools, ext_buf_pools,
10459 -              sizeof(struct fman_ext_pools));
10460 -
10461 -       fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(ext_buf_pools,
10462 -                                                       ordered_array,
10463 -                                                       sizes_array);
10464 -
10465 -       memset(&bpools, 0, sizeof(struct fman_port_bpools));
10466 -       bpools.count = ext_buf_pools->num_of_pools_used;
10467 -       bpools.counters_enable = true;
10468 -       for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
10469 -               bpools.bpool[i].bpid = ordered_array[i];
10470 -               bpools.bpool[i].size = sizes_array[ordered_array[i]];
10471 -       }
10472 -
10473 -       /* save pools parameters for later use */
10474 -       port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
10475 -       port->rx_pools_params.largest_buf_size =
10476 -           sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
10477 -       port->rx_pools_params.second_largest_buf_size =
10478 -           sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 2]];
10479 -
10480 -       /* FMBM_RMPD reg. - pool depletion */
10481 -       if (buf_pool_depletion->pools_grp_mode_enable) {
10482 -               bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
10483 -               for (i = 0; i < port->bm_max_num_of_pools; i++) {
10484 -                       if (buf_pool_depletion->pools_to_consider[i]) {
10485 -                               for (j = 0; j < ext_buf_pools->
10486 -                                    num_of_pools_used; j++) {
10487 -                                       if (i == ordered_array[j]) {
10488 -                                               bpools.bpool[j].
10489 -                                                   grp_bp_depleted = true;
10490 -                                               break;
10491 -                                       }
10492 -                               }
10493 -                       }
10494 -               }
10495 -       }
10496 -
10497 -       if (buf_pool_depletion->single_pool_mode_enable) {
10498 -               for (i = 0; i < port->bm_max_num_of_pools; i++) {
10499 -                       if (buf_pool_depletion->
10500 -                           pools_to_consider_for_single_mode[i]) {
10501 -                               for (j = 0; j < ext_buf_pools->
10502 -                                    num_of_pools_used; j++) {
10503 -                                       if (i == ordered_array[j]) {
10504 -                                               bpools.bpool[j].
10505 -                                                   single_bp_depleted = true;
10506 -                                               break;
10507 -                                       }
10508 -                               }
10509 -                       }
10510 -               }
10511 -       }
10512 -
10513 -       err = set_bpools(port, &bpools);
10514 -       if (err != 0) {
10515 -               dev_err(port->dev, "%s: set_bpools() failed\n", __func__);
10516 -               return -EINVAL;
10517 -       }
10518 -
10519 -       return 0;
10520 -}
10521 -
10522 -static int init_low_level_driver(struct fman_port *port)
10523 -{
10524 -       struct fman_port_cfg *cfg = port->cfg;
10525 -       u32 tmp_val;
10526 -
10527 -       switch (port->port_type) {
10528 -       case FMAN_PORT_TYPE_RX:
10529 -               cfg->err_mask = (RX_ERRS_TO_ENQ & ~cfg->discard_mask);
10530 -               break;
10531 -       default:
10532 -               break;
10533 -       }
10534 -
10535 -       tmp_val = (u32)((port->internal_buf_offset % OFFSET_UNITS) ?
10536 -               (port->internal_buf_offset / OFFSET_UNITS + 1) :
10537 -               (port->internal_buf_offset / OFFSET_UNITS));
10538 -       port->internal_buf_offset = (u8)(tmp_val * OFFSET_UNITS);
10539 -       port->cfg->int_buf_start_margin = port->internal_buf_offset;
10540 -
10541 -       if (init(port) != 0) {
10542 -               dev_err(port->dev, "%s: fman port initialization failed\n",
10543 -                       __func__);
10544 -               return -ENODEV;
10545 -       }
10546 -
10547 -       /* The code bellow is a trick so the FM will not release the buffer
10548 -        * to BM nor will try to enqueue the frame to QM
10549 -        */
10550 -       if (port->port_type == FMAN_PORT_TYPE_TX) {
10551 -               if (!cfg->dflt_fqid && cfg->dont_release_buf) {
10552 -                       /* override fmbm_tcfqid 0 with a false non-0 value.
10553 -                        * This will force FM to act according to tfene.
10554 -                        * Otherwise, if fmbm_tcfqid is 0 the FM will release
10555 -                        * buffers to BM regardless of fmbm_tfene
10556 -                        */
10557 -                       iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
10558 -                       iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
10559 -                                   &port->bmi_regs->tx.fmbm_tfene);
10560 -               }
10561 -       }
10562 -
10563 -       return 0;
10564 -}
10565 -
10566 -static int fill_soc_specific_params(struct fman_port *port)
10567 -{
10568 -       u32 bmi_max_fifo_size;
10569 -
10570 -       bmi_max_fifo_size = fman_get_bmi_max_fifo_size(port->fm);
10571 -       port->max_port_fifo_size = MAX_PORT_FIFO_SIZE(bmi_max_fifo_size);
10572 -       port->bm_max_num_of_pools = 64;
10573 -
10574 -       /* P4080 - Major 2
10575 -        * P2041/P3041/P5020/P5040 - Major 3
10576 -        * Tx/Bx - Major 6
10577 -        */
10578 -       switch (port->rev_info.major) {
10579 -       case 2:
10580 -       case 3:
10581 -               port->max_num_of_ext_pools              = 4;
10582 -               port->max_num_of_sub_portals            = 12;
10583 -               break;
10584 -
10585 -       case 6:
10586 -               port->max_num_of_ext_pools              = 8;
10587 -               port->max_num_of_sub_portals            = 16;
10588 -               break;
10589 -
10590 -       default:
10591 -               dev_err(port->dev, "%s: Unsupported FMan version\n", __func__);
10592 -               return -EINVAL;
10593 -       }
10594 -
10595 -       return 0;
10596 -}
10597 -
10598 -static int get_dflt_fifo_deq_pipeline_depth(u8 major, enum fman_port_type type,
10599 -                                           u16 speed)
10600 -{
10601 -       switch (type) {
10602 -       case FMAN_PORT_TYPE_RX:
10603 -       case FMAN_PORT_TYPE_TX:
10604 -               switch (speed) {
10605 -               case 10000:
10606 -                       return 4;
10607 -               case 1000:
10608 -                       if (major >= 6)
10609 -                               return 2;
10610 -                       else
10611 -                               return 1;
10612 -               default:
10613 -                       return 0;
10614 -               }
10615 -       default:
10616 -               return 0;
10617 -       }
10618 -}
10619 -
10620 -static int get_dflt_num_of_tasks(u8 major, enum fman_port_type type,
10621 -                                u16 speed)
10622 -{
10623 -       switch (type) {
10624 -       case FMAN_PORT_TYPE_RX:
10625 -       case FMAN_PORT_TYPE_TX:
10626 -               switch (speed) {
10627 -               case 10000:
10628 -                       return 16;
10629 -               case 1000:
10630 -                       if (major >= 6)
10631 -                               return 4;
10632 -                       else
10633 -                               return 3;
10634 -               default:
10635 -                       return 0;
10636 -               }
10637 -       default:
10638 -               return 0;
10639 -       }
10640 -}
10641 -
10642 -static int get_dflt_extra_num_of_tasks(u8 major, enum fman_port_type type,
10643 -                                      u16 speed)
10644 -{
10645 -       switch (type) {
10646 -       case FMAN_PORT_TYPE_RX:
10647 -               /* FMan V3 */
10648 -               if (major >= 6)
10649 -                       return 0;
10650 -
10651 -               /* FMan V2 */
10652 -               if (speed == 10000)
10653 -                       return 8;
10654 -               else
10655 -                       return 2;
10656 -       case FMAN_PORT_TYPE_TX:
10657 -       default:
10658 -               return 0;
10659 -       }
10660 -}
10661 -
10662 -static int get_dflt_num_of_open_dmas(u8 major, enum fman_port_type type,
10663 -                                    u16 speed)
10664 -{
10665 -       int val;
10666 -
10667 -       if (major >= 6) {
10668 -               switch (type) {
10669 -               case FMAN_PORT_TYPE_TX:
10670 -                       if (speed == 10000)
10671 -                               val = 12;
10672 -                       else
10673 -                               val = 3;
10674 -                       break;
10675 -               case FMAN_PORT_TYPE_RX:
10676 -                       if (speed == 10000)
10677 -                               val = 8;
10678 -                       else
10679 -                               val = 2;
10680 -                       break;
10681 -               default:
10682 -                       return 0;
10683 -               }
10684 -       } else {
10685 -               switch (type) {
10686 -               case FMAN_PORT_TYPE_TX:
10687 -               case FMAN_PORT_TYPE_RX:
10688 -                       if (speed == 10000)
10689 -                               val = 8;
10690 -                       else
10691 -                               val = 1;
10692 -                       break;
10693 -               default:
10694 -                       val = 0;
10695 -               }
10696 -       }
10697 -
10698 -       return val;
10699 -}
10700 -
10701 -static int get_dflt_extra_num_of_open_dmas(u8 major, enum fman_port_type type,
10702 -                                          u16 speed)
10703 -{
10704 -       /* FMan V3 */
10705 -       if (major >= 6)
10706 -               return 0;
10707 -
10708 -       /* FMan V2 */
10709 -       switch (type) {
10710 -       case FMAN_PORT_TYPE_RX:
10711 -       case FMAN_PORT_TYPE_TX:
10712 -               if (speed == 10000)
10713 -                       return 8;
10714 -               else
10715 -                       return 1;
10716 -       default:
10717 -               return 0;
10718 -       }
10719 -}
10720 -
10721 -static int get_dflt_num_of_fifo_bufs(u8 major, enum fman_port_type type,
10722 -                                    u16 speed)
10723 -{
10724 -       int val;
10725 -
10726 -       if (major >= 6) {
10727 -               switch (type) {
10728 -               case FMAN_PORT_TYPE_TX:
10729 -                       if (speed == 10000)
10730 -                               val = 64;
10731 -                       else
10732 -                               val = 50;
10733 -                       break;
10734 -               case FMAN_PORT_TYPE_RX:
10735 -                       if (speed == 10000)
10736 -                               val = 96;
10737 -                       else
10738 -                               val = 50;
10739 -                       break;
10740 -               default:
10741 -                       val = 0;
10742 -               }
10743 -       } else {
10744 -               switch (type) {
10745 -               case FMAN_PORT_TYPE_TX:
10746 -                       if (speed == 10000)
10747 -                               val = 48;
10748 -                       else
10749 -                               val = 44;
10750 -                       break;
10751 -               case FMAN_PORT_TYPE_RX:
10752 -                       if (speed == 10000)
10753 -                               val = 48;
10754 -                       else
10755 -                               val = 45;
10756 -                       break;
10757 -               default:
10758 -                       val = 0;
10759 -               }
10760 -       }
10761 -
10762 -       return val;
10763 -}
10764 -
10765 -static void set_dflt_cfg(struct fman_port *port,
10766 -                        struct fman_port_params *port_params)
10767 -{
10768 -       struct fman_port_cfg *cfg = port->cfg;
10769 -
10770 -       cfg->dma_swap_data = FMAN_PORT_DMA_NO_SWAP;
10771 -       cfg->color = FMAN_PORT_COLOR_GREEN;
10772 -       cfg->rx_cut_end_bytes = DFLT_PORT_CUT_BYTES_FROM_END;
10773 -       cfg->rx_pri_elevation = BMI_PRIORITY_ELEVATION_LEVEL;
10774 -       cfg->rx_fifo_thr = BMI_FIFO_THRESHOLD;
10775 -       cfg->tx_fifo_low_comf_level = (5 * 1024);
10776 -       cfg->deq_type = FMAN_PORT_DEQ_BY_PRI;
10777 -       cfg->deq_prefetch_option = FMAN_PORT_DEQ_FULL_PREFETCH;
10778 -       cfg->tx_fifo_deq_pipeline_depth =
10779 -               BMI_DEQUEUE_PIPELINE_DEPTH(port->port_type, port->port_speed);
10780 -       cfg->deq_byte_cnt = QMI_BYTE_COUNT_LEVEL_CONTROL(port->port_type);
10781 -
10782 -       cfg->rx_pri_elevation =
10783 -               DFLT_PORT_RX_FIFO_PRI_ELEVATION_LEV(port->max_port_fifo_size);
10784 -       port->cfg->rx_fifo_thr =
10785 -               DFLT_PORT_RX_FIFO_THRESHOLD(port->rev_info.major,
10786 -                                           port->max_port_fifo_size);
10787 -
10788 -       if ((port->rev_info.major == 6) &&
10789 -           ((port->rev_info.minor == 0) || (port->rev_info.minor == 3)))
10790 -               cfg->errata_A006320 = true;
10791 -
10792 -       /* Excessive Threshold register - exists for pre-FMv3 chips only */
10793 -       if (port->rev_info.major < 6)
10794 -               cfg->excessive_threshold_register = true;
10795 -       else
10796 -               cfg->fmbm_tfne_has_features = true;
10797 -
10798 -       cfg->buffer_prefix_content.data_align =
10799 -               DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
10800 -}
10801 -
10802 -static void set_rx_dflt_cfg(struct fman_port *port,
10803 -                           struct fman_port_params *port_params)
10804 -{
10805 -       port->cfg->discard_mask = DFLT_PORT_ERRORS_TO_DISCARD;
10806 -
10807 -       memcpy(&port->cfg->ext_buf_pools,
10808 -              &port_params->specific_params.rx_params.ext_buf_pools,
10809 -              sizeof(struct fman_ext_pools));
10810 -       port->cfg->err_fqid =
10811 -               port_params->specific_params.rx_params.err_fqid;
10812 -       port->cfg->dflt_fqid =
10813 -               port_params->specific_params.rx_params.dflt_fqid;
10814 -}
10815 -
10816 -static void set_tx_dflt_cfg(struct fman_port *port,
10817 -                           struct fman_port_params *port_params,
10818 -                           struct fman_port_dts_params *dts_params)
10819 -{
10820 -       port->cfg->tx_fifo_deq_pipeline_depth =
10821 -               get_dflt_fifo_deq_pipeline_depth(port->rev_info.major,
10822 -                                                port->port_type,
10823 -                                                port->port_speed);
10824 -       port->cfg->err_fqid =
10825 -               port_params->specific_params.non_rx_params.err_fqid;
10826 -       port->cfg->deq_sp =
10827 -               (u8)(dts_params->qman_channel_id & QMI_DEQ_CFG_SUBPORTAL_MASK);
10828 -       port->cfg->dflt_fqid =
10829 -               port_params->specific_params.non_rx_params.dflt_fqid;
10830 -       port->cfg->deq_high_priority = true;
10831 -}
10832 -
10833 -/**
10834 - * fman_port_config
10835 - * @port:      Pointer to the port structure
10836 - * @params:    Pointer to data structure of parameters
10837 - *
10838 - * Creates a descriptor for the FM PORT module.
10839 - * The routine returns a pointer to the FM PORT object.
10840 - * This descriptor must be passed as first parameter to all other FM PORT
10841 - * function calls.
10842 - * No actual initialization or configuration of FM hardware is done by this
10843 - * routine.
10844 - *
10845 - * Return: 0 on success; Error code otherwise.
10846 - */
10847 -int fman_port_config(struct fman_port *port, struct fman_port_params *params)
10848 -{
10849 -       void __iomem *base_addr = port->dts_params.base_addr;
10850 -       int err;
10851 -
10852 -       /* Allocate the FM driver's parameters structure */
10853 -       port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
10854 -       if (!port->cfg)
10855 -               goto err_params;
10856 -
10857 -       /* Initialize FM port parameters which will be kept by the driver */
10858 -       port->port_type = port->dts_params.type;
10859 -       port->port_speed = port->dts_params.speed;
10860 -       port->port_id = port->dts_params.id;
10861 -       port->fm = port->dts_params.fman;
10862 -       port->ext_pools_num = (u8)8;
10863 -
10864 -       /* get FM revision */
10865 -       fman_get_revision(port->fm, &port->rev_info);
10866 -
10867 -       err = fill_soc_specific_params(port);
10868 -       if (err)
10869 -               goto err_port_cfg;
10870 -
10871 -       switch (port->port_type) {
10872 -       case FMAN_PORT_TYPE_RX:
10873 -               set_rx_dflt_cfg(port, params);
10874 -       case FMAN_PORT_TYPE_TX:
10875 -               set_tx_dflt_cfg(port, params, &port->dts_params);
10876 -       default:
10877 -               set_dflt_cfg(port, params);
10878 -       }
10879 -
10880 -       /* Continue with other parameters */
10881 -       /* set memory map pointers */
10882 -       port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
10883 -       port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
10884 -
10885 -       port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
10886 -       /* resource distribution. */
10887 -
10888 -       port->fifo_bufs.num =
10889 -       get_dflt_num_of_fifo_bufs(port->rev_info.major, port->port_type,
10890 -                                 port->port_speed) * FMAN_BMI_FIFO_UNITS;
10891 -       port->fifo_bufs.extra =
10892 -       DFLT_PORT_EXTRA_NUM_OF_FIFO_BUFS * FMAN_BMI_FIFO_UNITS;
10893 -
10894 -       port->open_dmas.num =
10895 -       get_dflt_num_of_open_dmas(port->rev_info.major,
10896 -                                 port->port_type, port->port_speed);
10897 -       port->open_dmas.extra =
10898 -       get_dflt_extra_num_of_open_dmas(port->rev_info.major,
10899 -                                       port->port_type, port->port_speed);
10900 -       port->tasks.num =
10901 -       get_dflt_num_of_tasks(port->rev_info.major,
10902 -                             port->port_type, port->port_speed);
10903 -       port->tasks.extra =
10904 -       get_dflt_extra_num_of_tasks(port->rev_info.major,
10905 -                                   port->port_type, port->port_speed);
10906 -
10907 -       /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 errata
10908 -        * workaround
10909 -        */
10910 -       if ((port->rev_info.major == 6) && (port->rev_info.minor == 0) &&
10911 -           (((port->port_type == FMAN_PORT_TYPE_TX) &&
10912 -           (port->port_speed == 1000)))) {
10913 -               port->open_dmas.num = 16;
10914 -               port->open_dmas.extra = 0;
10915 -       }
10916 -
10917 -       if (port->rev_info.major >= 6 &&
10918 -           port->port_type == FMAN_PORT_TYPE_TX &&
10919 -           port->port_speed == 1000) {
10920 -               /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata
10921 -                * workaround
10922 -                */
10923 -               if (port->rev_info.major >= 6) {
10924 -                       u32 reg;
10925 -
10926 -                       reg = 0x00001013;
10927 -                       iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp);
10928 -               }
10929 -       }
10930 -
10931 -       return 0;
10932 -
10933 -err_port_cfg:
10934 -       kfree(port->cfg);
10935 -err_params:
10936 -       kfree(port);
10937 -       return -EINVAL;
10938 -}
10939 -EXPORT_SYMBOL(fman_port_config);
10940 -
10941 -/**
10942 - * fman_port_init
10943 - * port:       A pointer to a FM Port module.
10944 - * Initializes the FM PORT module by defining the software structure and
10945 - * configuring the hardware registers.
10946 - *
10947 - * Return: 0 on success; Error code otherwise.
10948 - */
10949 -int fman_port_init(struct fman_port *port)
10950 -{
10951 -       struct fman_port_cfg *cfg;
10952 -       int err;
10953 -       struct fman_port_init_params params;
10954 -
10955 -       if (is_init_done(port->cfg))
10956 -               return -EINVAL;
10957 -
10958 -       err = fman_sp_build_buffer_struct(&port->cfg->int_context,
10959 -                                         &port->cfg->buffer_prefix_content,
10960 -                                         &port->cfg->buf_margins,
10961 -                                         &port->buffer_offsets,
10962 -                                         &port->internal_buf_offset);
10963 -       if (err)
10964 -               return err;
10965 -
10966 -       cfg = port->cfg;
10967 -
10968 -       if (port->port_type == FMAN_PORT_TYPE_RX) {
10969 -               /* Call the external Buffer routine which also checks fifo
10970 -                * size and updates it if necessary
10971 -                */
10972 -               /* define external buffer pools and pool depletion */
10973 -               err = set_ext_buffer_pools(port);
10974 -               if (err)
10975 -                       return err;
10976 -               /* check if the largest external buffer pool is large enough */
10977 -               if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
10978 -                   cfg->buf_margins.end_margins >
10979 -                   port->rx_pools_params.largest_buf_size) {
10980 -                       dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
10981 -                               __func__, cfg->buf_margins.start_margins,
10982 -                               cfg->buf_margins.end_margins,
10983 -                               port->rx_pools_params.largest_buf_size);
10984 -                       return -EINVAL;
10985 -               }
10986 -       }
10987 -
10988 -       /* Call FM module routine for communicating parameters */
10989 -       memset(&params, 0, sizeof(params));
10990 -       params.port_id = port->port_id;
10991 -       params.port_type = port->port_type;
10992 -       params.port_speed = port->port_speed;
10993 -       params.num_of_tasks = (u8)port->tasks.num;
10994 -       params.num_of_extra_tasks = (u8)port->tasks.extra;
10995 -       params.num_of_open_dmas = (u8)port->open_dmas.num;
10996 -       params.num_of_extra_open_dmas = (u8)port->open_dmas.extra;
10997 -
10998 -       if (port->fifo_bufs.num) {
10999 -               err = verify_size_of_fifo(port);
11000 -               if (err)
11001 -                       return err;
11002 -       }
11003 -       params.size_of_fifo = port->fifo_bufs.num;
11004 -       params.extra_size_of_fifo = port->fifo_bufs.extra;
11005 -       params.deq_pipeline_depth = port->cfg->tx_fifo_deq_pipeline_depth;
11006 -       params.max_frame_length = port->max_frame_length;
11007 -
11008 -       err = fman_set_port_params(port->fm, &params);
11009 -       if (err)
11010 -               return err;
11011 -
11012 -       err = init_low_level_driver(port);
11013 -       if (err)
11014 -               return err;
11015 -
11016 -       kfree(port->cfg);
11017 -       port->cfg = NULL;
11018 -
11019 -       return 0;
11020 -}
11021 -EXPORT_SYMBOL(fman_port_init);
11022 -
11023 -/**
11024 - * fman_port_cfg_buf_prefix_content
11025 - * @port                       A pointer to a FM Port module.
11026 - * @buffer_prefix_content      A structure of parameters describing
11027 - *                             the structure of the buffer.
11028 - *                             Out parameter:
11029 - *                             Start margin - offset of data from
11030 - *                             start of external buffer.
11031 - * Defines the structure, size and content of the application buffer.
11032 - * The prefix, in Tx ports, if 'pass_prs_result', the application should set
11033 - * a value to their offsets in the prefix of the FM will save the first
11034 - * 'priv_data_size', than, depending on 'pass_prs_result' and
11035 - * 'pass_time_stamp', copy parse result and timeStamp, and the packet itself
11036 - * (in this order), to the application buffer, and to offset.
11037 - * Calling this routine changes the buffer margins definitions in the internal
11038 - * driver data base from its default configuration:
11039 - * Data size:  [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
11040 - * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
11041 - * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
11042 - * May be used for all ports
11043 - *
11044 - * Allowed only following fman_port_config() and before fman_port_init().
11045 - *
11046 - * Return: 0 on success; Error code otherwise.
11047 - */
11048 -int fman_port_cfg_buf_prefix_content(struct fman_port *port,
11049 -                                    struct fman_buffer_prefix_content *
11050 -                                    buffer_prefix_content)
11051 -{
11052 -       if (is_init_done(port->cfg))
11053 -               return -EINVAL;
11054 -
11055 -       memcpy(&port->cfg->buffer_prefix_content,
11056 -              buffer_prefix_content,
11057 -              sizeof(struct fman_buffer_prefix_content));
11058 -       /* if data_align was not initialized by user,
11059 -        * we return to driver's default
11060 -        */
11061 -       if (!port->cfg->buffer_prefix_content.data_align)
11062 -               port->cfg->buffer_prefix_content.data_align =
11063 -               DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
11064 -
11065 -       return 0;
11066 -}
11067 -EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
11068 -
11069 -/**
11070 - * fman_port_disable
11071 - * port:       A pointer to a FM Port module.
11072 - *
11073 - * Gracefully disable an FM port. The port will not start new  tasks after all
11074 - * tasks associated with the port are terminated.
11075 - *
11076 - * This is a blocking routine, it returns after port is gracefully stopped,
11077 - * i.e. the port will not except new frames, but it will finish all frames
11078 - * or tasks which were already began.
11079 - * Allowed only following fman_port_init().
11080 - *
11081 - * Return: 0 on success; Error code otherwise.
11082 - */
11083 -int fman_port_disable(struct fman_port *port)
11084 -{
11085 -       u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
11086 -       u32 tmp;
11087 -       bool rx_port, failure = false;
11088 -       int count;
11089 -
11090 -       if (!is_init_done(port->cfg))
11091 -               return -EINVAL;
11092 -
11093 -       switch (port->port_type) {
11094 -       case FMAN_PORT_TYPE_RX:
11095 -               bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
11096 -               bmi_status_reg = &port->bmi_regs->rx.fmbm_rst;
11097 -               rx_port = true;
11098 -               break;
11099 -       case FMAN_PORT_TYPE_TX:
11100 -               bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
11101 -               bmi_status_reg = &port->bmi_regs->tx.fmbm_tst;
11102 -               rx_port = false;
11103 -               break;
11104 -       default:
11105 -               return -EINVAL;
11106 -       }
11107 -
11108 -       /* Disable QMI */
11109 -       if (!rx_port) {
11110 -               tmp = ioread32be(&port->qmi_regs->fmqm_pnc) & ~QMI_PORT_CFG_EN;
11111 -               iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
11112 -
11113 -               /* Wait for QMI to finish FD handling */
11114 -               count = 100;
11115 -               do {
11116 -                       udelay(10);
11117 -                       tmp = ioread32be(&port->qmi_regs->fmqm_pns);
11118 -               } while ((tmp & QMI_PORT_STATUS_DEQ_FD_BSY) && --count);
11119 -
11120 -               if (count == 0) {
11121 -                       /* Timeout */
11122 -                       failure = true;
11123 -               }
11124 -       }
11125 -
11126 -       /* Disable BMI */
11127 -       tmp = ioread32be(bmi_cfg_reg) & ~BMI_PORT_CFG_EN;
11128 -       iowrite32be(tmp, bmi_cfg_reg);
11129 -
11130 -       /* Wait for graceful stop end */
11131 -       count = 500;
11132 -       do {
11133 -               udelay(10);
11134 -               tmp = ioread32be(bmi_status_reg);
11135 -       } while ((tmp & BMI_PORT_STATUS_BSY) && --count);
11136 -
11137 -       if (count == 0) {
11138 -               /* Timeout */
11139 -               failure = true;
11140 -       }
11141 -
11142 -       if (failure)
11143 -               dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
11144 -                       __func__,  port->port_id);
11145 -
11146 -       return 0;
11147 -}
11148 -EXPORT_SYMBOL(fman_port_disable);
11149 -
11150 -/**
11151 - * fman_port_enable
11152 - * port:       A pointer to a FM Port module.
11153 - *
11154 - * A runtime routine provided to allow disable/enable of port.
11155 - *
11156 - * Allowed only following fman_port_init().
11157 - *
11158 - * Return: 0 on success; Error code otherwise.
11159 - */
11160 -int fman_port_enable(struct fman_port *port)
11161 -{
11162 -       u32 __iomem *bmi_cfg_reg;
11163 -       u32 tmp;
11164 -       bool rx_port;
11165 -
11166 -       if (!is_init_done(port->cfg))
11167 -               return -EINVAL;
11168 -
11169 -       switch (port->port_type) {
11170 -       case FMAN_PORT_TYPE_RX:
11171 -               bmi_cfg_reg = &port->bmi_regs->rx.fmbm_rcfg;
11172 -               rx_port = true;
11173 -               break;
11174 -       case FMAN_PORT_TYPE_TX:
11175 -               bmi_cfg_reg = &port->bmi_regs->tx.fmbm_tcfg;
11176 -               rx_port = false;
11177 -               break;
11178 -       default:
11179 -               return -EINVAL;
11180 -       }
11181 -
11182 -       /* Enable QMI */
11183 -       if (!rx_port) {
11184 -               tmp = ioread32be(&port->qmi_regs->fmqm_pnc) | QMI_PORT_CFG_EN;
11185 -               iowrite32be(tmp, &port->qmi_regs->fmqm_pnc);
11186 -       }
11187 -
11188 -       /* Enable BMI */
11189 -       tmp = ioread32be(bmi_cfg_reg) | BMI_PORT_CFG_EN;
11190 -       iowrite32be(tmp, bmi_cfg_reg);
11191 -
11192 -       return 0;
11193 -}
11194 -EXPORT_SYMBOL(fman_port_enable);
11195 -
11196 -/**
11197 - * fman_port_bind
11198 - * dev:                FMan Port OF device pointer
11199 - *
11200 - * Bind to a specific FMan Port.
11201 - *
11202 - * Allowed only after the port was created.
11203 - *
11204 - * Return: A pointer to the FMan port device.
11205 - */
11206 -struct fman_port *fman_port_bind(struct device *dev)
11207 -{
11208 -       return (struct fman_port *)(dev_get_drvdata(get_device(dev)));
11209 -}
11210 -EXPORT_SYMBOL(fman_port_bind);
11211 -
11212 -/**
11213 - * fman_port_get_qman_channel_id
11214 - * port:       Pointer to the FMan port devuce
11215 - *
11216 - * Get the QMan channel ID for the specific port
11217 - *
11218 - * Return: QMan channel ID
11219 - */
11220 -u32 fman_port_get_qman_channel_id(struct fman_port *port)
11221 -{
11222 -       return port->dts_params.qman_channel_id;
11223 -}
11224 -EXPORT_SYMBOL(fman_port_get_qman_channel_id);
11225 -
11226 -static int fman_port_probe(struct platform_device *of_dev)
11227 -{
11228 -       struct fman_port *port;
11229 -       struct fman *fman;
11230 -       struct device_node *fm_node, *port_node;
11231 -       struct resource res;
11232 -       struct resource *dev_res;
11233 -       u32 val;
11234 -       int err = 0, lenp;
11235 -       enum fman_port_type port_type;
11236 -       u16 port_speed;
11237 -       u8 port_id;
11238 -
11239 -       port = kzalloc(sizeof(*port), GFP_KERNEL);
11240 -       if (!port)
11241 -               return -ENOMEM;
11242 -
11243 -       port->dev = &of_dev->dev;
11244 -
11245 -       port_node = of_node_get(of_dev->dev.of_node);
11246 -
11247 -       /* Get the FM node */
11248 -       fm_node = of_get_parent(port_node);
11249 -       if (!fm_node) {
11250 -               dev_err(port->dev, "%s: of_get_parent() failed\n", __func__);
11251 -               err = -ENODEV;
11252 -               goto return_err;
11253 -       }
11254 -
11255 -       fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
11256 -       of_node_put(fm_node);
11257 -       if (!fman) {
11258 -               err = -EINVAL;
11259 -               goto return_err;
11260 -       }
11261 -
11262 -       err = of_property_read_u32(port_node, "cell-index", &val);
11263 -       if (err) {
11264 -               dev_err(port->dev, "%s: reading cell-index for %s failed\n",
11265 -                       __func__, port_node->full_name);
11266 -               err = -EINVAL;
11267 -               goto return_err;
11268 -       }
11269 -       port_id = (u8)val;
11270 -       port->dts_params.id = port_id;
11271 -
11272 -       if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
11273 -               port_type = FMAN_PORT_TYPE_TX;
11274 -               port_speed = 1000;
11275 -               if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
11276 -                       port_speed = 10000;
11277 -
11278 -       } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
11279 -               if (port_id >= TX_10G_PORT_BASE)
11280 -                       port_speed = 10000;
11281 -               else
11282 -                       port_speed = 1000;
11283 -               port_type = FMAN_PORT_TYPE_TX;
11284 -
11285 -       } else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
11286 -               port_type = FMAN_PORT_TYPE_RX;
11287 -               port_speed = 1000;
11288 -               if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
11289 -                       port_speed = 10000;
11290 -
11291 -       } else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
11292 -               if (port_id >= RX_10G_PORT_BASE)
11293 -                       port_speed = 10000;
11294 -               else
11295 -                       port_speed = 1000;
11296 -               port_type = FMAN_PORT_TYPE_RX;
11297 -
11298 -       }  else {
11299 -               dev_err(port->dev, "%s: Illegal port type\n", __func__);
11300 -               err = -EINVAL;
11301 -               goto return_err;
11302 -       }
11303 -
11304 -       port->dts_params.type = port_type;
11305 -       port->dts_params.speed = port_speed;
11306 -
11307 -       if (port_type == FMAN_PORT_TYPE_TX) {
11308 -               u32 qman_channel_id;
11309 -
11310 -               qman_channel_id = fman_get_qman_channel_id(fman, port_id);
11311 -               if (qman_channel_id == 0) {
11312 -                       dev_err(port->dev, "%s: incorrect qman-channel-id\n",
11313 -                               __func__);
11314 -                       err = -EINVAL;
11315 -                       goto return_err;
11316 -               }
11317 -               port->dts_params.qman_channel_id = qman_channel_id;
11318 -       }
11319 -
11320 -       err = of_address_to_resource(port_node, 0, &res);
11321 -       if (err < 0) {
11322 -               dev_err(port->dev, "%s: of_address_to_resource() failed\n",
11323 -                       __func__);
11324 -               err = -ENOMEM;
11325 -               goto return_err;
11326 -       }
11327 -
11328 -       port->dts_params.fman = fman;
11329 -
11330 -       of_node_put(port_node);
11331 -
11332 -       dev_res = __devm_request_region(port->dev, &res, res.start,
11333 -                                       resource_size(&res), "fman-port");
11334 -       if (!dev_res) {
11335 -               dev_err(port->dev, "%s: __devm_request_region() failed\n",
11336 -                       __func__);
11337 -               err = -EINVAL;
11338 -               goto free_port;
11339 -       }
11340 -
11341 -       port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
11342 -                                                 resource_size(&res));
11343 -       if (!port->dts_params.base_addr)
11344 -               dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
11345 -
11346 -       dev_set_drvdata(&of_dev->dev, port);
11347 -
11348 -       return 0;
11349 -
11350 -return_err:
11351 -       of_node_put(port_node);
11352 -free_port:
11353 -       kfree(port);
11354 -       return err;
11355 -}
11356 -
11357 -static const struct of_device_id fman_port_match[] = {
11358 -       {.compatible = "fsl,fman-v3-port-rx"},
11359 -       {.compatible = "fsl,fman-v2-port-rx"},
11360 -       {.compatible = "fsl,fman-v3-port-tx"},
11361 -       {.compatible = "fsl,fman-v2-port-tx"},
11362 -       {}
11363 -};
11364 -
11365 -MODULE_DEVICE_TABLE(of, fman_port_match);
11366 -
11367 -static struct platform_driver fman_port_driver = {
11368 -       .driver = {
11369 -               .name = "fsl-fman-port",
11370 -               .of_match_table = fman_port_match,
11371 -       },
11372 -       .probe = fman_port_probe,
11373 -};
11374 -
11375 -static int __init fman_port_load(void)
11376 -{
11377 -       int err;
11378 -
11379 -       pr_debug("FSL DPAA FMan driver\n");
11380 -
11381 -       err = platform_driver_register(&fman_port_driver);
11382 -       if (err < 0)
11383 -               pr_err("Error, platform_driver_register() = %d\n", err);
11384 -
11385 -       return err;
11386 -}
11387 -module_init(fman_port_load);
11388 -
11389 -static void __exit fman_port_unload(void)
11390 -{
11391 -       platform_driver_unregister(&fman_port_driver);
11392 -}
11393 -module_exit(fman_port_unload);
11394 -
11395 -MODULE_LICENSE("Dual BSD/GPL");
11396 -MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
11397 diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
11398 deleted file mode 100644
11399 index 8ba9017..0000000
11400 --- a/drivers/net/ethernet/freescale/fman/fman_port.h
11401 +++ /dev/null
11402 @@ -1,151 +0,0 @@
11403 -/*
11404 - * Copyright 2008 - 2015 Freescale Semiconductor Inc.
11405 - *
11406 - * Redistribution and use in source and binary forms, with or without
11407 - * modification, are permitted provided that the following conditions are met:
11408 - *     * Redistributions of source code must retain the above copyright
11409 - *       notice, this list of conditions and the following disclaimer.
11410 - *     * Redistributions in binary form must reproduce the above copyright
11411 - *       notice, this list of conditions and the following disclaimer in the
11412 - *       documentation and/or other materials provided with the distribution.
11413 - *     * Neither the name of Freescale Semiconductor nor the
11414 - *       names of its contributors may be used to endorse or promote products
11415 - *       derived from this software without specific prior written permission.
11416 - *
11417 - *
11418 - * ALTERNATIVELY, this software may be distributed under the terms of the
11419 - * GNU General Public License ("GPL") as published by the Free Software
11420 - * Foundation, either version 2 of that License or (at your option) any
11421 - * later version.
11422 - *
11423 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11424 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11425 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11426 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11427 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11428 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11429 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11430 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11431 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11432 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11433 - */
11434 -
11435 -#ifndef __FMAN_PORT_H
11436 -#define __FMAN_PORT_H
11437 -
11438 -#include "fman.h"
11439 -
11440 -/* FM Port API
11441 - * The FM uses a general module called "port" to represent a Tx port (MAC),
11442 - * an Rx port (MAC).
11443 - * The number of ports in an FM varies between SOCs.
11444 - * The SW driver manages these ports as sub-modules of the FM,i.e. after an
11445 - * FM is initialized, its ports may be initialized and operated upon.
11446 - * The port is initialized aware of its type, but other functions on a port
11447 - * may be indifferent to its type. When necessary, the driver verifies
11448 - * coherence and returns error if applicable.
11449 - * On initialization, user specifies the port type and it's index (relative
11450 - * to the port's type) - always starting at 0.
11451 - */
11452 -
11453 -/* FM Frame error */
11454 -/* Frame Descriptor errors */
11455 -/* Not for Rx-Port! Unsupported Format */
11456 -#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT     FM_FD_ERR_UNSUPPORTED_FORMAT
11457 -/* Not for Rx-Port! Length Error */
11458 -#define FM_PORT_FRM_ERR_LENGTH                 FM_FD_ERR_LENGTH
11459 -/* DMA Data error */
11460 -#define FM_PORT_FRM_ERR_DMA                    FM_FD_ERR_DMA
11461 -/* non Frame-Manager error; probably come from SEC that was chained to FM */
11462 -#define FM_PORT_FRM_ERR_NON_FM                 FM_FD_RX_STATUS_ERR_NON_FM
11463 - /* IPR error */
11464 -#define FM_PORT_FRM_ERR_IPRE                   (FM_FD_ERR_IPR & ~FM_FD_IPR)
11465 -/* IPR non-consistent-sp */
11466 -#define FM_PORT_FRM_ERR_IPR_NCSP               (FM_FD_ERR_IPR_NCSP &   \
11467 -                                               ~FM_FD_IPR)
11468 -
11469 -/* Rx FIFO overflow, FCS error, code error, running disparity
11470 - * error (SGMII and TBI modes), FIFO parity error.
11471 - * PHY Sequence error, PHY error control character detected.
11472 - */
11473 -#define FM_PORT_FRM_ERR_PHYSICAL                FM_FD_ERR_PHYSICAL
11474 -/* Frame too long OR Frame size exceeds max_length_frame  */
11475 -#define FM_PORT_FRM_ERR_SIZE                    FM_FD_ERR_SIZE
11476 -/* indicates a classifier "drop" operation */
11477 -#define FM_PORT_FRM_ERR_CLS_DISCARD             FM_FD_ERR_CLS_DISCARD
11478 -/* Extract Out of Frame */
11479 -#define FM_PORT_FRM_ERR_EXTRACTION              FM_FD_ERR_EXTRACTION
11480 -/* No Scheme Selected */
11481 -#define FM_PORT_FRM_ERR_NO_SCHEME               FM_FD_ERR_NO_SCHEME
11482 -/* Keysize Overflow */
11483 -#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW        FM_FD_ERR_KEYSIZE_OVERFLOW
11484 -/* Frame color is red */
11485 -#define FM_PORT_FRM_ERR_COLOR_RED               FM_FD_ERR_COLOR_RED
11486 -/* Frame color is yellow */
11487 -#define FM_PORT_FRM_ERR_COLOR_YELLOW            FM_FD_ERR_COLOR_YELLOW
11488 -/* Parser Time out Exceed */
11489 -#define FM_PORT_FRM_ERR_PRS_TIMEOUT             FM_FD_ERR_PRS_TIMEOUT
11490 -/* Invalid Soft Parser instruction */
11491 -#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT        FM_FD_ERR_PRS_ILL_INSTRUCT
11492 -/* Header error was identified during parsing */
11493 -#define FM_PORT_FRM_ERR_PRS_HDR_ERR             FM_FD_ERR_PRS_HDR_ERR
11494 -/* Frame parsed beyind 256 first bytes */
11495 -#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED    FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
11496 -/* FPM Frame Processing Timeout Exceeded */
11497 -#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT         0x00000001
11498 -
11499 -struct fman_port;
11500 -
11501 -/* A structure for additional Rx port parameters */
11502 -struct fman_port_rx_params {
11503 -       u32 err_fqid;                   /* Error Queue Id. */
11504 -       u32 dflt_fqid;                  /* Default Queue Id. */
11505 -       /* Which external buffer pools are used
11506 -        * (up to FMAN_PORT_MAX_EXT_POOLS_NUM), and their sizes.
11507 -        */
11508 -       struct fman_ext_pools ext_buf_pools;
11509 -};
11510 -
11511 -/* A structure for additional non-Rx port parameters */
11512 -struct fman_port_non_rx_params {
11513 -       /* Error Queue Id. */
11514 -       u32 err_fqid;
11515 -       /* For Tx - Default Confirmation queue, 0 means no Tx confirmation
11516 -        * for processed frames. For OP port - default Rx queue.
11517 -        */
11518 -       u32 dflt_fqid;
11519 -};
11520 -
11521 -/* A union for additional parameters depending on port type */
11522 -union fman_port_specific_params {
11523 -       /* Rx port parameters structure */
11524 -       struct fman_port_rx_params rx_params;
11525 -       /* Non-Rx port parameters structure */
11526 -       struct fman_port_non_rx_params non_rx_params;
11527 -};
11528 -
11529 -/* A structure representing FM initialization parameters */
11530 -struct fman_port_params {
11531 -       /* Virtual Address of memory mapped FM Port registers. */
11532 -       void *fm;
11533 -       union fman_port_specific_params specific_params;
11534 -       /* Additional parameters depending on port type. */
11535 -};
11536 -
11537 -int fman_port_config(struct fman_port *port, struct fman_port_params *params);
11538 -
11539 -int fman_port_init(struct fman_port *port);
11540 -
11541 -int fman_port_cfg_buf_prefix_content(struct fman_port *port,
11542 -                                    struct fman_buffer_prefix_content
11543 -                                    *buffer_prefix_content);
11544 -
11545 -int fman_port_disable(struct fman_port *port);
11546 -
11547 -int fman_port_enable(struct fman_port *port);
11548 -
11549 -u32 fman_port_get_qman_channel_id(struct fman_port *port);
11550 -
11551 -struct fman_port *fman_port_bind(struct device *dev);
11552 -
11553 -#endif /* __FMAN_PORT_H */
11554 diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
11555 deleted file mode 100644
11556 index 248f5bc..0000000
11557 --- a/drivers/net/ethernet/freescale/fman/fman_sp.c
11558 +++ /dev/null
11559 @@ -1,169 +0,0 @@
11560 -/*
11561 - * Copyright 2008 - 2015 Freescale Semiconductor Inc.
11562 - *
11563 - * Redistribution and use in source and binary forms, with or without
11564 - * modification, are permitted provided that the following conditions are met:
11565 - *     * Redistributions of source code must retain the above copyright
11566 - *       notice, this list of conditions and the following disclaimer.
11567 - *     * Redistributions in binary form must reproduce the above copyright
11568 - *       notice, this list of conditions and the following disclaimer in the
11569 - *       documentation and/or other materials provided with the distribution.
11570 - *     * Neither the name of Freescale Semiconductor nor the
11571 - *       names of its contributors may be used to endorse or promote products
11572 - *       derived from this software without specific prior written permission.
11573 - *
11574 - *
11575 - * ALTERNATIVELY, this software may be distributed under the terms of the
11576 - * GNU General Public License ("GPL") as published by the Free Software
11577 - * Foundation, either version 2 of that License or (at your option) any
11578 - * later version.
11579 - *
11580 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11581 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11582 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11583 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11584 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11585 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11586 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11587 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11588 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11589 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11590 - */
11591 -
11592 -#include "fman_sp.h"
11593 -#include "fman.h"
11594 -
11595 -void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
11596 -                                                    *fm_ext_pools,
11597 -                                                    u8 *ordered_array,
11598 -                                                    u16 *sizes_array)
11599 -{
11600 -       u16 buf_size = 0;
11601 -       int i = 0, j = 0, k = 0;
11602 -
11603 -       /* First we copy the external buffers pools information
11604 -        * to an ordered local array
11605 -        */
11606 -       for (i = 0; i < fm_ext_pools->num_of_pools_used; i++) {
11607 -               /* get pool size */
11608 -               buf_size = fm_ext_pools->ext_buf_pool[i].size;
11609 -
11610 -               /* keep sizes in an array according to poolId
11611 -                * for direct access
11612 -                */
11613 -               sizes_array[fm_ext_pools->ext_buf_pool[i].id] = buf_size;
11614 -
11615 -               /* save poolId in an ordered array according to size */
11616 -               for (j = 0; j <= i; j++) {
11617 -                       /* this is the next free place in the array */
11618 -                       if (j == i)
11619 -                               ordered_array[i] =
11620 -                                   fm_ext_pools->ext_buf_pool[i].id;
11621 -                       else {
11622 -                               /* find the right place for this poolId */
11623 -                               if (buf_size < sizes_array[ordered_array[j]]) {
11624 -                                       /* move the pool_ids one place ahead
11625 -                                        * to make room for this poolId
11626 -                                        */
11627 -                                       for (k = i; k > j; k--)
11628 -                                               ordered_array[k] =
11629 -                                                   ordered_array[k - 1];
11630 -
11631 -                                       /* now k==j, this is the place for
11632 -                                        * the new size
11633 -                                        */
11634 -                                       ordered_array[k] =
11635 -                                           fm_ext_pools->ext_buf_pool[i].id;
11636 -                                       break;
11637 -                               }
11638 -                       }
11639 -               }
11640 -       }
11641 -}
11642 -EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
11643 -
11644 -int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
11645 -                               int_context_data_copy,
11646 -                               struct fman_buffer_prefix_content *
11647 -                               buffer_prefix_content,
11648 -                               struct fman_sp_buf_margins *buf_margins,
11649 -                               struct fman_sp_buffer_offsets *buffer_offsets,
11650 -                               u8 *internal_buf_offset)
11651 -{
11652 -       u32 tmp;
11653 -
11654 -       /* Align start of internal context data to 16 byte */
11655 -       int_context_data_copy->ext_buf_offset = (u16)
11656 -               ((buffer_prefix_content->priv_data_size & (OFFSET_UNITS - 1)) ?
11657 -               ((buffer_prefix_content->priv_data_size + OFFSET_UNITS) &
11658 -                       ~(u16)(OFFSET_UNITS - 1)) :
11659 -               buffer_prefix_content->priv_data_size);
11660 -
11661 -       /* Translate margin and int_context params to FM parameters */
11662 -       /* Initialize with illegal value. Later we'll set legal values. */
11663 -       buffer_offsets->prs_result_offset = (u32)ILLEGAL_BASE;
11664 -       buffer_offsets->time_stamp_offset = (u32)ILLEGAL_BASE;
11665 -       buffer_offsets->hash_result_offset = (u32)ILLEGAL_BASE;
11666 -
11667 -       /* Internally the driver supports 4 options
11668 -        * 1. prsResult/timestamp/hashResult selection (in fact 8 options,
11669 -        * but for simplicity we'll
11670 -        * relate to it as 1).
11671 -        * 2. All IC context (from AD) not including debug.
11672 -        */
11673 -
11674 -       /* This case covers the options under 1 */
11675 -       /* Copy size must be in 16-byte granularity. */
11676 -       int_context_data_copy->size =
11677 -           (u16)((buffer_prefix_content->pass_prs_result ? 32 : 0) +
11678 -                 ((buffer_prefix_content->pass_time_stamp ||
11679 -                 buffer_prefix_content->pass_hash_result) ? 16 : 0));
11680 -
11681 -       /* Align start of internal context data to 16 byte */
11682 -       int_context_data_copy->int_context_offset =
11683 -           (u8)(buffer_prefix_content->pass_prs_result ? 32 :
11684 -                ((buffer_prefix_content->pass_time_stamp ||
11685 -                buffer_prefix_content->pass_hash_result) ? 64 : 0));
11686 -
11687 -       if (buffer_prefix_content->pass_prs_result)
11688 -               buffer_offsets->prs_result_offset =
11689 -                   int_context_data_copy->ext_buf_offset;
11690 -       if (buffer_prefix_content->pass_time_stamp)
11691 -               buffer_offsets->time_stamp_offset =
11692 -                   buffer_prefix_content->pass_prs_result ?
11693 -                   (int_context_data_copy->ext_buf_offset +
11694 -                       sizeof(struct fman_prs_result)) :
11695 -                   int_context_data_copy->ext_buf_offset;
11696 -       if (buffer_prefix_content->pass_hash_result)
11697 -               /* If PR is not requested, whether TS is
11698 -                * requested or not, IC will be copied from TS
11699 -                        */
11700 -               buffer_offsets->hash_result_offset =
11701 -               buffer_prefix_content->pass_prs_result ?
11702 -                       (int_context_data_copy->ext_buf_offset +
11703 -                               sizeof(struct fman_prs_result) + 8) :
11704 -                       int_context_data_copy->ext_buf_offset + 8;
11705 -
11706 -       if (int_context_data_copy->size)
11707 -               buf_margins->start_margins =
11708 -                   (u16)(int_context_data_copy->ext_buf_offset +
11709 -                         int_context_data_copy->size);
11710 -       else
11711 -               /* No Internal Context passing, STartMargin is
11712 -                * immediately after private_info
11713 -                */
11714 -               buf_margins->start_margins =
11715 -                   buffer_prefix_content->priv_data_size;
11716 -
11717 -       /* align data start */
11718 -       tmp = (u32)(buf_margins->start_margins %
11719 -                   buffer_prefix_content->data_align);
11720 -       if (tmp)
11721 -               buf_margins->start_margins +=
11722 -                   (buffer_prefix_content->data_align - tmp);
11723 -       buffer_offsets->data_offset = buf_margins->start_margins;
11724 -
11725 -       return 0;
11726 -}
11727 -EXPORT_SYMBOL(fman_sp_build_buffer_struct);
11728 -
11729 diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
11730 deleted file mode 100644
11731 index 820b7f6..0000000
11732 --- a/drivers/net/ethernet/freescale/fman/fman_sp.h
11733 +++ /dev/null
11734 @@ -1,103 +0,0 @@
11735 -/*
11736 - * Copyright 2008 - 2015 Freescale Semiconductor Inc.
11737 - *
11738 - * Redistribution and use in source and binary forms, with or without
11739 - * modification, are permitted provided that the following conditions are met:
11740 - *     * Redistributions of source code must retain the above copyright
11741 - *      notice, this list of conditions and the following disclaimer.
11742 - *     * Redistributions in binary form must reproduce the above copyright
11743 - *      notice, this list of conditions and the following disclaimer in the
11744 - *      documentation and/or other materials provided with the distribution.
11745 - *     * Neither the name of Freescale Semiconductor nor the
11746 - *      names of its contributors may be used to endorse or promote products
11747 - *      derived from this software without specific prior written permission.
11748 - *
11749 - * ALTERNATIVELY, this software may be distributed under the terms of the
11750 - * GNU General Public License ("GPL") as published by the Free Software
11751 - * Foundation, either version 2 of that License or (at your option) any
11752 - * later version.
11753 - *
11754 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11755 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11756 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11757 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11758 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11759 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11760 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11761 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11762 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11763 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11764 - */
11765 -
11766 -#ifndef __FM_SP_H
11767 -#define __FM_SP_H
11768 -
11769 -#include "fman.h"
11770 -#include <linux/types.h>
11771 -
11772 -#define ILLEGAL_BASE    (~0)
11773 -
11774 -/* defaults */
11775 -#define DFLT_FM_SP_BUFFER_PREFIX_CONTEXT_DATA_ALIGN    64
11776 -
11777 -/* Registers bit fields */
11778 -#define FMAN_SP_EXT_BUF_POOL_EN_COUNTER                0x40000000
11779 -#define FMAN_SP_EXT_BUF_POOL_VALID                     0x80000000
11780 -#define FMAN_SP_EXT_BUF_POOL_BACKUP                    0x20000000
11781 -#define FMAN_SP_DMA_ATTR_WRITE_OPTIMIZE                0x00100000
11782 -#define FMAN_SP_SG_DISABLE                             0x80000000
11783 -
11784 -/* shifts */
11785 -#define FMAN_SP_EXT_BUF_MARG_START_SHIFT               16
11786 -#define FMAN_SP_DMA_ATTR_SWP_SHIFT                     30
11787 -#define FMAN_SP_IC_TO_EXT_SHIFT                        16
11788 -#define FMAN_SP_IC_FROM_INT_SHIFT                      8
11789 -
11790 -/* structure for defining internal context copying */
11791 -struct fman_sp_int_context_data_copy {
11792 -       /* < Offset in External buffer to which internal
11793 -        *  context is copied to (Rx) or taken from (Tx, Op).
11794 -        */
11795 -       u16 ext_buf_offset;
11796 -       /* Offset within internal context to copy from
11797 -        * (Rx) or to copy to (Tx, Op).
11798 -        */
11799 -       u8 int_context_offset;
11800 -       /* Internal offset size to be copied */
11801 -       u16 size;
11802 -};
11803 -
11804 -/*  struct for defining external buffer margins */
11805 -struct fman_sp_buf_margins {
11806 -       /* Number of bytes to be left at the beginning
11807 -        * of the external buffer (must be divisible by 16)
11808 -        */
11809 -       u16 start_margins;
11810 -       /* number of bytes to be left at the end
11811 -        * of the external buffer(must be divisible by 16)
11812 -        */
11813 -       u16 end_margins;
11814 -};
11815 -
11816 -struct fman_sp_buffer_offsets {
11817 -       u32 data_offset;
11818 -       u32 prs_result_offset;
11819 -       u32 time_stamp_offset;
11820 -       u32 hash_result_offset;
11821 -};
11822 -
11823 -int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy
11824 -                               *int_context_data_copy,
11825 -                               struct fman_buffer_prefix_content
11826 -                               *buffer_prefix_content,
11827 -                               struct fman_sp_buf_margins *buf_margins,
11828 -                               struct fman_sp_buffer_offsets
11829 -                               *buffer_offsets,
11830 -                               u8 *internal_buf_offset);
11831 -
11832 -void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
11833 -                                                    *fm_ext_pools,
11834 -                                                    u8 *ordered_array,
11835 -                                                    u16 *sizes_array);
11836 -
11837 -#endif /* __FM_SP_H */
11838 diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
11839 deleted file mode 100644
11840 index 4b0f3a5..0000000
11841 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
11842 +++ /dev/null
11843 @@ -1,783 +0,0 @@
11844 -/*
11845 - * Copyright 2008-2015 Freescale Semiconductor Inc.
11846 - *
11847 - * Redistribution and use in source and binary forms, with or without
11848 - * modification, are permitted provided that the following conditions are met:
11849 - *     * Redistributions of source code must retain the above copyright
11850 - *       notice, this list of conditions and the following disclaimer.
11851 - *     * Redistributions in binary form must reproduce the above copyright
11852 - *       notice, this list of conditions and the following disclaimer in the
11853 - *       documentation and/or other materials provided with the distribution.
11854 - *     * Neither the name of Freescale Semiconductor nor the
11855 - *       names of its contributors may be used to endorse or promote products
11856 - *       derived from this software without specific prior written permission.
11857 - *
11858 - *
11859 - * ALTERNATIVELY, this software may be distributed under the terms of the
11860 - * GNU General Public License ("GPL") as published by the Free Software
11861 - * Foundation, either version 2 of that License or (at your option) any
11862 - * later version.
11863 - *
11864 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
11865 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
11866 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11867 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
11868 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
11869 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
11870 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
11871 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11872 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11873 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11874 - */
11875 -
11876 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11877 -
11878 -#include "fman_tgec.h"
11879 -#include "fman.h"
11880 -
11881 -#include <linux/slab.h>
11882 -#include <linux/bitrev.h>
11883 -#include <linux/io.h>
11884 -#include <linux/crc32.h>
11885 -
11886 -/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
11887 -#define TGEC_TX_IPG_LENGTH_MASK        0x000003ff
11888 -
11889 -/* Command and Configuration Register (COMMAND_CONFIG) */
11890 -#define CMD_CFG_NO_LEN_CHK             0x00020000
11891 -#define CMD_CFG_PAUSE_IGNORE           0x00000100
11892 -#define CMF_CFG_CRC_FWD                        0x00000040
11893 -#define CMD_CFG_PROMIS_EN              0x00000010
11894 -#define CMD_CFG_RX_EN                  0x00000002
11895 -#define CMD_CFG_TX_EN                  0x00000001
11896 -
11897 -/* Interrupt Mask Register (IMASK) */
11898 -#define TGEC_IMASK_MDIO_SCAN_EVENT     0x00010000
11899 -#define TGEC_IMASK_MDIO_CMD_CMPL       0x00008000
11900 -#define TGEC_IMASK_REM_FAULT           0x00004000
11901 -#define TGEC_IMASK_LOC_FAULT           0x00002000
11902 -#define TGEC_IMASK_TX_ECC_ER           0x00001000
11903 -#define TGEC_IMASK_TX_FIFO_UNFL        0x00000800
11904 -#define TGEC_IMASK_TX_FIFO_OVFL        0x00000400
11905 -#define TGEC_IMASK_TX_ER               0x00000200
11906 -#define TGEC_IMASK_RX_FIFO_OVFL        0x00000100
11907 -#define TGEC_IMASK_RX_ECC_ER           0x00000080
11908 -#define TGEC_IMASK_RX_JAB_FRM          0x00000040
11909 -#define TGEC_IMASK_RX_OVRSZ_FRM        0x00000020
11910 -#define TGEC_IMASK_RX_RUNT_FRM         0x00000010
11911 -#define TGEC_IMASK_RX_FRAG_FRM         0x00000008
11912 -#define TGEC_IMASK_RX_LEN_ER           0x00000004
11913 -#define TGEC_IMASK_RX_CRC_ER           0x00000002
11914 -#define TGEC_IMASK_RX_ALIGN_ER         0x00000001
11915 -
11916 -/* Hashtable Control Register (HASHTABLE_CTRL) */
11917 -#define TGEC_HASH_MCAST_SHIFT          23
11918 -#define TGEC_HASH_MCAST_EN             0x00000200
11919 -#define TGEC_HASH_ADR_MSK              0x000001ff
11920 -
11921 -#define DEFAULT_TX_IPG_LENGTH                  12
11922 -#define DEFAULT_MAX_FRAME_LENGTH               0x600
11923 -#define DEFAULT_PAUSE_QUANT                    0xf000
11924 -
11925 -/* number of pattern match registers (entries) */
11926 -#define TGEC_NUM_OF_PADDRS          1
11927 -
11928 -/* Group address bit indication */
11929 -#define GROUP_ADDRESS               0x0000010000000000LL
11930 -
11931 -/* Hash table size (= 32 bits*8 regs) */
11932 -#define TGEC_HASH_TABLE_SIZE             512
11933 -
11934 -/* tGEC memory map */
11935 -struct tgec_regs {
11936 -       u32 tgec_id;            /* 0x000 Controller ID */
11937 -       u32 reserved001[1];     /* 0x004 */
11938 -       u32 command_config;     /* 0x008 Control and configuration */
11939 -       u32 mac_addr_0;         /* 0x00c Lower 32 bits of the MAC adr */
11940 -       u32 mac_addr_1;         /* 0x010 Upper 16 bits of the MAC adr */
11941 -       u32 maxfrm;             /* 0x014 Maximum frame length */
11942 -       u32 pause_quant;        /* 0x018 Pause quanta */
11943 -       u32 rx_fifo_sections;   /* 0x01c  */
11944 -       u32 tx_fifo_sections;   /* 0x020  */
11945 -       u32 rx_fifo_almost_f_e; /* 0x024  */
11946 -       u32 tx_fifo_almost_f_e; /* 0x028  */
11947 -       u32 hashtable_ctrl;     /* 0x02c Hash table control */
11948 -       u32 mdio_cfg_status;    /* 0x030  */
11949 -       u32 mdio_command;       /* 0x034  */
11950 -       u32 mdio_data;          /* 0x038  */
11951 -       u32 mdio_regaddr;       /* 0x03c  */
11952 -       u32 status;             /* 0x040  */
11953 -       u32 tx_ipg_len;         /* 0x044 Transmitter inter-packet-gap */
11954 -       u32 mac_addr_2;         /* 0x048 Lower 32 bits of 2nd MAC adr */
11955 -       u32 mac_addr_3;         /* 0x04c Upper 16 bits of 2nd MAC adr */
11956 -       u32 rx_fifo_ptr_rd;     /* 0x050  */
11957 -       u32 rx_fifo_ptr_wr;     /* 0x054  */
11958 -       u32 tx_fifo_ptr_rd;     /* 0x058  */
11959 -       u32 tx_fifo_ptr_wr;     /* 0x05c  */
11960 -       u32 imask;              /* 0x060 Interrupt mask */
11961 -       u32 ievent;             /* 0x064 Interrupt event */
11962 -       u32 udp_port;           /* 0x068 Defines a UDP Port number */
11963 -       u32 type_1588v2;        /* 0x06c Type field for 1588v2 */
11964 -       u32 reserved070[4];     /* 0x070 */
11965 -       /* 10Ge Statistics Counter */
11966 -       u32 tfrm_u;             /* 80 aFramesTransmittedOK */
11967 -       u32 tfrm_l;             /* 84 aFramesTransmittedOK */
11968 -       u32 rfrm_u;             /* 88 aFramesReceivedOK */
11969 -       u32 rfrm_l;             /* 8c aFramesReceivedOK */
11970 -       u32 rfcs_u;             /* 90 aFrameCheckSequenceErrors */
11971 -       u32 rfcs_l;             /* 94 aFrameCheckSequenceErrors */
11972 -       u32 raln_u;             /* 98 aAlignmentErrors */
11973 -       u32 raln_l;             /* 9c aAlignmentErrors */
11974 -       u32 txpf_u;             /* A0 aPAUSEMACCtrlFramesTransmitted */
11975 -       u32 txpf_l;             /* A4 aPAUSEMACCtrlFramesTransmitted */
11976 -       u32 rxpf_u;             /* A8 aPAUSEMACCtrlFramesReceived */
11977 -       u32 rxpf_l;             /* Ac aPAUSEMACCtrlFramesReceived */
11978 -       u32 rlong_u;            /* B0 aFrameTooLongErrors */
11979 -       u32 rlong_l;            /* B4 aFrameTooLongErrors */
11980 -       u32 rflr_u;             /* B8 aInRangeLengthErrors */
11981 -       u32 rflr_l;             /* Bc aInRangeLengthErrors */
11982 -       u32 tvlan_u;            /* C0 VLANTransmittedOK */
11983 -       u32 tvlan_l;            /* C4 VLANTransmittedOK */
11984 -       u32 rvlan_u;            /* C8 VLANReceivedOK */
11985 -       u32 rvlan_l;            /* Cc VLANReceivedOK */
11986 -       u32 toct_u;             /* D0 if_out_octets */
11987 -       u32 toct_l;             /* D4 if_out_octets */
11988 -       u32 roct_u;             /* D8 if_in_octets */
11989 -       u32 roct_l;             /* Dc if_in_octets */
11990 -       u32 ruca_u;             /* E0 if_in_ucast_pkts */
11991 -       u32 ruca_l;             /* E4 if_in_ucast_pkts */
11992 -       u32 rmca_u;             /* E8 ifInMulticastPkts */
11993 -       u32 rmca_l;             /* Ec ifInMulticastPkts */
11994 -       u32 rbca_u;             /* F0 ifInBroadcastPkts */
11995 -       u32 rbca_l;             /* F4 ifInBroadcastPkts */
11996 -       u32 terr_u;             /* F8 if_out_errors */
11997 -       u32 terr_l;             /* Fc if_out_errors */
11998 -       u32 reserved100[2];     /* 100-108 */
11999 -       u32 tuca_u;             /* 108 if_out_ucast_pkts */
12000 -       u32 tuca_l;             /* 10c if_out_ucast_pkts */
12001 -       u32 tmca_u;             /* 110 ifOutMulticastPkts */
12002 -       u32 tmca_l;             /* 114 ifOutMulticastPkts */
12003 -       u32 tbca_u;             /* 118 ifOutBroadcastPkts */
12004 -       u32 tbca_l;             /* 11c ifOutBroadcastPkts */
12005 -       u32 rdrp_u;             /* 120 etherStatsDropEvents */
12006 -       u32 rdrp_l;             /* 124 etherStatsDropEvents */
12007 -       u32 reoct_u;            /* 128 etherStatsOctets */
12008 -       u32 reoct_l;            /* 12c etherStatsOctets */
12009 -       u32 rpkt_u;             /* 130 etherStatsPkts */
12010 -       u32 rpkt_l;             /* 134 etherStatsPkts */
12011 -       u32 trund_u;            /* 138 etherStatsUndersizePkts */
12012 -       u32 trund_l;            /* 13c etherStatsUndersizePkts */
12013 -       u32 r64_u;              /* 140 etherStatsPkts64Octets */
12014 -       u32 r64_l;              /* 144 etherStatsPkts64Octets */
12015 -       u32 r127_u;             /* 148 etherStatsPkts65to127Octets */
12016 -       u32 r127_l;             /* 14c etherStatsPkts65to127Octets */
12017 -       u32 r255_u;             /* 150 etherStatsPkts128to255Octets */
12018 -       u32 r255_l;             /* 154 etherStatsPkts128to255Octets */
12019 -       u32 r511_u;             /* 158 etherStatsPkts256to511Octets */
12020 -       u32 r511_l;             /* 15c etherStatsPkts256to511Octets */
12021 -       u32 r1023_u;            /* 160 etherStatsPkts512to1023Octets */
12022 -       u32 r1023_l;            /* 164 etherStatsPkts512to1023Octets */
12023 -       u32 r1518_u;            /* 168 etherStatsPkts1024to1518Octets */
12024 -       u32 r1518_l;            /* 16c etherStatsPkts1024to1518Octets */
12025 -       u32 r1519x_u;           /* 170 etherStatsPkts1519toX */
12026 -       u32 r1519x_l;           /* 174 etherStatsPkts1519toX */
12027 -       u32 trovr_u;            /* 178 etherStatsOversizePkts */
12028 -       u32 trovr_l;            /* 17c etherStatsOversizePkts */
12029 -       u32 trjbr_u;            /* 180 etherStatsJabbers */
12030 -       u32 trjbr_l;            /* 184 etherStatsJabbers */
12031 -       u32 trfrg_u;            /* 188 etherStatsFragments */
12032 -       u32 trfrg_l;            /* 18C etherStatsFragments */
12033 -       u32 rerr_u;             /* 190 if_in_errors */
12034 -       u32 rerr_l;             /* 194 if_in_errors */
12035 -};
12036 -
12037 -struct tgec_cfg {
12038 -       bool pause_ignore;
12039 -       bool promiscuous_mode_enable;
12040 -       u16 max_frame_length;
12041 -       u16 pause_quant;
12042 -       u32 tx_ipg_length;
12043 -};
12044 -
12045 -struct fman_mac {
12046 -       /* Pointer to the memory mapped registers. */
12047 -       struct tgec_regs __iomem *regs;
12048 -       /* MAC address of device; */
12049 -       u64 addr;
12050 -       u16 max_speed;
12051 -       void *dev_id; /* device cookie used by the exception cbs */
12052 -       fman_mac_exception_cb *exception_cb;
12053 -       fman_mac_exception_cb *event_cb;
12054 -       /* pointer to driver's global address hash table  */
12055 -       struct eth_hash_t *multicast_addr_hash;
12056 -       /* pointer to driver's individual address hash table  */
12057 -       struct eth_hash_t *unicast_addr_hash;
12058 -       u8 mac_id;
12059 -       u32 exceptions;
12060 -       struct tgec_cfg *cfg;
12061 -       void *fm;
12062 -       struct fman_rev_info fm_rev_info;
12063 -};
12064 -
12065 -static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
12066 -{
12067 -       u32 tmp0, tmp1;
12068 -
12069 -       tmp0 = (u32)(adr[0] | adr[1] << 8 | adr[2] << 16 | adr[3] << 24);
12070 -       tmp1 = (u32)(adr[4] | adr[5] << 8);
12071 -       iowrite32be(tmp0, &regs->mac_addr_0);
12072 -       iowrite32be(tmp1, &regs->mac_addr_1);
12073 -}
12074 -
12075 -static void set_dflts(struct tgec_cfg *cfg)
12076 -{
12077 -       cfg->promiscuous_mode_enable = false;
12078 -       cfg->pause_ignore = false;
12079 -       cfg->tx_ipg_length = DEFAULT_TX_IPG_LENGTH;
12080 -       cfg->max_frame_length = DEFAULT_MAX_FRAME_LENGTH;
12081 -       cfg->pause_quant = DEFAULT_PAUSE_QUANT;
12082 -}
12083 -
12084 -static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
12085 -               u32 exception_mask)
12086 -{
12087 -       u32 tmp;
12088 -
12089 -       /* Config */
12090 -       tmp = CMF_CFG_CRC_FWD;
12091 -       if (cfg->promiscuous_mode_enable)
12092 -               tmp |= CMD_CFG_PROMIS_EN;
12093 -       if (cfg->pause_ignore)
12094 -               tmp |= CMD_CFG_PAUSE_IGNORE;
12095 -       /* Payload length check disable */
12096 -       tmp |= CMD_CFG_NO_LEN_CHK;
12097 -       iowrite32be(tmp, &regs->command_config);
12098 -
12099 -       /* Max Frame Length */
12100 -       iowrite32be((u32)cfg->max_frame_length, &regs->maxfrm);
12101 -       /* Pause Time */
12102 -       iowrite32be(cfg->pause_quant, &regs->pause_quant);
12103 -
12104 -       /* clear all pending events and set-up interrupts */
12105 -       iowrite32be(0xffffffff, &regs->ievent);
12106 -       iowrite32be(ioread32be(&regs->imask) | exception_mask, &regs->imask);
12107 -
12108 -       return 0;
12109 -}
12110 -
12111 -static int check_init_parameters(struct fman_mac *tgec)
12112 -{
12113 -       if (tgec->max_speed < SPEED_10000) {
12114 -               pr_err("10G MAC driver only support 10G speed\n");
12115 -               return -EINVAL;
12116 -       }
12117 -       if (tgec->addr == 0) {
12118 -               pr_err("Ethernet 10G MAC Must have valid MAC Address\n");
12119 -               return -EINVAL;
12120 -       }
12121 -       if (!tgec->exception_cb) {
12122 -               pr_err("uninitialized exception_cb\n");
12123 -               return -EINVAL;
12124 -       }
12125 -       if (!tgec->event_cb) {
12126 -               pr_err("uninitialized event_cb\n");
12127 -               return -EINVAL;
12128 -       }
12129 -
12130 -       return 0;
12131 -}
12132 -
12133 -static int get_exception_flag(enum fman_mac_exceptions exception)
12134 -{
12135 -       u32 bit_mask;
12136 -
12137 -       switch (exception) {
12138 -       case FM_MAC_EX_10G_MDIO_SCAN_EVENT:
12139 -               bit_mask = TGEC_IMASK_MDIO_SCAN_EVENT;
12140 -               break;
12141 -       case FM_MAC_EX_10G_MDIO_CMD_CMPL:
12142 -               bit_mask = TGEC_IMASK_MDIO_CMD_CMPL;
12143 -               break;
12144 -       case FM_MAC_EX_10G_REM_FAULT:
12145 -               bit_mask = TGEC_IMASK_REM_FAULT;
12146 -               break;
12147 -       case FM_MAC_EX_10G_LOC_FAULT:
12148 -               bit_mask = TGEC_IMASK_LOC_FAULT;
12149 -               break;
12150 -       case FM_MAC_EX_10G_TX_ECC_ER:
12151 -               bit_mask = TGEC_IMASK_TX_ECC_ER;
12152 -               break;
12153 -       case FM_MAC_EX_10G_TX_FIFO_UNFL:
12154 -               bit_mask = TGEC_IMASK_TX_FIFO_UNFL;
12155 -               break;
12156 -       case FM_MAC_EX_10G_TX_FIFO_OVFL:
12157 -               bit_mask = TGEC_IMASK_TX_FIFO_OVFL;
12158 -               break;
12159 -       case FM_MAC_EX_10G_TX_ER:
12160 -               bit_mask = TGEC_IMASK_TX_ER;
12161 -               break;
12162 -       case FM_MAC_EX_10G_RX_FIFO_OVFL:
12163 -               bit_mask = TGEC_IMASK_RX_FIFO_OVFL;
12164 -               break;
12165 -       case FM_MAC_EX_10G_RX_ECC_ER:
12166 -               bit_mask = TGEC_IMASK_RX_ECC_ER;
12167 -               break;
12168 -       case FM_MAC_EX_10G_RX_JAB_FRM:
12169 -               bit_mask = TGEC_IMASK_RX_JAB_FRM;
12170 -               break;
12171 -       case FM_MAC_EX_10G_RX_OVRSZ_FRM:
12172 -               bit_mask = TGEC_IMASK_RX_OVRSZ_FRM;
12173 -               break;
12174 -       case FM_MAC_EX_10G_RX_RUNT_FRM:
12175 -               bit_mask = TGEC_IMASK_RX_RUNT_FRM;
12176 -               break;
12177 -       case FM_MAC_EX_10G_RX_FRAG_FRM:
12178 -               bit_mask = TGEC_IMASK_RX_FRAG_FRM;
12179 -               break;
12180 -       case FM_MAC_EX_10G_RX_LEN_ER:
12181 -               bit_mask = TGEC_IMASK_RX_LEN_ER;
12182 -               break;
12183 -       case FM_MAC_EX_10G_RX_CRC_ER:
12184 -               bit_mask = TGEC_IMASK_RX_CRC_ER;
12185 -               break;
12186 -       case FM_MAC_EX_10G_RX_ALIGN_ER:
12187 -               bit_mask = TGEC_IMASK_RX_ALIGN_ER;
12188 -               break;
12189 -       default:
12190 -               bit_mask = 0;
12191 -               break;
12192 -       }
12193 -
12194 -       return bit_mask;
12195 -}
12196 -
12197 -static void tgec_err_exception(void *handle)
12198 -{
12199 -       struct fman_mac *tgec = (struct fman_mac *)handle;
12200 -       struct tgec_regs __iomem *regs = tgec->regs;
12201 -       u32 event;
12202 -
12203 -       /* do not handle MDIO events */
12204 -       event = ioread32be(&regs->ievent) &
12205 -                          ~(TGEC_IMASK_MDIO_SCAN_EVENT |
12206 -                          TGEC_IMASK_MDIO_CMD_CMPL);
12207 -
12208 -       event &= ioread32be(&regs->imask);
12209 -
12210 -       iowrite32be(event, &regs->ievent);
12211 -
12212 -       if (event & TGEC_IMASK_REM_FAULT)
12213 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_REM_FAULT);
12214 -       if (event & TGEC_IMASK_LOC_FAULT)
12215 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_LOC_FAULT);
12216 -       if (event & TGEC_IMASK_TX_ECC_ER)
12217 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ECC_ER);
12218 -       if (event & TGEC_IMASK_TX_FIFO_UNFL)
12219 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_UNFL);
12220 -       if (event & TGEC_IMASK_TX_FIFO_OVFL)
12221 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_FIFO_OVFL);
12222 -       if (event & TGEC_IMASK_TX_ER)
12223 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_TX_ER);
12224 -       if (event & TGEC_IMASK_RX_FIFO_OVFL)
12225 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FIFO_OVFL);
12226 -       if (event & TGEC_IMASK_RX_ECC_ER)
12227 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ECC_ER);
12228 -       if (event & TGEC_IMASK_RX_JAB_FRM)
12229 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_JAB_FRM);
12230 -       if (event & TGEC_IMASK_RX_OVRSZ_FRM)
12231 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_OVRSZ_FRM);
12232 -       if (event & TGEC_IMASK_RX_RUNT_FRM)
12233 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_RUNT_FRM);
12234 -       if (event & TGEC_IMASK_RX_FRAG_FRM)
12235 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_FRAG_FRM);
12236 -       if (event & TGEC_IMASK_RX_LEN_ER)
12237 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_LEN_ER);
12238 -       if (event & TGEC_IMASK_RX_CRC_ER)
12239 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_CRC_ER);
12240 -       if (event & TGEC_IMASK_RX_ALIGN_ER)
12241 -               tgec->exception_cb(tgec->dev_id, FM_MAC_EX_10G_RX_ALIGN_ER);
12242 -}
12243 -
12244 -static void free_init_resources(struct fman_mac *tgec)
12245 -{
12246 -       fman_unregister_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
12247 -                            FMAN_INTR_TYPE_ERR);
12248 -
12249 -       /* release the driver's group hash table */
12250 -       free_hash_table(tgec->multicast_addr_hash);
12251 -       tgec->multicast_addr_hash = NULL;
12252 -
12253 -       /* release the driver's individual hash table */
12254 -       free_hash_table(tgec->unicast_addr_hash);
12255 -       tgec->unicast_addr_hash = NULL;
12256 -}
12257 -
12258 -static bool is_init_done(struct tgec_cfg *cfg)
12259 -{
12260 -       /* Checks if tGEC driver parameters were initialized */
12261 -       if (!cfg)
12262 -               return true;
12263 -
12264 -       return false;
12265 -}
12266 -
12267 -int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
12268 -{
12269 -       struct tgec_regs __iomem *regs = tgec->regs;
12270 -       u32 tmp;
12271 -
12272 -       if (!is_init_done(tgec->cfg))
12273 -               return -EINVAL;
12274 -
12275 -       tmp = ioread32be(&regs->command_config);
12276 -       if (mode & COMM_MODE_RX)
12277 -               tmp |= CMD_CFG_RX_EN;
12278 -       if (mode & COMM_MODE_TX)
12279 -               tmp |= CMD_CFG_TX_EN;
12280 -       iowrite32be(tmp, &regs->command_config);
12281 -
12282 -       return 0;
12283 -}
12284 -
12285 -int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
12286 -{
12287 -       struct tgec_regs __iomem *regs = tgec->regs;
12288 -       u32 tmp;
12289 -
12290 -       if (!is_init_done(tgec->cfg))
12291 -               return -EINVAL;
12292 -
12293 -       tmp = ioread32be(&regs->command_config);
12294 -       if (mode & COMM_MODE_RX)
12295 -               tmp &= ~CMD_CFG_RX_EN;
12296 -       if (mode & COMM_MODE_TX)
12297 -               tmp &= ~CMD_CFG_TX_EN;
12298 -       iowrite32be(tmp, &regs->command_config);
12299 -
12300 -       return 0;
12301 -}
12302 -
12303 -int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
12304 -{
12305 -       struct tgec_regs __iomem *regs = tgec->regs;
12306 -       u32 tmp;
12307 -
12308 -       if (!is_init_done(tgec->cfg))
12309 -               return -EINVAL;
12310 -
12311 -       tmp = ioread32be(&regs->command_config);
12312 -       if (new_val)
12313 -               tmp |= CMD_CFG_PROMIS_EN;
12314 -       else
12315 -               tmp &= ~CMD_CFG_PROMIS_EN;
12316 -       iowrite32be(tmp, &regs->command_config);
12317 -
12318 -       return 0;
12319 -}
12320 -
12321 -int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
12322 -{
12323 -       if (is_init_done(tgec->cfg))
12324 -               return -EINVAL;
12325 -
12326 -       tgec->cfg->max_frame_length = new_val;
12327 -
12328 -       return 0;
12329 -}
12330 -
12331 -int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
12332 -                            u16 pause_time, u16 __maybe_unused thresh_time)
12333 -{
12334 -       struct tgec_regs __iomem *regs = tgec->regs;
12335 -
12336 -       if (!is_init_done(tgec->cfg))
12337 -               return -EINVAL;
12338 -
12339 -       iowrite32be((u32)pause_time, &regs->pause_quant);
12340 -
12341 -       return 0;
12342 -}
12343 -
12344 -int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
12345 -{
12346 -       struct tgec_regs __iomem *regs = tgec->regs;
12347 -       u32 tmp;
12348 -
12349 -       if (!is_init_done(tgec->cfg))
12350 -               return -EINVAL;
12351 -
12352 -       tmp = ioread32be(&regs->command_config);
12353 -       if (!en)
12354 -               tmp |= CMD_CFG_PAUSE_IGNORE;
12355 -       else
12356 -               tmp &= ~CMD_CFG_PAUSE_IGNORE;
12357 -       iowrite32be(tmp, &regs->command_config);
12358 -
12359 -       return 0;
12360 -}
12361 -
12362 -int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
12363 -{
12364 -       if (!is_init_done(tgec->cfg))
12365 -               return -EINVAL;
12366 -
12367 -       tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
12368 -       set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
12369 -
12370 -       return 0;
12371 -}
12372 -
12373 -int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
12374 -{
12375 -       struct tgec_regs __iomem *regs = tgec->regs;
12376 -       struct eth_hash_entry *hash_entry;
12377 -       u32 crc = 0xFFFFFFFF, hash;
12378 -       u64 addr;
12379 -
12380 -       if (!is_init_done(tgec->cfg))
12381 -               return -EINVAL;
12382 -
12383 -       addr = ENET_ADDR_TO_UINT64(*eth_addr);
12384 -
12385 -       if (!(addr & GROUP_ADDRESS)) {
12386 -               /* Unicast addresses not supported in hash */
12387 -               pr_err("Unicast Address\n");
12388 -               return -EINVAL;
12389 -       }
12390 -       /* CRC calculation */
12391 -       crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
12392 -       crc = bitrev32(crc);
12393 -       /* Take 9 MSB bits */
12394 -       hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
12395 -
12396 -       /* Create element to be added to the driver hash table */
12397 -       hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
12398 -       if (!hash_entry)
12399 -               return -ENOMEM;
12400 -       hash_entry->addr = addr;
12401 -       INIT_LIST_HEAD(&hash_entry->node);
12402 -
12403 -       list_add_tail(&hash_entry->node,
12404 -                     &tgec->multicast_addr_hash->lsts[hash]);
12405 -       iowrite32be((hash | TGEC_HASH_MCAST_EN), &regs->hashtable_ctrl);
12406 -
12407 -       return 0;
12408 -}
12409 -
12410 -int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
12411 -{
12412 -       struct tgec_regs __iomem *regs = tgec->regs;
12413 -       struct eth_hash_entry *hash_entry = NULL;
12414 -       struct list_head *pos;
12415 -       u32 crc = 0xFFFFFFFF, hash;
12416 -       u64 addr;
12417 -
12418 -       if (!is_init_done(tgec->cfg))
12419 -               return -EINVAL;
12420 -
12421 -       addr = ((*(u64 *)eth_addr) >> 16);
12422 -
12423 -       /* CRC calculation */
12424 -       crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
12425 -       crc = bitrev32(crc);
12426 -       /* Take 9 MSB bits */
12427 -       hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
12428 -
12429 -       list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
12430 -               hash_entry = ETH_HASH_ENTRY_OBJ(pos);
12431 -               if (hash_entry->addr == addr) {
12432 -                       list_del_init(&hash_entry->node);
12433 -                       kfree(hash_entry);
12434 -                       break;
12435 -               }
12436 -       }
12437 -       if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
12438 -               iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
12439 -                           &regs->hashtable_ctrl);
12440 -
12441 -       return 0;
12442 -}
12443 -
12444 -int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
12445 -{
12446 -       struct tgec_regs __iomem *regs = tgec->regs;
12447 -
12448 -       if (!is_init_done(tgec->cfg))
12449 -               return -EINVAL;
12450 -
12451 -       *mac_version = ioread32be(&regs->tgec_id);
12452 -
12453 -       return 0;
12454 -}
12455 -
12456 -int tgec_set_exception(struct fman_mac *tgec,
12457 -                      enum fman_mac_exceptions exception, bool enable)
12458 -{
12459 -       struct tgec_regs __iomem *regs = tgec->regs;
12460 -       u32 bit_mask = 0;
12461 -
12462 -       if (!is_init_done(tgec->cfg))
12463 -               return -EINVAL;
12464 -
12465 -       bit_mask = get_exception_flag(exception);
12466 -       if (bit_mask) {
12467 -               if (enable)
12468 -                       tgec->exceptions |= bit_mask;
12469 -               else
12470 -                       tgec->exceptions &= ~bit_mask;
12471 -       } else {
12472 -               pr_err("Undefined exception\n");
12473 -               return -EINVAL;
12474 -       }
12475 -       if (enable)
12476 -               iowrite32be(ioread32be(&regs->imask) | bit_mask, &regs->imask);
12477 -       else
12478 -               iowrite32be(ioread32be(&regs->imask) & ~bit_mask, &regs->imask);
12479 -
12480 -       return 0;
12481 -}
12482 -
12483 -int tgec_init(struct fman_mac *tgec)
12484 -{
12485 -       struct tgec_cfg *cfg;
12486 -       enet_addr_t eth_addr;
12487 -       int err;
12488 -
12489 -       if (is_init_done(tgec->cfg))
12490 -               return -EINVAL;
12491 -
12492 -       if (DEFAULT_RESET_ON_INIT &&
12493 -           (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
12494 -               pr_err("Can't reset MAC!\n");
12495 -               return -EINVAL;
12496 -       }
12497 -
12498 -       err = check_init_parameters(tgec);
12499 -       if (err)
12500 -               return err;
12501 -
12502 -       cfg = tgec->cfg;
12503 -
12504 -       MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
12505 -       set_mac_address(tgec->regs, (u8 *)eth_addr);
12506 -
12507 -       /* interrupts */
12508 -       /* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
12509 -       if (tgec->fm_rev_info.major <= 2)
12510 -               tgec->exceptions &= ~(TGEC_IMASK_REM_FAULT |
12511 -                                     TGEC_IMASK_LOC_FAULT);
12512 -
12513 -       err = init(tgec->regs, cfg, tgec->exceptions);
12514 -       if (err) {
12515 -               free_init_resources(tgec);
12516 -               pr_err("TGEC version doesn't support this i/f mode\n");
12517 -               return err;
12518 -       }
12519 -
12520 -       /* Max Frame Length */
12521 -       err = fman_set_mac_max_frame(tgec->fm, tgec->mac_id,
12522 -                                    cfg->max_frame_length);
12523 -       if (err) {
12524 -               pr_err("Setting max frame length FAILED\n");
12525 -               free_init_resources(tgec);
12526 -               return -EINVAL;
12527 -       }
12528 -
12529 -       /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 Errata workaround */
12530 -       if (tgec->fm_rev_info.major == 2) {
12531 -               struct tgec_regs __iomem *regs = tgec->regs;
12532 -               u32 tmp;
12533 -
12534 -               /* restore the default tx ipg Length */
12535 -               tmp = (ioread32be(&regs->tx_ipg_len) &
12536 -                      ~TGEC_TX_IPG_LENGTH_MASK) | 12;
12537 -
12538 -               iowrite32be(tmp, &regs->tx_ipg_len);
12539 -       }
12540 -
12541 -       tgec->multicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
12542 -       if (!tgec->multicast_addr_hash) {
12543 -               free_init_resources(tgec);
12544 -               pr_err("allocation hash table is FAILED\n");
12545 -               return -ENOMEM;
12546 -       }
12547 -
12548 -       tgec->unicast_addr_hash = alloc_hash_table(TGEC_HASH_TABLE_SIZE);
12549 -       if (!tgec->unicast_addr_hash) {
12550 -               free_init_resources(tgec);
12551 -               pr_err("allocation hash table is FAILED\n");
12552 -               return -ENOMEM;
12553 -       }
12554 -
12555 -       fman_register_intr(tgec->fm, FMAN_MOD_MAC, tgec->mac_id,
12556 -                          FMAN_INTR_TYPE_ERR, tgec_err_exception, tgec);
12557 -
12558 -       kfree(cfg);
12559 -       tgec->cfg = NULL;
12560 -
12561 -       return 0;
12562 -}
12563 -
12564 -int tgec_free(struct fman_mac *tgec)
12565 -{
12566 -       free_init_resources(tgec);
12567 -
12568 -       kfree(tgec->cfg);
12569 -       kfree(tgec);
12570 -
12571 -       return 0;
12572 -}
12573 -
12574 -struct fman_mac *tgec_config(struct fman_mac_params *params)
12575 -{
12576 -       struct fman_mac *tgec;
12577 -       struct tgec_cfg *cfg;
12578 -       void __iomem *base_addr;
12579 -
12580 -       base_addr = params->base_addr;
12581 -       /* allocate memory for the UCC GETH data structure. */
12582 -       tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
12583 -       if (!tgec)
12584 -               return NULL;
12585 -
12586 -       /* allocate memory for the 10G MAC driver parameters data structure. */
12587 -       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
12588 -       if (!cfg) {
12589 -               tgec_free(tgec);
12590 -               return NULL;
12591 -       }
12592 -
12593 -       /* Plant parameter structure pointer */
12594 -       tgec->cfg = cfg;
12595 -
12596 -       set_dflts(cfg);
12597 -
12598 -       tgec->regs = base_addr;
12599 -       tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
12600 -       tgec->max_speed = params->max_speed;
12601 -       tgec->mac_id = params->mac_id;
12602 -       tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT  |
12603 -                           TGEC_IMASK_REM_FAULT        |
12604 -                           TGEC_IMASK_LOC_FAULT        |
12605 -                           TGEC_IMASK_TX_ECC_ER        |
12606 -                           TGEC_IMASK_TX_FIFO_UNFL     |
12607 -                           TGEC_IMASK_TX_FIFO_OVFL     |
12608 -                           TGEC_IMASK_TX_ER            |
12609 -                           TGEC_IMASK_RX_FIFO_OVFL     |
12610 -                           TGEC_IMASK_RX_ECC_ER        |
12611 -                           TGEC_IMASK_RX_JAB_FRM       |
12612 -                           TGEC_IMASK_RX_OVRSZ_FRM     |
12613 -                           TGEC_IMASK_RX_RUNT_FRM      |
12614 -                           TGEC_IMASK_RX_FRAG_FRM      |
12615 -                           TGEC_IMASK_RX_CRC_ER        |
12616 -                           TGEC_IMASK_RX_ALIGN_ER);
12617 -       tgec->exception_cb = params->exception_cb;
12618 -       tgec->event_cb = params->event_cb;
12619 -       tgec->dev_id = params->dev_id;
12620 -       tgec->fm = params->fm;
12621 -
12622 -       /* Save FMan revision */
12623 -       fman_get_revision(tgec->fm, &tgec->fm_rev_info);
12624 -
12625 -       return tgec;
12626 -}
12627 diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
12628 deleted file mode 100644
12629 index 514bba9..0000000
12630 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
12631 +++ /dev/null
12632 @@ -1,55 +0,0 @@
12633 -/*
12634 - * Copyright 2008-2015 Freescale Semiconductor Inc.
12635 - *
12636 - * Redistribution and use in source and binary forms, with or without
12637 - * modification, are permitted provided that the following conditions are met:
12638 - *     * Redistributions of source code must retain the above copyright
12639 - *       notice, this list of conditions and the following disclaimer.
12640 - *     * Redistributions in binary form must reproduce the above copyright
12641 - *       notice, this list of conditions and the following disclaimer in the
12642 - *       documentation and/or other materials provided with the distribution.
12643 - *     * Neither the name of Freescale Semiconductor nor the
12644 - *       names of its contributors may be used to endorse or promote products
12645 - *       derived from this software without specific prior written permission.
12646 - *
12647 - *
12648 - * ALTERNATIVELY, this software may be distributed under the terms of the
12649 - * GNU General Public License ("GPL") as published by the Free Software
12650 - * Foundation, either version 2 of that License or (at your option) any
12651 - * later version.
12652 - *
12653 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12654 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12655 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12656 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12657 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12658 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12659 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12660 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12661 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12662 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12663 - */
12664 -
12665 -#ifndef __TGEC_H
12666 -#define __TGEC_H
12667 -
12668 -#include "fman_mac.h"
12669 -
12670 -struct fman_mac *tgec_config(struct fman_mac_params *params);
12671 -int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
12672 -int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
12673 -int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
12674 -int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
12675 -int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
12676 -int tgec_init(struct fman_mac *tgec);
12677 -int tgec_free(struct fman_mac *tgec);
12678 -int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
12679 -int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
12680 -                            u16 pause_time, u16 thresh_time);
12681 -int tgec_set_exception(struct fman_mac *tgec,
12682 -                      enum fman_mac_exceptions exception, bool enable);
12683 -int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
12684 -int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
12685 -int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
12686 -
12687 -#endif /* __TGEC_H */
12688 diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
12689 deleted file mode 100644
12690 index 736db9d..0000000
12691 --- a/drivers/net/ethernet/freescale/fman/mac.c
12692 +++ /dev/null
12693 @@ -1,950 +0,0 @@
12694 -/* Copyright 2008-2015 Freescale Semiconductor, Inc.
12695 - *
12696 - * Redistribution and use in source and binary forms, with or without
12697 - * modification, are permitted provided that the following conditions are met:
12698 - *     * Redistributions of source code must retain the above copyright
12699 - *      notice, this list of conditions and the following disclaimer.
12700 - *     * Redistributions in binary form must reproduce the above copyright
12701 - *      notice, this list of conditions and the following disclaimer in the
12702 - *      documentation and/or other materials provided with the distribution.
12703 - *     * Neither the name of Freescale Semiconductor nor the
12704 - *      names of its contributors may be used to endorse or promote products
12705 - *      derived from this software without specific prior written permission.
12706 - *
12707 - *
12708 - * ALTERNATIVELY, this software may be distributed under the terms of the
12709 - * GNU General Public License ("GPL") as published by the Free Software
12710 - * Foundation, either version 2 of that License or (at your option) any
12711 - * later version.
12712 - *
12713 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
12714 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
12715 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
12716 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
12717 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
12718 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
12719 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
12720 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12721 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
12722 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12723 - */
12724 -
12725 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12726 -
12727 -#include <linux/init.h>
12728 -#include <linux/module.h>
12729 -#include <linux/of_address.h>
12730 -#include <linux/of_platform.h>
12731 -#include <linux/of_net.h>
12732 -#include <linux/of_mdio.h>
12733 -#include <linux/device.h>
12734 -#include <linux/phy.h>
12735 -#include <linux/netdevice.h>
12736 -#include <linux/phy_fixed.h>
12737 -#include <linux/etherdevice.h>
12738 -#include <linux/libfdt_env.h>
12739 -
12740 -#include "mac.h"
12741 -#include "fman_mac.h"
12742 -#include "fman_dtsec.h"
12743 -#include "fman_tgec.h"
12744 -#include "fman_memac.h"
12745 -
12746 -MODULE_LICENSE("Dual BSD/GPL");
12747 -MODULE_DESCRIPTION("FSL FMan MAC API based driver");
12748 -
12749 -struct mac_priv_s {
12750 -       struct device                   *dev;
12751 -       void __iomem                    *vaddr;
12752 -       u8                              cell_index;
12753 -       phy_interface_t                 phy_if;
12754 -       struct fman                     *fman;
12755 -       struct device_node              *phy_node;
12756 -       struct device_node              *internal_phy_node;
12757 -       /* List of multicast addresses */
12758 -       struct list_head                mc_addr_list;
12759 -       struct platform_device          *eth_dev;
12760 -       struct fixed_phy_status         *fixed_link;
12761 -       u16                             speed;
12762 -       u16                             max_speed;
12763 -
12764 -       int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
12765 -       int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
12766 -};
12767 -
12768 -struct mac_address {
12769 -       u8 addr[ETH_ALEN];
12770 -       struct list_head list;
12771 -};
12772 -
12773 -static void mac_exception(void *handle, enum fman_mac_exceptions ex)
12774 -{
12775 -       struct mac_device       *mac_dev;
12776 -       struct mac_priv_s       *priv;
12777 -
12778 -       mac_dev = handle;
12779 -       priv = mac_dev->priv;
12780 -
12781 -       if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
12782 -               /* don't flag RX FIFO after the first */
12783 -               mac_dev->set_exception(mac_dev->fman_mac,
12784 -                                      FM_MAC_EX_10G_RX_FIFO_OVFL, false);
12785 -               dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
12786 -       }
12787 -
12788 -       dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
12789 -               __func__, ex);
12790 -}
12791 -
12792 -static void set_fman_mac_params(struct mac_device *mac_dev,
12793 -                               struct fman_mac_params *params)
12794 -{
12795 -       struct mac_priv_s *priv = mac_dev->priv;
12796 -
12797 -       params->base_addr = (typeof(params->base_addr))
12798 -               devm_ioremap(priv->dev, mac_dev->res->start,
12799 -                            resource_size(mac_dev->res));
12800 -       memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
12801 -       params->max_speed       = priv->max_speed;
12802 -       params->phy_if          = priv->phy_if;
12803 -       params->basex_if        = false;
12804 -       params->mac_id          = priv->cell_index;
12805 -       params->fm              = (void *)priv->fman;
12806 -       params->exception_cb    = mac_exception;
12807 -       params->event_cb        = mac_exception;
12808 -       params->dev_id          = mac_dev;
12809 -       params->internal_phy_node = priv->internal_phy_node;
12810 -}
12811 -
12812 -static int tgec_initialization(struct mac_device *mac_dev)
12813 -{
12814 -       int err;
12815 -       struct mac_priv_s       *priv;
12816 -       struct fman_mac_params  params;
12817 -       u32                     version;
12818 -
12819 -       priv = mac_dev->priv;
12820 -
12821 -       set_fman_mac_params(mac_dev, &params);
12822 -
12823 -       mac_dev->fman_mac = tgec_config(&params);
12824 -       if (!mac_dev->fman_mac) {
12825 -               err = -EINVAL;
12826 -               goto _return;
12827 -       }
12828 -
12829 -       err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
12830 -       if (err < 0)
12831 -               goto _return_fm_mac_free;
12832 -
12833 -       err = tgec_init(mac_dev->fman_mac);
12834 -       if (err < 0)
12835 -               goto _return_fm_mac_free;
12836 -
12837 -       /* For 10G MAC, disable Tx ECC exception */
12838 -       err = mac_dev->set_exception(mac_dev->fman_mac,
12839 -                                    FM_MAC_EX_10G_TX_ECC_ER, false);
12840 -       if (err < 0)
12841 -               goto _return_fm_mac_free;
12842 -
12843 -       err = tgec_get_version(mac_dev->fman_mac, &version);
12844 -       if (err < 0)
12845 -               goto _return_fm_mac_free;
12846 -
12847 -       dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
12848 -
12849 -       goto _return;
12850 -
12851 -_return_fm_mac_free:
12852 -       tgec_free(mac_dev->fman_mac);
12853 -
12854 -_return:
12855 -       return err;
12856 -}
12857 -
12858 -static int dtsec_initialization(struct mac_device *mac_dev)
12859 -{
12860 -       int                     err;
12861 -       struct mac_priv_s       *priv;
12862 -       struct fman_mac_params  params;
12863 -       u32                     version;
12864 -
12865 -       priv = mac_dev->priv;
12866 -
12867 -       set_fman_mac_params(mac_dev, &params);
12868 -
12869 -       mac_dev->fman_mac = dtsec_config(&params);
12870 -       if (!mac_dev->fman_mac) {
12871 -               err = -EINVAL;
12872 -               goto _return;
12873 -       }
12874 -
12875 -       err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
12876 -       if (err < 0)
12877 -               goto _return_fm_mac_free;
12878 -
12879 -       err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
12880 -       if (err < 0)
12881 -               goto _return_fm_mac_free;
12882 -
12883 -       err = dtsec_init(mac_dev->fman_mac);
12884 -       if (err < 0)
12885 -               goto _return_fm_mac_free;
12886 -
12887 -       /* For 1G MAC, disable by default the MIB counters overflow interrupt */
12888 -       err = mac_dev->set_exception(mac_dev->fman_mac,
12889 -                                    FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
12890 -       if (err < 0)
12891 -               goto _return_fm_mac_free;
12892 -
12893 -       err = dtsec_get_version(mac_dev->fman_mac, &version);
12894 -       if (err < 0)
12895 -               goto _return_fm_mac_free;
12896 -
12897 -       dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
12898 -
12899 -       goto _return;
12900 -
12901 -_return_fm_mac_free:
12902 -       dtsec_free(mac_dev->fman_mac);
12903 -
12904 -_return:
12905 -       return err;
12906 -}
12907 -
12908 -static int memac_initialization(struct mac_device *mac_dev)
12909 -{
12910 -       int                      err;
12911 -       struct mac_priv_s       *priv;
12912 -       struct fman_mac_params   params;
12913 -
12914 -       priv = mac_dev->priv;
12915 -
12916 -       set_fman_mac_params(mac_dev, &params);
12917 -
12918 -       if (priv->max_speed == SPEED_10000)
12919 -               params.phy_if = PHY_INTERFACE_MODE_XGMII;
12920 -
12921 -       mac_dev->fman_mac = memac_config(&params);
12922 -       if (!mac_dev->fman_mac) {
12923 -               err = -EINVAL;
12924 -               goto _return;
12925 -       }
12926 -
12927 -       err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
12928 -       if (err < 0)
12929 -               goto _return_fm_mac_free;
12930 -
12931 -       err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
12932 -       if (err < 0)
12933 -               goto _return_fm_mac_free;
12934 -
12935 -       err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
12936 -       if (err < 0)
12937 -               goto _return_fm_mac_free;
12938 -
12939 -       err = memac_init(mac_dev->fman_mac);
12940 -       if (err < 0)
12941 -               goto _return_fm_mac_free;
12942 -
12943 -       dev_info(priv->dev, "FMan MEMAC\n");
12944 -
12945 -       goto _return;
12946 -
12947 -_return_fm_mac_free:
12948 -       memac_free(mac_dev->fman_mac);
12949 -
12950 -_return:
12951 -       return err;
12952 -}
12953 -
12954 -static int start(struct mac_device *mac_dev)
12955 -{
12956 -       int      err;
12957 -       struct phy_device *phy_dev = mac_dev->phy_dev;
12958 -       struct mac_priv_s *priv = mac_dev->priv;
12959 -
12960 -       err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
12961 -       if (!err && phy_dev)
12962 -               phy_start(phy_dev);
12963 -
12964 -       return err;
12965 -}
12966 -
12967 -static int stop(struct mac_device *mac_dev)
12968 -{
12969 -       struct mac_priv_s *priv = mac_dev->priv;
12970 -
12971 -       if (mac_dev->phy_dev)
12972 -               phy_stop(mac_dev->phy_dev);
12973 -
12974 -       return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
12975 -}
12976 -
12977 -static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
12978 -{
12979 -       struct mac_priv_s       *priv;
12980 -       struct mac_address      *old_addr, *tmp;
12981 -       struct netdev_hw_addr   *ha;
12982 -       int                     err;
12983 -       enet_addr_t             *addr;
12984 -
12985 -       priv = mac_dev->priv;
12986 -
12987 -       /* Clear previous address list */
12988 -       list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
12989 -               addr = (enet_addr_t *)old_addr->addr;
12990 -               err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
12991 -               if (err < 0)
12992 -                       return err;
12993 -
12994 -               list_del(&old_addr->list);
12995 -               kfree(old_addr);
12996 -       }
12997 -
12998 -       /* Add all the addresses from the new list */
12999 -       netdev_for_each_mc_addr(ha, net_dev) {
13000 -               addr = (enet_addr_t *)ha->addr;
13001 -               err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
13002 -               if (err < 0)
13003 -                       return err;
13004 -
13005 -               tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
13006 -               if (!tmp)
13007 -                       return -ENOMEM;
13008 -
13009 -               ether_addr_copy(tmp->addr, ha->addr);
13010 -               list_add(&tmp->list, &priv->mc_addr_list);
13011 -       }
13012 -       return 0;
13013 -}
13014 -
13015 -/**
13016 - * fman_set_mac_active_pause
13017 - * @mac_dev:   A pointer to the MAC device
13018 - * @rx:                Pause frame setting for RX
13019 - * @tx:                Pause frame setting for TX
13020 - *
13021 - * Set the MAC RX/TX PAUSE frames settings
13022 - *
13023 - * Avoid redundant calls to FMD, if the MAC driver already contains the desired
13024 - * active PAUSE settings. Otherwise, the new active settings should be reflected
13025 - * in FMan.
13026 - *
13027 - * Return: 0 on success; Error code otherwise.
13028 - */
13029 -int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
13030 -{
13031 -       struct fman_mac *fman_mac = mac_dev->fman_mac;
13032 -       int err = 0;
13033 -
13034 -       if (rx != mac_dev->rx_pause_active) {
13035 -               err = mac_dev->set_rx_pause(fman_mac, rx);
13036 -               if (likely(err == 0))
13037 -                       mac_dev->rx_pause_active = rx;
13038 -       }
13039 -
13040 -       if (tx != mac_dev->tx_pause_active) {
13041 -               u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
13042 -                                        FSL_FM_PAUSE_TIME_DISABLE);
13043 -
13044 -               err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
13045 -
13046 -               if (likely(err == 0))
13047 -                       mac_dev->tx_pause_active = tx;
13048 -       }
13049 -
13050 -       return err;
13051 -}
13052 -EXPORT_SYMBOL(fman_set_mac_active_pause);
13053 -
13054 -/**
13055 - * fman_get_pause_cfg
13056 - * @mac_dev:   A pointer to the MAC device
13057 - * @rx:                Return value for RX setting
13058 - * @tx:                Return value for TX setting
13059 - *
13060 - * Determine the MAC RX/TX PAUSE frames settings based on PHY
13061 - * autonegotiation or values set by eththool.
13062 - *
13063 - * Return: Pointer to FMan device.
13064 - */
13065 -void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
13066 -                       bool *tx_pause)
13067 -{
13068 -       struct phy_device *phy_dev = mac_dev->phy_dev;
13069 -       u16 lcl_adv, rmt_adv;
13070 -       u8 flowctrl;
13071 -
13072 -       *rx_pause = *tx_pause = false;
13073 -
13074 -       if (!phy_dev->duplex)
13075 -               return;
13076 -
13077 -       /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
13078 -        * are those set by ethtool.
13079 -        */
13080 -       if (!mac_dev->autoneg_pause) {
13081 -               *rx_pause = mac_dev->rx_pause_req;
13082 -               *tx_pause = mac_dev->tx_pause_req;
13083 -               return;
13084 -       }
13085 -
13086 -       /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
13087 -        * settings depend on the result of the link negotiation.
13088 -        */
13089 -
13090 -       /* get local capabilities */
13091 -       lcl_adv = 0;
13092 -       if (phy_dev->advertising & ADVERTISED_Pause)
13093 -               lcl_adv |= ADVERTISE_PAUSE_CAP;
13094 -       if (phy_dev->advertising & ADVERTISED_Asym_Pause)
13095 -               lcl_adv |= ADVERTISE_PAUSE_ASYM;
13096 -
13097 -       /* get link partner capabilities */
13098 -       rmt_adv = 0;
13099 -       if (phy_dev->pause)
13100 -               rmt_adv |= LPA_PAUSE_CAP;
13101 -       if (phy_dev->asym_pause)
13102 -               rmt_adv |= LPA_PAUSE_ASYM;
13103 -
13104 -       /* Calculate TX/RX settings based on local and peer advertised
13105 -        * symmetric/asymmetric PAUSE capabilities.
13106 -        */
13107 -       flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
13108 -       if (flowctrl & FLOW_CTRL_RX)
13109 -               *rx_pause = true;
13110 -       if (flowctrl & FLOW_CTRL_TX)
13111 -               *tx_pause = true;
13112 -}
13113 -EXPORT_SYMBOL(fman_get_pause_cfg);
13114 -
13115 -static void adjust_link_void(struct net_device *net_dev)
13116 -{
13117 -}
13118 -
13119 -static void adjust_link_dtsec(struct net_device *net_dev)
13120 -{
13121 -       struct device *dev = net_dev->dev.parent;
13122 -       struct dpaa_eth_data *eth_data = dev->platform_data;
13123 -       struct mac_device *mac_dev = eth_data->mac_dev;
13124 -       struct phy_device *phy_dev = mac_dev->phy_dev;
13125 -       struct fman_mac *fman_mac;
13126 -       bool rx_pause, tx_pause;
13127 -       int err;
13128 -
13129 -       fman_mac = mac_dev->fman_mac;
13130 -       if (!phy_dev->link) {
13131 -               dtsec_restart_autoneg(fman_mac);
13132 -
13133 -               return;
13134 -       }
13135 -
13136 -       dtsec_adjust_link(fman_mac, phy_dev->speed);
13137 -       fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
13138 -       err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
13139 -       if (err < 0)
13140 -               netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
13141 -}
13142 -
13143 -static void adjust_link_memac(struct net_device *net_dev)
13144 -{
13145 -       struct device *dev = net_dev->dev.parent;
13146 -       struct dpaa_eth_data *eth_data = dev->platform_data;
13147 -       struct mac_device *mac_dev = eth_data->mac_dev;
13148 -       struct phy_device *phy_dev = mac_dev->phy_dev;
13149 -       struct fman_mac *fman_mac;
13150 -       bool rx_pause, tx_pause;
13151 -       int err;
13152 -
13153 -       fman_mac = mac_dev->fman_mac;
13154 -       memac_adjust_link(fman_mac, phy_dev->speed);
13155 -
13156 -       fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
13157 -       err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
13158 -       if (err < 0)
13159 -               netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
13160 -}
13161 -
13162 -/* Initializes driver's PHY state, and attaches to the PHY.
13163 - * Returns 0 on success.
13164 - */
13165 -static struct phy_device *init_phy(struct net_device *net_dev,
13166 -                                  struct mac_device *mac_dev,
13167 -                                  void (*adj_lnk)(struct net_device *))
13168 -{
13169 -       struct phy_device       *phy_dev;
13170 -       struct mac_priv_s       *priv = mac_dev->priv;
13171 -
13172 -       phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0,
13173 -                                priv->phy_if);
13174 -       if (!phy_dev) {
13175 -               netdev_err(net_dev, "Could not connect to PHY\n");
13176 -               return NULL;
13177 -       }
13178 -
13179 -       /* Remove any features not supported by the controller */
13180 -       phy_dev->supported &= mac_dev->if_support;
13181 -       /* Enable the symmetric and asymmetric PAUSE frame advertisements,
13182 -        * as most of the PHY drivers do not enable them by default.
13183 -        */
13184 -       phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
13185 -       phy_dev->advertising = phy_dev->supported;
13186 -
13187 -       mac_dev->phy_dev = phy_dev;
13188 -
13189 -       return phy_dev;
13190 -}
13191 -
13192 -static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
13193 -                                        struct mac_device *mac_dev)
13194 -{
13195 -       return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
13196 -}
13197 -
13198 -static struct phy_device *tgec_init_phy(struct net_device *net_dev,
13199 -                                       struct mac_device *mac_dev)
13200 -{
13201 -       return init_phy(net_dev, mac_dev, adjust_link_void);
13202 -}
13203 -
13204 -static struct phy_device *memac_init_phy(struct net_device *net_dev,
13205 -                                        struct mac_device *mac_dev)
13206 -{
13207 -       return init_phy(net_dev, mac_dev, &adjust_link_memac);
13208 -}
13209 -
13210 -static void setup_dtsec(struct mac_device *mac_dev)
13211 -{
13212 -       mac_dev->init_phy               = dtsec_init_phy;
13213 -       mac_dev->init                   = dtsec_initialization;
13214 -       mac_dev->set_promisc            = dtsec_set_promiscuous;
13215 -       mac_dev->change_addr            = dtsec_modify_mac_address;
13216 -       mac_dev->add_hash_mac_addr      = dtsec_add_hash_mac_address;
13217 -       mac_dev->remove_hash_mac_addr   = dtsec_del_hash_mac_address;
13218 -       mac_dev->set_tx_pause           = dtsec_set_tx_pause_frames;
13219 -       mac_dev->set_rx_pause           = dtsec_accept_rx_pause_frames;
13220 -       mac_dev->set_exception          = dtsec_set_exception;
13221 -       mac_dev->set_multi              = set_multi;
13222 -       mac_dev->start                  = start;
13223 -       mac_dev->stop                   = stop;
13224 -
13225 -       mac_dev->priv->enable           = dtsec_enable;
13226 -       mac_dev->priv->disable          = dtsec_disable;
13227 -}
13228 -
13229 -static void setup_tgec(struct mac_device *mac_dev)
13230 -{
13231 -       mac_dev->init_phy               = tgec_init_phy;
13232 -       mac_dev->init                   = tgec_initialization;
13233 -       mac_dev->set_promisc            = tgec_set_promiscuous;
13234 -       mac_dev->change_addr            = tgec_modify_mac_address;
13235 -       mac_dev->add_hash_mac_addr      = tgec_add_hash_mac_address;
13236 -       mac_dev->remove_hash_mac_addr   = tgec_del_hash_mac_address;
13237 -       mac_dev->set_tx_pause           = tgec_set_tx_pause_frames;
13238 -       mac_dev->set_rx_pause           = tgec_accept_rx_pause_frames;
13239 -       mac_dev->set_exception          = tgec_set_exception;
13240 -       mac_dev->set_multi              = set_multi;
13241 -       mac_dev->start                  = start;
13242 -       mac_dev->stop                   = stop;
13243 -
13244 -       mac_dev->priv->enable           = tgec_enable;
13245 -       mac_dev->priv->disable          = tgec_disable;
13246 -}
13247 -
13248 -static void setup_memac(struct mac_device *mac_dev)
13249 -{
13250 -       mac_dev->init_phy               = memac_init_phy;
13251 -       mac_dev->init                   = memac_initialization;
13252 -       mac_dev->set_promisc            = memac_set_promiscuous;
13253 -       mac_dev->change_addr            = memac_modify_mac_address;
13254 -       mac_dev->add_hash_mac_addr      = memac_add_hash_mac_address;
13255 -       mac_dev->remove_hash_mac_addr   = memac_del_hash_mac_address;
13256 -       mac_dev->set_tx_pause           = memac_set_tx_pause_frames;
13257 -       mac_dev->set_rx_pause           = memac_accept_rx_pause_frames;
13258 -       mac_dev->set_exception          = memac_set_exception;
13259 -       mac_dev->set_multi              = set_multi;
13260 -       mac_dev->start                  = start;
13261 -       mac_dev->stop                   = stop;
13262 -
13263 -       mac_dev->priv->enable           = memac_enable;
13264 -       mac_dev->priv->disable          = memac_disable;
13265 -}
13266 -
13267 -#define DTSEC_SUPPORTED \
13268 -       (SUPPORTED_10baseT_Half \
13269 -       | SUPPORTED_10baseT_Full \
13270 -       | SUPPORTED_100baseT_Half \
13271 -       | SUPPORTED_100baseT_Full \
13272 -       | SUPPORTED_Autoneg \
13273 -       | SUPPORTED_Pause \
13274 -       | SUPPORTED_Asym_Pause \
13275 -       | SUPPORTED_MII)
13276 -
13277 -static DEFINE_MUTEX(eth_lock);
13278 -
13279 -static const u16 phy2speed[] = {
13280 -       [PHY_INTERFACE_MODE_MII]                = SPEED_100,
13281 -       [PHY_INTERFACE_MODE_GMII]               = SPEED_1000,
13282 -       [PHY_INTERFACE_MODE_SGMII]              = SPEED_1000,
13283 -       [PHY_INTERFACE_MODE_TBI]                = SPEED_1000,
13284 -       [PHY_INTERFACE_MODE_RMII]               = SPEED_100,
13285 -       [PHY_INTERFACE_MODE_RGMII]              = SPEED_1000,
13286 -       [PHY_INTERFACE_MODE_RGMII_ID]           = SPEED_1000,
13287 -       [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
13288 -       [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
13289 -       [PHY_INTERFACE_MODE_RTBI]               = SPEED_1000,
13290 -       [PHY_INTERFACE_MODE_XGMII]              = SPEED_10000
13291 -};
13292 -
13293 -static struct platform_device *dpaa_eth_add_device(int fman_id,
13294 -                                                  struct mac_device *mac_dev,
13295 -                                                  struct device_node *node)
13296 -{
13297 -       struct platform_device *pdev;
13298 -       struct dpaa_eth_data data;
13299 -       struct mac_priv_s       *priv;
13300 -       static int dpaa_eth_dev_cnt;
13301 -       int ret;
13302 -
13303 -       priv = mac_dev->priv;
13304 -
13305 -       data.mac_dev = mac_dev;
13306 -       data.mac_hw_id = priv->cell_index;
13307 -       data.fman_hw_id = fman_id;
13308 -       data.mac_node = node;
13309 -
13310 -       mutex_lock(&eth_lock);
13311 -
13312 -       pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
13313 -       if (!pdev) {
13314 -               ret = -ENOMEM;
13315 -               goto no_mem;
13316 -       }
13317 -
13318 -       ret = platform_device_add_data(pdev, &data, sizeof(data));
13319 -       if (ret)
13320 -               goto err;
13321 -
13322 -       ret = platform_device_add(pdev);
13323 -       if (ret)
13324 -               goto err;
13325 -
13326 -       dpaa_eth_dev_cnt++;
13327 -       mutex_unlock(&eth_lock);
13328 -
13329 -       return pdev;
13330 -
13331 -err:
13332 -       platform_device_put(pdev);
13333 -no_mem:
13334 -       mutex_unlock(&eth_lock);
13335 -
13336 -       return ERR_PTR(ret);
13337 -}
13338 -
13339 -static const struct of_device_id mac_match[] = {
13340 -       { .compatible   = "fsl,fman-dtsec" },
13341 -       { .compatible   = "fsl,fman-xgec" },
13342 -       { .compatible   = "fsl,fman-memac" },
13343 -       {}
13344 -};
13345 -MODULE_DEVICE_TABLE(of, mac_match);
13346 -
13347 -static int mac_probe(struct platform_device *_of_dev)
13348 -{
13349 -       int                      err, i, nph;
13350 -       struct device           *dev;
13351 -       struct device_node      *mac_node, *dev_node;
13352 -       struct mac_device       *mac_dev;
13353 -       struct platform_device  *of_dev;
13354 -       struct resource          res;
13355 -       struct mac_priv_s       *priv;
13356 -       const u8                *mac_addr;
13357 -       u32                      val;
13358 -       u8                      fman_id;
13359 -       int                     phy_if;
13360 -
13361 -       dev = &_of_dev->dev;
13362 -       mac_node = dev->of_node;
13363 -
13364 -       mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
13365 -       if (!mac_dev) {
13366 -               err = -ENOMEM;
13367 -               dev_err(dev, "devm_kzalloc() = %d\n", err);
13368 -               goto _return;
13369 -       }
13370 -       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
13371 -       if (!priv) {
13372 -               err = -ENOMEM;
13373 -               goto _return;
13374 -       }
13375 -
13376 -       /* Save private information */
13377 -       mac_dev->priv = priv;
13378 -       priv->dev = dev;
13379 -
13380 -       if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
13381 -               setup_dtsec(mac_dev);
13382 -               priv->internal_phy_node = of_parse_phandle(mac_node,
13383 -                                                         "tbi-handle", 0);
13384 -       } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
13385 -               setup_tgec(mac_dev);
13386 -       } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
13387 -               setup_memac(mac_dev);
13388 -               priv->internal_phy_node = of_parse_phandle(mac_node,
13389 -                                                         "pcsphy-handle", 0);
13390 -       } else {
13391 -               dev_err(dev, "MAC node (%s) contains unsupported MAC\n",
13392 -                       mac_node->full_name);
13393 -               err = -EINVAL;
13394 -               goto _return;
13395 -       }
13396 -
13397 -       /* Register mac_dev */
13398 -       dev_set_drvdata(dev, mac_dev);
13399 -
13400 -       INIT_LIST_HEAD(&priv->mc_addr_list);
13401 -
13402 -       /* Get the FM node */
13403 -       dev_node = of_get_parent(mac_node);
13404 -       if (!dev_node) {
13405 -               dev_err(dev, "of_get_parent(%s) failed\n",
13406 -                       mac_node->full_name);
13407 -               err = -EINVAL;
13408 -               goto _return_dev_set_drvdata;
13409 -       }
13410 -
13411 -       of_dev = of_find_device_by_node(dev_node);
13412 -       if (!of_dev) {
13413 -               dev_err(dev, "of_find_device_by_node(%s) failed\n",
13414 -                       dev_node->full_name);
13415 -               err = -EINVAL;
13416 -               goto _return_of_node_put;
13417 -       }
13418 -
13419 -       /* Get the FMan cell-index */
13420 -       err = of_property_read_u32(dev_node, "cell-index", &val);
13421 -       if (err) {
13422 -               dev_err(dev, "failed to read cell-index for %s\n",
13423 -                       dev_node->full_name);
13424 -               err = -EINVAL;
13425 -               goto _return_of_node_put;
13426 -       }
13427 -       /* cell-index 0 => FMan id 1 */
13428 -       fman_id = (u8)(val + 1);
13429 -
13430 -       priv->fman = fman_bind(&of_dev->dev);
13431 -       if (!priv->fman) {
13432 -               dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name);
13433 -               err = -ENODEV;
13434 -               goto _return_of_node_put;
13435 -       }
13436 -
13437 -       of_node_put(dev_node);
13438 -
13439 -       /* Get the address of the memory mapped registers */
13440 -       err = of_address_to_resource(mac_node, 0, &res);
13441 -       if (err < 0) {
13442 -               dev_err(dev, "of_address_to_resource(%s) = %d\n",
13443 -                       mac_node->full_name, err);
13444 -               goto _return_dev_set_drvdata;
13445 -       }
13446 -
13447 -       mac_dev->res = __devm_request_region(dev,
13448 -                                            fman_get_mem_region(priv->fman),
13449 -                                            res.start, res.end + 1 - res.start,
13450 -                                            "mac");
13451 -       if (!mac_dev->res) {
13452 -               dev_err(dev, "__devm_request_mem_region(mac) failed\n");
13453 -               err = -EBUSY;
13454 -               goto _return_dev_set_drvdata;
13455 -       }
13456 -
13457 -       priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
13458 -                                  mac_dev->res->end + 1 - mac_dev->res->start);
13459 -       if (!priv->vaddr) {
13460 -               dev_err(dev, "devm_ioremap() failed\n");
13461 -               err = -EIO;
13462 -               goto _return_dev_set_drvdata;
13463 -       }
13464 -
13465 -       if (!of_device_is_available(mac_node)) {
13466 -               devm_iounmap(dev, priv->vaddr);
13467 -               __devm_release_region(dev, fman_get_mem_region(priv->fman),
13468 -                                     res.start, res.end + 1 - res.start);
13469 -               devm_kfree(dev, mac_dev);
13470 -               dev_set_drvdata(dev, NULL);
13471 -               return -ENODEV;
13472 -       }
13473 -
13474 -       /* Get the cell-index */
13475 -       err = of_property_read_u32(mac_node, "cell-index", &val);
13476 -       if (err) {
13477 -               dev_err(dev, "failed to read cell-index for %s\n",
13478 -                       mac_node->full_name);
13479 -               err = -EINVAL;
13480 -               goto _return_dev_set_drvdata;
13481 -       }
13482 -       priv->cell_index = (u8)val;
13483 -
13484 -       /* Get the MAC address */
13485 -       mac_addr = of_get_mac_address(mac_node);
13486 -       if (!mac_addr) {
13487 -               dev_err(dev, "of_get_mac_address(%s) failed\n",
13488 -                       mac_node->full_name);
13489 -               err = -EINVAL;
13490 -               goto _return_dev_set_drvdata;
13491 -       }
13492 -       memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
13493 -
13494 -       /* Get the port handles */
13495 -       nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
13496 -       if (unlikely(nph < 0)) {
13497 -               dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n",
13498 -                       mac_node->full_name);
13499 -               err = nph;
13500 -               goto _return_dev_set_drvdata;
13501 -       }
13502 -
13503 -       if (nph != ARRAY_SIZE(mac_dev->port)) {
13504 -               dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n",
13505 -                       mac_node->full_name);
13506 -               err = -EINVAL;
13507 -               goto _return_dev_set_drvdata;
13508 -       }
13509 -
13510 -       for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
13511 -               /* Find the port node */
13512 -               dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
13513 -               if (!dev_node) {
13514 -                       dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n",
13515 -                               mac_node->full_name);
13516 -                       err = -EINVAL;
13517 -                       goto _return_of_node_put;
13518 -               }
13519 -
13520 -               of_dev = of_find_device_by_node(dev_node);
13521 -               if (!of_dev) {
13522 -                       dev_err(dev, "of_find_device_by_node(%s) failed\n",
13523 -                               dev_node->full_name);
13524 -                       err = -EINVAL;
13525 -                       goto _return_of_node_put;
13526 -               }
13527 -
13528 -               mac_dev->port[i] = fman_port_bind(&of_dev->dev);
13529 -               if (!mac_dev->port[i]) {
13530 -                       dev_err(dev, "dev_get_drvdata(%s) failed\n",
13531 -                               dev_node->full_name);
13532 -                       err = -EINVAL;
13533 -                       goto _return_of_node_put;
13534 -               }
13535 -               of_node_put(dev_node);
13536 -       }
13537 -
13538 -       /* Get the PHY connection type */
13539 -       phy_if = of_get_phy_mode(mac_node);
13540 -       if (phy_if < 0) {
13541 -               dev_warn(dev,
13542 -                        "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
13543 -                        mac_node->full_name);
13544 -               phy_if = PHY_INTERFACE_MODE_SGMII;
13545 -       }
13546 -       priv->phy_if = phy_if;
13547 -
13548 -       priv->speed             = phy2speed[priv->phy_if];
13549 -       priv->max_speed         = priv->speed;
13550 -       mac_dev->if_support     = DTSEC_SUPPORTED;
13551 -       /* We don't support half-duplex in SGMII mode */
13552 -       if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
13553 -               mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
13554 -                                       SUPPORTED_100baseT_Half);
13555 -
13556 -       /* Gigabit support (no half-duplex) */
13557 -       if (priv->max_speed == 1000)
13558 -               mac_dev->if_support |= SUPPORTED_1000baseT_Full;
13559 -
13560 -       /* The 10G interface only supports one mode */
13561 -       if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
13562 -               mac_dev->if_support = SUPPORTED_10000baseT_Full;
13563 -
13564 -       /* Get the rest of the PHY information */
13565 -       priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
13566 -       if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) {
13567 -               struct phy_device *phy;
13568 -
13569 -               err = of_phy_register_fixed_link(mac_node);
13570 -               if (err)
13571 -                       goto _return_dev_set_drvdata;
13572 -
13573 -               priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
13574 -                                          GFP_KERNEL);
13575 -               if (!priv->fixed_link)
13576 -                       goto _return_dev_set_drvdata;
13577 -
13578 -               priv->phy_node = of_node_get(mac_node);
13579 -               phy = of_phy_find_device(priv->phy_node);
13580 -               if (!phy)
13581 -                       goto _return_dev_set_drvdata;
13582 -
13583 -               priv->fixed_link->link = phy->link;
13584 -               priv->fixed_link->speed = phy->speed;
13585 -               priv->fixed_link->duplex = phy->duplex;
13586 -               priv->fixed_link->pause = phy->pause;
13587 -               priv->fixed_link->asym_pause = phy->asym_pause;
13588 -
13589 -               put_device(&phy->mdio.dev);
13590 -       }
13591 -
13592 -       err = mac_dev->init(mac_dev);
13593 -       if (err < 0) {
13594 -               dev_err(dev, "mac_dev->init() = %d\n", err);
13595 -               of_node_put(priv->phy_node);
13596 -               goto _return_dev_set_drvdata;
13597 -       }
13598 -
13599 -       /* pause frame autonegotiation enabled */
13600 -       mac_dev->autoneg_pause = true;
13601 -
13602 -       /* By intializing the values to false, force FMD to enable PAUSE frames
13603 -        * on RX and TX
13604 -        */
13605 -       mac_dev->rx_pause_req = true;
13606 -       mac_dev->tx_pause_req = true;
13607 -       mac_dev->rx_pause_active = false;
13608 -       mac_dev->tx_pause_active = false;
13609 -       err = fman_set_mac_active_pause(mac_dev, true, true);
13610 -       if (err < 0)
13611 -               dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
13612 -
13613 -       dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n",
13614 -                mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
13615 -                mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
13616 -
13617 -       priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node);
13618 -       if (IS_ERR(priv->eth_dev)) {
13619 -               dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
13620 -                       priv->cell_index);
13621 -               priv->eth_dev = NULL;
13622 -       }
13623 -
13624 -       goto _return;
13625 -
13626 -_return_of_node_put:
13627 -       of_node_put(dev_node);
13628 -_return_dev_set_drvdata:
13629 -       kfree(priv->fixed_link);
13630 -       dev_set_drvdata(dev, NULL);
13631 -_return:
13632 -       return err;
13633 -}
13634 -
13635 -static struct platform_driver mac_driver = {
13636 -       .driver = {
13637 -               .name           = KBUILD_MODNAME,
13638 -               .of_match_table = mac_match,
13639 -       },
13640 -       .probe          = mac_probe,
13641 -};
13642 -
13643 -builtin_platform_driver(mac_driver);
13644 diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
13645 deleted file mode 100644
13646 index d7313f0..0000000
13647 --- a/drivers/net/ethernet/freescale/fman/mac.h
13648 +++ /dev/null
13649 @@ -1,98 +0,0 @@
13650 -/* Copyright 2008-2015 Freescale Semiconductor, Inc.
13651 - *
13652 - * Redistribution and use in source and binary forms, with or without
13653 - * modification, are permitted provided that the following conditions are met:
13654 - *     * Redistributions of source code must retain the above copyright
13655 - *      notice, this list of conditions and the following disclaimer.
13656 - *     * Redistributions in binary form must reproduce the above copyright
13657 - *      notice, this list of conditions and the following disclaimer in the
13658 - *      documentation and/or other materials provided with the distribution.
13659 - *     * Neither the name of Freescale Semiconductor nor the
13660 - *      names of its contributors may be used to endorse or promote products
13661 - *      derived from this software without specific prior written permission.
13662 - *
13663 - *
13664 - * ALTERNATIVELY, this software may be distributed under the terms of the
13665 - * GNU General Public License ("GPL") as published by the Free Software
13666 - * Foundation, either version 2 of that License or (at your option) any
13667 - * later version.
13668 - *
13669 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
13670 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
13671 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
13672 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
13673 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
13674 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
13675 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
13676 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
13677 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13678 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13679 - */
13680 -
13681 -#ifndef __MAC_H
13682 -#define __MAC_H
13683 -
13684 -#include <linux/device.h>
13685 -#include <linux/if_ether.h>
13686 -#include <linux/phy.h>
13687 -#include <linux/list.h>
13688 -
13689 -#include "fman_port.h"
13690 -#include "fman.h"
13691 -#include "fman_mac.h"
13692 -
13693 -struct fman_mac;
13694 -struct mac_priv_s;
13695 -
13696 -struct mac_device {
13697 -       struct resource         *res;
13698 -       u8                       addr[ETH_ALEN];
13699 -       struct fman_port        *port[2];
13700 -       u32                      if_support;
13701 -       struct phy_device       *phy_dev;
13702 -
13703 -       bool autoneg_pause;
13704 -       bool rx_pause_req;
13705 -       bool tx_pause_req;
13706 -       bool rx_pause_active;
13707 -       bool tx_pause_active;
13708 -       bool promisc;
13709 -
13710 -       struct phy_device *(*init_phy)(struct net_device *net_dev,
13711 -                                      struct mac_device *mac_dev);
13712 -       int (*init)(struct mac_device *mac_dev);
13713 -       int (*start)(struct mac_device *mac_dev);
13714 -       int (*stop)(struct mac_device *mac_dev);
13715 -       int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
13716 -       int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
13717 -       int (*set_multi)(struct net_device *net_dev,
13718 -                        struct mac_device *mac_dev);
13719 -       int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
13720 -       int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
13721 -                           u16 pause_time, u16 thresh_time);
13722 -       int (*set_exception)(struct fman_mac *mac_dev,
13723 -                            enum fman_mac_exceptions exception, bool enable);
13724 -       int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
13725 -                                enet_addr_t *eth_addr);
13726 -       int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
13727 -                                   enet_addr_t *eth_addr);
13728 -
13729 -       struct fman_mac         *fman_mac;
13730 -       struct mac_priv_s       *priv;
13731 -};
13732 -
13733 -struct dpaa_eth_data {
13734 -       struct device_node *mac_node;
13735 -       struct mac_device *mac_dev;
13736 -       int mac_hw_id;
13737 -       int fman_hw_id;
13738 -};
13739 -
13740 -extern const char      *mac_driver_description;
13741 -
13742 -int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
13743 -
13744 -void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
13745 -                       bool *tx_pause);
13746 -
13747 -#endif /* __MAC_H */
13748 diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
13749 index 4b86260..9b3639e 100644
13750 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
13751 +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
13752 @@ -60,9 +60,6 @@ module_param(fs_enet_debug, int, 0);
13753  MODULE_PARM_DESC(fs_enet_debug,
13754                  "Freescale bitmapped debugging message enable value");
13755  
13756 -#define RX_RING_SIZE   32
13757 -#define TX_RING_SIZE   64
13758 -
13759  #ifdef CONFIG_NET_POLL_CONTROLLER
13760  static void fs_enet_netpoll(struct net_device *dev);
13761  #endif
13762 @@ -82,113 +79,20 @@ static void skb_align(struct sk_buff *skb, int align)
13763                 skb_reserve(skb, align - off);
13764  }
13765  
13766 -/* NAPI function */
13767 -static int fs_enet_napi(struct napi_struct *napi, int budget)
13768 +/* NAPI receive function */
13769 +static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
13770  {
13771         struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
13772         struct net_device *dev = fep->ndev;
13773         const struct fs_platform_info *fpi = fep->fpi;
13774         cbd_t __iomem *bdp;
13775 -       struct sk_buff *skb, *skbn;
13776 +       struct sk_buff *skb, *skbn, *skbt;
13777         int received = 0;
13778         u16 pkt_len, sc;
13779         int curidx;
13780 -       int dirtyidx, do_wake, do_restart;
13781 -       int tx_left = TX_RING_SIZE;
13782 -
13783 -       spin_lock(&fep->tx_lock);
13784 -       bdp = fep->dirty_tx;
13785 -
13786 -       /* clear status bits for napi*/
13787 -       (*fep->ops->napi_clear_event)(dev);
13788 -
13789 -       do_wake = do_restart = 0;
13790 -       while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
13791 -               dirtyidx = bdp - fep->tx_bd_base;
13792 -
13793 -               if (fep->tx_free == fep->tx_ring)
13794 -                       break;
13795 -
13796 -               skb = fep->tx_skbuff[dirtyidx];
13797 -
13798 -               /*
13799 -                * Check for errors.
13800 -                */
13801 -               if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
13802 -                         BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
13803 -
13804 -                       if (sc & BD_ENET_TX_HB) /* No heartbeat */
13805 -                               fep->stats.tx_heartbeat_errors++;
13806 -                       if (sc & BD_ENET_TX_LC) /* Late collision */
13807 -                               fep->stats.tx_window_errors++;
13808 -                       if (sc & BD_ENET_TX_RL) /* Retrans limit */
13809 -                               fep->stats.tx_aborted_errors++;
13810 -                       if (sc & BD_ENET_TX_UN) /* Underrun */
13811 -                               fep->stats.tx_fifo_errors++;
13812 -                       if (sc & BD_ENET_TX_CSL)        /* Carrier lost */
13813 -                               fep->stats.tx_carrier_errors++;
13814 -
13815 -                       if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
13816 -                               fep->stats.tx_errors++;
13817 -                               do_restart = 1;
13818 -                       }
13819 -               } else
13820 -                       fep->stats.tx_packets++;
13821 -
13822 -               if (sc & BD_ENET_TX_READY) {
13823 -                       dev_warn(fep->dev,
13824 -                                "HEY! Enet xmit interrupt and TX_READY.\n");
13825 -               }
13826 -
13827 -               /*
13828 -                * Deferred means some collisions occurred during transmit,
13829 -                * but we eventually sent the packet OK.
13830 -                */
13831 -               if (sc & BD_ENET_TX_DEF)
13832 -                       fep->stats.collisions++;
13833 -
13834 -               /* unmap */
13835 -               if (fep->mapped_as_page[dirtyidx])
13836 -                       dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
13837 -                                      CBDR_DATLEN(bdp), DMA_TO_DEVICE);
13838 -               else
13839 -                       dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
13840 -                                        CBDR_DATLEN(bdp), DMA_TO_DEVICE);
13841  
13842 -               /*
13843 -                * Free the sk buffer associated with this last transmit.
13844 -                */
13845 -               if (skb) {
13846 -                       dev_kfree_skb(skb);
13847 -                       fep->tx_skbuff[dirtyidx] = NULL;
13848 -               }
13849 -
13850 -               /*
13851 -                * Update pointer to next buffer descriptor to be transmitted.
13852 -                */
13853 -               if ((sc & BD_ENET_TX_WRAP) == 0)
13854 -                       bdp++;
13855 -               else
13856 -                       bdp = fep->tx_bd_base;
13857 -
13858 -               /*
13859 -                * Since we have freed up a buffer, the ring is no longer
13860 -                * full.
13861 -                */
13862 -               if (++fep->tx_free == MAX_SKB_FRAGS)
13863 -                       do_wake = 1;
13864 -               tx_left--;
13865 -       }
13866 -
13867 -       fep->dirty_tx = bdp;
13868 -
13869 -       if (do_restart)
13870 -               (*fep->ops->tx_restart)(dev);
13871 -
13872 -       spin_unlock(&fep->tx_lock);
13873 -
13874 -       if (do_wake)
13875 -               netif_wake_queue(dev);
13876 +       if (budget <= 0)
13877 +               return received;
13878  
13879         /*
13880          * First, grab all of the stats for the incoming packet.
13881 @@ -196,8 +100,10 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13882          */
13883         bdp = fep->cur_rx;
13884  
13885 -       while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
13886 -              received < budget) {
13887 +       /* clear RX status bits for napi*/
13888 +       (*fep->ops->napi_clear_rx_event)(dev);
13889 +
13890 +       while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
13891                 curidx = bdp - fep->rx_bd_base;
13892  
13893                 /*
13894 @@ -226,10 +132,21 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13895                         if (sc & BD_ENET_RX_OV)
13896                                 fep->stats.rx_crc_errors++;
13897  
13898 -                       skbn = fep->rx_skbuff[curidx];
13899 +                       skb = fep->rx_skbuff[curidx];
13900 +
13901 +                       dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
13902 +                               L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13903 +                               DMA_FROM_DEVICE);
13904 +
13905 +                       skbn = skb;
13906 +
13907                 } else {
13908                         skb = fep->rx_skbuff[curidx];
13909  
13910 +                       dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
13911 +                               L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13912 +                               DMA_FROM_DEVICE);
13913 +
13914                         /*
13915                          * Process the incoming frame.
13916                          */
13917 @@ -244,31 +161,16 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13918                                         skb_reserve(skbn, 2);   /* align IP header */
13919                                         skb_copy_from_linear_data(skb,
13920                                                       skbn->data, pkt_len);
13921 -                                       swap(skb, skbn);
13922 -                                       dma_sync_single_for_cpu(fep->dev,
13923 -                                               CBDR_BUFADDR(bdp),
13924 -                                               L1_CACHE_ALIGN(pkt_len),
13925 -                                               DMA_FROM_DEVICE);
13926 +                                       /* swap */
13927 +                                       skbt = skb;
13928 +                                       skb = skbn;
13929 +                                       skbn = skbt;
13930                                 }
13931                         } else {
13932                                 skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
13933  
13934 -                               if (skbn) {
13935 -                                       dma_addr_t dma;
13936 -
13937 +                               if (skbn)
13938                                         skb_align(skbn, ENET_RX_ALIGN);
13939 -
13940 -                                       dma_unmap_single(fep->dev,
13941 -                                               CBDR_BUFADDR(bdp),
13942 -                                               L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13943 -                                               DMA_FROM_DEVICE);
13944 -
13945 -                                       dma = dma_map_single(fep->dev,
13946 -                                               skbn->data,
13947 -                                               L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13948 -                                               DMA_FROM_DEVICE);
13949 -                                       CBDW_BUFADDR(bdp, dma);
13950 -                               }
13951                         }
13952  
13953                         if (skbn != NULL) {
13954 @@ -283,6 +185,9 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13955                 }
13956  
13957                 fep->rx_skbuff[curidx] = skbn;
13958 +               CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
13959 +                            L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
13960 +                            DMA_FROM_DEVICE));
13961                 CBDW_DATLEN(bdp, 0);
13962                 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
13963  
13964 @@ -295,19 +200,134 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
13965                         bdp = fep->rx_bd_base;
13966  
13967                 (*fep->ops->rx_bd_done)(dev);
13968 +
13969 +               if (received >= budget)
13970 +                       break;
13971         }
13972  
13973         fep->cur_rx = bdp;
13974  
13975 -       if (received < budget && tx_left) {
13976 +       if (received < budget) {
13977                 /* done */
13978                 napi_complete(napi);
13979 -               (*fep->ops->napi_enable)(dev);
13980 +               (*fep->ops->napi_enable_rx)(dev);
13981 +       }
13982 +       return received;
13983 +}
13984  
13985 -               return received;
13986 +static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
13987 +{
13988 +       struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
13989 +                                                  napi_tx);
13990 +       struct net_device *dev = fep->ndev;
13991 +       cbd_t __iomem *bdp;
13992 +       struct sk_buff *skb;
13993 +       int dirtyidx, do_wake, do_restart;
13994 +       u16 sc;
13995 +       int has_tx_work = 0;
13996 +
13997 +       spin_lock(&fep->tx_lock);
13998 +       bdp = fep->dirty_tx;
13999 +
14000 +       /* clear TX status bits for napi*/
14001 +       (*fep->ops->napi_clear_tx_event)(dev);
14002 +
14003 +       do_wake = do_restart = 0;
14004 +       while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
14005 +               dirtyidx = bdp - fep->tx_bd_base;
14006 +
14007 +               if (fep->tx_free == fep->tx_ring)
14008 +                       break;
14009 +
14010 +               skb = fep->tx_skbuff[dirtyidx];
14011 +
14012 +               /*
14013 +                * Check for errors.
14014 +                */
14015 +               if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
14016 +                         BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
14017 +
14018 +                       if (sc & BD_ENET_TX_HB) /* No heartbeat */
14019 +                               fep->stats.tx_heartbeat_errors++;
14020 +                       if (sc & BD_ENET_TX_LC) /* Late collision */
14021 +                               fep->stats.tx_window_errors++;
14022 +                       if (sc & BD_ENET_TX_RL) /* Retrans limit */
14023 +                               fep->stats.tx_aborted_errors++;
14024 +                       if (sc & BD_ENET_TX_UN) /* Underrun */
14025 +                               fep->stats.tx_fifo_errors++;
14026 +                       if (sc & BD_ENET_TX_CSL)        /* Carrier lost */
14027 +                               fep->stats.tx_carrier_errors++;
14028 +
14029 +                       if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
14030 +                               fep->stats.tx_errors++;
14031 +                               do_restart = 1;
14032 +                       }
14033 +               } else
14034 +                       fep->stats.tx_packets++;
14035 +
14036 +               if (sc & BD_ENET_TX_READY) {
14037 +                       dev_warn(fep->dev,
14038 +                                "HEY! Enet xmit interrupt and TX_READY.\n");
14039 +               }
14040 +
14041 +               /*
14042 +                * Deferred means some collisions occurred during transmit,
14043 +                * but we eventually sent the packet OK.
14044 +                */
14045 +               if (sc & BD_ENET_TX_DEF)
14046 +                       fep->stats.collisions++;
14047 +
14048 +               /* unmap */
14049 +               if (fep->mapped_as_page[dirtyidx])
14050 +                       dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
14051 +                                      CBDR_DATLEN(bdp), DMA_TO_DEVICE);
14052 +               else
14053 +                       dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
14054 +                                        CBDR_DATLEN(bdp), DMA_TO_DEVICE);
14055 +
14056 +               /*
14057 +                * Free the sk buffer associated with this last transmit.
14058 +                */
14059 +               if (skb) {
14060 +                       dev_kfree_skb(skb);
14061 +                       fep->tx_skbuff[dirtyidx] = NULL;
14062 +               }
14063 +
14064 +               /*
14065 +                * Update pointer to next buffer descriptor to be transmitted.
14066 +                */
14067 +               if ((sc & BD_ENET_TX_WRAP) == 0)
14068 +                       bdp++;
14069 +               else
14070 +                       bdp = fep->tx_bd_base;
14071 +
14072 +               /*
14073 +                * Since we have freed up a buffer, the ring is no longer
14074 +                * full.
14075 +                */
14076 +               if (++fep->tx_free >= MAX_SKB_FRAGS)
14077 +                       do_wake = 1;
14078 +               has_tx_work = 1;
14079 +       }
14080 +
14081 +       fep->dirty_tx = bdp;
14082 +
14083 +       if (do_restart)
14084 +               (*fep->ops->tx_restart)(dev);
14085 +
14086 +       if (!has_tx_work) {
14087 +               napi_complete(napi);
14088 +               (*fep->ops->napi_enable_tx)(dev);
14089         }
14090  
14091 -       return budget;
14092 +       spin_unlock(&fep->tx_lock);
14093 +
14094 +       if (do_wake)
14095 +               netif_wake_queue(dev);
14096 +
14097 +       if (has_tx_work)
14098 +               return budget;
14099 +       return 0;
14100  }
14101  
14102  /*
14103 @@ -333,18 +353,18 @@ fs_enet_interrupt(int irq, void *dev_id)
14104                 nr++;
14105  
14106                 int_clr_events = int_events;
14107 -               int_clr_events &= ~fep->ev_napi;
14108 +               int_clr_events &= ~fep->ev_napi_rx;
14109  
14110                 (*fep->ops->clear_int_events)(dev, int_clr_events);
14111  
14112                 if (int_events & fep->ev_err)
14113                         (*fep->ops->ev_error)(dev, int_events);
14114  
14115 -               if (int_events & fep->ev) {
14116 +               if (int_events & fep->ev_rx) {
14117                         napi_ok = napi_schedule_prep(&fep->napi);
14118  
14119 -                       (*fep->ops->napi_disable)(dev);
14120 -                       (*fep->ops->clear_int_events)(dev, fep->ev_napi);
14121 +                       (*fep->ops->napi_disable_rx)(dev);
14122 +                       (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
14123  
14124                         /* NOTE: it is possible for FCCs in NAPI mode    */
14125                         /* to submit a spurious interrupt while in poll  */
14126 @@ -352,6 +372,17 @@ fs_enet_interrupt(int irq, void *dev_id)
14127                                 __napi_schedule(&fep->napi);
14128                 }
14129  
14130 +               if (int_events & fep->ev_tx) {
14131 +                       napi_ok = napi_schedule_prep(&fep->napi_tx);
14132 +
14133 +                       (*fep->ops->napi_disable_tx)(dev);
14134 +                       (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
14135 +
14136 +                       /* NOTE: it is possible for FCCs in NAPI mode    */
14137 +                       /* to submit a spurious interrupt while in poll  */
14138 +                       if (napi_ok)
14139 +                               __napi_schedule(&fep->napi_tx);
14140 +               }
14141         }
14142  
14143         handled = nr > 0;
14144 @@ -459,9 +490,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
14145  {
14146         struct sk_buff *new_skb;
14147  
14148 -       if (skb_linearize(skb))
14149 -               return NULL;
14150 -
14151         /* Alloc new skb */
14152         new_skb = netdev_alloc_skb(dev, skb->len + 4);
14153         if (!new_skb)
14154 @@ -487,27 +515,12 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
14155         cbd_t __iomem *bdp;
14156         int curidx;
14157         u16 sc;
14158 -       int nr_frags;
14159 +       int nr_frags = skb_shinfo(skb)->nr_frags;
14160         skb_frag_t *frag;
14161         int len;
14162 -#ifdef CONFIG_FS_ENET_MPC5121_FEC
14163 -       int is_aligned = 1;
14164 -       int i;
14165 -
14166 -       if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
14167 -               is_aligned = 0;
14168 -       } else {
14169 -               nr_frags = skb_shinfo(skb)->nr_frags;
14170 -               frag = skb_shinfo(skb)->frags;
14171 -               for (i = 0; i < nr_frags; i++, frag++) {
14172 -                       if (!IS_ALIGNED(frag->page_offset, 4)) {
14173 -                               is_aligned = 0;
14174 -                               break;
14175 -                       }
14176 -               }
14177 -       }
14178  
14179 -       if (!is_aligned) {
14180 +#ifdef CONFIG_FS_ENET_MPC5121_FEC
14181 +       if (((unsigned long)skb->data) & 0x3) {
14182                 skb = tx_skb_align_workaround(dev, skb);
14183                 if (!skb) {
14184                         /*
14185 @@ -519,7 +532,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
14186                 }
14187         }
14188  #endif
14189 -
14190         spin_lock(&fep->tx_lock);
14191  
14192         /*
14193 @@ -527,7 +539,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
14194          */
14195         bdp = fep->cur_tx;
14196  
14197 -       nr_frags = skb_shinfo(skb)->nr_frags;
14198         if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
14199                 netif_stop_queue(dev);
14200                 spin_unlock(&fep->tx_lock);
14201 @@ -558,8 +569,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
14202         frag = skb_shinfo(skb)->frags;
14203         while (nr_frags) {
14204                 CBDC_SC(bdp,
14205 -                       BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
14206 -                       BD_ENET_TX_TC);
14207 +                       BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
14208                 CBDS_SC(bdp, BD_ENET_TX_READY);
14209  
14210                 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
14211 @@ -624,15 +634,14 @@ static void fs_timeout(struct net_device *dev)
14212         spin_lock_irqsave(&fep->lock, flags);
14213  
14214         if (dev->flags & IFF_UP) {
14215 -               phy_stop(dev->phydev);
14216 +               phy_stop(fep->phydev);
14217                 (*fep->ops->stop)(dev);
14218                 (*fep->ops->restart)(dev);
14219 -               phy_start(dev->phydev);
14220 +               phy_start(fep->phydev);
14221         }
14222  
14223 -       phy_start(dev->phydev);
14224 -       wake = fep->tx_free >= MAX_SKB_FRAGS &&
14225 -              !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
14226 +       phy_start(fep->phydev);
14227 +       wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
14228         spin_unlock_irqrestore(&fep->lock, flags);
14229  
14230         if (wake)
14231 @@ -645,7 +654,7 @@ static void fs_timeout(struct net_device *dev)
14232  static void generic_adjust_link(struct  net_device *dev)
14233  {
14234         struct fs_enet_private *fep = netdev_priv(dev);
14235 -       struct phy_device *phydev = dev->phydev;
14236 +       struct phy_device *phydev = fep->phydev;
14237         int new_state = 0;
14238  
14239         if (phydev->link) {
14240 @@ -714,6 +723,8 @@ static int fs_init_phy(struct net_device *dev)
14241                 return -ENODEV;
14242         }
14243  
14244 +       fep->phydev = phydev;
14245 +
14246         return 0;
14247  }
14248  
14249 @@ -724,10 +735,11 @@ static int fs_enet_open(struct net_device *dev)
14250         int err;
14251  
14252         /* to initialize the fep->cur_rx,... */
14253 -       /* not doing this, will cause a crash in fs_enet_napi */
14254 +       /* not doing this, will cause a crash in fs_enet_rx_napi */
14255         fs_init_bds(fep->ndev);
14256  
14257         napi_enable(&fep->napi);
14258 +       napi_enable(&fep->napi_tx);
14259  
14260         /* Install our interrupt handler. */
14261         r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
14262 @@ -735,6 +747,7 @@ static int fs_enet_open(struct net_device *dev)
14263         if (r != 0) {
14264                 dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
14265                 napi_disable(&fep->napi);
14266 +               napi_disable(&fep->napi_tx);
14267                 return -EINVAL;
14268         }
14269  
14270 @@ -742,9 +755,10 @@ static int fs_enet_open(struct net_device *dev)
14271         if (err) {
14272                 free_irq(fep->interrupt, dev);
14273                 napi_disable(&fep->napi);
14274 +               napi_disable(&fep->napi_tx);
14275                 return err;
14276         }
14277 -       phy_start(dev->phydev);
14278 +       phy_start(fep->phydev);
14279  
14280         netif_start_queue(dev);
14281  
14282 @@ -759,7 +773,8 @@ static int fs_enet_close(struct net_device *dev)
14283         netif_stop_queue(dev);
14284         netif_carrier_off(dev);
14285         napi_disable(&fep->napi);
14286 -       phy_stop(dev->phydev);
14287 +       napi_disable(&fep->napi_tx);
14288 +       phy_stop(fep->phydev);
14289  
14290         spin_lock_irqsave(&fep->lock, flags);
14291         spin_lock(&fep->tx_lock);
14292 @@ -768,7 +783,8 @@ static int fs_enet_close(struct net_device *dev)
14293         spin_unlock_irqrestore(&fep->lock, flags);
14294  
14295         /* release any irqs */
14296 -       phy_disconnect(dev->phydev);
14297 +       phy_disconnect(fep->phydev);
14298 +       fep->phydev = NULL;
14299         free_irq(fep->interrupt, dev);
14300  
14301         return 0;
14302 @@ -813,82 +829,64 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
14303                 regs->version = 0;
14304  }
14305  
14306 -static int fs_nway_reset(struct net_device *dev)
14307 +static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
14308  {
14309 -       return 0;
14310 +       struct fs_enet_private *fep = netdev_priv(dev);
14311 +
14312 +       if (!fep->phydev)
14313 +               return -ENODEV;
14314 +
14315 +       return phy_ethtool_gset(fep->phydev, cmd);
14316  }
14317  
14318 -static u32 fs_get_msglevel(struct net_device *dev)
14319 +static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
14320  {
14321         struct fs_enet_private *fep = netdev_priv(dev);
14322 -       return fep->msg_enable;
14323 +
14324 +       if (!fep->phydev)
14325 +               return -ENODEV;
14326 +
14327 +       return phy_ethtool_sset(fep->phydev, cmd);
14328  }
14329  
14330 -static void fs_set_msglevel(struct net_device *dev, u32 value)
14331 +static int fs_nway_reset(struct net_device *dev)
14332  {
14333 -       struct fs_enet_private *fep = netdev_priv(dev);
14334 -       fep->msg_enable = value;
14335 +       return 0;
14336  }
14337  
14338 -static int fs_get_tunable(struct net_device *dev,
14339 -                         const struct ethtool_tunable *tuna, void *data)
14340 +static u32 fs_get_msglevel(struct net_device *dev)
14341  {
14342         struct fs_enet_private *fep = netdev_priv(dev);
14343 -       struct fs_platform_info *fpi = fep->fpi;
14344 -       int ret = 0;
14345 -
14346 -       switch (tuna->id) {
14347 -       case ETHTOOL_RX_COPYBREAK:
14348 -               *(u32 *)data = fpi->rx_copybreak;
14349 -               break;
14350 -       default:
14351 -               ret = -EINVAL;
14352 -               break;
14353 -       }
14354 -
14355 -       return ret;
14356 +       return fep->msg_enable;
14357  }
14358  
14359 -static int fs_set_tunable(struct net_device *dev,
14360 -                         const struct ethtool_tunable *tuna, const void *data)
14361 +static void fs_set_msglevel(struct net_device *dev, u32 value)
14362  {
14363         struct fs_enet_private *fep = netdev_priv(dev);
14364 -       struct fs_platform_info *fpi = fep->fpi;
14365 -       int ret = 0;
14366 -
14367 -       switch (tuna->id) {
14368 -       case ETHTOOL_RX_COPYBREAK:
14369 -               fpi->rx_copybreak = *(u32 *)data;
14370 -               break;
14371 -       default:
14372 -               ret = -EINVAL;
14373 -               break;
14374 -       }
14375 -
14376 -       return ret;
14377 +       fep->msg_enable = value;
14378  }
14379  
14380  static const struct ethtool_ops fs_ethtool_ops = {
14381         .get_drvinfo = fs_get_drvinfo,
14382         .get_regs_len = fs_get_regs_len,
14383 +       .get_settings = fs_get_settings,
14384 +       .set_settings = fs_set_settings,
14385         .nway_reset = fs_nway_reset,
14386         .get_link = ethtool_op_get_link,
14387         .get_msglevel = fs_get_msglevel,
14388         .set_msglevel = fs_set_msglevel,
14389         .get_regs = fs_get_regs,
14390         .get_ts_info = ethtool_op_get_ts_info,
14391 -       .get_link_ksettings = phy_ethtool_get_link_ksettings,
14392 -       .set_link_ksettings = phy_ethtool_set_link_ksettings,
14393 -       .get_tunable = fs_get_tunable,
14394 -       .set_tunable = fs_set_tunable,
14395  };
14396  
14397  static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
14398  {
14399 +       struct fs_enet_private *fep = netdev_priv(dev);
14400 +
14401         if (!netif_running(dev))
14402                 return -EINVAL;
14403  
14404 -       return phy_mii_ioctl(dev->phydev, rq, cmd);
14405 +       return phy_mii_ioctl(fep->phydev, rq, cmd);
14406  }
14407  
14408  extern int fs_mii_connect(struct net_device *dev);
14409 @@ -948,8 +946,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
14410                 fpi->cp_command = *data;
14411         }
14412  
14413 -       fpi->rx_ring = RX_RING_SIZE;
14414 -       fpi->tx_ring = TX_RING_SIZE;
14415 +       fpi->rx_ring = 32;
14416 +       fpi->tx_ring = 64;
14417         fpi->rx_copybreak = 240;
14418         fpi->napi_weight = 17;
14419         fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
14420 @@ -980,7 +978,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
14421                 err = clk_prepare_enable(clk);
14422                 if (err) {
14423                         ret = err;
14424 -                       goto out_deregister_fixed_link;
14425 +                       goto out_free_fpi;
14426                 }
14427                 fpi->clk_per = clk;
14428         }
14429 @@ -1033,7 +1031,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
14430  
14431         ndev->netdev_ops = &fs_enet_netdev_ops;
14432         ndev->watchdog_timeo = 2 * HZ;
14433 -       netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
14434 +       netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
14435 +       netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
14436  
14437         ndev->ethtool_ops = &fs_ethtool_ops;
14438  
14439 @@ -1061,9 +1060,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
14440         of_node_put(fpi->phy_node);
14441         if (fpi->clk_per)
14442                 clk_disable_unprepare(fpi->clk_per);
14443 -out_deregister_fixed_link:
14444 -       if (of_phy_is_fixed_link(ofdev->dev.of_node))
14445 -               of_phy_deregister_fixed_link(ofdev->dev.of_node);
14446  out_free_fpi:
14447         kfree(fpi);
14448         return ret;
14449 @@ -1082,8 +1078,6 @@ static int fs_enet_remove(struct platform_device *ofdev)
14450         of_node_put(fep->fpi->phy_node);
14451         if (fep->fpi->clk_per)
14452                 clk_disable_unprepare(fep->fpi->clk_per);
14453 -       if (of_phy_is_fixed_link(ofdev->dev.of_node))
14454 -               of_phy_deregister_fixed_link(ofdev->dev.of_node);
14455         free_netdev(ndev);
14456         return 0;
14457  }
14458 diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
14459 index fee24c8..f184d8f 100644
14460 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
14461 +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
14462 @@ -81,9 +81,12 @@ struct fs_ops {
14463         void (*adjust_link)(struct net_device *dev);
14464         void (*restart)(struct net_device *dev);
14465         void (*stop)(struct net_device *dev);
14466 -       void (*napi_clear_event)(struct net_device *dev);
14467 -       void (*napi_enable)(struct net_device *dev);
14468 -       void (*napi_disable)(struct net_device *dev);
14469 +       void (*napi_clear_rx_event)(struct net_device *dev);
14470 +       void (*napi_enable_rx)(struct net_device *dev);
14471 +       void (*napi_disable_rx)(struct net_device *dev);
14472 +       void (*napi_clear_tx_event)(struct net_device *dev);
14473 +       void (*napi_enable_tx)(struct net_device *dev);
14474 +       void (*napi_disable_tx)(struct net_device *dev);
14475         void (*rx_bd_done)(struct net_device *dev);
14476         void (*tx_kickstart)(struct net_device *dev);
14477         u32 (*get_int_events)(struct net_device *dev);
14478 @@ -119,6 +122,7 @@ struct phy_info {
14479  
14480  struct fs_enet_private {
14481         struct napi_struct napi;
14482 +       struct napi_struct napi_tx;
14483         struct device *dev;     /* pointer back to the device (must be initialized first) */
14484         struct net_device *ndev;
14485         spinlock_t lock;        /* during all ops except TX pckt processing */
14486 @@ -145,11 +149,14 @@ struct fs_enet_private {
14487         unsigned int last_mii_status;
14488         int interrupt;
14489  
14490 +       struct phy_device *phydev;
14491         int oldduplex, oldspeed, oldlink;       /* current settings */
14492  
14493         /* event masks */
14494 -       u32 ev_napi;            /* mask of NAPI events */
14495 -       u32 ev;                 /* event mask          */
14496 +       u32 ev_napi_rx;         /* mask of NAPI rx events */
14497 +       u32 ev_napi_tx;         /* mask of NAPI tx events */
14498 +       u32 ev_rx;              /* rx event mask          */
14499 +       u32 ev_tx;              /* tx event mask          */
14500         u32 ev_err;             /* error event mask       */
14501  
14502         u16 bd_rx_empty;        /* mask of BD rx empty    */
14503 diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
14504 index 120c758..08f5b91 100644
14505 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
14506 +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
14507 @@ -90,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
14508         int ret = -EINVAL;
14509  
14510         fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
14511 -       if (!fep->interrupt)
14512 +       if (fep->interrupt == NO_IRQ)
14513                 goto out;
14514  
14515         fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
14516 @@ -124,8 +124,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
14517         return ret;
14518  }
14519  
14520 -#define FCC_NAPI_EVENT_MSK     (FCC_ENET_RXF | FCC_ENET_RXB | FCC_ENET_TXB)
14521 -#define FCC_EVENT              (FCC_ENET_RXF | FCC_ENET_TXB)
14522 +#define FCC_NAPI_RX_EVENT_MSK  (FCC_ENET_RXF | FCC_ENET_RXB)
14523 +#define FCC_NAPI_TX_EVENT_MSK  (FCC_ENET_TXB)
14524 +#define FCC_RX_EVENT           (FCC_ENET_RXF)
14525 +#define FCC_TX_EVENT           (FCC_ENET_TXB)
14526  #define FCC_ERR_EVENT_MSK      (FCC_ENET_TXE)
14527  
14528  static int setup_data(struct net_device *dev)
14529 @@ -135,8 +137,10 @@ static int setup_data(struct net_device *dev)
14530         if (do_pd_setup(fep) != 0)
14531                 return -EINVAL;
14532  
14533 -       fep->ev_napi = FCC_NAPI_EVENT_MSK;
14534 -       fep->ev = FCC_EVENT;
14535 +       fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
14536 +       fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
14537 +       fep->ev_rx = FCC_RX_EVENT;
14538 +       fep->ev_tx = FCC_TX_EVENT;
14539         fep->ev_err = FCC_ERR_EVENT_MSK;
14540  
14541         return 0;
14542 @@ -366,7 +370,7 @@ static void restart(struct net_device *dev)
14543  
14544         /* adjust to speed (for RMII mode) */
14545         if (fpi->use_rmii) {
14546 -               if (dev->phydev->speed == 100)
14547 +               if (fep->phydev->speed == 100)
14548                         C8(fcccp, fcc_gfemr, 0x20);
14549                 else
14550                         S8(fcccp, fcc_gfemr, 0x20);
14551 @@ -392,7 +396,7 @@ static void restart(struct net_device *dev)
14552                 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
14553  
14554         /* adjust to duplex mode */
14555 -       if (dev->phydev->duplex)
14556 +       if (fep->phydev->duplex)
14557                 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
14558         else
14559                 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
14560 @@ -420,28 +424,52 @@ static void stop(struct net_device *dev)
14561         fs_cleanup_bds(dev);
14562  }
14563  
14564 -static void napi_clear_event_fs(struct net_device *dev)
14565 +static void napi_clear_rx_event(struct net_device *dev)
14566  {
14567         struct fs_enet_private *fep = netdev_priv(dev);
14568         fcc_t __iomem *fccp = fep->fcc.fccp;
14569  
14570 -       W16(fccp, fcc_fcce, FCC_NAPI_EVENT_MSK);
14571 +       W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
14572  }
14573  
14574 -static void napi_enable_fs(struct net_device *dev)
14575 +static void napi_enable_rx(struct net_device *dev)
14576  {
14577         struct fs_enet_private *fep = netdev_priv(dev);
14578         fcc_t __iomem *fccp = fep->fcc.fccp;
14579  
14580 -       S16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
14581 +       S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
14582  }
14583  
14584 -static void napi_disable_fs(struct net_device *dev)
14585 +static void napi_disable_rx(struct net_device *dev)
14586  {
14587         struct fs_enet_private *fep = netdev_priv(dev);
14588         fcc_t __iomem *fccp = fep->fcc.fccp;
14589  
14590 -       C16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
14591 +       C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
14592 +}
14593 +
14594 +static void napi_clear_tx_event(struct net_device *dev)
14595 +{
14596 +       struct fs_enet_private *fep = netdev_priv(dev);
14597 +       fcc_t __iomem *fccp = fep->fcc.fccp;
14598 +
14599 +       W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
14600 +}
14601 +
14602 +static void napi_enable_tx(struct net_device *dev)
14603 +{
14604 +       struct fs_enet_private *fep = netdev_priv(dev);
14605 +       fcc_t __iomem *fccp = fep->fcc.fccp;
14606 +
14607 +       S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
14608 +}
14609 +
14610 +static void napi_disable_tx(struct net_device *dev)
14611 +{
14612 +       struct fs_enet_private *fep = netdev_priv(dev);
14613 +       fcc_t __iomem *fccp = fep->fcc.fccp;
14614 +
14615 +       C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
14616  }
14617  
14618  static void rx_bd_done(struct net_device *dev)
14619 @@ -524,7 +552,7 @@ static void tx_restart(struct net_device *dev)
14620         cbd_t __iomem *prev_bd;
14621         cbd_t __iomem *last_tx_bd;
14622  
14623 -       last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
14624 +       last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
14625  
14626         /* get the current bd held in TBPTR  and scan back from this point */
14627         recheck_bd = curr_tbptr = (cbd_t __iomem *)
14628 @@ -567,9 +595,12 @@ const struct fs_ops fs_fcc_ops = {
14629         .set_multicast_list     = set_multicast_list,
14630         .restart                = restart,
14631         .stop                   = stop,
14632 -       .napi_clear_event       = napi_clear_event_fs,
14633 -       .napi_enable            = napi_enable_fs,
14634 -       .napi_disable           = napi_disable_fs,
14635 +       .napi_clear_rx_event    = napi_clear_rx_event,
14636 +       .napi_enable_rx         = napi_enable_rx,
14637 +       .napi_disable_rx        = napi_disable_rx,
14638 +       .napi_clear_tx_event    = napi_clear_tx_event,
14639 +       .napi_enable_tx         = napi_enable_tx,
14640 +       .napi_disable_tx        = napi_disable_tx,
14641         .rx_bd_done             = rx_bd_done,
14642         .tx_kickstart           = tx_kickstart,
14643         .get_int_events         = get_int_events,
14644 diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
14645 index 777beff..b34214e 100644
14646 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
14647 +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
14648 @@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
14649         struct platform_device *ofdev = to_platform_device(fep->dev);
14650  
14651         fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
14652 -       if (!fep->interrupt)
14653 +       if (fep->interrupt == NO_IRQ)
14654                 return -EINVAL;
14655  
14656         fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
14657 @@ -109,8 +109,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
14658         return 0;
14659  }
14660  
14661 -#define FEC_NAPI_EVENT_MSK     (FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
14662 -#define FEC_EVENT              (FEC_ENET_RXF | FEC_ENET_TXF)
14663 +#define FEC_NAPI_RX_EVENT_MSK  (FEC_ENET_RXF | FEC_ENET_RXB)
14664 +#define FEC_NAPI_TX_EVENT_MSK  (FEC_ENET_TXF | FEC_ENET_TXB)
14665 +#define FEC_RX_EVENT           (FEC_ENET_RXF)
14666 +#define FEC_TX_EVENT           (FEC_ENET_TXF)
14667  #define FEC_ERR_EVENT_MSK      (FEC_ENET_HBERR | FEC_ENET_BABR | \
14668                                  FEC_ENET_BABT | FEC_ENET_EBERR)
14669  
14670 @@ -124,8 +126,10 @@ static int setup_data(struct net_device *dev)
14671         fep->fec.hthi = 0;
14672         fep->fec.htlo = 0;
14673  
14674 -       fep->ev_napi = FEC_NAPI_EVENT_MSK;
14675 -       fep->ev = FEC_EVENT;
14676 +       fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
14677 +       fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
14678 +       fep->ev_rx = FEC_RX_EVENT;
14679 +       fep->ev_tx = FEC_TX_EVENT;
14680         fep->ev_err = FEC_ERR_EVENT_MSK;
14681  
14682         return 0;
14683 @@ -250,7 +254,7 @@ static void restart(struct net_device *dev)
14684         int r;
14685         u32 addrhi, addrlo;
14686  
14687 -       struct mii_bus *mii = dev->phydev->mdio.bus;
14688 +       struct mii_bus* mii = fep->phydev->bus;
14689         struct fec_info* fec_inf = mii->priv;
14690  
14691         r = whack_reset(fep->fec.fecp);
14692 @@ -329,7 +333,7 @@ static void restart(struct net_device *dev)
14693         /*
14694          * adjust to duplex mode
14695          */
14696 -       if (dev->phydev->duplex) {
14697 +       if (fep->phydev->duplex) {
14698                 FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
14699                 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);     /* FD enable */
14700         } else {
14701 @@ -359,7 +363,7 @@ static void stop(struct net_device *dev)
14702         const struct fs_platform_info *fpi = fep->fpi;
14703         struct fec __iomem *fecp = fep->fec.fecp;
14704  
14705 -       struct fec_info *feci = dev->phydev->mdio.bus->priv;
14706 +       struct fec_info* feci= fep->phydev->bus->priv;
14707  
14708         int i;
14709  
14710 @@ -392,28 +396,52 @@ static void stop(struct net_device *dev)
14711         }
14712  }
14713  
14714 -static void napi_clear_event_fs(struct net_device *dev)
14715 +static void napi_clear_rx_event(struct net_device *dev)
14716  {
14717         struct fs_enet_private *fep = netdev_priv(dev);
14718         struct fec __iomem *fecp = fep->fec.fecp;
14719  
14720 -       FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
14721 +       FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
14722  }
14723  
14724 -static void napi_enable_fs(struct net_device *dev)
14725 +static void napi_enable_rx(struct net_device *dev)
14726  {
14727         struct fs_enet_private *fep = netdev_priv(dev);
14728         struct fec __iomem *fecp = fep->fec.fecp;
14729  
14730 -       FS(fecp, imask, FEC_NAPI_EVENT_MSK);
14731 +       FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
14732  }
14733  
14734 -static void napi_disable_fs(struct net_device *dev)
14735 +static void napi_disable_rx(struct net_device *dev)
14736  {
14737         struct fs_enet_private *fep = netdev_priv(dev);
14738         struct fec __iomem *fecp = fep->fec.fecp;
14739  
14740 -       FC(fecp, imask, FEC_NAPI_EVENT_MSK);
14741 +       FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
14742 +}
14743 +
14744 +static void napi_clear_tx_event(struct net_device *dev)
14745 +{
14746 +       struct fs_enet_private *fep = netdev_priv(dev);
14747 +       struct fec __iomem *fecp = fep->fec.fecp;
14748 +
14749 +       FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
14750 +}
14751 +
14752 +static void napi_enable_tx(struct net_device *dev)
14753 +{
14754 +       struct fs_enet_private *fep = netdev_priv(dev);
14755 +       struct fec __iomem *fecp = fep->fec.fecp;
14756 +
14757 +       FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
14758 +}
14759 +
14760 +static void napi_disable_tx(struct net_device *dev)
14761 +{
14762 +       struct fs_enet_private *fep = netdev_priv(dev);
14763 +       struct fec __iomem *fecp = fep->fec.fecp;
14764 +
14765 +       FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
14766  }
14767  
14768  static void rx_bd_done(struct net_device *dev)
14769 @@ -485,9 +513,12 @@ const struct fs_ops fs_fec_ops = {
14770         .set_multicast_list     = set_multicast_list,
14771         .restart                = restart,
14772         .stop                   = stop,
14773 -       .napi_clear_event       = napi_clear_event_fs,
14774 -       .napi_enable            = napi_enable_fs,
14775 -       .napi_disable           = napi_disable_fs,
14776 +       .napi_clear_rx_event    = napi_clear_rx_event,
14777 +       .napi_enable_rx         = napi_enable_rx,
14778 +       .napi_disable_rx        = napi_disable_rx,
14779 +       .napi_clear_tx_event    = napi_clear_tx_event,
14780 +       .napi_enable_tx         = napi_enable_tx,
14781 +       .napi_disable_tx        = napi_disable_tx,
14782         .rx_bd_done             = rx_bd_done,
14783         .tx_kickstart           = tx_kickstart,
14784         .get_int_events         = get_int_events,
14785 diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
14786 index 15abd37..7a184e8 100644
14787 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
14788 +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
14789 @@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
14790         struct platform_device *ofdev = to_platform_device(fep->dev);
14791  
14792         fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
14793 -       if (!fep->interrupt)
14794 +       if (fep->interrupt == NO_IRQ)
14795                 return -EINVAL;
14796  
14797         fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
14798 @@ -115,8 +115,10 @@ static int do_pd_setup(struct fs_enet_private *fep)
14799         return 0;
14800  }
14801  
14802 -#define SCC_NAPI_EVENT_MSK     (SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
14803 -#define SCC_EVENT              (SCCE_ENET_RXF | SCCE_ENET_TXB)
14804 +#define SCC_NAPI_RX_EVENT_MSK  (SCCE_ENET_RXF | SCCE_ENET_RXB)
14805 +#define SCC_NAPI_TX_EVENT_MSK  (SCCE_ENET_TXB)
14806 +#define SCC_RX_EVENT           (SCCE_ENET_RXF)
14807 +#define SCC_TX_EVENT           (SCCE_ENET_TXB)
14808  #define SCC_ERR_EVENT_MSK      (SCCE_ENET_TXE | SCCE_ENET_BSY)
14809  
14810  static int setup_data(struct net_device *dev)
14811 @@ -128,8 +130,10 @@ static int setup_data(struct net_device *dev)
14812         fep->scc.hthi = 0;
14813         fep->scc.htlo = 0;
14814  
14815 -       fep->ev_napi = SCC_NAPI_EVENT_MSK;
14816 -       fep->ev = SCC_EVENT | SCCE_ENET_TXE;
14817 +       fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
14818 +       fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
14819 +       fep->ev_rx = SCC_RX_EVENT;
14820 +       fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
14821         fep->ev_err = SCC_ERR_EVENT_MSK;
14822  
14823         return 0;
14824 @@ -348,7 +352,7 @@ static void restart(struct net_device *dev)
14825         W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
14826  
14827         /* Set full duplex mode if needed */
14828 -       if (dev->phydev->duplex)
14829 +       if (fep->phydev->duplex)
14830                 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
14831  
14832         /* Restore multicast and promiscuous settings */
14833 @@ -375,28 +379,52 @@ static void stop(struct net_device *dev)
14834         fs_cleanup_bds(dev);
14835  }
14836  
14837 -static void napi_clear_event_fs(struct net_device *dev)
14838 +static void napi_clear_rx_event(struct net_device *dev)
14839  {
14840         struct fs_enet_private *fep = netdev_priv(dev);
14841         scc_t __iomem *sccp = fep->scc.sccp;
14842  
14843 -       W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
14844 +       W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
14845  }
14846  
14847 -static void napi_enable_fs(struct net_device *dev)
14848 +static void napi_enable_rx(struct net_device *dev)
14849  {
14850         struct fs_enet_private *fep = netdev_priv(dev);
14851         scc_t __iomem *sccp = fep->scc.sccp;
14852  
14853 -       S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
14854 +       S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
14855  }
14856  
14857 -static void napi_disable_fs(struct net_device *dev)
14858 +static void napi_disable_rx(struct net_device *dev)
14859  {
14860         struct fs_enet_private *fep = netdev_priv(dev);
14861         scc_t __iomem *sccp = fep->scc.sccp;
14862  
14863 -       C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
14864 +       C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
14865 +}
14866 +
14867 +static void napi_clear_tx_event(struct net_device *dev)
14868 +{
14869 +       struct fs_enet_private *fep = netdev_priv(dev);
14870 +       scc_t __iomem *sccp = fep->scc.sccp;
14871 +
14872 +       W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
14873 +}
14874 +
14875 +static void napi_enable_tx(struct net_device *dev)
14876 +{
14877 +       struct fs_enet_private *fep = netdev_priv(dev);
14878 +       scc_t __iomem *sccp = fep->scc.sccp;
14879 +
14880 +       S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
14881 +}
14882 +
14883 +static void napi_disable_tx(struct net_device *dev)
14884 +{
14885 +       struct fs_enet_private *fep = netdev_priv(dev);
14886 +       scc_t __iomem *sccp = fep->scc.sccp;
14887 +
14888 +       C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
14889  }
14890  
14891  static void rx_bd_done(struct net_device *dev)
14892 @@ -469,9 +497,12 @@ const struct fs_ops fs_scc_ops = {
14893         .set_multicast_list     = set_multicast_list,
14894         .restart                = restart,
14895         .stop                   = stop,
14896 -       .napi_clear_event       = napi_clear_event_fs,
14897 -       .napi_enable            = napi_enable_fs,
14898 -       .napi_disable           = napi_disable_fs,
14899 +       .napi_clear_rx_event    = napi_clear_rx_event,
14900 +       .napi_enable_rx         = napi_enable_rx,
14901 +       .napi_disable_rx        = napi_disable_rx,
14902 +       .napi_clear_tx_event    = napi_clear_tx_event,
14903 +       .napi_enable_tx         = napi_enable_tx,
14904 +       .napi_disable_tx        = napi_disable_tx,
14905         .rx_bd_done             = rx_bd_done,
14906         .tx_kickstart           = tx_kickstart,
14907         .get_int_events         = get_int_events,
14908 diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
14909 index 1f015ed..68a428d 100644
14910 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
14911 +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
14912 @@ -172,16 +172,23 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
14913                 goto out_free_bus;
14914  
14915         new_bus->phy_mask = ~0;
14916 +       new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
14917 +       if (!new_bus->irq) {
14918 +               ret = -ENOMEM;
14919 +               goto out_unmap_regs;
14920 +       }
14921  
14922         new_bus->parent = &ofdev->dev;
14923         platform_set_drvdata(ofdev, new_bus);
14924  
14925         ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
14926         if (ret)
14927 -               goto out_unmap_regs;
14928 +               goto out_free_irqs;
14929  
14930         return 0;
14931  
14932 +out_free_irqs:
14933 +       kfree(new_bus->irq);
14934  out_unmap_regs:
14935         iounmap(bitbang->dir);
14936  out_free_bus:
14937 @@ -198,6 +205,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
14938         struct bb_info *bitbang = bus->priv;
14939  
14940         mdiobus_unregister(bus);
14941 +       kfree(bus->irq);
14942         free_mdio_bitbang(bus);
14943         iounmap(bitbang->dir);
14944         kfree(bitbang);
14945 diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
14946 index a89267b..2be383e 100644
14947 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
14948 +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
14949 @@ -166,16 +166,23 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
14950         clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed);
14951  
14952         new_bus->phy_mask = ~0;
14953 +       new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
14954 +       if (!new_bus->irq) {
14955 +               ret = -ENOMEM;
14956 +               goto out_unmap_regs;
14957 +       }
14958  
14959         new_bus->parent = &ofdev->dev;
14960         platform_set_drvdata(ofdev, new_bus);
14961  
14962         ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
14963         if (ret)
14964 -               goto out_unmap_regs;
14965 +               goto out_free_irqs;
14966  
14967         return 0;
14968  
14969 +out_free_irqs:
14970 +       kfree(new_bus->irq);
14971  out_unmap_regs:
14972         iounmap(fec->fecp);
14973  out_res:
14974 @@ -193,6 +200,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
14975         struct fec_info *fec = bus->priv;
14976  
14977         mdiobus_unregister(bus);
14978 +       kfree(bus->irq);
14979         iounmap(fec->fecp);
14980         kfree(fec);
14981         mdiobus_free(bus);
14982 diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
14983 index 446c7b3..3c40f6b 100644
14984 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
14985 +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
14986 @@ -29,7 +29,7 @@
14987  
14988  #include <asm/io.h>
14989  #if IS_ENABLED(CONFIG_UCC_GETH)
14990 -#include <soc/fsl/qe/ucc.h>
14991 +#include <asm/ucc.h>   /* for ucc_set_qe_mux_mii_mng() */
14992  #endif
14993  
14994  #include "gianfar.h"
14995 @@ -69,6 +69,7 @@ struct fsl_pq_mdio {
14996  struct fsl_pq_mdio_priv {
14997         void __iomem *map;
14998         struct fsl_pq_mii __iomem *regs;
14999 +       int irqs[PHY_MAX_ADDR];
15000  };
15001  
15002  /*
15003 @@ -195,15 +196,13 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
15004         return 0;
15005  }
15006  
15007 -#if IS_ENABLED(CONFIG_GIANFAR)
15008 +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
15009  /*
15010 - * Return the TBIPA address, starting from the address
15011 - * of the mapped GFAR MDIO registers (struct gfar)
15012   * This is mildly evil, but so is our hardware for doing this.
15013   * Also, we have to cast back to struct gfar because of
15014   * definition weirdness done in gianfar.h.
15015   */
15016 -static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
15017 +static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
15018  {
15019         struct gfar __iomem *enet_regs = p;
15020  
15021 @@ -211,15 +210,6 @@ static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
15022  }
15023  
15024  /*
15025 - * Return the TBIPA address, starting from the address
15026 - * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
15027 - */
15028 -static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
15029 -{
15030 -       return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
15031 -}
15032 -
15033 -/*
15034   * Return the TBIPAR address for an eTSEC2 node
15035   */
15036  static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
15037 @@ -228,14 +218,13 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
15038  }
15039  #endif
15040  
15041 -#if IS_ENABLED(CONFIG_UCC_GETH)
15042 +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
15043  /*
15044 - * Return the TBIPAR address for a QE MDIO node, starting from the address
15045 - * of the mapped MII registers (struct fsl_pq_mii)
15046 + * Return the TBIPAR address for a QE MDIO node
15047   */
15048  static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
15049  {
15050 -       struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
15051 +       struct fsl_pq_mdio __iomem *mdio = p;
15052  
15053         return &mdio->utbipar;
15054  }
15055 @@ -306,19 +295,19 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
15056  #endif
15057  
15058  static const struct of_device_id fsl_pq_mdio_match[] = {
15059 -#if IS_ENABLED(CONFIG_GIANFAR)
15060 +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
15061         {
15062                 .compatible = "fsl,gianfar-tbi",
15063                 .data = &(struct fsl_pq_mdio_data) {
15064                         .mii_offset = 0,
15065 -                       .get_tbipa = get_gfar_tbipa_from_mii,
15066 +                       .get_tbipa = get_gfar_tbipa,
15067                 },
15068         },
15069         {
15070                 .compatible = "fsl,gianfar-mdio",
15071                 .data = &(struct fsl_pq_mdio_data) {
15072                         .mii_offset = 0,
15073 -                       .get_tbipa = get_gfar_tbipa_from_mii,
15074 +                       .get_tbipa = get_gfar_tbipa,
15075                 },
15076         },
15077         {
15078 @@ -326,7 +315,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
15079                 .compatible = "gianfar",
15080                 .data = &(struct fsl_pq_mdio_data) {
15081                         .mii_offset = offsetof(struct fsl_pq_mdio, mii),
15082 -                       .get_tbipa = get_gfar_tbipa_from_mdio,
15083 +                       .get_tbipa = get_gfar_tbipa,
15084                 },
15085         },
15086         {
15087 @@ -344,7 +333,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
15088                 },
15089         },
15090  #endif
15091 -#if IS_ENABLED(CONFIG_UCC_GETH)
15092 +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
15093         {
15094                 .compatible = "fsl,ucc-mdio",
15095                 .data = &(struct fsl_pq_mdio_data) {
15096 @@ -400,6 +389,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
15097         new_bus->read = &fsl_pq_mdio_read;
15098         new_bus->write = &fsl_pq_mdio_write;
15099         new_bus->reset = &fsl_pq_mdio_reset;
15100 +       new_bus->irq = priv->irqs;
15101  
15102         err = of_address_to_resource(np, 0, &res);
15103         if (err < 0) {
15104 @@ -455,16 +445,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
15105  
15106                         tbipa = data->get_tbipa(priv->map);
15107  
15108 -                       /*
15109 -                        * Add consistency check to make sure TBI is contained
15110 -                        * within the mapped range (not because we would get a
15111 -                        * segfault, rather to catch bugs in computing TBI
15112 -                        * address). Print error message but continue anyway.
15113 -                        */
15114 -                       if ((void *)tbipa > priv->map + resource_size(&res) - 4)
15115 -                               dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
15116 -                                       ((void *)tbipa - priv->map) + 4);
15117 -
15118                         iowrite32be(be32_to_cpup(prop), tbipa);
15119                 }
15120         }
15121 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
15122 index 9061c2f..4ee080d 100644
15123 --- a/drivers/net/ethernet/freescale/gianfar.c
15124 +++ b/drivers/net/ethernet/freescale/gianfar.c
15125 @@ -107,17 +107,17 @@
15126  
15127  #include "gianfar.h"
15128  
15129 -#define TX_TIMEOUT      (5*HZ)
15130 +#define TX_TIMEOUT      (1*HZ)
15131  
15132 -const char gfar_driver_version[] = "2.0";
15133 +const char gfar_driver_version[] = "1.3";
15134  
15135  static int gfar_enet_open(struct net_device *dev);
15136  static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
15137  static void gfar_reset_task(struct work_struct *work);
15138  static void gfar_timeout(struct net_device *dev);
15139  static int gfar_close(struct net_device *dev);
15140 -static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
15141 -                               int alloc_cnt);
15142 +static struct sk_buff *gfar_new_skb(struct net_device *dev,
15143 +                                   dma_addr_t *bufaddr);
15144  static int gfar_set_mac_address(struct net_device *dev);
15145  static int gfar_change_mtu(struct net_device *dev, int new_mtu);
15146  static irqreturn_t gfar_error(int irq, void *dev_id);
15147 @@ -141,7 +141,8 @@ static void gfar_netpoll(struct net_device *dev);
15148  #endif
15149  int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
15150  static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
15151 -static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
15152 +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
15153 +                              int amount_pull, struct napi_struct *napi);
15154  static void gfar_halt_nodisable(struct gfar_private *priv);
15155  static void gfar_clear_exact_match(struct net_device *dev);
15156  static void gfar_set_mac_for_addr(struct net_device *dev, int num,
15157 @@ -168,15 +169,17 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
15158         bdp->lstatus = cpu_to_be32(lstatus);
15159  }
15160  
15161 -static void gfar_init_bds(struct net_device *ndev)
15162 +static int gfar_init_bds(struct net_device *ndev)
15163  {
15164         struct gfar_private *priv = netdev_priv(ndev);
15165         struct gfar __iomem *regs = priv->gfargrp[0].regs;
15166         struct gfar_priv_tx_q *tx_queue = NULL;
15167         struct gfar_priv_rx_q *rx_queue = NULL;
15168         struct txbd8 *txbdp;
15169 +       struct rxbd8 *rxbdp;
15170         u32 __iomem *rfbptr;
15171         int i, j;
15172 +       dma_addr_t bufaddr;
15173  
15174         for (i = 0; i < priv->num_tx_queues; i++) {
15175                 tx_queue = priv->tx_queue[i];
15176 @@ -204,26 +207,40 @@ static void gfar_init_bds(struct net_device *ndev)
15177         rfbptr = &regs->rfbptr0;
15178         for (i = 0; i < priv->num_rx_queues; i++) {
15179                 rx_queue = priv->rx_queue[i];
15180 +               rx_queue->cur_rx = rx_queue->rx_bd_base;
15181 +               rx_queue->skb_currx = 0;
15182 +               rxbdp = rx_queue->rx_bd_base;
15183  
15184 -               rx_queue->next_to_clean = 0;
15185 -               rx_queue->next_to_use = 0;
15186 -               rx_queue->next_to_alloc = 0;
15187 +               for (j = 0; j < rx_queue->rx_ring_size; j++) {
15188 +                       struct sk_buff *skb = rx_queue->rx_skbuff[j];
15189  
15190 -               /* make sure next_to_clean != next_to_use after this
15191 -                * by leaving at least 1 unused descriptor
15192 -                */
15193 -               gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
15194 +                       if (skb) {
15195 +                               bufaddr = be32_to_cpu(rxbdp->bufPtr);
15196 +                       } else {
15197 +                               skb = gfar_new_skb(ndev, &bufaddr);
15198 +                               if (!skb) {
15199 +                                       netdev_err(ndev, "Can't allocate RX buffers\n");
15200 +                                       return -ENOMEM;
15201 +                               }
15202 +                               rx_queue->rx_skbuff[j] = skb;
15203 +                       }
15204 +
15205 +                       gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
15206 +                       rxbdp++;
15207 +               }
15208  
15209                 rx_queue->rfbptr = rfbptr;
15210                 rfbptr += 2;
15211         }
15212 +
15213 +       return 0;
15214  }
15215  
15216  static int gfar_alloc_skb_resources(struct net_device *ndev)
15217  {
15218         void *vaddr;
15219         dma_addr_t addr;
15220 -       int i, j;
15221 +       int i, j, k;
15222         struct gfar_private *priv = netdev_priv(ndev);
15223         struct device *dev = priv->dev;
15224         struct gfar_priv_tx_q *tx_queue = NULL;
15225 @@ -262,8 +279,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
15226                 rx_queue = priv->rx_queue[i];
15227                 rx_queue->rx_bd_base = vaddr;
15228                 rx_queue->rx_bd_dma_base = addr;
15229 -               rx_queue->ndev = ndev;
15230 -               rx_queue->dev = dev;
15231 +               rx_queue->dev = ndev;
15232                 addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
15233                 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
15234         }
15235 @@ -278,20 +294,25 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
15236                 if (!tx_queue->tx_skbuff)
15237                         goto cleanup;
15238  
15239 -               for (j = 0; j < tx_queue->tx_ring_size; j++)
15240 -                       tx_queue->tx_skbuff[j] = NULL;
15241 +               for (k = 0; k < tx_queue->tx_ring_size; k++)
15242 +                       tx_queue->tx_skbuff[k] = NULL;
15243         }
15244  
15245         for (i = 0; i < priv->num_rx_queues; i++) {
15246                 rx_queue = priv->rx_queue[i];
15247 -               rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
15248 -                                           sizeof(*rx_queue->rx_buff),
15249 -                                           GFP_KERNEL);
15250 -               if (!rx_queue->rx_buff)
15251 +               rx_queue->rx_skbuff =
15252 +                       kmalloc_array(rx_queue->rx_ring_size,
15253 +                                     sizeof(*rx_queue->rx_skbuff),
15254 +                                     GFP_KERNEL);
15255 +               if (!rx_queue->rx_skbuff)
15256                         goto cleanup;
15257 +
15258 +               for (j = 0; j < rx_queue->rx_ring_size; j++)
15259 +                       rx_queue->rx_skbuff[j] = NULL;
15260         }
15261  
15262 -       gfar_init_bds(ndev);
15263 +       if (gfar_init_bds(ndev))
15264 +               goto cleanup;
15265  
15266         return 0;
15267  
15268 @@ -333,16 +354,28 @@ static void gfar_init_rqprm(struct gfar_private *priv)
15269         }
15270  }
15271  
15272 -static void gfar_rx_offload_en(struct gfar_private *priv)
15273 +static void gfar_rx_buff_size_config(struct gfar_private *priv)
15274  {
15275 +       int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
15276 +
15277         /* set this when rx hw offload (TOE) functions are being used */
15278         priv->uses_rxfcb = 0;
15279  
15280         if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
15281                 priv->uses_rxfcb = 1;
15282  
15283 -       if (priv->hwts_rx_en || priv->rx_filer_enable)
15284 +       if (priv->hwts_rx_en)
15285                 priv->uses_rxfcb = 1;
15286 +
15287 +       if (priv->uses_rxfcb)
15288 +               frame_size += GMAC_FCB_LEN;
15289 +
15290 +       frame_size += priv->padding;
15291 +
15292 +       frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
15293 +                    INCREMENTAL_BUFFER_SIZE;
15294 +
15295 +       priv->rx_buffer_size = frame_size;
15296  }
15297  
15298  static void gfar_mac_rx_config(struct gfar_private *priv)
15299 @@ -351,7 +384,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
15300         u32 rctrl = 0;
15301  
15302         if (priv->rx_filer_enable) {
15303 -               rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
15304 +               rctrl |= RCTRL_FILREN;
15305                 /* Program the RIR0 reg with the required distribution */
15306                 if (priv->poll_mode == GFAR_SQ_POLLING)
15307                         gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
15308 @@ -483,15 +516,6 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
15309         return &dev->stats;
15310  }
15311  
15312 -static int gfar_set_mac_addr(struct net_device *dev, void *p)
15313 -{
15314 -       eth_mac_addr(dev, p);
15315 -
15316 -       gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
15317 -
15318 -       return 0;
15319 -}
15320 -
15321  static const struct net_device_ops gfar_netdev_ops = {
15322         .ndo_open = gfar_enet_open,
15323         .ndo_start_xmit = gfar_start_xmit,
15324 @@ -502,7 +526,7 @@ static const struct net_device_ops gfar_netdev_ops = {
15325         .ndo_tx_timeout = gfar_timeout,
15326         .ndo_do_ioctl = gfar_ioctl,
15327         .ndo_get_stats = gfar_get_stats,
15328 -       .ndo_set_mac_address = gfar_set_mac_addr,
15329 +       .ndo_set_mac_address = eth_mac_addr,
15330         .ndo_validate_addr = eth_validate_addr,
15331  #ifdef CONFIG_NET_POLL_CONTROLLER
15332         .ndo_poll_controller = gfar_netpoll,
15333 @@ -532,6 +556,22 @@ static void gfar_ints_enable(struct gfar_private *priv)
15334         }
15335  }
15336  
15337 +static void lock_tx_qs(struct gfar_private *priv)
15338 +{
15339 +       int i;
15340 +
15341 +       for (i = 0; i < priv->num_tx_queues; i++)
15342 +               spin_lock(&priv->tx_queue[i]->txlock);
15343 +}
15344 +
15345 +static void unlock_tx_qs(struct gfar_private *priv)
15346 +{
15347 +       int i;
15348 +
15349 +       for (i = 0; i < priv->num_tx_queues; i++)
15350 +               spin_unlock(&priv->tx_queue[i]->txlock);
15351 +}
15352 +
15353  static int gfar_alloc_tx_queues(struct gfar_private *priv)
15354  {
15355         int i;
15356 @@ -560,8 +600,9 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
15357                 if (!priv->rx_queue[i])
15358                         return -ENOMEM;
15359  
15360 +               priv->rx_queue[i]->rx_skbuff = NULL;
15361                 priv->rx_queue[i]->qindex = i;
15362 -               priv->rx_queue[i]->ndev = priv->ndev;
15363 +               priv->rx_queue[i]->dev = priv->ndev;
15364         }
15365         return 0;
15366  }
15367 @@ -647,9 +688,9 @@ static int gfar_parse_group(struct device_node *np,
15368         if (model && strcasecmp(model, "FEC")) {
15369                 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
15370                 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
15371 -               if (!gfar_irq(grp, TX)->irq ||
15372 -                   !gfar_irq(grp, RX)->irq ||
15373 -                   !gfar_irq(grp, ER)->irq)
15374 +               if (gfar_irq(grp, TX)->irq == NO_IRQ ||
15375 +                   gfar_irq(grp, RX)->irq == NO_IRQ ||
15376 +                   gfar_irq(grp, ER)->irq == NO_IRQ)
15377                         return -EINVAL;
15378         }
15379  
15380 @@ -738,6 +779,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
15381         struct gfar_private *priv = NULL;
15382         struct device_node *np = ofdev->dev.of_node;
15383         struct device_node *child = NULL;
15384 +       struct property *stash;
15385         u32 stash_len = 0;
15386         u32 stash_idx = 0;
15387         unsigned int num_tx_qs, num_rx_qs;
15388 @@ -853,7 +895,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
15389                         goto err_grp_init;
15390         }
15391  
15392 -       if (of_property_read_bool(np, "bd-stash")) {
15393 +       stash = of_find_property(np, "bd-stash", NULL);
15394 +
15395 +       if (stash) {
15396                 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
15397                 priv->bd_stash_en = 1;
15398         }
15399 @@ -891,8 +935,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
15400                                      FSL_GIANFAR_DEV_HAS_VLAN |
15401                                      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
15402                                      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
15403 -                                    FSL_GIANFAR_DEV_HAS_TIMER |
15404 -                                    FSL_GIANFAR_DEV_HAS_RX_FILER;
15405 +                                    FSL_GIANFAR_DEV_HAS_TIMER;
15406  
15407         err = of_property_read_string(np, "phy-connection-type", &ctype);
15408  
15409 @@ -905,9 +948,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
15410         if (of_find_property(np, "fsl,magic-packet", NULL))
15411                 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
15412  
15413 -       if (of_get_property(np, "fsl,wake-on-filer", NULL))
15414 -               priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
15415 -
15416         priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
15417  
15418         /* In the case of a fixed PHY, the DT node associated
15419 @@ -999,7 +1039,7 @@ static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
15420  
15421  static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
15422  {
15423 -       struct phy_device *phydev = dev->phydev;
15424 +       struct gfar_private *priv = netdev_priv(dev);
15425  
15426         if (!netif_running(dev))
15427                 return -EINVAL;
15428 @@ -1009,10 +1049,10 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
15429         if (cmd == SIOCGHWTSTAMP)
15430                 return gfar_hwtstamp_get(dev, rq);
15431  
15432 -       if (!phydev)
15433 +       if (!priv->phydev)
15434                 return -ENODEV;
15435  
15436 -       return phy_mii_ioctl(phydev, rq, cmd);
15437 +       return phy_mii_ioctl(priv->phydev, rq, cmd);
15438  }
15439  
15440  static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
15441 @@ -1111,10 +1151,8 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
15442  
15443         if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
15444                 priv->errata |= GFAR_ERRATA_12;
15445 -       /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
15446         if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
15447 -           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
15448 -           ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
15449 +           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
15450                 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
15451  }
15452  #endif
15453 @@ -1156,11 +1194,12 @@ void gfar_mac_reset(struct gfar_private *priv)
15454  
15455         udelay(3);
15456  
15457 -       gfar_rx_offload_en(priv);
15458 +       /* Compute rx_buff_size based on config flags */
15459 +       gfar_rx_buff_size_config(priv);
15460  
15461         /* Initialize the max receive frame/buffer lengths */
15462 -       gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
15463 -       gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
15464 +       gfar_write(&regs->maxfrm, priv->rx_buffer_size);
15465 +       gfar_write(&regs->mrblr, priv->rx_buffer_size);
15466  
15467         /* Initialize the Minimum Frame Length Register */
15468         gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
15469 @@ -1168,11 +1207,12 @@ void gfar_mac_reset(struct gfar_private *priv)
15470         /* Initialize MACCFG2. */
15471         tempval = MACCFG2_INIT_SETTINGS;
15472  
15473 -       /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
15474 -        * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
15475 -        * and by checking RxBD[LG] and discarding larger than MAXFRM.
15476 +       /* If the mtu is larger than the max size for standard
15477 +        * ethernet frames (ie, a jumbo frame), then set maccfg2
15478 +        * to allow huge frames, and to check the length
15479          */
15480 -       if (gfar_has_errata(priv, GFAR_ERRATA_74))
15481 +       if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
15482 +           gfar_has_errata(priv, GFAR_ERRATA_74))
15483                 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
15484  
15485         gfar_write(&regs->maccfg2, tempval);
15486 @@ -1312,7 +1352,6 @@ static void gfar_init_addr_hash_table(struct gfar_private *priv)
15487   */
15488  static int gfar_probe(struct platform_device *ofdev)
15489  {
15490 -       struct device_node *np = ofdev->dev.of_node;
15491         struct net_device *dev = NULL;
15492         struct gfar_private *priv = NULL;
15493         int err = 0, i;
15494 @@ -1328,6 +1367,7 @@ static int gfar_probe(struct platform_device *ofdev)
15495         priv->dev = &ofdev->dev;
15496         SET_NETDEV_DEV(dev, &ofdev->dev);
15497  
15498 +       spin_lock_init(&priv->bflock);
15499         INIT_WORK(&priv->reset_task, gfar_reset_task);
15500  
15501         platform_set_drvdata(ofdev, priv);
15502 @@ -1348,12 +1388,12 @@ static int gfar_probe(struct platform_device *ofdev)
15503                 if (priv->poll_mode == GFAR_SQ_POLLING) {
15504                         netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
15505                                        gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
15506 -                       netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
15507 +                       netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
15508                                        gfar_poll_tx_sq, 2);
15509                 } else {
15510                         netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
15511                                        gfar_poll_rx, GFAR_DEV_WEIGHT);
15512 -                       netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
15513 +                       netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
15514                                        gfar_poll_tx, 2);
15515                 }
15516         }
15517 @@ -1371,8 +1411,6 @@ static int gfar_probe(struct platform_device *ofdev)
15518                 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
15519         }
15520  
15521 -       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
15522 -
15523         gfar_init_addr_hash_table(priv);
15524  
15525         /* Insert receive time stamps into padding alignment bytes */
15526 @@ -1383,6 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev)
15527             priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
15528                 dev->needed_headroom = GMAC_FCB_LEN;
15529  
15530 +       priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
15531 +
15532         /* Initializing some of the rx/tx queue level parameters */
15533         for (i = 0; i < priv->num_tx_queues; i++) {
15534                 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
15535 @@ -1397,9 +1437,8 @@ static int gfar_probe(struct platform_device *ofdev)
15536                 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
15537         }
15538  
15539 -       /* Always enable rx filer if available */
15540 -       priv->rx_filer_enable =
15541 -           (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
15542 +       /* always enable rx filer */
15543 +       priv->rx_filer_enable = 1;
15544         /* Enable most messages by default */
15545         priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
15546         /* use pritority h/w tx queue scheduling for single queue devices */
15547 @@ -1420,14 +1459,9 @@ static int gfar_probe(struct platform_device *ofdev)
15548                 goto register_fail;
15549         }
15550  
15551 -       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
15552 -               priv->wol_supported |= GFAR_WOL_MAGIC;
15553 -
15554 -       if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
15555 -           priv->rx_filer_enable)
15556 -               priv->wol_supported |= GFAR_WOL_FILER_UCAST;
15557 -
15558 -       device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
15559 +       device_init_wakeup(&dev->dev,
15560 +                          priv->device_flags &
15561 +                          FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
15562  
15563         /* fill out IRQ number and name fields */
15564         for (i = 0; i < priv->num_grps; i++) {
15565 @@ -1463,8 +1497,6 @@ static int gfar_probe(struct platform_device *ofdev)
15566         return 0;
15567  
15568  register_fail:
15569 -       if (of_phy_is_fixed_link(np))
15570 -               of_phy_deregister_fixed_link(np);
15571         unmap_group_regs(priv);
15572         gfar_free_rx_queues(priv);
15573         gfar_free_tx_queues(priv);
15574 @@ -1477,16 +1509,11 @@ static int gfar_probe(struct platform_device *ofdev)
15575  static int gfar_remove(struct platform_device *ofdev)
15576  {
15577         struct gfar_private *priv = platform_get_drvdata(ofdev);
15578 -       struct device_node *np = ofdev->dev.of_node;
15579  
15580         of_node_put(priv->phy_node);
15581         of_node_put(priv->tbi_node);
15582  
15583         unregister_netdev(priv->ndev);
15584 -
15585 -       if (of_phy_is_fixed_link(np))
15586 -               of_phy_deregister_fixed_link(np);
15587 -
15588         unmap_group_regs(priv);
15589         gfar_free_rx_queues(priv);
15590         gfar_free_tx_queues(priv);
15591 @@ -1497,153 +1524,53 @@ static int gfar_remove(struct platform_device *ofdev)
15592  
15593  #ifdef CONFIG_PM
15594  
15595 -static void __gfar_filer_disable(struct gfar_private *priv)
15596 -{
15597 -       struct gfar __iomem *regs = priv->gfargrp[0].regs;
15598 -       u32 temp;
15599 -
15600 -       temp = gfar_read(&regs->rctrl);
15601 -       temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
15602 -       gfar_write(&regs->rctrl, temp);
15603 -}
15604 -
15605 -static void __gfar_filer_enable(struct gfar_private *priv)
15606 -{
15607 -       struct gfar __iomem *regs = priv->gfargrp[0].regs;
15608 -       u32 temp;
15609 -
15610 -       temp = gfar_read(&regs->rctrl);
15611 -       temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
15612 -       gfar_write(&regs->rctrl, temp);
15613 -}
15614 -
15615 -/* Filer rules implementing wol capabilities */
15616 -static void gfar_filer_config_wol(struct gfar_private *priv)
15617 -{
15618 -       unsigned int i;
15619 -       u32 rqfcr;
15620 -
15621 -       __gfar_filer_disable(priv);
15622 -
15623 -       /* clear the filer table, reject any packet by default */
15624 -       rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
15625 -       for (i = 0; i <= MAX_FILER_IDX; i++)
15626 -               gfar_write_filer(priv, i, rqfcr, 0);
15627 -
15628 -       i = 0;
15629 -       if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
15630 -               /* unicast packet, accept it */
15631 -               struct net_device *ndev = priv->ndev;
15632 -               /* get the default rx queue index */
15633 -               u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
15634 -               u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
15635 -                                   (ndev->dev_addr[1] << 8) |
15636 -                                    ndev->dev_addr[2];
15637 -
15638 -               rqfcr = (qindex << 10) | RQFCR_AND |
15639 -                       RQFCR_CMP_EXACT | RQFCR_PID_DAH;
15640 -
15641 -               gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
15642 -
15643 -               dest_mac_addr = (ndev->dev_addr[3] << 16) |
15644 -                               (ndev->dev_addr[4] << 8) |
15645 -                                ndev->dev_addr[5];
15646 -               rqfcr = (qindex << 10) | RQFCR_GPI |
15647 -                       RQFCR_CMP_EXACT | RQFCR_PID_DAL;
15648 -               gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
15649 -       }
15650 -
15651 -       __gfar_filer_enable(priv);
15652 -}
15653 -
15654 -static void gfar_filer_restore_table(struct gfar_private *priv)
15655 -{
15656 -       u32 rqfcr, rqfpr;
15657 -       unsigned int i;
15658 -
15659 -       __gfar_filer_disable(priv);
15660 -
15661 -       for (i = 0; i <= MAX_FILER_IDX; i++) {
15662 -               rqfcr = priv->ftp_rqfcr[i];
15663 -               rqfpr = priv->ftp_rqfpr[i];
15664 -               gfar_write_filer(priv, i, rqfcr, rqfpr);
15665 -       }
15666 -
15667 -       __gfar_filer_enable(priv);
15668 -}
15669 -
15670 -/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
15671 -static void gfar_start_wol_filer(struct gfar_private *priv)
15672 -{
15673 -       struct gfar __iomem *regs = priv->gfargrp[0].regs;
15674 -       u32 tempval;
15675 -       int i = 0;
15676 -
15677 -       /* Enable Rx hw queues */
15678 -       gfar_write(&regs->rqueue, priv->rqueue);
15679 -
15680 -       /* Initialize DMACTRL to have WWR and WOP */
15681 -       tempval = gfar_read(&regs->dmactrl);
15682 -       tempval |= DMACTRL_INIT_SETTINGS;
15683 -       gfar_write(&regs->dmactrl, tempval);
15684 -
15685 -       /* Make sure we aren't stopped */
15686 -       tempval = gfar_read(&regs->dmactrl);
15687 -       tempval &= ~DMACTRL_GRS;
15688 -       gfar_write(&regs->dmactrl, tempval);
15689 -
15690 -       for (i = 0; i < priv->num_grps; i++) {
15691 -               regs = priv->gfargrp[i].regs;
15692 -               /* Clear RHLT, so that the DMA starts polling now */
15693 -               gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
15694 -               /* enable the Filer General Purpose Interrupt */
15695 -               gfar_write(&regs->imask, IMASK_FGPI);
15696 -       }
15697 -
15698 -       /* Enable Rx DMA */
15699 -       tempval = gfar_read(&regs->maccfg1);
15700 -       tempval |= MACCFG1_RX_EN;
15701 -       gfar_write(&regs->maccfg1, tempval);
15702 -}
15703 -
15704  static int gfar_suspend(struct device *dev)
15705  {
15706         struct gfar_private *priv = dev_get_drvdata(dev);
15707         struct net_device *ndev = priv->ndev;
15708         struct gfar __iomem *regs = priv->gfargrp[0].regs;
15709 +       unsigned long flags;
15710         u32 tempval;
15711 -       u16 wol = priv->wol_opts;
15712  
15713 -       if (!netif_running(ndev))
15714 -               return 0;
15715 +       int magic_packet = priv->wol_en &&
15716 +                          (priv->device_flags &
15717 +                           FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
15718  
15719 -       disable_napi(priv);
15720 -       netif_tx_lock(ndev);
15721         netif_device_detach(ndev);
15722 -       netif_tx_unlock(ndev);
15723  
15724 -       gfar_halt(priv);
15725 +       if (netif_running(ndev)) {
15726  
15727 -       if (wol & GFAR_WOL_MAGIC) {
15728 -               /* Enable interrupt on Magic Packet */
15729 -               gfar_write(&regs->imask, IMASK_MAG);
15730 +               local_irq_save(flags);
15731 +               lock_tx_qs(priv);
15732  
15733 -               /* Enable Magic Packet mode */
15734 -               tempval = gfar_read(&regs->maccfg2);
15735 -               tempval |= MACCFG2_MPEN;
15736 -               gfar_write(&regs->maccfg2, tempval);
15737 +               gfar_halt_nodisable(priv);
15738  
15739 -               /* re-enable the Rx block */
15740 +               /* Disable Tx, and Rx if wake-on-LAN is disabled. */
15741                 tempval = gfar_read(&regs->maccfg1);
15742 -               tempval |= MACCFG1_RX_EN;
15743 +
15744 +               tempval &= ~MACCFG1_TX_EN;
15745 +
15746 +               if (!magic_packet)
15747 +                       tempval &= ~MACCFG1_RX_EN;
15748 +
15749                 gfar_write(&regs->maccfg1, tempval);
15750  
15751 -       } else if (wol & GFAR_WOL_FILER_UCAST) {
15752 -               gfar_filer_config_wol(priv);
15753 -               gfar_start_wol_filer(priv);
15754 +               unlock_tx_qs(priv);
15755 +               local_irq_restore(flags);
15756  
15757 -       } else {
15758 -               phy_stop(ndev->phydev);
15759 +               disable_napi(priv);
15760 +
15761 +               if (magic_packet) {
15762 +                       /* Enable interrupt on Magic Packet */
15763 +                       gfar_write(&regs->imask, IMASK_MAG);
15764 +
15765 +                       /* Enable Magic Packet mode */
15766 +                       tempval = gfar_read(&regs->maccfg2);
15767 +                       tempval |= MACCFG2_MPEN;
15768 +                       gfar_write(&regs->maccfg2, tempval);
15769 +               } else {
15770 +                       phy_stop(priv->phydev);
15771 +               }
15772         }
15773  
15774         return 0;
15775 @@ -1654,30 +1581,37 @@ static int gfar_resume(struct device *dev)
15776         struct gfar_private *priv = dev_get_drvdata(dev);
15777         struct net_device *ndev = priv->ndev;
15778         struct gfar __iomem *regs = priv->gfargrp[0].regs;
15779 +       unsigned long flags;
15780         u32 tempval;
15781 -       u16 wol = priv->wol_opts;
15782 +       int magic_packet = priv->wol_en &&
15783 +                          (priv->device_flags &
15784 +                           FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
15785  
15786 -       if (!netif_running(ndev))
15787 +       if (!netif_running(ndev)) {
15788 +               netif_device_attach(ndev);
15789                 return 0;
15790 +       }
15791  
15792 -       if (wol & GFAR_WOL_MAGIC) {
15793 -               /* Disable Magic Packet mode */
15794 -               tempval = gfar_read(&regs->maccfg2);
15795 -               tempval &= ~MACCFG2_MPEN;
15796 -               gfar_write(&regs->maccfg2, tempval);
15797 +       if (!magic_packet && priv->phydev)
15798 +               phy_start(priv->phydev);
15799  
15800 -       } else if (wol & GFAR_WOL_FILER_UCAST) {
15801 -               /* need to stop rx only, tx is already down */
15802 -               gfar_halt(priv);
15803 -               gfar_filer_restore_table(priv);
15804 +       /* Disable Magic Packet mode, in case something
15805 +        * else woke us up.
15806 +        */
15807 +       local_irq_save(flags);
15808 +       lock_tx_qs(priv);
15809  
15810 -       } else {
15811 -               phy_start(ndev->phydev);
15812 -       }
15813 +       tempval = gfar_read(&regs->maccfg2);
15814 +       tempval &= ~MACCFG2_MPEN;
15815 +       gfar_write(&regs->maccfg2, tempval);
15816  
15817         gfar_start(priv);
15818  
15819 +       unlock_tx_qs(priv);
15820 +       local_irq_restore(flags);
15821 +
15822         netif_device_attach(ndev);
15823 +
15824         enable_napi(priv);
15825  
15826         return 0;
15827 @@ -1694,7 +1628,10 @@ static int gfar_restore(struct device *dev)
15828                 return 0;
15829         }
15830  
15831 -       gfar_init_bds(ndev);
15832 +       if (gfar_init_bds(ndev)) {
15833 +               free_skb_resources(priv);
15834 +               return -ENOMEM;
15835 +       }
15836  
15837         gfar_mac_reset(priv);
15838  
15839 @@ -1706,8 +1643,8 @@ static int gfar_restore(struct device *dev)
15840         priv->oldspeed = 0;
15841         priv->oldduplex = -1;
15842  
15843 -       if (ndev->phydev)
15844 -               phy_start(ndev->phydev);
15845 +       if (priv->phydev)
15846 +               phy_start(priv->phydev);
15847  
15848         netif_device_attach(ndev);
15849         enable_napi(priv);
15850 @@ -1786,7 +1723,6 @@ static int init_phy(struct net_device *dev)
15851                 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
15852                 GFAR_SUPPORTED_GBIT : 0;
15853         phy_interface_t interface;
15854 -       struct phy_device *phydev;
15855  
15856         priv->oldlink = 0;
15857         priv->oldspeed = 0;
15858 @@ -1794,9 +1730,9 @@ static int init_phy(struct net_device *dev)
15859  
15860         interface = gfar_get_interface(dev);
15861  
15862 -       phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
15863 -                               interface);
15864 -       if (!phydev) {
15865 +       priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
15866 +                                     interface);
15867 +       if (!priv->phydev) {
15868                 dev_err(&dev->dev, "could not attach to PHY\n");
15869                 return -ENODEV;
15870         }
15871 @@ -1805,11 +1741,11 @@ static int init_phy(struct net_device *dev)
15872                 gfar_configure_serdes(dev);
15873  
15874         /* Remove any features not supported by the controller */
15875 -       phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
15876 -       phydev->advertising = phydev->supported;
15877 +       priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
15878 +       priv->phydev->advertising = priv->phydev->supported;
15879  
15880         /* Add support for flow control, but don't advertise it by default */
15881 -       phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
15882 +       priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
15883  
15884         return 0;
15885  }
15886 @@ -1844,10 +1780,8 @@ static void gfar_configure_serdes(struct net_device *dev)
15887          * everything for us?  Resetting it takes the link down and requires
15888          * several seconds for it to come back.
15889          */
15890 -       if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
15891 -               put_device(&tbiphy->mdio.dev);
15892 +       if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
15893                 return;
15894 -       }
15895  
15896         /* Single clk mode, mii mode off(for serdes communication) */
15897         phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
15898 @@ -1859,8 +1793,6 @@ static void gfar_configure_serdes(struct net_device *dev)
15899         phy_write(tbiphy, MII_BMCR,
15900                   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
15901                   BMCR_SPEED1000);
15902 -
15903 -       put_device(&tbiphy->mdio.dev);
15904  }
15905  
15906  static int __gfar_is_rx_idle(struct gfar_private *priv)
15907 @@ -1953,7 +1885,7 @@ void stop_gfar(struct net_device *dev)
15908         /* disable ints and gracefully shut down Rx/Tx DMA */
15909         gfar_halt(priv);
15910  
15911 -       phy_stop(dev->phydev);
15912 +       phy_stop(priv->phydev);
15913  
15914         free_skb_resources(priv);
15915  }
15916 @@ -1990,32 +1922,26 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
15917  
15918  static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
15919  {
15920 +       struct rxbd8 *rxbdp;
15921 +       struct gfar_private *priv = netdev_priv(rx_queue->dev);
15922         int i;
15923  
15924 -       struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
15925 -
15926 -       if (rx_queue->skb)
15927 -               dev_kfree_skb(rx_queue->skb);
15928 +       rxbdp = rx_queue->rx_bd_base;
15929  
15930         for (i = 0; i < rx_queue->rx_ring_size; i++) {
15931 -               struct  gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
15932 -
15933 +               if (rx_queue->rx_skbuff[i]) {
15934 +                       dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
15935 +                                        priv->rx_buffer_size,
15936 +                                        DMA_FROM_DEVICE);
15937 +                       dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
15938 +                       rx_queue->rx_skbuff[i] = NULL;
15939 +               }
15940                 rxbdp->lstatus = 0;
15941                 rxbdp->bufPtr = 0;
15942                 rxbdp++;
15943 -
15944 -               if (!rxb->page)
15945 -                       continue;
15946 -
15947 -               dma_unmap_single(rx_queue->dev, rxb->dma,
15948 -                                PAGE_SIZE, DMA_FROM_DEVICE);
15949 -               __free_page(rxb->page);
15950 -
15951 -               rxb->page = NULL;
15952         }
15953 -
15954 -       kfree(rx_queue->rx_buff);
15955 -       rx_queue->rx_buff = NULL;
15956 +       kfree(rx_queue->rx_skbuff);
15957 +       rx_queue->rx_skbuff = NULL;
15958  }
15959  
15960  /* If there are any tx skbs or rx skbs still around, free them.
15961 @@ -2040,7 +1966,7 @@ static void free_skb_resources(struct gfar_private *priv)
15962  
15963         for (i = 0; i < priv->num_rx_queues; i++) {
15964                 rx_queue = priv->rx_queue[i];
15965 -               if (rx_queue->rx_buff)
15966 +               if (rx_queue->rx_skbuff)
15967                         free_skb_rx_queue(rx_queue);
15968         }
15969  
15970 @@ -2085,7 +2011,7 @@ void gfar_start(struct gfar_private *priv)
15971  
15972         gfar_ints_enable(priv);
15973  
15974 -       netif_trans_update(priv->ndev); /* prevent tx timeout */
15975 +       priv->ndev->trans_start = jiffies; /* prevent tx timeout */
15976  }
15977  
15978  static void free_grp_irqs(struct gfar_priv_grp *grp)
15979 @@ -2116,8 +2042,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
15980  
15981                         goto err_irq_fail;
15982                 }
15983 -               enable_irq_wake(gfar_irq(grp, ER)->irq);
15984 -
15985                 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
15986                                   gfar_irq(grp, TX)->name, grp);
15987                 if (err < 0) {
15988 @@ -2132,8 +2056,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
15989                                   gfar_irq(grp, RX)->irq);
15990                         goto rx_irq_fail;
15991                 }
15992 -               enable_irq_wake(gfar_irq(grp, RX)->irq);
15993 -
15994         } else {
15995                 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
15996                                   gfar_irq(grp, TX)->name, grp);
15997 @@ -2142,7 +2064,6 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
15998                                   gfar_irq(grp, TX)->irq);
15999                         goto err_irq_fail;
16000                 }
16001 -               enable_irq_wake(gfar_irq(grp, TX)->irq);
16002         }
16003  
16004         return 0;
16005 @@ -2208,12 +2129,7 @@ int startup_gfar(struct net_device *ndev)
16006         /* Start Rx/Tx DMA and enable the interrupts */
16007         gfar_start(priv);
16008  
16009 -       /* force link state update after mac reset */
16010 -       priv->oldlink = 0;
16011 -       priv->oldspeed = 0;
16012 -       priv->oldduplex = -1;
16013 -
16014 -       phy_start(ndev->phydev);
16015 +       phy_start(priv->phydev);
16016  
16017         enable_napi(priv);
16018  
16019 @@ -2242,6 +2158,8 @@ static int gfar_enet_open(struct net_device *dev)
16020         if (err)
16021                 return err;
16022  
16023 +       device_set_wakeup_enable(&dev->dev, priv->wol_en);
16024 +
16025         return err;
16026  }
16027  
16028 @@ -2283,7 +2201,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
16029         fcb->flags = flags;
16030  }
16031  
16032 -static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
16033 +void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
16034  {
16035         fcb->flags |= TXFCB_VLN;
16036         fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
16037 @@ -2333,10 +2251,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16038         struct txfcb *fcb = NULL;
16039         struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
16040         u32 lstatus;
16041 -       skb_frag_t *frag;
16042         int i, rq = 0;
16043         int do_tstamp, do_csum, do_vlan;
16044         u32 bufaddr;
16045 +       unsigned long flags;
16046         unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
16047  
16048         rq = skb->queue_mapping;
16049 @@ -2401,6 +2319,52 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16050         txbdp = txbdp_start = tx_queue->cur_tx;
16051         lstatus = be32_to_cpu(txbdp->lstatus);
16052  
16053 +       /* Time stamp insertion requires one additional TxBD */
16054 +       if (unlikely(do_tstamp))
16055 +               txbdp_tstamp = txbdp = next_txbd(txbdp, base,
16056 +                                                tx_queue->tx_ring_size);
16057 +
16058 +       if (nr_frags == 0) {
16059 +               if (unlikely(do_tstamp)) {
16060 +                       u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
16061 +
16062 +                       lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16063 +                       txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
16064 +               } else {
16065 +                       lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16066 +               }
16067 +       } else {
16068 +               /* Place the fragment addresses and lengths into the TxBDs */
16069 +               for (i = 0; i < nr_frags; i++) {
16070 +                       unsigned int frag_len;
16071 +                       /* Point at the next BD, wrapping as needed */
16072 +                       txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
16073 +
16074 +                       frag_len = skb_shinfo(skb)->frags[i].size;
16075 +
16076 +                       lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
16077 +                                 BD_LFLAG(TXBD_READY);
16078 +
16079 +                       /* Handle the last BD specially */
16080 +                       if (i == nr_frags - 1)
16081 +                               lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16082 +
16083 +                       bufaddr = skb_frag_dma_map(priv->dev,
16084 +                                                  &skb_shinfo(skb)->frags[i],
16085 +                                                  0,
16086 +                                                  frag_len,
16087 +                                                  DMA_TO_DEVICE);
16088 +                       if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
16089 +                               goto dma_map_err;
16090 +
16091 +                       /* set the TxBD length and buffer pointer */
16092 +                       txbdp->bufPtr = cpu_to_be32(bufaddr);
16093 +                       txbdp->lstatus = cpu_to_be32(lstatus);
16094 +               }
16095 +
16096 +               lstatus = be32_to_cpu(txbdp_start->lstatus);
16097 +       }
16098 +
16099         /* Add TxPAL between FCB and frame if required */
16100         if (unlikely(do_tstamp)) {
16101                 skb_push(skb, GMAC_TXPAL_LEN);
16102 @@ -2435,6 +2399,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16103         if (do_vlan)
16104                 gfar_tx_vlan(skb, fcb);
16105  
16106 +       /* Setup tx hardware time stamping if requested */
16107 +       if (unlikely(do_tstamp)) {
16108 +               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
16109 +               fcb->ptp = 1;
16110 +       }
16111 +
16112         bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
16113                                  DMA_TO_DEVICE);
16114         if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
16115 @@ -2442,47 +2412,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16116  
16117         txbdp_start->bufPtr = cpu_to_be32(bufaddr);
16118  
16119 -       /* Time stamp insertion requires one additional TxBD */
16120 -       if (unlikely(do_tstamp))
16121 -               txbdp_tstamp = txbdp = next_txbd(txbdp, base,
16122 -                                                tx_queue->tx_ring_size);
16123 -
16124 -       if (likely(!nr_frags)) {
16125 -               if (likely(!do_tstamp))
16126 -                       lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16127 -       } else {
16128 -               u32 lstatus_start = lstatus;
16129 -
16130 -               /* Place the fragment addresses and lengths into the TxBDs */
16131 -               frag = &skb_shinfo(skb)->frags[0];
16132 -               for (i = 0; i < nr_frags; i++, frag++) {
16133 -                       unsigned int size;
16134 -
16135 -                       /* Point at the next BD, wrapping as needed */
16136 -                       txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
16137 -
16138 -                       size = skb_frag_size(frag);
16139 -
16140 -                       lstatus = be32_to_cpu(txbdp->lstatus) | size |
16141 -                                 BD_LFLAG(TXBD_READY);
16142 -
16143 -                       /* Handle the last BD specially */
16144 -                       if (i == nr_frags - 1)
16145 -                               lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16146 -
16147 -                       bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
16148 -                                                  size, DMA_TO_DEVICE);
16149 -                       if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
16150 -                               goto dma_map_err;
16151 -
16152 -                       /* set the TxBD length and buffer pointer */
16153 -                       txbdp->bufPtr = cpu_to_be32(bufaddr);
16154 -                       txbdp->lstatus = cpu_to_be32(lstatus);
16155 -               }
16156 -
16157 -               lstatus = lstatus_start;
16158 -       }
16159 -
16160         /* If time stamping is requested one additional TxBD must be set up. The
16161          * first TxBD points to the FCB and must have a data length of
16162          * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
16163 @@ -2493,25 +2422,31 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16164  
16165                 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
16166                 bufaddr += fcb_len;
16167 -
16168                 lstatus_ts |= BD_LFLAG(TXBD_READY) |
16169                               (skb_headlen(skb) - fcb_len);
16170 -               if (!nr_frags)
16171 -                       lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
16172  
16173                 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
16174                 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
16175                 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
16176 -
16177 -               /* Setup tx hardware time stamping */
16178 -               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
16179 -               fcb->ptp = 1;
16180         } else {
16181                 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
16182         }
16183  
16184         netdev_tx_sent_queue(txq, bytes_sent);
16185  
16186 +       /* We can work in parallel with gfar_clean_tx_ring(), except
16187 +        * when modifying num_txbdfree. Note that we didn't grab the lock
16188 +        * when we were reading the num_txbdfree and checking for available
16189 +        * space, that's because outside of this function it can only grow,
16190 +        * and once we've got needed space, it cannot suddenly disappear.
16191 +        *
16192 +        * The lock also protects us from gfar_error(), which can modify
16193 +        * regs->tstat and thus retrigger the transfers, which is why we
16194 +        * also must grab the lock before setting ready bit for the first
16195 +        * to be transmitted BD.
16196 +        */
16197 +       spin_lock_irqsave(&tx_queue->txlock, flags);
16198 +
16199         gfar_wmb();
16200  
16201         txbdp_start->lstatus = cpu_to_be32(lstatus);
16202 @@ -2528,15 +2463,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16203  
16204         tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
16205  
16206 -       /* We can work in parallel with gfar_clean_tx_ring(), except
16207 -        * when modifying num_txbdfree. Note that we didn't grab the lock
16208 -        * when we were reading the num_txbdfree and checking for available
16209 -        * space, that's because outside of this function it can only grow.
16210 -        */
16211 -       spin_lock_bh(&tx_queue->txlock);
16212         /* reduce TxBD free count */
16213         tx_queue->num_txbdfree -= (nr_txbds);
16214 -       spin_unlock_bh(&tx_queue->txlock);
16215  
16216         /* If the next BD still needs to be cleaned up, then the bds
16217          * are full.  We need to tell the kernel to stop sending us stuff.
16218 @@ -2550,6 +2478,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
16219         /* Tell the DMA to go go go */
16220         gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
16221  
16222 +       /* Unlock priv */
16223 +       spin_unlock_irqrestore(&tx_queue->txlock, flags);
16224 +
16225         return NETDEV_TX_OK;
16226  
16227  dma_map_err:
16228 @@ -2582,7 +2513,8 @@ static int gfar_close(struct net_device *dev)
16229         stop_gfar(dev);
16230  
16231         /* Disconnect from the PHY */
16232 -       phy_disconnect(dev->phydev);
16233 +       phy_disconnect(priv->phydev);
16234 +       priv->phydev = NULL;
16235  
16236         gfar_free_irq(priv);
16237  
16238 @@ -2602,7 +2534,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
16239         struct gfar_private *priv = netdev_priv(dev);
16240         int frame_size = new_mtu + ETH_HLEN;
16241  
16242 -       if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
16243 +       if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
16244                 netif_err(priv, drv, dev, "Invalid MTU setting\n");
16245                 return -EINVAL;
16246         }
16247 @@ -2656,6 +2588,15 @@ static void gfar_timeout(struct net_device *dev)
16248         schedule_work(&priv->reset_task);
16249  }
16250  
16251 +static void gfar_align_skb(struct sk_buff *skb)
16252 +{
16253 +       /* We need the data buffer to be aligned properly.  We will reserve
16254 +        * as many bytes as needed to align the data properly
16255 +        */
16256 +       skb_reserve(skb, RXBUF_ALIGNMENT -
16257 +                   (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
16258 +}
16259 +
16260  /* Interrupt Handler for Transmit complete */
16261  static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16262  {
16263 @@ -2681,6 +2622,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16264         skb_dirtytx = tx_queue->skb_dirtytx;
16265  
16266         while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
16267 +               unsigned long flags;
16268  
16269                 frags = skb_shinfo(skb)->nr_frags;
16270  
16271 @@ -2713,11 +2655,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16272  
16273                 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
16274                         struct skb_shared_hwtstamps shhwtstamps;
16275 -                       u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
16276 -                                         ~0x7UL);
16277 +                       u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
16278  
16279                         memset(&shhwtstamps, 0, sizeof(shhwtstamps));
16280 -                       shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
16281 +                       shhwtstamps.hwtstamp = ns_to_ktime(*ns);
16282                         skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
16283                         skb_tstamp_tx(skb, &shhwtstamps);
16284                         gfar_clear_txbd_status(bdp);
16285 @@ -2745,9 +2686,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16286                               TX_RING_MOD_MASK(tx_ring_size);
16287  
16288                 howmany++;
16289 -               spin_lock(&tx_queue->txlock);
16290 +               spin_lock_irqsave(&tx_queue->txlock, flags);
16291                 tx_queue->num_txbdfree += nr_txbds;
16292 -               spin_unlock(&tx_queue->txlock);
16293 +               spin_unlock_irqrestore(&tx_queue->txlock, flags);
16294         }
16295  
16296         /* If we freed a buffer, we can restart transmission, if necessary */
16297 @@ -2763,85 +2704,49 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
16298         netdev_tx_completed_queue(txq, howmany, bytes_sent);
16299  }
16300  
16301 -static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
16302 +static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
16303  {
16304 -       struct page *page;
16305 -       dma_addr_t addr;
16306 -
16307 -       page = dev_alloc_page();
16308 -       if (unlikely(!page))
16309 -               return false;
16310 -
16311 -       addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
16312 -       if (unlikely(dma_mapping_error(rxq->dev, addr))) {
16313 -               __free_page(page);
16314 -
16315 -               return false;
16316 -       }
16317 +       struct gfar_private *priv = netdev_priv(dev);
16318 +       struct sk_buff *skb;
16319  
16320 -       rxb->dma = addr;
16321 -       rxb->page = page;
16322 -       rxb->page_offset = 0;
16323 +       skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
16324 +       if (!skb)
16325 +               return NULL;
16326  
16327 -       return true;
16328 -}
16329 +       gfar_align_skb(skb);
16330  
16331 -static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
16332 -{
16333 -       struct gfar_private *priv = netdev_priv(rx_queue->ndev);
16334 -       struct gfar_extra_stats *estats = &priv->extra_stats;
16335 -
16336 -       netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
16337 -       atomic64_inc(&estats->rx_alloc_err);
16338 +       return skb;
16339  }
16340  
16341 -static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
16342 -                               int alloc_cnt)
16343 +static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
16344  {
16345 -       struct rxbd8 *bdp;
16346 -       struct gfar_rx_buff *rxb;
16347 -       int i;
16348 -
16349 -       i = rx_queue->next_to_use;
16350 -       bdp = &rx_queue->rx_bd_base[i];
16351 -       rxb = &rx_queue->rx_buff[i];
16352 -
16353 -       while (alloc_cnt--) {
16354 -               /* try reuse page */
16355 -               if (unlikely(!rxb->page)) {
16356 -                       if (unlikely(!gfar_new_page(rx_queue, rxb))) {
16357 -                               gfar_rx_alloc_err(rx_queue);
16358 -                               break;
16359 -                       }
16360 -               }
16361 +       struct gfar_private *priv = netdev_priv(dev);
16362 +       struct sk_buff *skb;
16363 +       dma_addr_t addr;
16364  
16365 -               /* Setup the new RxBD */
16366 -               gfar_init_rxbdp(rx_queue, bdp,
16367 -                               rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
16368 +       skb = gfar_alloc_skb(dev);
16369 +       if (!skb)
16370 +               return NULL;
16371  
16372 -               /* Update to the next pointer */
16373 -               bdp++;
16374 -               rxb++;
16375 -
16376 -               if (unlikely(++i == rx_queue->rx_ring_size)) {
16377 -                       i = 0;
16378 -                       bdp = rx_queue->rx_bd_base;
16379 -                       rxb = rx_queue->rx_buff;
16380 -               }
16381 +       addr = dma_map_single(priv->dev, skb->data,
16382 +                             priv->rx_buffer_size, DMA_FROM_DEVICE);
16383 +       if (unlikely(dma_mapping_error(priv->dev, addr))) {
16384 +               dev_kfree_skb_any(skb);
16385 +               return NULL;
16386         }
16387  
16388 -       rx_queue->next_to_use = i;
16389 -       rx_queue->next_to_alloc = i;
16390 +       *bufaddr = addr;
16391 +       return skb;
16392  }
16393  
16394 -static void count_errors(u32 lstatus, struct net_device *ndev)
16395 +static inline void count_errors(unsigned short status, struct net_device *dev)
16396  {
16397 -       struct gfar_private *priv = netdev_priv(ndev);
16398 -       struct net_device_stats *stats = &ndev->stats;
16399 +       struct gfar_private *priv = netdev_priv(dev);
16400 +       struct net_device_stats *stats = &dev->stats;
16401         struct gfar_extra_stats *estats = &priv->extra_stats;
16402  
16403         /* If the packet was truncated, none of the other errors matter */
16404 -       if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
16405 +       if (status & RXBD_TRUNCATED) {
16406                 stats->rx_length_errors++;
16407  
16408                 atomic64_inc(&estats->rx_trunc);
16409 @@ -2849,25 +2754,25 @@ static void count_errors(u32 lstatus, struct net_device *ndev)
16410                 return;
16411         }
16412         /* Count the errors, if there were any */
16413 -       if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
16414 +       if (status & (RXBD_LARGE | RXBD_SHORT)) {
16415                 stats->rx_length_errors++;
16416  
16417 -               if (lstatus & BD_LFLAG(RXBD_LARGE))
16418 +               if (status & RXBD_LARGE)
16419                         atomic64_inc(&estats->rx_large);
16420                 else
16421                         atomic64_inc(&estats->rx_short);
16422         }
16423 -       if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
16424 +       if (status & RXBD_NONOCTET) {
16425                 stats->rx_frame_errors++;
16426                 atomic64_inc(&estats->rx_nonoctet);
16427         }
16428 -       if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
16429 +       if (status & RXBD_CRCERR) {
16430                 atomic64_inc(&estats->rx_crcerr);
16431                 stats->rx_crc_errors++;
16432         }
16433 -       if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
16434 +       if (status & RXBD_OVERRUN) {
16435                 atomic64_inc(&estats->rx_overrun);
16436 -               stats->rx_over_errors++;
16437 +               stats->rx_crc_errors++;
16438         }
16439  }
16440  
16441 @@ -2875,14 +2780,7 @@ irqreturn_t gfar_receive(int irq, void *grp_id)
16442  {
16443         struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
16444         unsigned long flags;
16445 -       u32 imask, ievent;
16446 -
16447 -       ievent = gfar_read(&grp->regs->ievent);
16448 -
16449 -       if (unlikely(ievent & IEVENT_FGPI)) {
16450 -               gfar_write(&grp->regs->ievent, IEVENT_FGPI);
16451 -               return IRQ_HANDLED;
16452 -       }
16453 +       u32 imask;
16454  
16455         if (likely(napi_schedule_prep(&grp->napi_rx))) {
16456                 spin_lock_irqsave(&grp->grplock, flags);
16457 @@ -2925,101 +2823,6 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
16458         return IRQ_HANDLED;
16459  }
16460  
16461 -static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
16462 -                            struct sk_buff *skb, bool first)
16463 -{
16464 -       unsigned int size = lstatus & BD_LENGTH_MASK;
16465 -       struct page *page = rxb->page;
16466 -       bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
16467 -
16468 -       /* Remove the FCS from the packet length */
16469 -       if (last)
16470 -               size -= ETH_FCS_LEN;
16471 -
16472 -       if (likely(first)) {
16473 -               skb_put(skb, size);
16474 -       } else {
16475 -               /* the last fragments' length contains the full frame length */
16476 -               if (last)
16477 -                       size -= skb->len;
16478 -
16479 -               /* in case the last fragment consisted only of the FCS */
16480 -               if (size > 0)
16481 -                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
16482 -                                       rxb->page_offset + RXBUF_ALIGNMENT,
16483 -                                       size, GFAR_RXB_TRUESIZE);
16484 -       }
16485 -
16486 -       /* try reuse page */
16487 -       if (unlikely(page_count(page) != 1))
16488 -               return false;
16489 -
16490 -       /* change offset to the other half */
16491 -       rxb->page_offset ^= GFAR_RXB_TRUESIZE;
16492 -
16493 -       page_ref_inc(page);
16494 -
16495 -       return true;
16496 -}
16497 -
16498 -static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
16499 -                              struct gfar_rx_buff *old_rxb)
16500 -{
16501 -       struct gfar_rx_buff *new_rxb;
16502 -       u16 nta = rxq->next_to_alloc;
16503 -
16504 -       new_rxb = &rxq->rx_buff[nta];
16505 -
16506 -       /* find next buf that can reuse a page */
16507 -       nta++;
16508 -       rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
16509 -
16510 -       /* copy page reference */
16511 -       *new_rxb = *old_rxb;
16512 -
16513 -       /* sync for use by the device */
16514 -       dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
16515 -                                        old_rxb->page_offset,
16516 -                                        GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
16517 -}
16518 -
16519 -static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
16520 -                                           u32 lstatus, struct sk_buff *skb)
16521 -{
16522 -       struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
16523 -       struct page *page = rxb->page;
16524 -       bool first = false;
16525 -
16526 -       if (likely(!skb)) {
16527 -               void *buff_addr = page_address(page) + rxb->page_offset;
16528 -
16529 -               skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
16530 -               if (unlikely(!skb)) {
16531 -                       gfar_rx_alloc_err(rx_queue);
16532 -                       return NULL;
16533 -               }
16534 -               skb_reserve(skb, RXBUF_ALIGNMENT);
16535 -               first = true;
16536 -       }
16537 -
16538 -       dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
16539 -                                     GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
16540 -
16541 -       if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
16542 -               /* reuse the free half of the page */
16543 -               gfar_reuse_rx_page(rx_queue, rxb);
16544 -       } else {
16545 -               /* page cannot be reused, unmap it */
16546 -               dma_unmap_page(rx_queue->dev, rxb->dma,
16547 -                              PAGE_SIZE, DMA_FROM_DEVICE);
16548 -       }
16549 -
16550 -       /* clear rxb content */
16551 -       rxb->page = NULL;
16552 -
16553 -       return skb;
16554 -}
16555 -
16556  static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
16557  {
16558         /* If valid headers were found, and valid sums
16559 @@ -3034,9 +2837,10 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
16560  }
16561  
16562  /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
16563 -static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
16564 +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
16565 +                              int amount_pull, struct napi_struct *napi)
16566  {
16567 -       struct gfar_private *priv = netdev_priv(ndev);
16568 +       struct gfar_private *priv = netdev_priv(dev);
16569         struct rxfcb *fcb = NULL;
16570  
16571         /* fcb is at the beginning if exists */
16572 @@ -3045,8 +2849,10 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
16573         /* Remove the FCB from the skb
16574          * Remove the padded bytes, if there are any
16575          */
16576 -       if (priv->uses_rxfcb)
16577 -               skb_pull(skb, GMAC_FCB_LEN);
16578 +       if (amount_pull) {
16579 +               skb_record_rx_queue(skb, fcb->rq);
16580 +               skb_pull(skb, amount_pull);
16581 +       }
16582  
16583         /* Get receive timestamp from the skb */
16584         if (priv->hwts_rx_en) {
16585 @@ -3054,26 +2860,30 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
16586                 u64 *ns = (u64 *) skb->data;
16587  
16588                 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
16589 -               shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
16590 +               shhwtstamps->hwtstamp = ns_to_ktime(*ns);
16591         }
16592  
16593         if (priv->padding)
16594                 skb_pull(skb, priv->padding);
16595  
16596 -       if (ndev->features & NETIF_F_RXCSUM)
16597 +       if (dev->features & NETIF_F_RXCSUM)
16598                 gfar_rx_checksum(skb, fcb);
16599  
16600         /* Tell the skb what kind of packet this is */
16601 -       skb->protocol = eth_type_trans(skb, ndev);
16602 +       skb->protocol = eth_type_trans(skb, dev);
16603  
16604         /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
16605          * Even if vlan rx accel is disabled, on some chips
16606          * RXFCB_VLN is pseudo randomly set.
16607          */
16608 -       if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
16609 +       if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
16610             be16_to_cpu(fcb->flags) & RXFCB_VLN)
16611                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
16612                                        be16_to_cpu(fcb->vlctl));
16613 +
16614 +       /* Send the packet up the stack */
16615 +       napi_gro_receive(napi, skb);
16616 +
16617  }
16618  
16619  /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
16620 @@ -3082,89 +2892,91 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
16621   */
16622  int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
16623  {
16624 -       struct net_device *ndev = rx_queue->ndev;
16625 -       struct gfar_private *priv = netdev_priv(ndev);
16626 -       struct rxbd8 *bdp;
16627 -       int i, howmany = 0;
16628 -       struct sk_buff *skb = rx_queue->skb;
16629 -       int cleaned_cnt = gfar_rxbd_unused(rx_queue);
16630 -       unsigned int total_bytes = 0, total_pkts = 0;
16631 +       struct net_device *dev = rx_queue->dev;
16632 +       struct rxbd8 *bdp, *base;
16633 +       struct sk_buff *skb;
16634 +       int pkt_len;
16635 +       int amount_pull;
16636 +       int howmany = 0;
16637 +       struct gfar_private *priv = netdev_priv(dev);
16638  
16639         /* Get the first full descriptor */
16640 -       i = rx_queue->next_to_clean;
16641 +       bdp = rx_queue->cur_rx;
16642 +       base = rx_queue->rx_bd_base;
16643  
16644 -       while (rx_work_limit--) {
16645 -               u32 lstatus;
16646 +       amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
16647  
16648 -               if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
16649 -                       gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
16650 -                       cleaned_cnt = 0;
16651 -               }
16652 -
16653 -               bdp = &rx_queue->rx_bd_base[i];
16654 -               lstatus = be32_to_cpu(bdp->lstatus);
16655 -               if (lstatus & BD_LFLAG(RXBD_EMPTY))
16656 -                       break;
16657 +       while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
16658 +               struct sk_buff *newskb;
16659 +               dma_addr_t bufaddr;
16660  
16661 -               /* order rx buffer descriptor reads */
16662                 rmb();
16663  
16664 -               /* fetch next to clean buffer from the ring */
16665 -               skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
16666 -               if (unlikely(!skb))
16667 -                       break;
16668 -
16669 -               cleaned_cnt++;
16670 -               howmany++;
16671 +               /* Add another skb for the future */
16672 +               newskb = gfar_new_skb(dev, &bufaddr);
16673  
16674 -               if (unlikely(++i == rx_queue->rx_ring_size))
16675 -                       i = 0;
16676 +               skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
16677  
16678 -               rx_queue->next_to_clean = i;
16679 -
16680 -               /* fetch next buffer if not the last in frame */
16681 -               if (!(lstatus & BD_LFLAG(RXBD_LAST)))
16682 -                       continue;
16683 +               dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
16684 +                                priv->rx_buffer_size, DMA_FROM_DEVICE);
16685 +
16686 +               if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
16687 +                            be16_to_cpu(bdp->length) > priv->rx_buffer_size))
16688 +                       bdp->status = cpu_to_be16(RXBD_LARGE);
16689 +
16690 +               /* We drop the frame if we failed to allocate a new buffer */
16691 +               if (unlikely(!newskb ||
16692 +                            !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
16693 +                            be16_to_cpu(bdp->status) & RXBD_ERR)) {
16694 +                       count_errors(be16_to_cpu(bdp->status), dev);
16695 +
16696 +                       if (unlikely(!newskb)) {
16697 +                               newskb = skb;
16698 +                               bufaddr = be32_to_cpu(bdp->bufPtr);
16699 +                       } else if (skb)
16700 +                               dev_kfree_skb(skb);
16701 +               } else {
16702 +                       /* Increment the number of packets */
16703 +                       rx_queue->stats.rx_packets++;
16704 +                       howmany++;
16705 +
16706 +                       if (likely(skb)) {
16707 +                               pkt_len = be16_to_cpu(bdp->length) -
16708 +                                         ETH_FCS_LEN;
16709 +                               /* Remove the FCS from the packet length */
16710 +                               skb_put(skb, pkt_len);
16711 +                               rx_queue->stats.rx_bytes += pkt_len;
16712 +                               skb_record_rx_queue(skb, rx_queue->qindex);
16713 +                               gfar_process_frame(dev, skb, amount_pull,
16714 +                                                  &rx_queue->grp->napi_rx);
16715  
16716 -               if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
16717 -                       count_errors(lstatus, ndev);
16718 +                       } else {
16719 +                               netif_warn(priv, rx_err, dev, "Missing skb!\n");
16720 +                               rx_queue->stats.rx_dropped++;
16721 +                               atomic64_inc(&priv->extra_stats.rx_skbmissing);
16722 +                       }
16723  
16724 -                       /* discard faulty buffer */
16725 -                       dev_kfree_skb(skb);
16726 -                       skb = NULL;
16727 -                       rx_queue->stats.rx_dropped++;
16728 -                       continue;
16729                 }
16730  
16731 -               /* Increment the number of packets */
16732 -               total_pkts++;
16733 -               total_bytes += skb->len;
16734 +               rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
16735  
16736 -               skb_record_rx_queue(skb, rx_queue->qindex);
16737 +               /* Setup the new bdp */
16738 +               gfar_init_rxbdp(rx_queue, bdp, bufaddr);
16739  
16740 -               gfar_process_frame(ndev, skb);
16741 +               /* Update Last Free RxBD pointer for LFC */
16742 +               if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
16743 +                       gfar_write(rx_queue->rfbptr, (u32)bdp);
16744  
16745 -               /* Send the packet up the stack */
16746 -               napi_gro_receive(&rx_queue->grp->napi_rx, skb);
16747 +               /* Update to the next pointer */
16748 +               bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
16749  
16750 -               skb = NULL;
16751 +               /* update to point at the next skb */
16752 +               rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
16753 +                                     RX_RING_MOD_MASK(rx_queue->rx_ring_size);
16754         }
16755  
16756 -       /* Store incomplete frames for completion */
16757 -       rx_queue->skb = skb;
16758 -
16759 -       rx_queue->stats.rx_packets += total_pkts;
16760 -       rx_queue->stats.rx_bytes += total_bytes;
16761 -
16762 -       if (cleaned_cnt)
16763 -               gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
16764 -
16765 -       /* Update Last Free RxBD pointer for LFC */
16766 -       if (unlikely(priv->tx_actual_en)) {
16767 -               u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
16768 -
16769 -               gfar_write(rx_queue->rfbptr, bdp_dma);
16770 -       }
16771 +       /* Update the current rxbd pointer to be the next one */
16772 +       rx_queue->cur_rx = bdp;
16773  
16774         return howmany;
16775  }
16776 @@ -3396,7 +3208,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
16777  static void adjust_link(struct net_device *dev)
16778  {
16779         struct gfar_private *priv = netdev_priv(dev);
16780 -       struct phy_device *phydev = dev->phydev;
16781 +       struct phy_device *phydev = priv->phydev;
16782  
16783         if (unlikely(phydev->link != priv->oldlink ||
16784                      (phydev->link && (phydev->duplex != priv->oldduplex ||
16785 @@ -3599,19 +3411,30 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
16786                 if (events & IEVENT_CRL)
16787                         dev->stats.tx_aborted_errors++;
16788                 if (events & IEVENT_XFUN) {
16789 +                       unsigned long flags;
16790 +
16791                         netif_dbg(priv, tx_err, dev,
16792                                   "TX FIFO underrun, packet dropped\n");
16793                         dev->stats.tx_dropped++;
16794                         atomic64_inc(&priv->extra_stats.tx_underrun);
16795  
16796 -                       schedule_work(&priv->reset_task);
16797 +                       local_irq_save(flags);
16798 +                       lock_tx_qs(priv);
16799 +
16800 +                       /* Reactivate the Tx Queues */
16801 +                       gfar_write(&regs->tstat, gfargrp->tstat);
16802 +
16803 +                       unlock_tx_qs(priv);
16804 +                       local_irq_restore(flags);
16805                 }
16806                 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
16807         }
16808         if (events & IEVENT_BSY) {
16809 -               dev->stats.rx_over_errors++;
16810 +               dev->stats.rx_errors++;
16811                 atomic64_inc(&priv->extra_stats.rx_bsy);
16812  
16813 +               gfar_receive(irq, grp_id);
16814 +
16815                 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
16816                           gfar_read(&regs->rstat));
16817         }
16818 @@ -3637,8 +3460,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
16819  
16820  static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
16821  {
16822 -       struct net_device *ndev = priv->ndev;
16823 -       struct phy_device *phydev = ndev->phydev;
16824 +       struct phy_device *phydev = priv->phydev;
16825         u32 val = 0;
16826  
16827         if (!phydev->duplex)
16828 @@ -3678,10 +3500,10 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
16829  static noinline void gfar_update_link_state(struct gfar_private *priv)
16830  {
16831         struct gfar __iomem *regs = priv->gfargrp[0].regs;
16832 -       struct net_device *ndev = priv->ndev;
16833 -       struct phy_device *phydev = ndev->phydev;
16834 +       struct phy_device *phydev = priv->phydev;
16835         struct gfar_priv_rx_q *rx_queue = NULL;
16836         int i;
16837 +       struct rxbd8 *bdp;
16838  
16839         if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
16840                 return;
16841 @@ -3738,11 +3560,15 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
16842                 /* Turn last free buffer recording on */
16843                 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
16844                         for (i = 0; i < priv->num_rx_queues; i++) {
16845 -                               u32 bdp_dma;
16846 -
16847                                 rx_queue = priv->rx_queue[i];
16848 -                               bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
16849 -                               gfar_write(rx_queue->rfbptr, bdp_dma);
16850 +                               bdp = rx_queue->cur_rx;
16851 +                               /* skip to previous bd */
16852 +                               bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
16853 +                                             rx_queue->rx_bd_base,
16854 +                                             rx_queue->rx_ring_size);
16855 +
16856 +                               if (rx_queue->rfbptr)
16857 +                                       gfar_write(rx_queue->rfbptr, (u32)bdp);
16858                         }
16859  
16860                         priv->tx_actual_en = 1;
16861 diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
16862 index 6e8a9c8..daa1d37 100644
16863 --- a/drivers/net/ethernet/freescale/gianfar.h
16864 +++ b/drivers/net/ethernet/freescale/gianfar.h
16865 @@ -71,6 +71,11 @@ struct ethtool_rx_list {
16866  /* Number of bytes to align the rx bufs to */
16867  #define RXBUF_ALIGNMENT 64
16868  
16869 +/* The number of bytes which composes a unit for the purpose of
16870 + * allocating data buffers.  ie-for any given MTU, the data buffer
16871 + * will be the next highest multiple of 512 bytes. */
16872 +#define INCREMENTAL_BUFFER_SIZE 512
16873 +
16874  #define PHY_INIT_TIMEOUT 100000
16875  
16876  #define DRV_NAME "gfar-enet"
16877 @@ -87,8 +92,6 @@ extern const char gfar_driver_version[];
16878  #define DEFAULT_TX_RING_SIZE   256
16879  #define DEFAULT_RX_RING_SIZE   256
16880  
16881 -#define GFAR_RX_BUFF_ALLOC     16
16882 -
16883  #define GFAR_RX_MAX_RING_SIZE   256
16884  #define GFAR_TX_MAX_RING_SIZE   256
16885  
16886 @@ -100,15 +103,11 @@ extern const char gfar_driver_version[];
16887  #define DEFAULT_RX_LFC_THR  16
16888  #define DEFAULT_LFC_PTVVAL  4
16889  
16890 -/* prevent fragmenation by HW in DSA environments */
16891 -#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
16892 -#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
16893 -                         + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
16894 -#define GFAR_RXB_TRUESIZE 2048
16895 -
16896 +#define DEFAULT_RX_BUFFER_SIZE  1536
16897  #define TX_RING_MOD_MASK(size) (size-1)
16898  #define RX_RING_MOD_MASK(size) (size-1)
16899 -#define GFAR_JUMBO_FRAME_SIZE 9600
16900 +#define JUMBO_BUFFER_SIZE 9728
16901 +#define JUMBO_FRAME_SIZE 9600
16902  
16903  #define DEFAULT_FIFO_TX_THR 0x100
16904  #define DEFAULT_FIFO_TX_STARVE 0x40
16905 @@ -341,7 +340,6 @@ extern const char gfar_driver_version[];
16906  #define IEVENT_MAG             0x00000800
16907  #define IEVENT_GRSC            0x00000100
16908  #define IEVENT_RXF0            0x00000080
16909 -#define IEVENT_FGPI            0x00000010
16910  #define IEVENT_FIR             0x00000008
16911  #define IEVENT_FIQ             0x00000004
16912  #define IEVENT_DPE             0x00000002
16913 @@ -374,7 +372,6 @@ extern const char gfar_driver_version[];
16914  #define IMASK_MAG              0x00000800
16915  #define IMASK_GRSC              0x00000100
16916  #define IMASK_RXFEN0           0x00000080
16917 -#define IMASK_FGPI             0x00000010
16918  #define IMASK_FIR              0x00000008
16919  #define IMASK_FIQ              0x00000004
16920  #define IMASK_DPE              0x00000002
16921 @@ -543,9 +540,6 @@ extern const char gfar_driver_version[];
16922  
16923  #define GFAR_INT_NAME_MAX      (IFNAMSIZ + 6)  /* '_g#_xx' */
16924  
16925 -#define GFAR_WOL_MAGIC         0x00000001
16926 -#define GFAR_WOL_FILER_UCAST   0x00000002
16927 -
16928  struct txbd8
16929  {
16930         union {
16931 @@ -646,7 +640,6 @@ struct rmon_mib
16932  };
16933  
16934  struct gfar_extra_stats {
16935 -       atomic64_t rx_alloc_err;
16936         atomic64_t rx_large;
16937         atomic64_t rx_short;
16938         atomic64_t rx_nonoctet;
16939 @@ -658,6 +651,7 @@ struct gfar_extra_stats {
16940         atomic64_t eberr;
16941         atomic64_t tx_babt;
16942         atomic64_t tx_underrun;
16943 +       atomic64_t rx_skbmissing;
16944         atomic64_t tx_timeout;
16945  };
16946  
16947 @@ -923,8 +917,6 @@ struct gfar {
16948  #define FSL_GIANFAR_DEV_HAS_BD_STASHING                0x00000200
16949  #define FSL_GIANFAR_DEV_HAS_BUF_STASHING       0x00000400
16950  #define FSL_GIANFAR_DEV_HAS_TIMER              0x00000800
16951 -#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER      0x00001000
16952 -#define FSL_GIANFAR_DEV_HAS_RX_FILER           0x00002000
16953  
16954  #if (MAXGROUPS == 2)
16955  #define DEFAULT_MAPPING        0xAA
16956 @@ -1020,42 +1012,34 @@ struct rx_q_stats {
16957         unsigned long rx_dropped;
16958  };
16959  
16960 -struct gfar_rx_buff {
16961 -       dma_addr_t dma;
16962 -       struct page *page;
16963 -       unsigned int page_offset;
16964 -};
16965 -
16966  /**
16967   *     struct gfar_priv_rx_q - per rx queue structure
16968 - *     @rx_buff: Array of buffer info metadata structs
16969 + *     @rx_skbuff: skb pointers
16970 + *     @skb_currx: currently use skb pointer
16971   *     @rx_bd_base: First rx buffer descriptor
16972 - *     @next_to_use: index of the next buffer to be alloc'd
16973 - *     @next_to_clean: index of the next buffer to be cleaned
16974 + *     @cur_rx: Next free rx ring entry
16975   *     @qindex: index of this queue
16976 - *     @ndev: back pointer to net_device
16977 + *     @dev: back pointer to the dev structure
16978   *     @rx_ring_size: Rx ring size
16979   *     @rxcoalescing: enable/disable rx-coalescing
16980   *     @rxic: receive interrupt coalescing vlaue
16981   */
16982  
16983  struct gfar_priv_rx_q {
16984 -       struct  gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
16985 +       struct  sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
16986 +       dma_addr_t rx_bd_dma_base;
16987         struct  rxbd8 *rx_bd_base;
16988 -       struct  net_device *ndev;
16989 -       struct  device *dev;
16990 -       u16 rx_ring_size;
16991 -       u16 qindex;
16992 -       struct  gfar_priv_grp *grp;
16993 -       u16 next_to_clean;
16994 -       u16 next_to_use;
16995 -       u16 next_to_alloc;
16996 -       struct  sk_buff *skb;
16997 +       struct  rxbd8 *cur_rx;
16998 +       struct  net_device *dev;
16999 +       struct gfar_priv_grp *grp;
17000         struct rx_q_stats stats;
17001 -       u32 __iomem *rfbptr;
17002 +       u16     skb_currx;
17003 +       u16     qindex;
17004 +       unsigned int    rx_ring_size;
17005 +       /* RX Coalescing values */
17006         unsigned char rxcoalescing;
17007         unsigned long rxic;
17008 -       dma_addr_t rx_bd_dma_base;
17009 +       u32 __iomem *rfbptr;
17010  };
17011  
17012  enum gfar_irqinfo_id {
17013 @@ -1125,6 +1109,7 @@ struct gfar_private {
17014         struct device *dev;
17015         struct net_device *ndev;
17016         enum gfar_errata errata;
17017 +       unsigned int rx_buffer_size;
17018  
17019         u16 uses_rxfcb;
17020         u16 padding;
17021 @@ -1154,11 +1139,15 @@ struct gfar_private {
17022         phy_interface_t interface;
17023         struct device_node *phy_node;
17024         struct device_node *tbi_node;
17025 +       struct phy_device *phydev;
17026         struct mii_bus *mii_bus;
17027         int oldspeed;
17028         int oldduplex;
17029         int oldlink;
17030  
17031 +       /* Bitfield update lock */
17032 +       spinlock_t bflock;
17033 +
17034         uint32_t msg_enable;
17035  
17036         struct work_struct reset_task;
17037 @@ -1168,6 +1157,8 @@ struct gfar_private {
17038                 extended_hash:1,
17039                 bd_stash_en:1,
17040                 rx_filer_enable:1,
17041 +               /* Wake-on-LAN enabled */
17042 +               wol_en:1,
17043                 /* Enable priorty based Tx scheduling in Hw */
17044                 prio_sched_en:1,
17045                 /* Flow control flags */
17046 @@ -1196,10 +1187,6 @@ struct gfar_private {
17047         u32 __iomem *hash_regs[16];
17048         int hash_width;
17049  
17050 -       /* wake-on-lan settings */
17051 -       u16 wol_opts;
17052 -       u16 wol_supported;
17053 -
17054         /*Filer table*/
17055         unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
17056         unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
17057 @@ -1308,28 +1295,6 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
17058         bdp->lstatus = cpu_to_be32(lstatus);
17059  }
17060  
17061 -static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
17062 -{
17063 -       if (rxq->next_to_clean > rxq->next_to_use)
17064 -               return rxq->next_to_clean - rxq->next_to_use - 1;
17065 -
17066 -       return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
17067 -}
17068 -
17069 -static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
17070 -{
17071 -       struct rxbd8 *bdp;
17072 -       u32 bdp_dma;
17073 -       int i;
17074 -
17075 -       i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
17076 -       bdp = &rxq->rx_bd_base[i];
17077 -       bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
17078 -       bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
17079 -
17080 -       return bdp_dma;
17081 -}
17082 -
17083  irqreturn_t gfar_receive(int irq, void *dev_id);
17084  int startup_gfar(struct net_device *dev);
17085  void stop_gfar(struct net_device *dev);
17086 diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
17087 index 56588f2..fda12fb 100644
17088 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
17089 +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
17090 @@ -61,8 +61,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
17091                           struct ethtool_drvinfo *drvinfo);
17092  
17093  static const char stat_gstrings[][ETH_GSTRING_LEN] = {
17094 -       /* extra stats */
17095 -       "rx-allocation-errors",
17096         "rx-large-frame-errors",
17097         "rx-short-frame-errors",
17098         "rx-non-octet-errors",
17099 @@ -74,8 +72,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
17100         "ethernet-bus-error",
17101         "tx-babbling-errors",
17102         "tx-underrun-errors",
17103 +       "rx-skb-missing-errors",
17104         "tx-timeout-errors",
17105 -       /* rmon stats */
17106         "tx-rx-64-frames",
17107         "tx-rx-65-127-frames",
17108         "tx-rx-128-255-frames",
17109 @@ -182,6 +180,42 @@ static void gfar_gdrvinfo(struct net_device *dev,
17110                 sizeof(drvinfo->version));
17111         strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
17112         strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
17113 +       drvinfo->regdump_len = 0;
17114 +       drvinfo->eedump_len = 0;
17115 +}
17116 +
17117 +
17118 +static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
17119 +{
17120 +       struct gfar_private *priv = netdev_priv(dev);
17121 +       struct phy_device *phydev = priv->phydev;
17122 +
17123 +       if (NULL == phydev)
17124 +               return -ENODEV;
17125 +
17126 +       return phy_ethtool_sset(phydev, cmd);
17127 +}
17128 +
17129 +
17130 +/* Return the current settings in the ethtool_cmd structure */
17131 +static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
17132 +{
17133 +       struct gfar_private *priv = netdev_priv(dev);
17134 +       struct phy_device *phydev = priv->phydev;
17135 +       struct gfar_priv_rx_q *rx_queue = NULL;
17136 +       struct gfar_priv_tx_q *tx_queue = NULL;
17137 +
17138 +       if (NULL == phydev)
17139 +               return -ENODEV;
17140 +       tx_queue = priv->tx_queue[0];
17141 +       rx_queue = priv->rx_queue[0];
17142 +
17143 +       /* etsec-1.7 and older versions have only one txic
17144 +        * and rxic regs although they support multiple queues */
17145 +       cmd->maxtxpkt = get_icft_value(tx_queue->txic);
17146 +       cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
17147 +
17148 +       return phy_ethtool_gset(phydev, cmd);
17149  }
17150  
17151  /* Return the length of the register structure */
17152 @@ -208,12 +242,10 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
17153  static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
17154                                      unsigned int usecs)
17155  {
17156 -       struct net_device *ndev = priv->ndev;
17157 -       struct phy_device *phydev = ndev->phydev;
17158         unsigned int count;
17159  
17160         /* The timer is different, depending on the interface speed */
17161 -       switch (phydev->speed) {
17162 +       switch (priv->phydev->speed) {
17163         case SPEED_1000:
17164                 count = GFAR_GBIT_TIME;
17165                 break;
17166 @@ -235,12 +267,10 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
17167  static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
17168                                      unsigned int ticks)
17169  {
17170 -       struct net_device *ndev = priv->ndev;
17171 -       struct phy_device *phydev = ndev->phydev;
17172         unsigned int count;
17173  
17174         /* The timer is different, depending on the interface speed */
17175 -       switch (phydev->speed) {
17176 +       switch (priv->phydev->speed) {
17177         case SPEED_1000:
17178                 count = GFAR_GBIT_TIME;
17179                 break;
17180 @@ -274,7 +304,7 @@ static int gfar_gcoalesce(struct net_device *dev,
17181         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
17182                 return -EOPNOTSUPP;
17183  
17184 -       if (!dev->phydev)
17185 +       if (NULL == priv->phydev)
17186                 return -ENODEV;
17187  
17188         rx_queue = priv->rx_queue[0];
17189 @@ -335,7 +365,7 @@ static int gfar_scoalesce(struct net_device *dev,
17190         if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
17191                 return -EOPNOTSUPP;
17192  
17193 -       if (!dev->phydev)
17194 +       if (NULL == priv->phydev)
17195                 return -ENODEV;
17196  
17197         /* Check the bounds of the values */
17198 @@ -499,7 +529,7 @@ static int gfar_spauseparam(struct net_device *dev,
17199                             struct ethtool_pauseparam *epause)
17200  {
17201         struct gfar_private *priv = netdev_priv(dev);
17202 -       struct phy_device *phydev = dev->phydev;
17203 +       struct phy_device *phydev = priv->phydev;
17204         struct gfar __iomem *regs = priv->gfargrp[0].regs;
17205         u32 oldadv, newadv;
17206  
17207 @@ -612,49 +642,31 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
17208  {
17209         struct gfar_private *priv = netdev_priv(dev);
17210  
17211 -       wol->supported = 0;
17212 -       wol->wolopts = 0;
17213 -
17214 -       if (priv->wol_supported & GFAR_WOL_MAGIC)
17215 -               wol->supported |= WAKE_MAGIC;
17216 -
17217 -       if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
17218 -               wol->supported |= WAKE_UCAST;
17219 -
17220 -       if (priv->wol_opts & GFAR_WOL_MAGIC)
17221 -               wol->wolopts |= WAKE_MAGIC;
17222 -
17223 -       if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
17224 -               wol->wolopts |= WAKE_UCAST;
17225 +       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
17226 +               wol->supported = WAKE_MAGIC;
17227 +               wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
17228 +       } else {
17229 +               wol->supported = wol->wolopts = 0;
17230 +       }
17231  }
17232  
17233  static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
17234  {
17235         struct gfar_private *priv = netdev_priv(dev);
17236 -       u16 wol_opts = 0;
17237 -       int err;
17238 +       unsigned long flags;
17239  
17240 -       if (!priv->wol_supported && wol->wolopts)
17241 +       if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
17242 +           wol->wolopts != 0)
17243                 return -EINVAL;
17244  
17245 -       if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
17246 +       if (wol->wolopts & ~WAKE_MAGIC)
17247                 return -EINVAL;
17248  
17249 -       if (wol->wolopts & WAKE_MAGIC) {
17250 -               wol_opts |= GFAR_WOL_MAGIC;
17251 -       } else {
17252 -               if (wol->wolopts & WAKE_UCAST)
17253 -                       wol_opts |= GFAR_WOL_FILER_UCAST;
17254 -       }
17255 -
17256 -       wol_opts &= priv->wol_supported;
17257 -       priv->wol_opts = 0;
17258 -
17259 -       err = device_set_wakeup_enable(priv->dev, wol_opts);
17260 -       if (err)
17261 -               return err;
17262 +       device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
17263  
17264 -       priv->wol_opts = wol_opts;
17265 +       spin_lock_irqsave(&priv->bflock, flags);
17266 +       priv->wol_en =  !!device_may_wakeup(&dev->dev);
17267 +       spin_unlock_irqrestore(&priv->bflock, flags);
17268  
17269         return 0;
17270  }
17271 @@ -665,14 +677,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
17272         u32 fcr = 0x0, fpr = FPR_FILER_MASK;
17273  
17274         if (ethflow & RXH_L2DA) {
17275 -               fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
17276 +               fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
17277                       RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
17278                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
17279                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
17280                 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
17281                 priv->cur_filer_idx = priv->cur_filer_idx - 1;
17282  
17283 -               fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
17284 +               fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
17285                       RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
17286                 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
17287                 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
17288 @@ -891,6 +903,27 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
17289         return 0;
17290  }
17291  
17292 +static int gfar_comp_asc(const void *a, const void *b)
17293 +{
17294 +       return memcmp(a, b, 4);
17295 +}
17296 +
17297 +static int gfar_comp_desc(const void *a, const void *b)
17298 +{
17299 +       return -memcmp(a, b, 4);
17300 +}
17301 +
17302 +static void gfar_swap(void *a, void *b, int size)
17303 +{
17304 +       u32 *_a = a;
17305 +       u32 *_b = b;
17306 +
17307 +       swap(_a[0], _b[0]);
17308 +       swap(_a[1], _b[1]);
17309 +       swap(_a[2], _b[2]);
17310 +       swap(_a[3], _b[3]);
17311 +}
17312 +
17313  /* Write a mask to filer cache */
17314  static void gfar_set_mask(u32 mask, struct filer_table *tab)
17315  {
17316 @@ -1240,6 +1273,310 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
17317         return 0;
17318  }
17319  
17320 +/* Copy size filer entries */
17321 +static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
17322 +                                   struct gfar_filer_entry src[0], s32 size)
17323 +{
17324 +       while (size > 0) {
17325 +               size--;
17326 +               dst[size].ctrl = src[size].ctrl;
17327 +               dst[size].prop = src[size].prop;
17328 +       }
17329 +}
17330 +
17331 +/* Delete the contents of the filer-table between start and end
17332 + * and collapse them
17333 + */
17334 +static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
17335 +{
17336 +       int length;
17337 +
17338 +       if (end > MAX_FILER_CACHE_IDX || end < begin)
17339 +               return -EINVAL;
17340 +
17341 +       end++;
17342 +       length = end - begin;
17343 +
17344 +       /* Copy */
17345 +       while (end < tab->index) {
17346 +               tab->fe[begin].ctrl = tab->fe[end].ctrl;
17347 +               tab->fe[begin++].prop = tab->fe[end++].prop;
17348 +
17349 +       }
17350 +       /* Fill up with don't cares */
17351 +       while (begin < tab->index) {
17352 +               tab->fe[begin].ctrl = 0x60;
17353 +               tab->fe[begin].prop = 0xFFFFFFFF;
17354 +               begin++;
17355 +       }
17356 +
17357 +       tab->index -= length;
17358 +       return 0;
17359 +}
17360 +
17361 +/* Make space on the wanted location */
17362 +static int gfar_expand_filer_entries(u32 begin, u32 length,
17363 +                                    struct filer_table *tab)
17364 +{
17365 +       if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
17366 +           begin > MAX_FILER_CACHE_IDX)
17367 +               return -EINVAL;
17368 +
17369 +       gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
17370 +                               tab->index - length + 1);
17371 +
17372 +       tab->index += length;
17373 +       return 0;
17374 +}
17375 +
17376 +static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
17377 +{
17378 +       for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
17379 +            start++) {
17380 +               if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
17381 +                   (RQFCR_AND | RQFCR_CLE))
17382 +                       return start;
17383 +       }
17384 +       return -1;
17385 +}
17386 +
17387 +static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
17388 +{
17389 +       for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
17390 +            start++) {
17391 +               if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
17392 +                   (RQFCR_CLE))
17393 +                       return start;
17394 +       }
17395 +       return -1;
17396 +}
17397 +
17398 +/* Uses hardwares clustering option to reduce
17399 + * the number of filer table entries
17400 + */
17401 +static void gfar_cluster_filer(struct filer_table *tab)
17402 +{
17403 +       s32 i = -1, j, iend, jend;
17404 +
17405 +       while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
17406 +               j = i;
17407 +               while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
17408 +                       /* The cluster entries self and the previous one
17409 +                        * (a mask) must be identical!
17410 +                        */
17411 +                       if (tab->fe[i].ctrl != tab->fe[j].ctrl)
17412 +                               break;
17413 +                       if (tab->fe[i].prop != tab->fe[j].prop)
17414 +                               break;
17415 +                       if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
17416 +                               break;
17417 +                       if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
17418 +                               break;
17419 +                       iend = gfar_get_next_cluster_end(i, tab);
17420 +                       jend = gfar_get_next_cluster_end(j, tab);
17421 +                       if (jend == -1 || iend == -1)
17422 +                               break;
17423 +
17424 +                       /* First we make some free space, where our cluster
17425 +                        * element should be. Then we copy it there and finally
17426 +                        * delete in from its old location.
17427 +                        */
17428 +                       if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
17429 +                           -EINVAL)
17430 +                               break;
17431 +
17432 +                       gfar_copy_filer_entries(&(tab->fe[iend + 1]),
17433 +                                               &(tab->fe[jend + 1]), jend - j);
17434 +
17435 +                       if (gfar_trim_filer_entries(jend - 1,
17436 +                                                   jend + (jend - j),
17437 +                                                   tab) == -EINVAL)
17438 +                               return;
17439 +
17440 +                       /* Mask out cluster bit */
17441 +                       tab->fe[iend].ctrl &= ~(RQFCR_CLE);
17442 +               }
17443 +       }
17444 +}
17445 +
17446 +/* Swaps the masked bits of a1<>a2 and b1<>b2 */
17447 +static void gfar_swap_bits(struct gfar_filer_entry *a1,
17448 +                          struct gfar_filer_entry *a2,
17449 +                          struct gfar_filer_entry *b1,
17450 +                          struct gfar_filer_entry *b2, u32 mask)
17451 +{
17452 +       u32 temp[4];
17453 +       temp[0] = a1->ctrl & mask;
17454 +       temp[1] = a2->ctrl & mask;
17455 +       temp[2] = b1->ctrl & mask;
17456 +       temp[3] = b2->ctrl & mask;
17457 +
17458 +       a1->ctrl &= ~mask;
17459 +       a2->ctrl &= ~mask;
17460 +       b1->ctrl &= ~mask;
17461 +       b2->ctrl &= ~mask;
17462 +
17463 +       a1->ctrl |= temp[1];
17464 +       a2->ctrl |= temp[0];
17465 +       b1->ctrl |= temp[3];
17466 +       b2->ctrl |= temp[2];
17467 +}
17468 +
17469 +/* Generate a list consisting of masks values with their start and
17470 + * end of validity and block as indicator for parts belonging
17471 + * together (glued by ANDs) in mask_table
17472 + */
17473 +static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
17474 +                                   struct filer_table *tab)
17475 +{
17476 +       u32 i, and_index = 0, block_index = 1;
17477 +
17478 +       for (i = 0; i < tab->index; i++) {
17479 +
17480 +               /* LSByte of control = 0 sets a mask */
17481 +               if (!(tab->fe[i].ctrl & 0xF)) {
17482 +                       mask_table[and_index].mask = tab->fe[i].prop;
17483 +                       mask_table[and_index].start = i;
17484 +                       mask_table[and_index].block = block_index;
17485 +                       if (and_index >= 1)
17486 +                               mask_table[and_index - 1].end = i - 1;
17487 +                       and_index++;
17488 +               }
17489 +               /* cluster starts and ends will be separated because they should
17490 +                * hold their position
17491 +                */
17492 +               if (tab->fe[i].ctrl & RQFCR_CLE)
17493 +                       block_index++;
17494 +               /* A not set AND indicates the end of a depended block */
17495 +               if (!(tab->fe[i].ctrl & RQFCR_AND))
17496 +                       block_index++;
17497 +       }
17498 +
17499 +       mask_table[and_index - 1].end = i - 1;
17500 +
17501 +       return and_index;
17502 +}
17503 +
17504 +/* Sorts the entries of mask_table by the values of the masks.
17505 + * Important: The 0xFF80 flags of the first and last entry of a
17506 + * block must hold their position (which queue, CLusterEnable, ReJEct,
17507 + * AND)
17508 + */
17509 +static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
17510 +                                struct filer_table *temp_table, u32 and_index)
17511 +{
17512 +       /* Pointer to compare function (_asc or _desc) */
17513 +       int (*gfar_comp)(const void *, const void *);
17514 +
17515 +       u32 i, size = 0, start = 0, prev = 1;
17516 +       u32 old_first, old_last, new_first, new_last;
17517 +
17518 +       gfar_comp = &gfar_comp_desc;
17519 +
17520 +       for (i = 0; i < and_index; i++) {
17521 +               if (prev != mask_table[i].block) {
17522 +                       old_first = mask_table[start].start + 1;
17523 +                       old_last = mask_table[i - 1].end;
17524 +                       sort(mask_table + start, size,
17525 +                            sizeof(struct gfar_mask_entry),
17526 +                            gfar_comp, &gfar_swap);
17527 +
17528 +                       /* Toggle order for every block. This makes the
17529 +                        * thing more efficient!
17530 +                        */
17531 +                       if (gfar_comp == gfar_comp_desc)
17532 +                               gfar_comp = &gfar_comp_asc;
17533 +                       else
17534 +                               gfar_comp = &gfar_comp_desc;
17535 +
17536 +                       new_first = mask_table[start].start + 1;
17537 +                       new_last = mask_table[i - 1].end;
17538 +
17539 +                       gfar_swap_bits(&temp_table->fe[new_first],
17540 +                                      &temp_table->fe[old_first],
17541 +                                      &temp_table->fe[new_last],
17542 +                                      &temp_table->fe[old_last],
17543 +                                      RQFCR_QUEUE | RQFCR_CLE |
17544 +                                      RQFCR_RJE | RQFCR_AND);
17545 +
17546 +                       start = i;
17547 +                       size = 0;
17548 +               }
17549 +               size++;
17550 +               prev = mask_table[i].block;
17551 +       }
17552 +}
17553 +
17554 +/* Reduces the number of masks needed in the filer table to save entries
17555 + * This is done by sorting the masks of a depended block. A depended block is
17556 + * identified by gluing ANDs or CLE. The sorting order toggles after every
17557 + * block. Of course entries in scope of a mask must change their location with
17558 + * it.
17559 + */
17560 +static int gfar_optimize_filer_masks(struct filer_table *tab)
17561 +{
17562 +       struct filer_table *temp_table;
17563 +       struct gfar_mask_entry *mask_table;
17564 +
17565 +       u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
17566 +       s32 ret = 0;
17567 +
17568 +       /* We need a copy of the filer table because
17569 +        * we want to change its order
17570 +        */
17571 +       temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
17572 +       if (temp_table == NULL)
17573 +               return -ENOMEM;
17574 +
17575 +       mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
17576 +                            sizeof(struct gfar_mask_entry), GFP_KERNEL);
17577 +
17578 +       if (mask_table == NULL) {
17579 +               ret = -ENOMEM;
17580 +               goto end;
17581 +       }
17582 +
17583 +       and_index = gfar_generate_mask_table(mask_table, tab);
17584 +
17585 +       gfar_sort_mask_table(mask_table, temp_table, and_index);
17586 +
17587 +       /* Now we can copy the data from our duplicated filer table to
17588 +        * the real one in the order the mask table says
17589 +        */
17590 +       for (i = 0; i < and_index; i++) {
17591 +               size = mask_table[i].end - mask_table[i].start + 1;
17592 +               gfar_copy_filer_entries(&(tab->fe[j]),
17593 +                               &(temp_table->fe[mask_table[i].start]), size);
17594 +               j += size;
17595 +       }
17596 +
17597 +       /* And finally we just have to check for duplicated masks and drop the
17598 +        * second ones
17599 +        */
17600 +       for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
17601 +               if (tab->fe[i].ctrl == 0x80) {
17602 +                       previous_mask = i++;
17603 +                       break;
17604 +               }
17605 +       }
17606 +       for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
17607 +               if (tab->fe[i].ctrl == 0x80) {
17608 +                       if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
17609 +                               /* Two identical ones found!
17610 +                                * So drop the second one!
17611 +                                */
17612 +                               gfar_trim_filer_entries(i, i, tab);
17613 +                       } else
17614 +                               /* Not identical! */
17615 +                               previous_mask = i;
17616 +               }
17617 +       }
17618 +
17619 +       kfree(mask_table);
17620 +end:   kfree(temp_table);
17621 +       return ret;
17622 +}
17623 +
17624  /* Write the bit-pattern from software's buffer to hardware registers */
17625  static int gfar_write_filer_table(struct gfar_private *priv,
17626                                   struct filer_table *tab)
17627 @@ -1249,10 +1586,11 @@ static int gfar_write_filer_table(struct gfar_private *priv,
17628                 return -EBUSY;
17629  
17630         /* Fill regular entries */
17631 -       for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
17632 +       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
17633 +            i++)
17634                 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
17635         /* Fill the rest with fall-troughs */
17636 -       for (; i < MAX_FILER_IDX; i++)
17637 +       for (; i < MAX_FILER_IDX - 1; i++)
17638                 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
17639         /* Last entry must be default accept
17640          * because that's what people expect
17641 @@ -1286,6 +1624,7 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
17642  {
17643         struct ethtool_flow_spec_container *j;
17644         struct filer_table *tab;
17645 +       s32 i = 0;
17646         s32 ret = 0;
17647  
17648         /* So index is set to zero, too! */
17649 @@ -1310,6 +1649,17 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
17650                 }
17651         }
17652  
17653 +       i = tab->index;
17654 +
17655 +       /* Optimizations to save entries */
17656 +       gfar_cluster_filer(tab);
17657 +       gfar_optimize_filer_masks(tab);
17658 +
17659 +       pr_debug("\tSummary:\n"
17660 +                "\tData on hardware: %d\n"
17661 +                "\tCompression rate: %d%%\n",
17662 +                tab->index, 100 - (100 * tab->index) / i);
17663 +
17664         /* Write everything to hardware */
17665         ret = gfar_write_filer_table(priv, tab);
17666         if (ret == -EBUSY) {
17667 @@ -1375,14 +1725,13 @@ static int gfar_add_cls(struct gfar_private *priv,
17668         }
17669  
17670  process:
17671 -       priv->rx_list.count++;
17672         ret = gfar_process_filer_changes(priv);
17673         if (ret)
17674                 goto clean_list;
17675 +       priv->rx_list.count++;
17676         return ret;
17677  
17678  clean_list:
17679 -       priv->rx_list.count--;
17680         list_del(&temp->list);
17681  clean_mem:
17682         kfree(temp);
17683 @@ -1535,6 +1884,8 @@ static int gfar_get_ts_info(struct net_device *dev,
17684  }
17685  
17686  const struct ethtool_ops gfar_ethtool_ops = {
17687 +       .get_settings = gfar_gsettings,
17688 +       .set_settings = gfar_ssettings,
17689         .get_drvinfo = gfar_gdrvinfo,
17690         .get_regs_len = gfar_reglen,
17691         .get_regs = gfar_get_regs,
17692 @@ -1557,6 +1908,4 @@ const struct ethtool_ops gfar_ethtool_ops = {
17693         .set_rxnfc = gfar_set_nfc,
17694         .get_rxnfc = gfar_get_nfc,
17695         .get_ts_info = gfar_get_ts_info,
17696 -       .get_link_ksettings = phy_ethtool_get_link_ksettings,
17697 -       .set_link_ksettings = phy_ethtool_set_link_ksettings,
17698  };
17699 diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
17700 index 5779881..8e3cd77 100644
17701 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c
17702 +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
17703 @@ -422,6 +422,19 @@ static struct ptp_clock_info ptp_gianfar_caps = {
17704         .enable         = ptp_gianfar_enable,
17705  };
17706  
17707 +/* OF device tree */
17708 +
17709 +static int get_of_u32(struct device_node *node, char *str, u32 *val)
17710 +{
17711 +       int plen;
17712 +       const u32 *prop = of_get_property(node, str, &plen);
17713 +
17714 +       if (!prop || plen != sizeof(*prop))
17715 +               return -1;
17716 +       *val = *prop;
17717 +       return 0;
17718 +}
17719 +
17720  static int gianfar_ptp_probe(struct platform_device *dev)
17721  {
17722         struct device_node *node = dev->dev.of_node;
17723 @@ -439,28 +452,22 @@ static int gianfar_ptp_probe(struct platform_device *dev)
17724  
17725         etsects->caps = ptp_gianfar_caps;
17726  
17727 -       if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel))
17728 +       if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
17729                 etsects->cksel = DEFAULT_CKSEL;
17730  
17731 -       if (of_property_read_u32(node,
17732 -                                "fsl,tclk-period", &etsects->tclk_period) ||
17733 -           of_property_read_u32(node,
17734 -                                "fsl,tmr-prsc", &etsects->tmr_prsc) ||
17735 -           of_property_read_u32(node,
17736 -                                "fsl,tmr-add", &etsects->tmr_add) ||
17737 -           of_property_read_u32(node,
17738 -                                "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
17739 -           of_property_read_u32(node,
17740 -                                "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
17741 -           of_property_read_u32(node,
17742 -                                "fsl,max-adj", &etsects->caps.max_adj)) {
17743 +       if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
17744 +           get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
17745 +           get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
17746 +           get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
17747 +           get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
17748 +           get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
17749                 pr_err("device tree node missing required elements\n");
17750                 goto no_node;
17751         }
17752  
17753         etsects->irq = platform_get_irq(dev, 0);
17754  
17755 -       if (etsects->irq < 0) {
17756 +       if (etsects->irq == NO_IRQ) {
17757                 pr_err("irq not in device tree\n");
17758                 goto no_node;
17759         }
17760 @@ -550,7 +557,6 @@ static const struct of_device_id match_table[] = {
17761         { .compatible = "fsl,etsec-ptp" },
17762         {},
17763  };
17764 -MODULE_DEVICE_TABLE(of, match_table);
17765  
17766  static struct platform_driver gianfar_ptp_driver = {
17767         .driver = {
17768 diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
17769 index f76d332..4dd40e0 100644
17770 --- a/drivers/net/ethernet/freescale/ucc_geth.c
17771 +++ b/drivers/net/ethernet/freescale/ucc_geth.c
17772 @@ -40,10 +40,10 @@
17773  #include <asm/uaccess.h>
17774  #include <asm/irq.h>
17775  #include <asm/io.h>
17776 -#include <soc/fsl/qe/immap_qe.h>
17777 -#include <soc/fsl/qe/qe.h>
17778 -#include <soc/fsl/qe/ucc.h>
17779 -#include <soc/fsl/qe/ucc_fast.h>
17780 +#include <asm/immap_qe.h>
17781 +#include <asm/qe.h>
17782 +#include <asm/ucc.h>
17783 +#include <asm/ucc_fast.h>
17784  #include <asm/machdep.h>
17785  
17786  #include "ucc_geth.h"
17787 @@ -1384,8 +1384,6 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
17788                 value = phy_read(tbiphy, ENET_TBI_MII_CR);
17789                 value &= ~0x1000;       /* Turn off autonegotiation */
17790                 phy_write(tbiphy, ENET_TBI_MII_CR, value);
17791 -
17792 -               put_device(&tbiphy->mdio.dev);
17793         }
17794  
17795         init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
17796 @@ -1704,10 +1702,8 @@ static void uec_configure_serdes(struct net_device *dev)
17797          * everything for us?  Resetting it takes the link down and requires
17798          * several seconds for it to come back.
17799          */
17800 -       if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
17801 -               put_device(&tbiphy->mdio.dev);
17802 +       if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
17803                 return;
17804 -       }
17805  
17806         /* Single clk mode, mii mode off(for serdes communication) */
17807         phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
17808 @@ -1715,8 +1711,6 @@ static void uec_configure_serdes(struct net_device *dev)
17809         phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
17810  
17811         phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
17812 -
17813 -       put_device(&tbiphy->mdio.dev);
17814  }
17815  
17816  /* Configure the PHY for dev.
17817 @@ -3756,7 +3750,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
17818                         return -EINVAL;
17819                 }
17820                 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
17821 -                       pr_err("invalid rx-clock property\n");
17822 +                       pr_err("invalid rx-clock propperty\n");
17823                         return -EINVAL;
17824                 }
17825                 ug_info->uf_info.rx_clock = *prop;
17826 @@ -3868,8 +3862,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
17827         dev = alloc_etherdev(sizeof(*ugeth));
17828  
17829         if (dev == NULL) {
17830 -               err = -ENOMEM;
17831 -               goto err_deregister_fixed_link;
17832 +               of_node_put(ug_info->tbi_node);
17833 +               of_node_put(ug_info->phy_node);
17834 +               return -ENOMEM;
17835         }
17836  
17837         ugeth = netdev_priv(dev);
17838 @@ -3906,7 +3901,10 @@ static int ucc_geth_probe(struct platform_device* ofdev)
17839                 if (netif_msg_probe(ugeth))
17840                         pr_err("%s: Cannot register net device, aborting\n",
17841                                dev->name);
17842 -               goto err_free_netdev;
17843 +               free_netdev(dev);
17844 +               of_node_put(ug_info->tbi_node);
17845 +               of_node_put(ug_info->phy_node);
17846 +               return err;
17847         }
17848  
17849         mac_addr = of_get_mac_address(np);
17850 @@ -3919,29 +3917,16 @@ static int ucc_geth_probe(struct platform_device* ofdev)
17851         ugeth->node = np;
17852  
17853         return 0;
17854 -
17855 -err_free_netdev:
17856 -       free_netdev(dev);
17857 -err_deregister_fixed_link:
17858 -       if (of_phy_is_fixed_link(np))
17859 -               of_phy_deregister_fixed_link(np);
17860 -       of_node_put(ug_info->tbi_node);
17861 -       of_node_put(ug_info->phy_node);
17862 -
17863 -       return err;
17864  }
17865  
17866  static int ucc_geth_remove(struct platform_device* ofdev)
17867  {
17868         struct net_device *dev = platform_get_drvdata(ofdev);
17869         struct ucc_geth_private *ugeth = netdev_priv(dev);
17870 -       struct device_node *np = ofdev->dev.of_node;
17871  
17872         unregister_netdev(dev);
17873         free_netdev(dev);
17874         ucc_geth_memclean(ugeth);
17875 -       if (of_phy_is_fixed_link(np))
17876 -               of_phy_deregister_fixed_link(np);
17877         of_node_put(ugeth->ug_info->tbi_node);
17878         of_node_put(ugeth->ug_info->phy_node);
17879  
17880 diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
17881 index 5da19b4..75f3371 100644
17882 --- a/drivers/net/ethernet/freescale/ucc_geth.h
17883 +++ b/drivers/net/ethernet/freescale/ucc_geth.h
17884 @@ -22,11 +22,11 @@
17885  #include <linux/list.h>
17886  #include <linux/if_ether.h>
17887  
17888 -#include <soc/fsl/qe/immap_qe.h>
17889 -#include <soc/fsl/qe/qe.h>
17890 +#include <asm/immap_qe.h>
17891 +#include <asm/qe.h>
17892  
17893 -#include <soc/fsl/qe/ucc.h>
17894 -#include <soc/fsl/qe/ucc_fast.h>
17895 +#include <asm/ucc.h>
17896 +#include <asm/ucc_fast.h>
17897  
17898  #define DRV_DESC "QE UCC Gigabit Ethernet Controller"
17899  #define DRV_NAME "ucc_geth"
17900 diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
17901 index 812a968..cc83350 100644
17902 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
17903 +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
17904 @@ -105,20 +105,23 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
17905  #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
17906  
17907  static int
17908 -uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
17909 +uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
17910  {
17911         struct ucc_geth_private *ugeth = netdev_priv(netdev);
17912         struct phy_device *phydev = ugeth->phydev;
17913 +       struct ucc_geth_info *ug_info = ugeth->ug_info;
17914  
17915         if (!phydev)
17916                 return -ENODEV;
17917  
17918 -       return phy_ethtool_ksettings_get(phydev, cmd);
17919 +       ecmd->maxtxpkt = 1;
17920 +       ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
17921 +
17922 +       return phy_ethtool_gset(phydev, ecmd);
17923  }
17924  
17925  static int
17926 -uec_set_ksettings(struct net_device *netdev,
17927 -                 const struct ethtool_link_ksettings *cmd)
17928 +uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
17929  {
17930         struct ucc_geth_private *ugeth = netdev_priv(netdev);
17931         struct phy_device *phydev = ugeth->phydev;
17932 @@ -126,7 +129,7 @@ uec_set_ksettings(struct net_device *netdev,
17933         if (!phydev)
17934                 return -ENODEV;
17935  
17936 -       return phy_ethtool_ksettings_set(phydev, cmd);
17937 +       return phy_ethtool_sset(phydev, ecmd);
17938  }
17939  
17940  static void
17941 @@ -348,6 +351,8 @@ uec_get_drvinfo(struct net_device *netdev,
17942         strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
17943         strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
17944         strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
17945 +       drvinfo->eedump_len = 0;
17946 +       drvinfo->regdump_len = uec_get_regs_len(netdev);
17947  }
17948  
17949  #ifdef CONFIG_PM
17950 @@ -389,6 +394,8 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
17951  #endif /* CONFIG_PM */
17952  
17953  static const struct ethtool_ops uec_ethtool_ops = {
17954 +       .get_settings           = uec_get_settings,
17955 +       .set_settings           = uec_set_settings,
17956         .get_drvinfo            = uec_get_drvinfo,
17957         .get_regs_len           = uec_get_regs_len,
17958         .get_regs               = uec_get_regs,
17959 @@ -406,8 +413,6 @@ static const struct ethtool_ops uec_ethtool_ops = {
17960         .get_wol                = uec_get_wol,
17961         .set_wol                = uec_set_wol,
17962         .get_ts_info            = ethtool_op_get_ts_info,
17963 -       .get_link_ksettings     = uec_get_ksettings,
17964 -       .set_link_ksettings     = uec_set_ksettings,
17965  };
17966  
17967  void uec_set_ethtool_ops(struct net_device *netdev)
17968 diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
17969 index e03b30c..7b8fe86 100644
17970 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c
17971 +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
17972 @@ -271,8 +271,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
17973                 goto err_ioremap;
17974         }
17975  
17976 -       priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
17977 -                                                      "little-endian");
17978 +       if (of_get_property(pdev->dev.of_node,
17979 +                           "little-endian", NULL))
17980 +               priv->is_little_endian = true;
17981 +       else
17982 +               priv->is_little_endian = false;
17983  
17984         ret = of_mdiobus_register(bus, np);
17985         if (ret) {