fix machine definition substitution
[meta-kc-bsp.git] / recipes-kernel / linux / linux-karo-4.9.11 / ethernet-update-driver.patch
1 diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
2 index 2204c57..25e3425 100644
3 --- a/drivers/net/ethernet/freescale/Kconfig
4 +++ b/drivers/net/ethernet/freescale/Kconfig
5 @@ -7,10 +7,11 @@ config NET_VENDOR_FREESCALE
6         default y
7         depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
8                    M523x || M527x || M5272 || M528x || M520x || M532x || \
9 -                  ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
10 -                  ARCH_LAYERSCAPE
11 +                  ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
12         ---help---
13 -         If you have a network (Ethernet) card belonging to this class, say Y.
14 +         If you have a network (Ethernet) card belonging to this class, say Y
15 +         and read the Ethernet-HOWTO, available from
16 +         <http://www.tldp.org/docs.html#howto>.
17  
18           Note that the answer to this question doesn't directly affect the
19           kernel: saying N will just cause the configurator to skip all
20 @@ -22,8 +23,8 @@ if NET_VENDOR_FREESCALE
21  config FEC
22         tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
23         depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
24 -                  ARM || ARM64)
25 -       default y
26 +                  ARCH_MXC || SOC_IMX28)
27 +       default ARCH_MXC || SOC_IMX28 if ARM
28         select PHYLIB
29         select PTP_1588_CLOCK
30         ---help---
31 @@ -54,7 +55,6 @@ config FEC_MPC52xx_MDIO
32           If compiled as module, it will be called fec_mpc52xx_phy.
33  
34  source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
35 -source "drivers/net/ethernet/freescale/fman/Kconfig"
36  
37  config FSL_PQ_MDIO
38         tristate "Freescale PQ MDIO"
39 @@ -85,12 +85,12 @@ config UGETH_TX_ON_DEMAND
40  
41  config GIANFAR
42         tristate "Gianfar Ethernet"
43 +       depends on FSL_SOC
44         select FSL_PQ_MDIO
45         select PHYLIB
46         select CRC32
47         ---help---
48           This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
49 -         and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
50 -         on the 8540.
51 +         and MPC86xx family of chips, and the FEC on the 8540.
52  
53  endif # NET_VENDOR_FREESCALE
54 diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
55 index 7f022dd..71debd1 100644
56 --- a/drivers/net/ethernet/freescale/Makefile
57 +++ b/drivers/net/ethernet/freescale/Makefile
58 @@ -3,10 +3,7 @@
59  #
60  
61  obj-$(CONFIG_FEC) += fec.o
62 -fec-objs :=fec_main.o fec_fixup.o fec_ptp.o
63 -CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
64 -CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
65 -
66 +fec-objs :=fec_main.o fec_ptp.o
67  obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
68  ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
69         obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
70 @@ -20,5 +17,3 @@ gianfar_driver-objs := gianfar.o \
71                 gianfar_ethtool.o
72  obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
73  ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
74 -
75 -obj-$(CONFIG_FSL_FMAN) += fman/
76 diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
77 index 1d7b3cc..ecdc711 100644
78 --- a/drivers/net/ethernet/freescale/fec.h
79 +++ b/drivers/net/ethernet/freescale/fec.h
80 @@ -20,8 +20,8 @@
81  #include <linux/timecounter.h>
82  
83  #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
84 -    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
85 -    defined(CONFIG_ARM64)
86 +    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
87 +    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
88  /*
89   *     Just figures, Motorola would have to change the offsets for
90   *     registers in the same peripheral device on different models
91 @@ -192,45 +192,28 @@
92  
93  /*
94   *     Define the buffer descriptor structure.
95 - *
96 - *     Evidently, ARM SoCs have the FEC block generated in a
97 - *     little endian mode so adjust endianness accordingly.
98   */
99 -#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
100 -#define fec32_to_cpu le32_to_cpu
101 -#define fec16_to_cpu le16_to_cpu
102 -#define cpu_to_fec32 cpu_to_le32
103 -#define cpu_to_fec16 cpu_to_le16
104 -#define __fec32 __le32
105 -#define __fec16 __le16
106 -
107 +#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
108  struct bufdesc {
109 -       __fec16 cbd_datlen;     /* Data length */
110 -       __fec16 cbd_sc;         /* Control and status info */
111 -       __fec32 cbd_bufaddr;    /* Buffer address */
112 +       unsigned short cbd_datlen;      /* Data length */
113 +       unsigned short cbd_sc;  /* Control and status info */
114 +       unsigned long cbd_bufaddr;      /* Buffer address */
115  };
116  #else
117 -#define fec32_to_cpu be32_to_cpu
118 -#define fec16_to_cpu be16_to_cpu
119 -#define cpu_to_fec32 cpu_to_be32
120 -#define cpu_to_fec16 cpu_to_be16
121 -#define __fec32 __be32
122 -#define __fec16 __be16
123 -
124  struct bufdesc {
125 -       __fec16 cbd_sc;         /* Control and status info */
126 -       __fec16 cbd_datlen;     /* Data length */
127 -       __fec32 cbd_bufaddr;    /* Buffer address */
128 +       unsigned short  cbd_sc;                 /* Control and status info */
129 +       unsigned short  cbd_datlen;             /* Data length */
130 +       unsigned long   cbd_bufaddr;            /* Buffer address */
131  };
132  #endif
133  
134  struct bufdesc_ex {
135         struct bufdesc desc;
136 -       __fec32 cbd_esc;
137 -       __fec32 cbd_prot;
138 -       __fec32 cbd_bdu;
139 -       __fec32 ts;
140 -       __fec16 res0[4];
141 +       unsigned long cbd_esc;
142 +       unsigned long cbd_prot;
143 +       unsigned long cbd_bdu;
144 +       unsigned long ts;
145 +       unsigned short res0[4];
146  };
147  
148  /*
149 @@ -294,7 +277,7 @@ struct bufdesc_ex {
150  
151  
152  /* This device has up to three irqs on some platforms */
153 -#define FEC_IRQ_NUM            4
154 +#define FEC_IRQ_NUM            3
155  
156  /* Maximum number of queues supported
157   * ENET with AVB IP can support up to 3 independent tx queues and rx queues.
158 @@ -312,6 +295,12 @@ struct bufdesc_ex {
159  #define FEC_R_BUFF_SIZE(X)     (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
160                                 (((X) == 2) ? \
161                                         FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
162 +#define FEC_R_DES_ACTIVE(X)    (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
163 +                               (((X) == 2) ? \
164 +                                  FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
165 +#define FEC_X_DES_ACTIVE(X)    (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
166 +                               (((X) == 2) ? \
167 +                                  FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
168  
169  #define FEC_DMA_CFG(X)         (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
170  
171 @@ -379,7 +368,6 @@ struct bufdesc_ex {
172  #define FEC_ENET_TS_TIMER       ((uint)0x00008000)
173  
174  #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
175 -#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
176  #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
177  
178  #define FEC_ENET_ETHEREN       ((uint)0x00000002)
179 @@ -448,32 +436,12 @@ struct bufdesc_ex {
180  #define FEC_QUIRK_SINGLE_MDIO          (1 << 11)
181  /* Controller supports RACC register */
182  #define FEC_QUIRK_HAS_RACC             (1 << 12)
183 -/* Controller supports interrupt coalesc */
184 -#define FEC_QUIRK_HAS_COALESCE         (1 << 13)
185 -/* Interrupt doesn't wake CPU from deep idle */
186 -#define FEC_QUIRK_ERR006687            (1 << 14)
187  /*
188   * i.MX6Q/DL ENET cannot wake up system in wait mode because ENET tx & rx
189   * interrupt signal don't connect to GPC. So use pm qos to avoid cpu enter
190   * to wait mode.
191   */
192 -#define FEC_QUIRK_BUG_WAITMODE         (1 << 15)
193 -
194 -/* PHY fixup flag define */
195 -#define FEC_QUIRK_AR8031_FIXUP         (1 << 0)
196 -
197 -struct bufdesc_prop {
198 -       int qid;
199 -       /* Address of Rx and Tx buffers */
200 -       struct bufdesc  *base;
201 -       struct bufdesc  *last;
202 -       struct bufdesc  *cur;
203 -       void __iomem    *reg_desc_active;
204 -       dma_addr_t      dma;
205 -       unsigned short ring_size;
206 -       unsigned char dsize;
207 -       unsigned char dsize_log2;
208 -};
209 +#define FEC_QUIRK_BUG_WAITMODE         (1 << 13)
210  
211  struct fec_enet_stop_mode {
212         struct regmap *gpr;
213 @@ -482,21 +450,32 @@ struct fec_enet_stop_mode {
214  };
215  
216  struct fec_enet_priv_tx_q {
217 -       struct bufdesc_prop bd;
218 +       int index;
219         unsigned char *tx_bounce[TX_RING_SIZE];
220         struct  sk_buff *tx_skbuff[TX_RING_SIZE];
221  
222 +       dma_addr_t      bd_dma;
223 +       struct bufdesc  *tx_bd_base;
224 +       uint tx_ring_size;
225 +
226         unsigned short tx_stop_threshold;
227         unsigned short tx_wake_threshold;
228  
229 +       struct bufdesc  *cur_tx;
230         struct bufdesc  *dirty_tx;
231         char *tso_hdrs;
232         dma_addr_t tso_hdrs_dma;
233  };
234  
235  struct fec_enet_priv_rx_q {
236 -       struct bufdesc_prop bd;
237 +       int index;
238         struct  sk_buff *rx_skbuff[RX_RING_SIZE];
239 +
240 +       dma_addr_t      bd_dma;
241 +       struct bufdesc  *rx_bd_base;
242 +       uint rx_ring_size;
243 +
244 +       struct bufdesc  *cur_rx;
245  };
246  
247  /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
248 @@ -536,20 +515,22 @@ struct fec_enet_private {
249         unsigned long work_ts;
250         unsigned long work_mdio;
251  
252 +       unsigned short bufdesc_size;
253 +
254         struct  platform_device *pdev;
255  
256         int     dev_id;
257  
258         /* Phylib and MDIO interface */
259         struct  mii_bus *mii_bus;
260 +       struct  phy_device *phy_dev;
261         int     mii_timeout;
262         int     mii_bus_share;
263 -       bool    active_in_suspend;
264 +       bool    miibus_up_failed;
265         uint    phy_speed;
266         phy_interface_t phy_interface;
267         struct device_node *phy_node;
268         int     link;
269 -       bool    fixed_link;
270         int     full_duplex;
271         int     speed;
272         struct  completion mdio_done;
273 @@ -559,7 +540,8 @@ struct fec_enet_private {
274         int     wol_flag;
275         int     wake_irq;
276         u32     quirks;
277 -       u32     fixups;
278 +       int phy_reset_gpio;
279 +       int phy_reset_duration;
280  
281         struct  napi_struct napi;
282         int     csum_flags;
283 @@ -602,19 +584,14 @@ struct fec_enet_private {
284         int pps_enable;
285         unsigned int next_counter;
286  
287 -       u64 ethtool_stats[0];
288 -
289         struct fec_enet_stop_mode gpr;
290  };
291  
292  void fec_ptp_init(struct platform_device *pdev);
293 -void fec_ptp_stop(struct platform_device *pdev);
294  void fec_ptp_start_cyclecounter(struct net_device *ndev);
295  int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
296  int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
297  uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
298 -void fec_enet_register_fixup(struct net_device *ndev);
299 -int of_fec_enet_parse_fixup(struct device_node *np);
300  
301  /****************************************************************************/
302  #endif /* FEC_H */
303 diff --git a/drivers/net/ethernet/freescale/fec_fixup.c b/drivers/net/ethernet/freescale/fec_fixup.c
304 deleted file mode 100644
305 index 5a8497c..0000000
306 --- a/drivers/net/ethernet/freescale/fec_fixup.c
307 +++ /dev/null
308 @@ -1,74 +0,0 @@
309 -/*
310 - * Copyright 2017 NXP
311 - *
312 - * This program is free software; you can redistribute it and/or
313 - * modify it under the terms of the GNU General Public License
314 - * as published by the Free Software Foundation; either version 2
315 - * of the License, or (at your option) any later version.
316 - *
317 - * This program is distributed in the hope that it will be useful,
318 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
319 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
320 - * GNU General Public License for more details.
321 - */
322 -
323 -#include <linux/netdevice.h>
324 -#include <linux/phy.h>
325 -#include "fec.h"
326 -
327 -#define PHY_ID_AR8031   0x004dd074
328 -
329 -static int ar8031_phy_fixup(struct phy_device *dev)
330 -{
331 -       u16 val;
332 -
333 -       /* Set RGMII IO voltage to 1.8V */
334 -       phy_write(dev, 0x1d, 0x1f);
335 -       phy_write(dev, 0x1e, 0x8);
336 -
337 -       /* Disable phy AR8031 SmartEEE function */
338 -       phy_write(dev, 0xd, 0x3);
339 -       phy_write(dev, 0xe, 0x805d);
340 -       phy_write(dev, 0xd, 0x4003);
341 -       val = phy_read(dev, 0xe);
342 -       val &= ~(0x1 << 8);
343 -       phy_write(dev, 0xe, val);
344 -
345 -       /* Introduce tx clock delay */
346 -       phy_write(dev, 0x1d, 0x5);
347 -       phy_write(dev, 0x1e, 0x100);
348 -
349 -       return 0;
350 -}
351 -
352 -void fec_enet_register_fixup(struct net_device *ndev)
353 -{
354 -       struct fec_enet_private *fep = netdev_priv(ndev);
355 -       static int registered = 0;
356 -       int err;
357 -
358 -       if (!IS_BUILTIN(CONFIG_PHYLIB))
359 -               return;
360 -
361 -       if (fep->fixups & FEC_QUIRK_AR8031_FIXUP) {
362 -               static int ar8031_registered = 0;
363 -
364 -               if (ar8031_registered)
365 -                       return;
366 -               err = phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
367 -                                       ar8031_phy_fixup);
368 -               if (err)
369 -                       netdev_info(ndev, "Cannot register PHY board fixup\n");
370 -               registered = 1;
371 -       }
372 -}
373 -
374 -int of_fec_enet_parse_fixup(struct device_node *np)
375 -{
376 -       int fixups = 0;
377 -
378 -       if (of_get_property(np, "fsl,ar8031-phy-fixup", NULL))
379 -               fixups |= FEC_QUIRK_AR8031_FIXUP;
380 -
381 -       return fixups;
382 -}
383 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
384 index 41a31f2..15c06df 100644
385 --- a/drivers/net/ethernet/freescale/fec_main.c
386 +++ b/drivers/net/ethernet/freescale/fec_main.c
387 @@ -19,8 +19,6 @@
388   * Copyright (c) 2004-2006 Macq Electronique SA.
389   *
390   * Copyright (C) 2010-2014 Freescale Semiconductor, Inc.
391 - *
392 - * Copyright 2017 NXP
393   */
394  
395  #include <linux/module.h>
396 @@ -48,9 +46,7 @@
397  #include <linux/io.h>
398  #include <linux/irq.h>
399  #include <linux/clk.h>
400 -#include <linux/clk/clk-conf.h>
401  #include <linux/platform_device.h>
402 -#include <linux/mdio.h>
403  #include <linux/phy.h>
404  #include <linux/fec.h>
405  #include <linux/of.h>
406 @@ -68,12 +64,12 @@
407  #include <linux/regmap.h>
408  
409  #include <asm/cacheflush.h>
410 -#include <soc/imx/cpuidle.h>
411  
412  #include "fec.h"
413  
414  static void set_multicast_list(struct net_device *ndev);
415  static void fec_enet_itr_coal_init(struct net_device *ndev);
416 +static void fec_reset_phy(struct platform_device *pdev);
417  
418  #define DRIVER_NAME    "fec"
419  
420 @@ -87,7 +83,6 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {1, 1, 1, 1, 2, 2, 2, 2};
421  #define FEC_ENET_RAEM_V        0x8
422  #define FEC_ENET_RAFL_V        0x8
423  #define FEC_ENET_OPD_V 0xFFF0
424 -#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
425  
426  static struct platform_device_id fec_devtype[] = {
427         {
428 @@ -96,10 +91,10 @@ static struct platform_device_id fec_devtype[] = {
429                 .driver_data = 0,
430         }, {
431                 .name = "imx25-fec",
432 -               .driver_data = FEC_QUIRK_USE_GASKET,
433 +               .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
434         }, {
435                 .name = "imx27-fec",
436 -               .driver_data = 0,
437 +               .driver_data = FEC_QUIRK_HAS_RACC,
438         }, {
439                 .name = "imx28-fec",
440                 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
441 @@ -119,20 +114,12 @@ static struct platform_device_id fec_devtype[] = {
442                                 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
443                                 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
444                                 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
445 -                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
446 +                               FEC_QUIRK_HAS_RACC,
447         }, {
448                 .name = "imx6ul-fec",
449                 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
450                                 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
451 -                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
452 -                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
453 -       }, {
454 -               .name = "imx8qm-fec",
455 -               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
456 -                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
457 -                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
458 -                               FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
459 -                               FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
460 +                               FEC_QUIRK_HAS_VLAN,
461         }, {
462                 /* sentinel */
463         }
464 @@ -147,7 +134,6 @@ enum imx_fec_type {
465         MVF600_FEC,
466         IMX6SX_FEC,
467         IMX6UL_FEC,
468 -       IMX8QM_FEC,
469  };
470  
471  static const struct of_device_id fec_dt_ids[] = {
472 @@ -158,7 +144,6 @@ static const struct of_device_id fec_dt_ids[] = {
473         { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
474         { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
475         { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
476 -       { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
477         { /* sentinel */ }
478  };
479  MODULE_DEVICE_TABLE(of, fec_dt_ids);
480 @@ -196,7 +181,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
481  /* FEC receive acceleration */
482  #define FEC_RACC_IPDIS         (1 << 1)
483  #define FEC_RACC_PRODIS                (1 << 2)
484 -#define FEC_RACC_SHIFT16       BIT(7)
485  #define FEC_RACC_OPTIONS       (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
486  
487  /*
488 @@ -205,8 +189,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
489   * account when setting it.
490   */
491  #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
492 -    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
493 -    defined(CONFIG_ARM64)
494 +    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
495  #define        OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
496  #else
497  #define        OPT_FRAME_SIZE  0
498 @@ -244,38 +227,86 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
499  
500  #define IS_TSO_HEADER(txq, addr) \
501         ((addr >= txq->tso_hdrs_dma) && \
502 -       (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
503 +       (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
504  
505  static int mii_cnt;
506  
507 -static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
508 -                                            struct bufdesc_prop *bd)
509 -{
510 -       return (bdp >= bd->last) ? bd->base
511 -                       : (struct bufdesc *)(((void *)bdp) + bd->dsize);
512 -}
513 +static inline
514 +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
515 +                                     struct fec_enet_private *fep,
516 +                                     int queue_id)
517 +{
518 +       struct bufdesc *new_bd = bdp + 1;
519 +       struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
520 +       struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
521 +       struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
522 +       struct bufdesc_ex *ex_base;
523 +       struct bufdesc *base;
524 +       int ring_size;
525 +
526 +       if (bdp >= txq->tx_bd_base) {
527 +               base = txq->tx_bd_base;
528 +               ring_size = txq->tx_ring_size;
529 +               ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
530 +       } else {
531 +               base = rxq->rx_bd_base;
532 +               ring_size = rxq->rx_ring_size;
533 +               ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
534 +       }
535  
536 -static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
537 -                                            struct bufdesc_prop *bd)
538 -{
539 -       return (bdp <= bd->base) ? bd->last
540 -                       : (struct bufdesc *)(((void *)bdp) - bd->dsize);
541 +       if (fep->bufdesc_ex)
542 +               return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
543 +                       ex_base : ex_new_bd);
544 +       else
545 +               return (new_bd >= (base + ring_size)) ?
546 +                       base : new_bd;
547 +}
548 +
549 +static inline
550 +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
551 +                                     struct fec_enet_private *fep,
552 +                                     int queue_id)
553 +{
554 +       struct bufdesc *new_bd = bdp - 1;
555 +       struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
556 +       struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
557 +       struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
558 +       struct bufdesc_ex *ex_base;
559 +       struct bufdesc *base;
560 +       int ring_size;
561 +
562 +       if (bdp >= txq->tx_bd_base) {
563 +               base = txq->tx_bd_base;
564 +               ring_size = txq->tx_ring_size;
565 +               ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
566 +       } else {
567 +               base = rxq->rx_bd_base;
568 +               ring_size = rxq->rx_ring_size;
569 +               ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
570 +       }
571 +
572 +       if (fep->bufdesc_ex)
573 +               return (struct bufdesc *)((ex_new_bd < ex_base) ?
574 +                       (ex_new_bd + ring_size) : ex_new_bd);
575 +       else
576 +               return (new_bd < base) ? (new_bd + ring_size) : new_bd;
577  }
578  
579 -static int fec_enet_get_bd_index(struct bufdesc *bdp,
580 -                                struct bufdesc_prop *bd)
581 +static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
582 +                               struct fec_enet_private *fep)
583  {
584 -       return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
585 +       return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
586  }
587  
588 -static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
589 +static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
590 +                                       struct fec_enet_priv_tx_q *txq)
591  {
592         int entries;
593  
594 -       entries = (((const char *)txq->dirty_tx -
595 -                       (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
596 +       entries = ((const char *)txq->dirty_tx -
597 +                       (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
598  
599 -       return entries >= 0 ? entries : entries + txq->bd.ring_size;
600 +       return entries >= 0 ? entries : entries + txq->tx_ring_size;
601  }
602  
603  static void swap_buffer(void *bufaddr, int len)
604 @@ -308,20 +339,18 @@ static void fec_dump(struct net_device *ndev)
605         pr_info("Nr     SC     addr       len  SKB\n");
606  
607         txq = fep->tx_queue[0];
608 -       bdp = txq->bd.base;
609 +       bdp = txq->tx_bd_base;
610  
611         do {
612 -               pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
613 +               pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
614                         index,
615 -                       bdp == txq->bd.cur ? 'S' : ' ',
616 +                       bdp == txq->cur_tx ? 'S' : ' ',
617                         bdp == txq->dirty_tx ? 'H' : ' ',
618 -                       fec16_to_cpu(bdp->cbd_sc),
619 -                       fec32_to_cpu(bdp->cbd_bufaddr),
620 -                       fec16_to_cpu(bdp->cbd_datlen),
621 +                       bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
622                         txq->tx_skbuff[index]);
623 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
624 +               bdp = fec_enet_get_nextdesc(bdp, fep, 0);
625                 index++;
626 -       } while (bdp != txq->bd.base);
627 +       } while (bdp != txq->tx_bd_base);
628  }
629  
630  static inline bool is_ipv4_pkt(struct sk_buff *skb)
631 @@ -352,9 +381,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
632                              struct net_device *ndev)
633  {
634         struct fec_enet_private *fep = netdev_priv(ndev);
635 -       struct bufdesc *bdp = txq->bd.cur;
636 +       struct bufdesc *bdp = txq->cur_tx;
637         struct bufdesc_ex *ebdp;
638         int nr_frags = skb_shinfo(skb)->nr_frags;
639 +       unsigned short queue = skb_get_queue_mapping(skb);
640         int frag, frag_len;
641         unsigned short status;
642         unsigned int estatus = 0;
643 @@ -366,10 +396,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
644  
645         for (frag = 0; frag < nr_frags; frag++) {
646                 this_frag = &skb_shinfo(skb)->frags[frag];
647 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
648 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
649                 ebdp = (struct bufdesc_ex *)bdp;
650  
651 -               status = fec16_to_cpu(bdp->cbd_sc);
652 +               status = bdp->cbd_sc;
653                 status &= ~BD_ENET_TX_STATS;
654                 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
655                 frag_len = skb_shinfo(skb)->frags[frag].size;
656 @@ -387,16 +417,16 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
657  
658                 if (fep->bufdesc_ex) {
659                         if (fep->quirks & FEC_QUIRK_HAS_AVB)
660 -                               estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
661 +                               estatus |= FEC_TX_BD_FTYPE(queue);
662                         if (skb->ip_summed == CHECKSUM_PARTIAL)
663                                 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
664                         ebdp->cbd_bdu = 0;
665 -                       ebdp->cbd_esc = cpu_to_fec32(estatus);
666 +                       ebdp->cbd_esc = estatus;
667                 }
668  
669                 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
670  
671 -               index = fec_enet_get_bd_index(bdp, &txq->bd);
672 +               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
673                 if (((unsigned long) bufaddr) & fep->tx_align ||
674                         fep->quirks & FEC_QUIRK_SWAP_FRAME) {
675                         memcpy(txq->tx_bounce[index], bufaddr, frag_len);
676 @@ -409,27 +439,24 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
677                 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
678                                       DMA_TO_DEVICE);
679                 if (dma_mapping_error(&fep->pdev->dev, addr)) {
680 +                       dev_kfree_skb_any(skb);
681                         if (net_ratelimit())
682                                 netdev_err(ndev, "Tx DMA memory map failed\n");
683                         goto dma_mapping_error;
684                 }
685  
686 -               bdp->cbd_bufaddr = cpu_to_fec32(addr);
687 -               bdp->cbd_datlen = cpu_to_fec16(frag_len);
688 -               /* Make sure the updates to rest of the descriptor are
689 -                * performed before transferring ownership.
690 -                */
691 -               wmb();
692 -               bdp->cbd_sc = cpu_to_fec16(status);
693 +               bdp->cbd_bufaddr = addr;
694 +               bdp->cbd_datlen = frag_len;
695 +               bdp->cbd_sc = status;
696         }
697  
698         return bdp;
699  dma_mapping_error:
700 -       bdp = txq->bd.cur;
701 +       bdp = txq->cur_tx;
702         for (i = 0; i < frag; i++) {
703 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
704 -               dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
705 -                                fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
706 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
707 +               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
708 +                               bdp->cbd_datlen, DMA_TO_DEVICE);
709         }
710         return ERR_PTR(-ENOMEM);
711  }
712 @@ -444,11 +471,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
713         dma_addr_t addr;
714         unsigned short status;
715         unsigned short buflen;
716 +       unsigned short queue;
717         unsigned int estatus = 0;
718         unsigned int index;
719         int entries_free;
720  
721 -       entries_free = fec_enet_get_free_txdesc_num(txq);
722 +       entries_free = fec_enet_get_free_txdesc_num(fep, txq);
723         if (entries_free < MAX_SKB_FRAGS + 1) {
724                 dev_kfree_skb_any(skb);
725                 if (net_ratelimit())
726 @@ -463,16 +491,17 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
727         }
728  
729         /* Fill in a Tx ring entry */
730 -       bdp = txq->bd.cur;
731 +       bdp = txq->cur_tx;
732         last_bdp = bdp;
733 -       status = fec16_to_cpu(bdp->cbd_sc);
734 +       status = bdp->cbd_sc;
735         status &= ~BD_ENET_TX_STATS;
736  
737         /* Set buffer length and buffer pointer */
738         bufaddr = skb->data;
739         buflen = skb_headlen(skb);
740  
741 -       index = fec_enet_get_bd_index(bdp, &txq->bd);
742 +       queue = skb_get_queue_mapping(skb);
743 +       index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
744         if (((unsigned long) bufaddr) & fep->tx_align ||
745                 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
746                 memcpy(txq->tx_bounce[index], skb->data, buflen);
747 @@ -493,12 +522,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
748  
749         if (nr_frags) {
750                 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
751 -               if (IS_ERR(last_bdp)) {
752 -                       dma_unmap_single(&fep->pdev->dev, addr,
753 -                                        buflen, DMA_TO_DEVICE);
754 -                       dev_kfree_skb_any(skb);
755 +               if (IS_ERR(last_bdp))
756                         return NETDEV_TX_OK;
757 -               }
758         } else {
759                 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
760                 if (fep->bufdesc_ex) {
761 @@ -508,8 +533,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
762                                 estatus |= BD_ENET_TX_TS;
763                 }
764         }
765 -       bdp->cbd_bufaddr = cpu_to_fec32(addr);
766 -       bdp->cbd_datlen = cpu_to_fec16(buflen);
767  
768         if (fep->bufdesc_ex) {
769  
770 @@ -520,43 +543,41 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
771                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
772  
773                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
774 -                       estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
775 +                       estatus |= FEC_TX_BD_FTYPE(queue);
776  
777                 if (skb->ip_summed == CHECKSUM_PARTIAL)
778                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
779  
780                 ebdp->cbd_bdu = 0;
781 -               ebdp->cbd_esc = cpu_to_fec32(estatus);
782 +               ebdp->cbd_esc = estatus;
783         }
784  
785 -       index = fec_enet_get_bd_index(last_bdp, &txq->bd);
786 +       index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
787         /* Save skb pointer */
788         txq->tx_skbuff[index] = skb;
789  
790 -       /* Make sure the updates to rest of the descriptor are performed before
791 -        * transferring ownership.
792 -        */
793 -       wmb();
794 +       bdp->cbd_datlen = buflen;
795 +       bdp->cbd_bufaddr = addr;
796  
797         /* Send it on its way.  Tell FEC it's ready, interrupt when done,
798          * it's the last BD of the frame, and to put the CRC on the end.
799          */
800         status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
801 -       bdp->cbd_sc = cpu_to_fec16(status);
802 +       bdp->cbd_sc = status;
803  
804         /* If this was the last BD in the ring, start at the beginning again. */
805 -       bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
806 +       bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
807  
808         skb_tx_timestamp(skb);
809  
810         /* Make sure the update to bdp and tx_skbuff are performed before
811 -        * txq->bd.cur.
812 +        * cur_tx.
813          */
814         wmb();
815 -       txq->bd.cur = bdp;
816 +       txq->cur_tx = bdp;
817  
818         /* Trigger transmission start */
819 -       writel(0, txq->bd.reg_desc_active);
820 +       writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
821  
822         return 0;
823  }
824 @@ -569,11 +590,12 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
825  {
826         struct fec_enet_private *fep = netdev_priv(ndev);
827         struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
828 +       unsigned short queue = skb_get_queue_mapping(skb);
829         unsigned short status;
830         unsigned int estatus = 0;
831         dma_addr_t addr;
832  
833 -       status = fec16_to_cpu(bdp->cbd_sc);
834 +       status = bdp->cbd_sc;
835         status &= ~BD_ENET_TX_STATS;
836  
837         status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
838 @@ -595,16 +617,16 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
839                 return NETDEV_TX_BUSY;
840         }
841  
842 -       bdp->cbd_datlen = cpu_to_fec16(size);
843 -       bdp->cbd_bufaddr = cpu_to_fec32(addr);
844 +       bdp->cbd_datlen = size;
845 +       bdp->cbd_bufaddr = addr;
846  
847         if (fep->bufdesc_ex) {
848                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
849 -                       estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
850 +                       estatus |= FEC_TX_BD_FTYPE(queue);
851                 if (skb->ip_summed == CHECKSUM_PARTIAL)
852                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
853                 ebdp->cbd_bdu = 0;
854 -               ebdp->cbd_esc = cpu_to_fec32(estatus);
855 +               ebdp->cbd_esc = estatus;
856         }
857  
858         /* Handle the last BD specially */
859 @@ -613,10 +635,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
860         if (is_last) {
861                 status |= BD_ENET_TX_INTR;
862                 if (fep->bufdesc_ex)
863 -                       ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
864 +                       ebdp->cbd_esc |= BD_ENET_TX_INT;
865         }
866  
867 -       bdp->cbd_sc = cpu_to_fec16(status);
868 +       bdp->cbd_sc = status;
869  
870         return 0;
871  }
872 @@ -629,12 +651,13 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
873         struct fec_enet_private *fep = netdev_priv(ndev);
874         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
875         struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
876 +       unsigned short queue = skb_get_queue_mapping(skb);
877         void *bufaddr;
878         unsigned long dmabuf;
879         unsigned short status;
880         unsigned int estatus = 0;
881  
882 -       status = fec16_to_cpu(bdp->cbd_sc);
883 +       status = bdp->cbd_sc;
884         status &= ~BD_ENET_TX_STATS;
885         status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
886  
887 @@ -658,19 +681,19 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
888                 }
889         }
890  
891 -       bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
892 -       bdp->cbd_datlen = cpu_to_fec16(hdr_len);
893 +       bdp->cbd_bufaddr = dmabuf;
894 +       bdp->cbd_datlen = hdr_len;
895  
896         if (fep->bufdesc_ex) {
897                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
898 -                       estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
899 +                       estatus |= FEC_TX_BD_FTYPE(queue);
900                 if (skb->ip_summed == CHECKSUM_PARTIAL)
901                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
902                 ebdp->cbd_bdu = 0;
903 -               ebdp->cbd_esc = cpu_to_fec32(estatus);
904 +               ebdp->cbd_esc = estatus;
905         }
906  
907 -       bdp->cbd_sc = cpu_to_fec16(status);
908 +       bdp->cbd_sc = status;
909  
910         return 0;
911  }
912 @@ -682,12 +705,13 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
913         struct fec_enet_private *fep = netdev_priv(ndev);
914         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
915         int total_len, data_left;
916 -       struct bufdesc *bdp = txq->bd.cur;
917 +       struct bufdesc *bdp = txq->cur_tx;
918 +       unsigned short queue = skb_get_queue_mapping(skb);
919         struct tso_t tso;
920         unsigned int index = 0;
921         int ret;
922  
923 -       if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
924 +       if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
925                 dev_kfree_skb_any(skb);
926                 if (net_ratelimit())
927                         netdev_err(ndev, "NOT enough BD for TSO!\n");
928 @@ -707,7 +731,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
929         while (total_len > 0) {
930                 char *hdr;
931  
932 -               index = fec_enet_get_bd_index(bdp, &txq->bd);
933 +               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
934                 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
935                 total_len -= data_left;
936  
937 @@ -722,8 +746,9 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
938                         int size;
939  
940                         size = min_t(int, tso.size, data_left);
941 -                       bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
942 -                       index = fec_enet_get_bd_index(bdp, &txq->bd);
943 +                       bdp = fec_enet_get_nextdesc(bdp, fep, queue);
944 +                       index = fec_enet_get_bd_index(txq->tx_bd_base,
945 +                                                     bdp, fep);
946                         ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
947                                                         bdp, index,
948                                                         tso.data, size,
949 @@ -736,22 +761,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
950                         tso_build_data(skb, &tso, size);
951                 }
952  
953 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
954 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
955         }
956  
957         /* Save skb pointer */
958         txq->tx_skbuff[index] = skb;
959  
960         skb_tx_timestamp(skb);
961 -       txq->bd.cur = bdp;
962 +       txq->cur_tx = bdp;
963  
964         /* Trigger transmission start */
965         if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
966 -           !readl(txq->bd.reg_desc_active) ||
967 -           !readl(txq->bd.reg_desc_active) ||
968 -           !readl(txq->bd.reg_desc_active) ||
969 -           !readl(txq->bd.reg_desc_active))
970 -               writel(0, txq->bd.reg_desc_active);
971 +           !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
972 +           !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
973 +           !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
974 +           !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
975 +               writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
976  
977         return 0;
978  
979 @@ -781,7 +806,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
980         if (ret)
981                 return ret;
982  
983 -       entries_free = fec_enet_get_free_txdesc_num(txq);
984 +       entries_free = fec_enet_get_free_txdesc_num(fep, txq);
985         if (entries_free <= txq->tx_stop_threshold)
986                 netif_tx_stop_queue(nq);
987  
988 @@ -802,45 +827,45 @@ static void fec_enet_bd_init(struct net_device *dev)
989         for (q = 0; q < fep->num_rx_queues; q++) {
990                 /* Initialize the receive buffer descriptors. */
991                 rxq = fep->rx_queue[q];
992 -               bdp = rxq->bd.base;
993 +               bdp = rxq->rx_bd_base;
994  
995 -               for (i = 0; i < rxq->bd.ring_size; i++) {
996 +               for (i = 0; i < rxq->rx_ring_size; i++) {
997  
998                         /* Initialize the BD for every fragment in the page. */
999                         if (bdp->cbd_bufaddr)
1000 -                               bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
1001 +                               bdp->cbd_sc = BD_ENET_RX_EMPTY;
1002                         else
1003 -                               bdp->cbd_sc = cpu_to_fec16(0);
1004 -                       bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1005 +                               bdp->cbd_sc = 0;
1006 +                       bdp = fec_enet_get_nextdesc(bdp, fep, q);
1007                 }
1008  
1009                 /* Set the last buffer to wrap */
1010 -               bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
1011 -               bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1012 +               bdp = fec_enet_get_prevdesc(bdp, fep, q);
1013 +               bdp->cbd_sc |= BD_SC_WRAP;
1014  
1015 -               rxq->bd.cur = rxq->bd.base;
1016 +               rxq->cur_rx = rxq->rx_bd_base;
1017         }
1018  
1019         for (q = 0; q < fep->num_tx_queues; q++) {
1020                 /* ...and the same for transmit */
1021                 txq = fep->tx_queue[q];
1022 -               bdp = txq->bd.base;
1023 -               txq->bd.cur = bdp;
1024 +               bdp = txq->tx_bd_base;
1025 +               txq->cur_tx = bdp;
1026  
1027 -               for (i = 0; i < txq->bd.ring_size; i++) {
1028 +               for (i = 0; i < txq->tx_ring_size; i++) {
1029                         /* Initialize the BD for every fragment in the page. */
1030 -                       bdp->cbd_sc = cpu_to_fec16(0);
1031 +                       bdp->cbd_sc = 0;
1032                         if (txq->tx_skbuff[i]) {
1033                                 dev_kfree_skb_any(txq->tx_skbuff[i]);
1034                                 txq->tx_skbuff[i] = NULL;
1035                         }
1036 -                       bdp->cbd_bufaddr = cpu_to_fec32(0);
1037 -                       bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1038 +                       bdp->cbd_bufaddr = 0;
1039 +                       bdp = fec_enet_get_nextdesc(bdp, fep, q);
1040                 }
1041  
1042                 /* Set the last buffer to wrap */
1043 -               bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
1044 -               bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1045 +               bdp = fec_enet_get_prevdesc(bdp, fep, q);
1046 +               bdp->cbd_sc |= BD_SC_WRAP;
1047                 txq->dirty_tx = bdp;
1048         }
1049  }
1050 @@ -851,7 +876,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
1051         int i;
1052  
1053         for (i = 0; i < fep->num_rx_queues; i++)
1054 -               writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1055 +               writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
1056  }
1057  
1058  static void fec_enet_enable_ring(struct net_device *ndev)
1059 @@ -863,7 +888,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
1060  
1061         for (i = 0; i < fep->num_rx_queues; i++) {
1062                 rxq = fep->rx_queue[i];
1063 -               writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1064 +               writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
1065                 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
1066  
1067                 /* enable DMA1/2 */
1068 @@ -874,7 +899,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
1069  
1070         for (i = 0; i < fep->num_tx_queues; i++) {
1071                 txq = fep->tx_queue[i];
1072 -               writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1073 +               writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
1074  
1075                 /* enable DMA1/2 */
1076                 if (i)
1077 @@ -892,7 +917,7 @@ static void fec_enet_reset_skb(struct net_device *ndev)
1078         for (i = 0; i < fep->num_tx_queues; i++) {
1079                 txq = fep->tx_queue[i];
1080  
1081 -               for (j = 0; j < txq->bd.ring_size; j++) {
1082 +               for (j = 0; j < txq->tx_ring_size; j++) {
1083                         if (txq->tx_skbuff[j]) {
1084                                 dev_kfree_skb_any(txq->tx_skbuff[j]);
1085                                 txq->tx_skbuff[j] = NULL;
1086 @@ -930,11 +955,11 @@ fec_restart(struct net_device *ndev)
1087          * enet-mac reset will reset mac address registers too,
1088          * so need to reconfigure it.
1089          */
1090 -       memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1091 -       writel((__force u32)cpu_to_be32(temp_mac[0]),
1092 -              fep->hwp + FEC_ADDR_LOW);
1093 -       writel((__force u32)cpu_to_be32(temp_mac[1]),
1094 -              fep->hwp + FEC_ADDR_HIGH);
1095 +       if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1096 +               memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1097 +               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
1098 +               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
1099 +       }
1100  
1101         /* Clear any outstanding interrupt. */
1102         writel(0xffffffff, fep->hwp + FEC_IEVENT);
1103 @@ -961,16 +986,13 @@ fec_restart(struct net_device *ndev)
1104  
1105  #if !defined(CONFIG_M5272)
1106         if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1107 +               /* set RX checksum */
1108                 val = readl(fep->hwp + FEC_RACC);
1109 -               /* align IP header */
1110 -               val |= FEC_RACC_SHIFT16;
1111                 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1112 -                       /* set RX checksum */
1113                         val |= FEC_RACC_OPTIONS;
1114                 else
1115                         val &= ~FEC_RACC_OPTIONS;
1116                 writel(val, fep->hwp + FEC_RACC);
1117 -               writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1118         }
1119         writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1120  #endif
1121 @@ -995,10 +1017,10 @@ fec_restart(struct net_device *ndev)
1122                         rcntl &= ~(1 << 8);
1123  
1124                 /* 1G, 100M or 10M */
1125 -               if (ndev->phydev) {
1126 -                       if (ndev->phydev->speed == SPEED_1000)
1127 +               if (fep->phy_dev) {
1128 +                       if (fep->phy_dev->speed == SPEED_1000)
1129                                 ecntl |= (1 << 5);
1130 -                       else if (ndev->phydev->speed == SPEED_100)
1131 +                       else if (fep->phy_dev->speed == SPEED_100)
1132                                 rcntl &= ~(1 << 9);
1133                         else
1134                                 rcntl |= (1 << 9);
1135 @@ -1019,7 +1041,7 @@ fec_restart(struct net_device *ndev)
1136                          */
1137                         cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1138                                 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1139 -                       if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1140 +                       if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
1141                                 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1142                         writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1143  
1144 @@ -1033,7 +1055,7 @@ fec_restart(struct net_device *ndev)
1145         /* enable pause frame*/
1146         if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1147             ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1148 -            ndev->phydev && ndev->phydev->pause)) {
1149 +            fep->phy_dev && fep->phy_dev->pause)) {
1150                 rcntl |= FEC_ENET_FCE;
1151  
1152                 /* set FIFO threshold parameter to reduce overrun */
1153 @@ -1213,12 +1235,13 @@ static void
1154  fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1155  {
1156         struct  fec_enet_private *fep;
1157 -       struct bufdesc *bdp;
1158 +       struct bufdesc *bdp, *bdp_t;
1159         unsigned short status;
1160         struct  sk_buff *skb;
1161         struct fec_enet_priv_tx_q *txq;
1162         struct netdev_queue *nq;
1163         int     index = 0;
1164 +       int     i, bdnum;
1165         int     entries_free;
1166  
1167         fep = netdev_priv(ndev);
1168 @@ -1231,27 +1254,37 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1169         bdp = txq->dirty_tx;
1170  
1171         /* get next bdp of dirty_tx */
1172 -       bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1173 +       bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1174  
1175 -       while (bdp != READ_ONCE(txq->bd.cur)) {
1176 -               /* Order the load of bd.cur and cbd_sc */
1177 +       while (bdp != READ_ONCE(txq->cur_tx)) {
1178 +               /* Order the load of cur_tx and cbd_sc */
1179                 rmb();
1180 -               status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1181 +               status = READ_ONCE(bdp->cbd_sc);
1182                 if (status & BD_ENET_TX_READY)
1183                         break;
1184  
1185 -               index = fec_enet_get_bd_index(bdp, &txq->bd);
1186 -
1187 +               bdp_t = bdp;
1188 +               bdnum = 1;
1189 +               index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1190                 skb = txq->tx_skbuff[index];
1191 +               while (!skb) {
1192 +                       bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1193 +                       index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1194 +                       skb = txq->tx_skbuff[index];
1195 +                       bdnum++;
1196 +               }
1197 +               if ((status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1198 +                       break;
1199 +
1200 +               for (i = 0; i < bdnum; i++) {
1201 +                       if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1202 +                               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1203 +                                                bdp->cbd_datlen, DMA_TO_DEVICE);
1204 +                       bdp->cbd_bufaddr = 0;
1205 +                       if (i < bdnum - 1)
1206 +                               bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1207 +               }
1208                 txq->tx_skbuff[index] = NULL;
1209 -               if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1210 -                       dma_unmap_single(&fep->pdev->dev,
1211 -                                        fec32_to_cpu(bdp->cbd_bufaddr),
1212 -                                        fec16_to_cpu(bdp->cbd_datlen),
1213 -                                        DMA_TO_DEVICE);
1214 -               bdp->cbd_bufaddr = cpu_to_fec32(0);
1215 -               if (!skb)
1216 -                       goto skb_done;
1217  
1218                 /* Check for errors. */
1219                 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1220 @@ -1278,7 +1311,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1221                         struct skb_shared_hwtstamps shhwtstamps;
1222                         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1223  
1224 -                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1225 +                       fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
1226                         skb_tstamp_tx(skb, &shhwtstamps);
1227                 }
1228  
1229 @@ -1290,7 +1323,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1230  
1231                 /* Free the sk buffer associated with this last transmit */
1232                 dev_kfree_skb_any(skb);
1233 -skb_done:
1234 +
1235                 /* Make sure the update to bdp and tx_skbuff are performed
1236                  * before dirty_tx
1237                  */
1238 @@ -1298,21 +1331,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1239                 txq->dirty_tx = bdp;
1240  
1241                 /* Update pointer to next buffer descriptor to be transmitted */
1242 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1243 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1244  
1245                 /* Since we have freed up a buffer, the ring is no longer full
1246                  */
1247                 if (netif_queue_stopped(ndev)) {
1248 -                       entries_free = fec_enet_get_free_txdesc_num(txq);
1249 +                       entries_free = fec_enet_get_free_txdesc_num(fep, txq);
1250                         if (entries_free >= txq->tx_wake_threshold)
1251                                 netif_tx_wake_queue(nq);
1252                 }
1253         }
1254  
1255         /* ERR006538: Keep the transmitter going */
1256 -       if (bdp != txq->bd.cur &&
1257 -           readl(txq->bd.reg_desc_active) == 0)
1258 -               writel(0, txq->bd.reg_desc_active);
1259 +       if (bdp != txq->cur_tx &&
1260 +           readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
1261 +               writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
1262  }
1263  
1264  static void
1265 @@ -1338,8 +1371,10 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
1266         if (off)
1267                 skb_reserve(skb, fep->rx_align + 1 - off);
1268  
1269 -       bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
1270 -       if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1271 +       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
1272 +                                         FEC_ENET_RX_FRSIZE - fep->rx_align,
1273 +                                         DMA_FROM_DEVICE);
1274 +       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
1275                 if (net_ratelimit())
1276                         netdev_err(ndev, "Rx DMA memory map failed\n");
1277                 return -ENOMEM;
1278 @@ -1361,8 +1396,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1279         if (!new_skb)
1280                 return false;
1281  
1282 -       dma_sync_single_for_cpu(&fep->pdev->dev,
1283 -                               fec32_to_cpu(bdp->cbd_bufaddr),
1284 +       dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
1285                                 FEC_ENET_RX_FRSIZE - fep->rx_align,
1286                                 DMA_FROM_DEVICE);
1287         if (!swap)
1288 @@ -1374,7 +1408,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1289         return true;
1290  }
1291  
1292 -/* During a receive, the bd_rx.cur points to the current incoming buffer.
1293 +/* During a receive, the cur_rx points to the current incoming buffer.
1294   * When we update through the ring, if the next incoming buffer has
1295   * not been given to the system, we just set the empty indicator,
1296   * effectively tossing the packet.
1297 @@ -1407,9 +1441,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1298         /* First, grab all of the stats for the incoming packet.
1299          * These get messed up if we get called due to a busy condition.
1300          */
1301 -       bdp = rxq->bd.cur;
1302 +       bdp = rxq->cur_rx;
1303  
1304 -       while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1305 +       while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
1306  
1307                 if (pkt_received >= budget)
1308                         break;
1309 @@ -1445,10 +1479,10 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1310  
1311                 /* Process the incoming frame. */
1312                 ndev->stats.rx_packets++;
1313 -               pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1314 +               pkt_len = bdp->cbd_datlen;
1315                 ndev->stats.rx_bytes += pkt_len;
1316  
1317 -               index = fec_enet_get_bd_index(bdp, &rxq->bd);
1318 +               index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
1319                 skb = rxq->rx_skbuff[index];
1320  
1321                 /* The packet length includes FCS, but we don't want to
1322 @@ -1463,8 +1497,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1323                                 ndev->stats.rx_dropped++;
1324                                 goto rx_processing_done;
1325                         }
1326 -                       dma_unmap_single(&fep->pdev->dev,
1327 -                                        fec32_to_cpu(bdp->cbd_bufaddr),
1328 +                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1329                                          FEC_ENET_RX_FRSIZE - fep->rx_align,
1330                                          DMA_FROM_DEVICE);
1331                 }
1332 @@ -1472,15 +1505,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1333                 prefetch(skb->data - NET_IP_ALIGN);
1334                 skb_put(skb, pkt_len - 4);
1335                 data = skb->data;
1336 -
1337                 if (!is_copybreak && need_swap)
1338                         swap_buffer(data, pkt_len);
1339  
1340 -#if !defined(CONFIG_M5272)
1341 -               if (fep->quirks & FEC_QUIRK_HAS_RACC)
1342 -                       data = skb_pull_inline(skb, 2);
1343 -#endif
1344 -
1345                 /* Extract the enhanced buffer descriptor */
1346                 ebdp = NULL;
1347                 if (fep->bufdesc_ex)
1348 @@ -1489,8 +1516,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1349                 /* If this is a VLAN packet remove the VLAN Tag */
1350                 vlan_packet_rcvd = false;
1351                 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1352 -                   fep->bufdesc_ex &&
1353 -                   (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1354 +                       fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
1355                         /* Push and remove the vlan tag */
1356                         struct vlan_hdr *vlan_header =
1357                                         (struct vlan_hdr *) (data + ETH_HLEN);
1358 @@ -1506,12 +1532,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1359  
1360                 /* Get receive timestamp from the skb */
1361                 if (fep->hwts_rx_en && fep->bufdesc_ex)
1362 -                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1363 +                       fec_enet_hwtstamp(fep, ebdp->ts,
1364                                           skb_hwtstamps(skb));
1365  
1366                 if (fep->bufdesc_ex &&
1367                     (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1368 -                       if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1369 +                       if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
1370                                 /* don't check it */
1371                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1372                         } else {
1373 @@ -1528,8 +1554,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1374                 napi_gro_receive(&fep->napi, skb);
1375  
1376                 if (is_copybreak) {
1377 -                       dma_sync_single_for_device(&fep->pdev->dev,
1378 -                                                  fec32_to_cpu(bdp->cbd_bufaddr),
1379 +                       dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
1380                                                    FEC_ENET_RX_FRSIZE - fep->rx_align,
1381                                                    DMA_FROM_DEVICE);
1382                 } else {
1383 @@ -1543,30 +1568,26 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1384  
1385                 /* Mark the buffer empty */
1386                 status |= BD_ENET_RX_EMPTY;
1387 +               bdp->cbd_sc = status;
1388  
1389                 if (fep->bufdesc_ex) {
1390                         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1391  
1392 -                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1393 +                       ebdp->cbd_esc = BD_ENET_RX_INT;
1394                         ebdp->cbd_prot = 0;
1395                         ebdp->cbd_bdu = 0;
1396                 }
1397 -               /* Make sure the updates to rest of the descriptor are
1398 -                * performed before transferring ownership.
1399 -                */
1400 -               wmb();
1401 -               bdp->cbd_sc = cpu_to_fec16(status);
1402  
1403                 /* Update BD pointer to next entry */
1404 -               bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1405 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1406  
1407                 /* Doing this here will keep the FEC running while we process
1408                  * incoming frames.  On a heavily loaded network, we should be
1409                  * able to keep up at the expense of system resources.
1410                  */
1411 -               writel(0, rxq->bd.reg_desc_active);
1412 +               writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
1413         }
1414 -       rxq->bd.cur = bdp;
1415 +       rxq->cur_rx = bdp;
1416         return pkt_received;
1417  }
1418  
1419 @@ -1578,15 +1599,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
1420         struct fec_enet_private *fep = netdev_priv(ndev);
1421  
1422         for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
1423 -               int ret;
1424 -
1425 -               ret = fec_enet_rx_queue(ndev,
1426 +               clear_bit(queue_id, &fep->work_rx);
1427 +               pkt_received += fec_enet_rx_queue(ndev,
1428                                         budget - pkt_received, queue_id);
1429 -
1430 -               if (ret < budget - pkt_received)
1431 -                       clear_bit(queue_id, &fep->work_rx);
1432 -
1433 -               pkt_received += ret;
1434         }
1435         return pkt_received;
1436  }
1437 @@ -1631,7 +1646,7 @@ fec_enet_interrupt(int irq, void *dev_id)
1438  
1439                 if (napi_schedule_prep(&fep->napi)) {
1440                         /* Disable the NAPI interrupts */
1441 -                       writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
1442 +                       writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1443                         __napi_schedule(&fep->napi);
1444                 }
1445         }
1446 @@ -1742,7 +1757,7 @@ static void fec_get_mac(struct net_device *ndev)
1447  static void fec_enet_adjust_link(struct net_device *ndev)
1448  {
1449         struct fec_enet_private *fep = netdev_priv(ndev);
1450 -       struct phy_device *phy_dev = ndev->phydev;
1451 +       struct phy_device *phy_dev = fep->phy_dev;
1452         int status_change = 0;
1453  
1454         /* Prevent a state halted on mii error */
1455 @@ -1802,16 +1817,10 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1456  static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1457  {
1458         struct fec_enet_private *fep = bus->priv;
1459 -       struct device *dev = &fep->pdev->dev;
1460         unsigned long time_left;
1461 -       int ret = 0;
1462 -
1463 -       ret = pm_runtime_get_sync(dev);
1464 -       if (ret < 0)
1465 -               return ret;
1466  
1467         fep->mii_timeout = 0;
1468 -       reinit_completion(&fep->mdio_done);
1469 +       init_completion(&fep->mdio_done);
1470  
1471         /* start a read op */
1472         writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
1473 @@ -1824,35 +1833,21 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1474         if (time_left == 0) {
1475                 fep->mii_timeout = 1;
1476                 netdev_err(fep->netdev, "MDIO read timeout\n");
1477 -               ret = -ETIMEDOUT;
1478 -               goto out;
1479 +               return -ETIMEDOUT;
1480         }
1481  
1482 -       ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1483 -
1484 -out:
1485 -       pm_runtime_mark_last_busy(dev);
1486 -       pm_runtime_put_autosuspend(dev);
1487 -
1488 -       return ret;
1489 +       /* return value */
1490 +       return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1491  }
1492  
1493  static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1494                            u16 value)
1495  {
1496         struct fec_enet_private *fep = bus->priv;
1497 -       struct device *dev = &fep->pdev->dev;
1498         unsigned long time_left;
1499 -       int ret;
1500 -
1501 -       ret = pm_runtime_get_sync(dev);
1502 -       if (ret < 0)
1503 -               return ret;
1504 -       else
1505 -               ret = 0;
1506  
1507         fep->mii_timeout = 0;
1508 -       reinit_completion(&fep->mdio_done);
1509 +       init_completion(&fep->mdio_done);
1510  
1511         /* start a write op */
1512         writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
1513 @@ -1866,13 +1861,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1514         if (time_left == 0) {
1515                 fep->mii_timeout = 1;
1516                 netdev_err(fep->netdev, "MDIO write timeout\n");
1517 -               ret  = -ETIMEDOUT;
1518 +               return -ETIMEDOUT;
1519         }
1520  
1521 -       pm_runtime_mark_last_busy(dev);
1522 -       pm_runtime_put_autosuspend(dev);
1523 -
1524 -       return ret;
1525 +       return 0;
1526  }
1527  
1528  static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1529 @@ -1881,10 +1873,18 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1530         int ret;
1531  
1532         if (enable) {
1533 +               ret = clk_prepare_enable(fep->clk_ahb);
1534 +               if (ret)
1535 +                       return ret;
1536 +               ret = clk_prepare_enable(fep->clk_ipg);
1537 +               if (ret)
1538 +                       goto failed_clk_ipg;
1539                 if (fep->clk_enet_out) {
1540                         ret = clk_prepare_enable(fep->clk_enet_out);
1541                         if (ret)
1542 -                               return ret;
1543 +                               goto failed_clk_enet_out;
1544 +
1545 +                       fec_reset_phy(fep->pdev);
1546                 }
1547                 if (fep->clk_ptp) {
1548                         mutex_lock(&fep->ptp_clk_mutex);
1549 @@ -1903,6 +1903,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1550                                 goto failed_clk_ref;
1551                 }
1552         } else {
1553 +               clk_disable_unprepare(fep->clk_ahb);
1554 +               clk_disable_unprepare(fep->clk_ipg);
1555                 if (fep->clk_enet_out)
1556                         clk_disable_unprepare(fep->clk_enet_out);
1557                 if (fep->clk_ptp) {
1558 @@ -1923,27 +1925,23 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1559  failed_clk_ptp:
1560         if (fep->clk_enet_out)
1561                 clk_disable_unprepare(fep->clk_enet_out);
1562 +failed_clk_enet_out:
1563 +               clk_disable_unprepare(fep->clk_ipg);
1564 +failed_clk_ipg:
1565 +               clk_disable_unprepare(fep->clk_ahb);
1566  
1567         return ret;
1568  }
1569  
1570 -static int fec_restore_mii_bus(struct net_device *ndev)
1571 +static void fec_restore_mii_bus(struct net_device *ndev)
1572  {
1573         struct fec_enet_private *fep = netdev_priv(ndev);
1574 -       int ret;
1575 -
1576 -       ret = pm_runtime_get_sync(&fep->pdev->dev);
1577 -       if (ret < 0)
1578 -               return ret;
1579  
1580 +       fec_enet_clk_enable(ndev, true);
1581         writel(0xffc00000, fep->hwp + FEC_IEVENT);
1582         writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1583         writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1584         writel(FEC_ENET_ETHEREN, fep->hwp + FEC_ECNTRL);
1585 -
1586 -       pm_runtime_mark_last_busy(&fep->pdev->dev);
1587 -       pm_runtime_put_autosuspend(&fep->pdev->dev);
1588 -       return 0;
1589  }
1590  
1591  static int fec_enet_mii_probe(struct net_device *ndev)
1592 @@ -1955,6 +1953,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1593         int phy_id;
1594         int dev_id = fep->dev_id;
1595  
1596 +       fep->phy_dev = NULL;
1597 +
1598         if (fep->phy_node) {
1599                 phy_dev = of_phy_connect(ndev, fep->phy_node,
1600                                          &fec_enet_adjust_link, 0,
1601 @@ -1964,7 +1964,11 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1602         } else {
1603                 /* check for attached phy */
1604                 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
1605 -                       if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
1606 +                       if ((fep->mii_bus->phy_mask & (1 << phy_id)))
1607 +                               continue;
1608 +                       if (fep->mii_bus->mdio_map[phy_id] == NULL)
1609 +                               continue;
1610 +                       if (fep->mii_bus->mdio_map[phy_id]->addr == 0)
1611                                 continue;
1612                         if (dev_id--)
1613                                 continue;
1614 @@ -2002,10 +2006,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1615  
1616         phy_dev->advertising = phy_dev->supported;
1617  
1618 +       fep->phy_dev = phy_dev;
1619         fep->link = 0;
1620         fep->full_duplex = 0;
1621  
1622 -       phy_attached_info(phy_dev);
1623 +       netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1624 +                   fep->phy_dev->drv->name, NULL,
1625 +                   fep->phy_dev->irq);
1626  
1627         return 0;
1628  }
1629 @@ -2017,7 +2024,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1630         struct net_device *ndev = platform_get_drvdata(pdev);
1631         struct fec_enet_private *fep = netdev_priv(ndev);
1632         struct device_node *node;
1633 -       int err = -ENXIO;
1634 +       int err = -ENXIO, i;
1635         u32 mii_speed, holdtime;
1636  
1637         /*
1638 @@ -2036,7 +2043,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1639          * mdio interface in board design, and need to be configured by
1640          * fec0 mii_bus.
1641          */
1642 -       if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1643 +       if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
1644                 /* fec1 uses fec0 mii_bus */
1645                 if (mii_cnt && fec0_mii_bus) {
1646                         fep->mii_bus = fec0_mii_bus;
1647 @@ -2100,29 +2107,38 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1648         fep->mii_bus->priv = fep;
1649         fep->mii_bus->parent = &pdev->dev;
1650  
1651 +/*     fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1652 +       if (!fep->mii_bus->irq) {
1653 +               err = -ENOMEM;
1654 +               goto err_out_free_mdiobus;
1655 +       }
1656 +*/
1657 +       for (i = 0; i < PHY_MAX_ADDR; i++)
1658 +               fep->mii_bus->irq[i] = PHY_POLL;
1659 +
1660         node = of_get_child_by_name(pdev->dev.of_node, "mdio");
1661         if (node) {
1662                 err = of_mdiobus_register(fep->mii_bus, node);
1663                 of_node_put(node);
1664 -       } else if (fep->phy_node && !fep->fixed_link) {
1665 -               err = -EPROBE_DEFER;
1666         } else {
1667                 err = mdiobus_register(fep->mii_bus);
1668         }
1669  
1670         if (err)
1671 -               goto err_out_free_mdiobus;
1672 +               goto err_out_free_mdio_irq;
1673  
1674         mii_cnt++;
1675  
1676         /* save fec0 mii_bus */
1677 -       if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) {
1678 +       if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1679                 fec0_mii_bus = fep->mii_bus;
1680                 fec_mii_bus_share = &fep->mii_bus_share;
1681         }
1682  
1683         return 0;
1684  
1685 +err_out_free_mdio_irq:
1686 +       kfree(fep->mii_bus->irq);
1687  err_out_free_mdiobus:
1688         mdiobus_free(fep->mii_bus);
1689  err_out:
1690 @@ -2133,10 +2149,35 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
1691  {
1692         if (--mii_cnt == 0) {
1693                 mdiobus_unregister(fep->mii_bus);
1694 +               kfree(fep->mii_bus->irq);
1695                 mdiobus_free(fep->mii_bus);
1696         }
1697  }
1698  
1699 +static int fec_enet_get_settings(struct net_device *ndev,
1700 +                                 struct ethtool_cmd *cmd)
1701 +{
1702 +       struct fec_enet_private *fep = netdev_priv(ndev);
1703 +       struct phy_device *phydev = fep->phy_dev;
1704 +
1705 +       if (!phydev)
1706 +               return -ENODEV;
1707 +
1708 +       return phy_ethtool_gset(phydev, cmd);
1709 +}
1710 +
1711 +static int fec_enet_set_settings(struct net_device *ndev,
1712 +                                struct ethtool_cmd *cmd)
1713 +{
1714 +       struct fec_enet_private *fep = netdev_priv(ndev);
1715 +       struct phy_device *phydev = fep->phy_dev;
1716 +
1717 +       if (!phydev)
1718 +               return -ENODEV;
1719 +
1720 +       return phy_ethtool_sset(phydev, cmd);
1721 +}
1722 +
1723  static void fec_enet_get_drvinfo(struct net_device *ndev,
1724                                  struct ethtool_drvinfo *info)
1725  {
1726 @@ -2163,8 +2204,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
1727  
1728  /* List of registers that can be safety be read to dump them with ethtool */
1729  #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
1730 -       defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
1731 -       defined(CONFIG_ARM64)
1732 +       defined(CONFIG_M520x) || defined(CONFIG_M532x) ||               \
1733 +       defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
1734  static u32 fec_enet_register_offset[] = {
1735         FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
1736         FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
1737 @@ -2270,7 +2311,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1738  {
1739         struct fec_enet_private *fep = netdev_priv(ndev);
1740  
1741 -       if (!ndev->phydev)
1742 +       if (!fep->phy_dev)
1743                 return -ENODEV;
1744  
1745         if (pause->tx_pause != pause->rx_pause) {
1746 @@ -2286,17 +2327,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1747         fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
1748  
1749         if (pause->rx_pause || pause->autoneg) {
1750 -               ndev->phydev->supported |= ADVERTISED_Pause;
1751 -               ndev->phydev->advertising |= ADVERTISED_Pause;
1752 +               fep->phy_dev->supported |= ADVERTISED_Pause;
1753 +               fep->phy_dev->advertising |= ADVERTISED_Pause;
1754         } else {
1755 -               ndev->phydev->supported &= ~ADVERTISED_Pause;
1756 -               ndev->phydev->advertising &= ~ADVERTISED_Pause;
1757 +               fep->phy_dev->supported &= ~ADVERTISED_Pause;
1758 +               fep->phy_dev->advertising &= ~ADVERTISED_Pause;
1759         }
1760  
1761         if (pause->autoneg) {
1762                 if (netif_running(ndev))
1763                         fec_stop(ndev);
1764 -               phy_start_aneg(ndev->phydev);
1765 +               phy_start_aneg(fep->phy_dev);
1766         }
1767         if (netif_running(ndev)) {
1768                 napi_disable(&fep->napi);
1769 @@ -2376,26 +2417,14 @@ static const struct fec_stat {
1770         { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
1771  };
1772  
1773 -#define FEC_STATS_SIZE         (ARRAY_SIZE(fec_stats) * sizeof(u64))
1774 -
1775 -static void fec_enet_update_ethtool_stats(struct net_device *dev)
1776 +static void fec_enet_get_ethtool_stats(struct net_device *dev,
1777 +       struct ethtool_stats *stats, u64 *data)
1778  {
1779         struct fec_enet_private *fep = netdev_priv(dev);
1780         int i;
1781  
1782         for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
1783 -               fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
1784 -}
1785 -
1786 -static void fec_enet_get_ethtool_stats(struct net_device *dev,
1787 -                                      struct ethtool_stats *stats, u64 *data)
1788 -{
1789 -       struct fec_enet_private *fep = netdev_priv(dev);
1790 -
1791 -       if (netif_running(dev))
1792 -               fec_enet_update_ethtool_stats(dev);
1793 -
1794 -       memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
1795 +               data[i] = readl(fep->hwp + fec_stats[i].offset);
1796  }
1797  
1798  static void fec_enet_get_strings(struct net_device *netdev,
1799 @@ -2420,17 +2449,12 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
1800                 return -EOPNOTSUPP;
1801         }
1802  }
1803 -
1804 -#else  /* !defined(CONFIG_M5272) */
1805 -#define FEC_STATS_SIZE 0
1806 -static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
1807 -{
1808 -}
1809  #endif /* !defined(CONFIG_M5272) */
1810  
1811  static int fec_enet_nway_reset(struct net_device *dev)
1812  {
1813 -       struct phy_device *phydev = dev->phydev;
1814 +       struct fec_enet_private *fep = netdev_priv(dev);
1815 +       struct phy_device *phydev = fep->phy_dev;
1816  
1817         if (!phydev)
1818                 return -ENODEV;
1819 @@ -2455,6 +2479,9 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
1820         struct fec_enet_private *fep = netdev_priv(ndev);
1821         int rx_itr, tx_itr;
1822  
1823 +       if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1824 +               return;
1825 +
1826         /* Must be greater than zero to avoid unpredictable behavior */
1827         if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
1828             !fep->tx_time_itr || !fep->tx_pkts_itr)
1829 @@ -2477,12 +2504,10 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
1830  
1831         writel(tx_itr, fep->hwp + FEC_TXIC0);
1832         writel(rx_itr, fep->hwp + FEC_RXIC0);
1833 -       if (fep->quirks & FEC_QUIRK_HAS_AVB) {
1834 -               writel(tx_itr, fep->hwp + FEC_TXIC1);
1835 -               writel(rx_itr, fep->hwp + FEC_RXIC1);
1836 -               writel(tx_itr, fep->hwp + FEC_TXIC2);
1837 -               writel(rx_itr, fep->hwp + FEC_RXIC2);
1838 -       }
1839 +       writel(tx_itr, fep->hwp + FEC_TXIC1);
1840 +       writel(rx_itr, fep->hwp + FEC_RXIC1);
1841 +       writel(tx_itr, fep->hwp + FEC_TXIC2);
1842 +       writel(rx_itr, fep->hwp + FEC_RXIC2);
1843  }
1844  
1845  static int
1846 @@ -2490,7 +2515,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
1847  {
1848         struct fec_enet_private *fep = netdev_priv(ndev);
1849  
1850 -       if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
1851 +       if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1852                 return -EOPNOTSUPP;
1853  
1854         ec->rx_coalesce_usecs = fep->rx_time_itr;
1855 @@ -2508,28 +2533,28 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
1856         struct fec_enet_private *fep = netdev_priv(ndev);
1857         unsigned int cycle;
1858  
1859 -       if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
1860 +       if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
1861                 return -EOPNOTSUPP;
1862  
1863         if (ec->rx_max_coalesced_frames > 255) {
1864 -               pr_err("Rx coalesced frames exceed hardware limitation\n");
1865 +               pr_err("Rx coalesced frames exceed hardware limiation");
1866                 return -EINVAL;
1867         }
1868  
1869         if (ec->tx_max_coalesced_frames > 255) {
1870 -               pr_err("Tx coalesced frame exceed hardware limitation\n");
1871 +               pr_err("Tx coalesced frame exceed hardware limiation");
1872                 return -EINVAL;
1873         }
1874  
1875         cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
1876         if (cycle > 0xFFFF) {
1877 -               pr_err("Rx coalesced usec exceed hardware limitation\n");
1878 +               pr_err("Rx coalesed usec exceeed hardware limiation");
1879                 return -EINVAL;
1880         }
1881  
1882         cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
1883         if (cycle > 0xFFFF) {
1884 -               pr_err("Rx coalesced usec exceed hardware limitation\n");
1885 +               pr_err("Rx coalesed usec exceeed hardware limiation");
1886                 return -EINVAL;
1887         }
1888  
1889 @@ -2629,6 +2654,8 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1890  }
1891  
1892  static const struct ethtool_ops fec_enet_ethtool_ops = {
1893 +       .get_settings           = fec_enet_get_settings,
1894 +       .set_settings           = fec_enet_set_settings,
1895         .get_drvinfo            = fec_enet_get_drvinfo,
1896         .get_regs_len           = fec_enet_get_regs_len,
1897         .get_regs               = fec_enet_get_regs,
1898 @@ -2648,14 +2675,12 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
1899         .set_tunable            = fec_enet_set_tunable,
1900         .get_wol                = fec_enet_get_wol,
1901         .set_wol                = fec_enet_set_wol,
1902 -       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
1903 -       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
1904  };
1905  
1906  static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1907  {
1908         struct fec_enet_private *fep = netdev_priv(ndev);
1909 -       struct phy_device *phydev = ndev->phydev;
1910 +       struct phy_device *phydev = fep->phy_dev;
1911  
1912         if (!netif_running(ndev))
1913                 return -EINVAL;
1914 @@ -2685,25 +2710,25 @@ static void fec_enet_free_buffers(struct net_device *ndev)
1915  
1916         for (q = 0; q < fep->num_rx_queues; q++) {
1917                 rxq = fep->rx_queue[q];
1918 -               bdp = rxq->bd.base;
1919 -               for (i = 0; i < rxq->bd.ring_size; i++) {
1920 +               bdp = rxq->rx_bd_base;
1921 +               for (i = 0; i < rxq->rx_ring_size; i++) {
1922                         skb = rxq->rx_skbuff[i];
1923                         rxq->rx_skbuff[i] = NULL;
1924                         if (skb) {
1925                                 dma_unmap_single(&fep->pdev->dev,
1926 -                                                fec32_to_cpu(bdp->cbd_bufaddr),
1927 +                                                bdp->cbd_bufaddr,
1928                                                  FEC_ENET_RX_FRSIZE - fep->rx_align,
1929                                                  DMA_FROM_DEVICE);
1930                                 dev_kfree_skb(skb);
1931                         }
1932 -                       bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1933 +                       bdp = fec_enet_get_nextdesc(bdp, fep, q);
1934                 }
1935         }
1936  
1937         for (q = 0; q < fep->num_tx_queues; q++) {
1938                 txq = fep->tx_queue[q];
1939 -               bdp = txq->bd.base;
1940 -               for (i = 0; i < txq->bd.ring_size; i++) {
1941 +               bdp = txq->tx_bd_base;
1942 +               for (i = 0; i < txq->tx_ring_size; i++) {
1943                         kfree(txq->tx_bounce[i]);
1944                         txq->tx_bounce[i] = NULL;
1945                         skb = txq->tx_skbuff[i];
1946 @@ -2722,8 +2747,8 @@ static void fec_enet_free_queue(struct net_device *ndev)
1947         for (i = 0; i < fep->num_tx_queues; i++)
1948                 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
1949                         txq = fep->tx_queue[i];
1950 -                       dma_free_coherent(&fep->pdev->dev,
1951 -                                         txq->bd.ring_size * TSO_HEADER_SIZE,
1952 +                       dma_free_coherent(NULL,
1953 +                                         txq->tx_ring_size * TSO_HEADER_SIZE,
1954                                           txq->tso_hdrs,
1955                                           txq->tso_hdrs_dma);
1956                 }
1957 @@ -2749,15 +2774,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
1958                 }
1959  
1960                 fep->tx_queue[i] = txq;
1961 -               txq->bd.ring_size = TX_RING_SIZE;
1962 -               fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
1963 +               txq->tx_ring_size = TX_RING_SIZE;
1964 +               fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
1965  
1966                 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
1967                 txq->tx_wake_threshold =
1968 -                       (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
1969 +                               (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
1970  
1971 -               txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
1972 -                                       txq->bd.ring_size * TSO_HEADER_SIZE,
1973 +               txq->tso_hdrs = dma_alloc_coherent(NULL,
1974 +                                       txq->tx_ring_size * TSO_HEADER_SIZE,
1975                                         &txq->tso_hdrs_dma,
1976                                         GFP_KERNEL);
1977                 if (!txq->tso_hdrs) {
1978 @@ -2774,8 +2799,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
1979                         goto alloc_failed;
1980                 }
1981  
1982 -               fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
1983 -               fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
1984 +               fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
1985 +               fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
1986         }
1987         return ret;
1988  
1989 @@ -2794,8 +2819,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
1990         struct fec_enet_priv_rx_q *rxq;
1991  
1992         rxq = fep->rx_queue[queue];
1993 -       bdp = rxq->bd.base;
1994 -       for (i = 0; i < rxq->bd.ring_size; i++) {
1995 +       bdp = rxq->rx_bd_base;
1996 +       for (i = 0; i < rxq->rx_ring_size; i++) {
1997                 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
1998                 if (!skb)
1999                         goto err_alloc;
2000 @@ -2806,19 +2831,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
2001                 }
2002  
2003                 rxq->rx_skbuff[i] = skb;
2004 -               bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
2005 +               bdp->cbd_sc = BD_ENET_RX_EMPTY;
2006  
2007                 if (fep->bufdesc_ex) {
2008                         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2009 -                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
2010 +                       ebdp->cbd_esc = BD_ENET_RX_INT;
2011                 }
2012  
2013 -               bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2014 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
2015         }
2016  
2017         /* Set the last buffer to wrap. */
2018 -       bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
2019 -       bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2020 +       bdp = fec_enet_get_prevdesc(bdp, fep, queue);
2021 +       bdp->cbd_sc |= BD_SC_WRAP;
2022         return 0;
2023  
2024   err_alloc:
2025 @@ -2835,26 +2860,26 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
2026         struct fec_enet_priv_tx_q *txq;
2027  
2028         txq = fep->tx_queue[queue];
2029 -       bdp = txq->bd.base;
2030 -       for (i = 0; i < txq->bd.ring_size; i++) {
2031 +       bdp = txq->tx_bd_base;
2032 +       for (i = 0; i < txq->tx_ring_size; i++) {
2033                 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
2034                 if (!txq->tx_bounce[i])
2035                         goto err_alloc;
2036  
2037 -               bdp->cbd_sc = cpu_to_fec16(0);
2038 -               bdp->cbd_bufaddr = cpu_to_fec32(0);
2039 +               bdp->cbd_sc = 0;
2040 +               bdp->cbd_bufaddr = 0;
2041  
2042                 if (fep->bufdesc_ex) {
2043                         struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2044 -                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
2045 +                       ebdp->cbd_esc = BD_ENET_TX_INT;
2046                 }
2047  
2048 -               bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
2049 +               bdp = fec_enet_get_nextdesc(bdp, fep, queue);
2050         }
2051  
2052         /* Set the last buffer to wrap. */
2053 -       bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
2054 -       bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2055 +       bdp = fec_enet_get_prevdesc(bdp, fep, queue);
2056 +       bdp->cbd_sc |= BD_SC_WRAP;
2057  
2058         return 0;
2059  
2060 @@ -2903,14 +2928,10 @@ fec_enet_open(struct net_device *ndev)
2061                                 platform_get_device_id(fep->pdev);
2062         int ret;
2063  
2064 -       ret = pm_runtime_get_sync(&fep->pdev->dev);
2065 -       if (ret < 0)
2066 -               return ret;
2067 -
2068         pinctrl_pm_select_default_state(&fep->pdev->dev);
2069         ret = fec_enet_clk_enable(ndev, true);
2070         if (ret)
2071 -               goto clk_enable;
2072 +               return ret;
2073  
2074         /* I should reset the ring buffers here, but I don't yet know
2075          * a simple way to do that.
2076 @@ -2928,13 +2949,11 @@ fec_enet_open(struct net_device *ndev)
2077         if (ret)
2078                 goto err_enet_mii_probe;
2079  
2080 -       if (fep->quirks & FEC_QUIRK_ERR006687)
2081 -               imx6q_cpuidle_fec_irqs_used();
2082 -
2083         napi_enable(&fep->napi);
2084 -       phy_start(ndev->phydev);
2085 +       phy_start(fep->phy_dev);
2086         netif_tx_start_all_queues(ndev);
2087  
2088 +       pm_runtime_get_sync(ndev->dev.parent);
2089         if ((id_entry->driver_data & FEC_QUIRK_BUG_WAITMODE) &&
2090             !fec_enet_irq_workaround(fep))
2091                 pm_qos_add_request(&fep->pm_qos_req,
2092 @@ -2947,16 +2966,14 @@ fec_enet_open(struct net_device *ndev)
2093  
2094         device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
2095                                  FEC_WOL_FLAG_ENABLE);
2096 +       fep->miibus_up_failed = false;
2097  
2098         return 0;
2099  
2100  err_enet_mii_probe:
2101         fec_enet_free_buffers(ndev);
2102  err_enet_alloc:
2103 -       fec_enet_clk_enable(ndev, false);
2104 -clk_enable:
2105 -       pm_runtime_mark_last_busy(&fep->pdev->dev);
2106 -       pm_runtime_put_autosuspend(&fep->pdev->dev);
2107 +       fep->miibus_up_failed = true;
2108         if (!fep->mii_bus_share)
2109                 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2110         return ret;
2111 @@ -2967,7 +2984,7 @@ fec_enet_close(struct net_device *ndev)
2112  {
2113         struct fec_enet_private *fep = netdev_priv(ndev);
2114  
2115 -       phy_stop(ndev->phydev);
2116 +       phy_stop(fep->phy_dev);
2117  
2118         if (netif_device_present(ndev)) {
2119                 napi_disable(&fep->napi);
2120 @@ -2975,21 +2992,13 @@ fec_enet_close(struct net_device *ndev)
2121                 fec_stop(ndev);
2122         }
2123  
2124 -       phy_disconnect(ndev->phydev);
2125 -       ndev->phydev = NULL;
2126 -
2127 -       if (fep->quirks & FEC_QUIRK_ERR006687)
2128 -               imx6q_cpuidle_fec_irqs_unused();
2129 -
2130 -       fec_enet_update_ethtool_stats(ndev);
2131 +       phy_disconnect(fep->phy_dev);
2132 +       fep->phy_dev = NULL;
2133  
2134         fec_enet_clk_enable(ndev, false);
2135         pm_qos_remove_request(&fep->pm_qos_req);
2136 -       if (!fep->mii_bus_share)
2137 -               pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2138 -       pm_runtime_mark_last_busy(&fep->pdev->dev);
2139 -       pm_runtime_put_autosuspend(&fep->pdev->dev);
2140 -
2141 +       pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2142 +       pm_runtime_put_sync_suspend(ndev->dev.parent);
2143         fec_enet_free_buffers(ndev);
2144  
2145         return 0;
2146 @@ -3005,7 +3014,7 @@ fec_enet_close(struct net_device *ndev)
2147   * this kind of feature?).
2148   */
2149  
2150 -#define FEC_HASH_BITS  6               /* #bits in hash */
2151 +#define HASH_BITS      6               /* #bits in hash */
2152  #define CRC32_POLY     0xEDB88320
2153  
2154  static void set_multicast_list(struct net_device *ndev)
2155 @@ -3014,7 +3023,6 @@ static void set_multicast_list(struct net_device *ndev)
2156         struct netdev_hw_addr *ha;
2157         unsigned int i, bit, data, crc, tmp;
2158         unsigned char hash;
2159 -       unsigned int hash_high, hash_low;
2160  
2161         if (ndev->flags & IFF_PROMISC) {
2162                 tmp = readl(fep->hwp + FEC_R_CNTRL);
2163 @@ -3037,10 +3045,10 @@ static void set_multicast_list(struct net_device *ndev)
2164                 return;
2165         }
2166  
2167 -       /* Add the addresses in hash register
2168 +       /* Clear filter and add the addresses in hash register
2169          */
2170 -       hash_high = 0;
2171 -       hash_low = 0;
2172 +       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2173 +       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2174  
2175         netdev_for_each_mc_addr(ha, ndev) {
2176                 /* calculate crc32 value of mac address */
2177 @@ -3054,20 +3062,21 @@ static void set_multicast_list(struct net_device *ndev)
2178                         }
2179                 }
2180  
2181 -               /* only upper 6 bits (FEC_HASH_BITS) are used
2182 +               /* only upper 6 bits (HASH_BITS) are used
2183                  * which point to specific bit in he hash registers
2184                  */
2185 -               hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2186 +               hash = (crc >> (32 - HASH_BITS)) & 0x3f;
2187  
2188                 if (hash > 31) {
2189 -                       hash_high |= 1 << (hash - 32);
2190 +                       tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2191 +                       tmp |= 1 << (hash - 32);
2192 +                       writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2193                 } else {
2194 -                       hash_low |= 1 << hash;
2195 +                       tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2196 +                       tmp |= 1 << hash;
2197 +                       writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2198                 }
2199         }
2200 -
2201 -       writel_relaxed(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2202 -       writel_relaxed(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2203  }
2204  
2205  /* Set a MAC change in hardware. */
2206 @@ -3122,6 +3131,7 @@ static void fec_poll_controller(struct net_device *dev)
2207  }
2208  #endif
2209  
2210 +#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
2211  static inline void fec_enet_set_netdev_features(struct net_device *netdev,
2212         netdev_features_t features)
2213  {
2214 @@ -3145,7 +3155,7 @@ static int fec_set_features(struct net_device *netdev,
2215         struct fec_enet_private *fep = netdev_priv(netdev);
2216         netdev_features_t changed = features ^ netdev->features;
2217  
2218 -       if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
2219 +       if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
2220                 napi_disable(&fep->napi);
2221                 netif_tx_lock_bh(netdev);
2222                 fec_stop(netdev);
2223 @@ -3209,14 +3219,6 @@ static const struct net_device_ops fec_netdev_ops = {
2224         .ndo_set_features       = fec_set_features,
2225  };
2226  
2227 -static const unsigned short offset_des_active_rxq[] = {
2228 -       FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
2229 -};
2230 -
2231 -static const unsigned short offset_des_active_txq[] = {
2232 -       FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
2233 -};
2234 -
2235   /*
2236    * XXX:  We need to clean up on failure exits here.
2237    *
2238 @@ -3224,16 +3226,14 @@ static const unsigned short offset_des_active_txq[] = {
2239  static int fec_enet_init(struct net_device *ndev)
2240  {
2241         struct fec_enet_private *fep = netdev_priv(ndev);
2242 +       struct fec_enet_priv_tx_q *txq;
2243 +       struct fec_enet_priv_rx_q *rxq;
2244         struct bufdesc *cbd_base;
2245         dma_addr_t bd_dma;
2246         int bd_size;
2247         unsigned int i;
2248 -       unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
2249 -                       sizeof(struct bufdesc);
2250 -       unsigned dsize_log2 = __fls(dsize);
2251  
2252 -       WARN_ON(dsize != (1 << dsize_log2));
2253 -#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
2254 +#if defined(CONFIG_ARM)
2255         fep->rx_align = 0xf;
2256         fep->tx_align = 0xf;
2257  #else
2258 @@ -3243,11 +3243,16 @@ static int fec_enet_init(struct net_device *ndev)
2259  
2260         fec_enet_alloc_queue(ndev);
2261  
2262 -       bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
2263 +       if (fep->bufdesc_ex)
2264 +               fep->bufdesc_size = sizeof(struct bufdesc_ex);
2265 +       else
2266 +               fep->bufdesc_size = sizeof(struct bufdesc);
2267 +       bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
2268 +                       fep->bufdesc_size;
2269  
2270         /* Allocate memory for buffer descriptors. */
2271 -       cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
2272 -                                      GFP_KERNEL);
2273 +       cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
2274 +                                     GFP_KERNEL);
2275         if (!cbd_base) {
2276                 return -ENOMEM;
2277         }
2278 @@ -3261,35 +3266,33 @@ static int fec_enet_init(struct net_device *ndev)
2279  
2280         /* Set receive and transmit descriptor base. */
2281         for (i = 0; i < fep->num_rx_queues; i++) {
2282 -               struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
2283 -               unsigned size = dsize * rxq->bd.ring_size;
2284 -
2285 -               rxq->bd.qid = i;
2286 -               rxq->bd.base = cbd_base;
2287 -               rxq->bd.cur = cbd_base;
2288 -               rxq->bd.dma = bd_dma;
2289 -               rxq->bd.dsize = dsize;
2290 -               rxq->bd.dsize_log2 = dsize_log2;
2291 -               rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
2292 -               bd_dma += size;
2293 -               cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
2294 -               rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
2295 +               rxq = fep->rx_queue[i];
2296 +               rxq->index = i;
2297 +               rxq->rx_bd_base = (struct bufdesc *)cbd_base;
2298 +               rxq->bd_dma = bd_dma;
2299 +               if (fep->bufdesc_ex) {
2300 +                       bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
2301 +                       cbd_base = (struct bufdesc *)
2302 +                               (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
2303 +               } else {
2304 +                       bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
2305 +                       cbd_base += rxq->rx_ring_size;
2306 +               }
2307         }
2308  
2309         for (i = 0; i < fep->num_tx_queues; i++) {
2310 -               struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
2311 -               unsigned size = dsize * txq->bd.ring_size;
2312 -
2313 -               txq->bd.qid = i;
2314 -               txq->bd.base = cbd_base;
2315 -               txq->bd.cur = cbd_base;
2316 -               txq->bd.dma = bd_dma;
2317 -               txq->bd.dsize = dsize;
2318 -               txq->bd.dsize_log2 = dsize_log2;
2319 -               txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
2320 -               bd_dma += size;
2321 -               cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
2322 -               txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
2323 +               txq = fep->tx_queue[i];
2324 +               txq->index = i;
2325 +               txq->tx_bd_base = (struct bufdesc *)cbd_base;
2326 +               txq->bd_dma = bd_dma;
2327 +               if (fep->bufdesc_ex) {
2328 +                       bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
2329 +                       cbd_base = (struct bufdesc *)
2330 +                        (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
2331 +               } else {
2332 +                       bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
2333 +                       cbd_base += txq->tx_ring_size;
2334 +               }
2335         }
2336  
2337  
2338 @@ -3323,60 +3326,62 @@ static int fec_enet_init(struct net_device *ndev)
2339  
2340         fec_restart(ndev);
2341  
2342 -       fec_enet_update_ethtool_stats(ndev);
2343 -
2344         return 0;
2345  }
2346  
2347  #ifdef CONFIG_OF
2348 -static int fec_reset_phy(struct platform_device *pdev)
2349 +static void fec_reset_phy(struct platform_device *pdev)
2350 +{
2351 +       struct net_device *ndev = platform_get_drvdata(pdev);
2352 +       struct fec_enet_private *fep = netdev_priv(ndev);
2353 +
2354 +       if (!gpio_is_valid(fep->phy_reset_gpio))
2355 +               return;
2356 +
2357 +       gpio_set_value_cansleep(fep->phy_reset_gpio, 0);
2358 +       msleep(fep->phy_reset_duration);
2359 +       gpio_set_value_cansleep(fep->phy_reset_gpio, 1);
2360 +}
2361 +
2362 +static int fec_get_reset_gpio(struct platform_device *pdev)
2363  {
2364         int err, phy_reset;
2365 -       bool active_high = false;
2366         int msec = 1;
2367         struct device_node *np = pdev->dev.of_node;
2368 -
2369 -       if (!np)
2370 -               return 0;
2371 -
2372 -       err = of_property_read_u32(np, "phy-reset-duration", &msec);
2373 -       /* A sane reset duration should not be longer than 1s */
2374 -       if (!err && msec > 1000)
2375 -               msec = 1;
2376 +       struct net_device *ndev = platform_get_drvdata(pdev);
2377 +       struct fec_enet_private *fep = netdev_priv(ndev);
2378  
2379         phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
2380 -       if (phy_reset == -EPROBE_DEFER)
2381 +       if (!gpio_is_valid(phy_reset))
2382                 return phy_reset;
2383 -       else if (!gpio_is_valid(phy_reset))
2384 -               return 0;
2385 -
2386 -       active_high = of_property_read_bool(np, "phy-reset-active-high");
2387  
2388         err = devm_gpio_request_one(&pdev->dev, phy_reset,
2389 -                       active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
2390 -                       "phy-reset");
2391 +                                   GPIOF_OUT_INIT_LOW, "phy-reset");
2392         if (err) {
2393                 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
2394                 return err;
2395         }
2396 -
2397 -       if (msec > 20)
2398 -               msleep(msec);
2399 -       else
2400 -               usleep_range(msec * 1000, msec * 1000 + 1000);
2401 -
2402 -       gpio_set_value_cansleep(phy_reset, !active_high);
2403 -
2404 -       return 0;
2405 +
2406 +       of_property_read_u32(np, "phy-reset-duration", &msec);
2407 +       /* A sane reset duration should not be longer than 1s */
2408 +       if (msec > 1000)
2409 +               msec = 1;
2410 +       fep->phy_reset_duration = msec;
2411 +
2412 +       return phy_reset;
2413  }
2414  #else /* CONFIG_OF */
2415 -static int fec_reset_phy(struct platform_device *pdev)
2416 +static void fec_reset_phy(struct platform_device *pdev)
2417  {
2418         /*
2419          * In case of platform probe, the reset has been done
2420          * by machine code.
2421          */
2422 -       return 0;
2423 +}
2424 +
2425 +static inline int fec_get_reset_gpio(struct platform_device *pdev)
2426 +{
2427 +       return -EINVAL;
2428  }
2429  #endif /* CONFIG_OF */
2430  
2431 @@ -3384,6 +3389,7 @@ static void
2432  fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
2433  {
2434         struct device_node *np = pdev->dev.of_node;
2435 +       int err;
2436  
2437         *num_tx = *num_rx = 1;
2438  
2439 @@ -3391,9 +3397,13 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
2440                 return;
2441  
2442         /* parse the num of tx and rx queues */
2443 -       of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
2444 +       err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
2445 +       if (err)
2446 +               *num_tx = 1;
2447  
2448 -       of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
2449 +       err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
2450 +       if (err)
2451 +               *num_rx = 1;
2452  
2453         if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
2454                 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
2455 @@ -3460,13 +3470,11 @@ fec_probe(struct platform_device *pdev)
2456         int num_tx_qs;
2457         int num_rx_qs;
2458  
2459 -       of_dma_configure(&pdev->dev, np);
2460 -
2461         fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
2462  
2463         /* Init network device */
2464 -       ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
2465 -                                 FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
2466 +       ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
2467 +                                 num_tx_qs, num_rx_qs);
2468         if (!ndev)
2469                 return -ENOMEM;
2470  
2471 @@ -3505,13 +3513,14 @@ fec_probe(struct platform_device *pdev)
2472  
2473         platform_set_drvdata(pdev, ndev);
2474  
2475 -       if ((of_machine_is_compatible("fsl,imx6q") ||
2476 -            of_machine_is_compatible("fsl,imx6dl")) &&
2477 -           !of_property_read_bool(np, "fsl,err006687-workaround-present"))
2478 -               fep->quirks |= FEC_QUIRK_ERR006687;
2479 -
2480         fec_enet_of_parse_stop_mode(pdev);
2481  
2482 +       ret = fec_get_reset_gpio(pdev);
2483 +       if (ret == -EPROBE_DEFER)
2484 +               goto gpio_defer;
2485 +       fep->phy_reset_gpio = ret;
2486 +
2487 +
2488         if (of_get_property(np, "fsl,magic-packet", NULL))
2489                 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
2490  
2491 @@ -3524,7 +3533,6 @@ fec_probe(struct platform_device *pdev)
2492                         goto failed_phy;
2493                 }
2494                 phy_node = of_node_get(np);
2495 -               fep->fixed_link = true;
2496         }
2497         fep->phy_node = phy_node;
2498  
2499 @@ -3539,10 +3547,6 @@ fec_probe(struct platform_device *pdev)
2500                 fep->phy_interface = ret;
2501         }
2502  
2503 -#if !defined(CONFIG_ARM64)
2504 -       request_bus_freq(BUS_FREQ_HIGH);
2505 -#endif
2506 -
2507         fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2508         if (IS_ERR(fep->clk_ipg)) {
2509                 ret = PTR_ERR(fep->clk_ipg);
2510 @@ -3577,39 +3581,24 @@ fec_probe(struct platform_device *pdev)
2511                 fep->bufdesc_ex = false;
2512         }
2513  
2514 +       pm_runtime_enable(&pdev->dev);
2515         ret = fec_enet_clk_enable(ndev, true);
2516         if (ret)
2517                 goto failed_clk;
2518  
2519 -       ret = clk_prepare_enable(fep->clk_ipg);
2520 -       if (ret)
2521 -               goto failed_clk_ipg;
2522 -       ret = clk_prepare_enable(fep->clk_ahb);
2523 -       if (ret)
2524 -               goto failed_clk_ahb;
2525 -
2526         fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
2527         if (!IS_ERR(fep->reg_phy)) {
2528                 ret = regulator_enable(fep->reg_phy);
2529                 if (ret) {
2530                         dev_err(&pdev->dev,
2531                                 "Failed to enable phy regulator: %d\n", ret);
2532 -                       clk_disable_unprepare(fep->clk_ipg);
2533                         goto failed_regulator;
2534                 }
2535         } else {
2536                 fep->reg_phy = NULL;
2537         }
2538  
2539 -       pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
2540 -       pm_runtime_use_autosuspend(&pdev->dev);
2541 -       pm_runtime_get_noresume(&pdev->dev);
2542 -       pm_runtime_set_active(&pdev->dev);
2543 -       pm_runtime_enable(&pdev->dev);
2544 -
2545 -       ret = fec_reset_phy(pdev);
2546 -       if (ret)
2547 -               goto failed_reset;
2548 +       fec_reset_phy(pdev);
2549  
2550         if (fep->bufdesc_ex)
2551                 fec_ptp_init(pdev);
2552 @@ -3641,15 +3630,9 @@ fec_probe(struct platform_device *pdev)
2553                 fep->wake_irq = fep->irq[0];
2554  
2555         init_completion(&fep->mdio_done);
2556 -
2557 -       /* board only enable one mii bus in default */
2558 -       if (!of_get_property(np, "fsl,mii-exclusive", NULL))
2559 -               fep->quirks |= FEC_QUIRK_SINGLE_MDIO;
2560         ret = fec_enet_mii_init(pdev);
2561 -       if (ret) {
2562 -               dev_id = 0;
2563 +       if (ret)
2564                 goto failed_mii_init;
2565 -       }
2566  
2567         /* Carrier starts down, phylib will bring it up */
2568         netif_carrier_off(ndev);
2569 @@ -3660,11 +3643,6 @@ fec_probe(struct platform_device *pdev)
2570         if (ret)
2571                 goto failed_register;
2572  
2573 -       if (!fep->fixed_link) {
2574 -               fep->fixups = of_fec_enet_parse_fixup(np);
2575 -               fec_enet_register_fixup(ndev);
2576 -       }
2577 -
2578         device_init_wakeup(&ndev->dev, fep->wol_flag &
2579                            FEC_WOL_HAS_MAGIC_PACKET);
2580  
2581 @@ -3673,10 +3651,6 @@ fec_probe(struct platform_device *pdev)
2582  
2583         fep->rx_copybreak = COPYBREAK_DEFAULT;
2584         INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
2585 -
2586 -       pm_runtime_mark_last_busy(&pdev->dev);
2587 -       pm_runtime_put_autosuspend(&pdev->dev);
2588 -
2589         return 0;
2590  
2591  failed_register:
2592 @@ -3684,22 +3658,14 @@ fec_probe(struct platform_device *pdev)
2593  failed_mii_init:
2594  failed_irq:
2595  failed_init:
2596 -       fec_ptp_stop(pdev);
2597         if (fep->reg_phy)
2598                 regulator_disable(fep->reg_phy);
2599 -failed_reset:
2600 -       pm_runtime_put(&pdev->dev);
2601 -       pm_runtime_disable(&pdev->dev);
2602  failed_regulator:
2603 -failed_clk_ahb:
2604 -       clk_disable_unprepare(fep->clk_ipg);
2605 -failed_clk_ipg:
2606         fec_enet_clk_enable(ndev, false);
2607  failed_clk:
2608 -       if (of_phy_is_fixed_link(np))
2609 -               of_phy_deregister_fixed_link(np);
2610  failed_phy:
2611         of_node_put(phy_node);
2612 +gpio_defer:
2613  failed_ioremap:
2614         free_netdev(ndev);
2615  
2616 @@ -3711,16 +3677,15 @@ fec_drv_remove(struct platform_device *pdev)
2617  {
2618         struct net_device *ndev = platform_get_drvdata(pdev);
2619         struct fec_enet_private *fep = netdev_priv(ndev);
2620 -       struct device_node *np = pdev->dev.of_node;
2621  
2622 +       cancel_delayed_work_sync(&fep->time_keep);
2623         cancel_work_sync(&fep->tx_timeout_work);
2624 -       fec_ptp_stop(pdev);
2625         unregister_netdev(ndev);
2626         fec_enet_mii_remove(fep);
2627         if (fep->reg_phy)
2628                 regulator_disable(fep->reg_phy);
2629 -       if (of_phy_is_fixed_link(np))
2630 -               of_phy_deregister_fixed_link(np);
2631 +       if (fep->ptp_clock)
2632 +               ptp_clock_unregister(fep->ptp_clock);
2633         of_node_put(fep->phy_node);
2634         free_netdev(ndev);
2635  
2636 @@ -3731,13 +3696,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
2637  {
2638         struct net_device *ndev = dev_get_drvdata(dev);
2639         struct fec_enet_private *fep = netdev_priv(ndev);
2640 -       int ret = 0;
2641  
2642         rtnl_lock();
2643         if (netif_running(ndev)) {
2644                 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
2645                         fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
2646 -               phy_stop(ndev->phydev);
2647 +               phy_stop(fep->phy_dev);
2648                 napi_disable(&fep->napi);
2649                 netif_tx_lock_bh(ndev);
2650                 netif_device_detach(ndev);
2651 @@ -3751,12 +3715,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
2652                         enable_irq_wake(fep->wake_irq);
2653                 }
2654                 fec_enet_clk_enable(ndev, false);
2655 -               fep->active_in_suspend = !pm_runtime_status_suspended(dev);
2656 -               if (fep->active_in_suspend)
2657 -                       ret = pm_runtime_force_suspend(dev);
2658 -               if (ret < 0)
2659 -                       return ret;
2660 -       } else if (fep->mii_bus_share && !ndev->phydev) {
2661 +       } else if (fep->mii_bus_share && fep->miibus_up_failed && !fep->phy_dev) {
2662 +               fec_enet_clk_enable(ndev, false);
2663                 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2664         }
2665         rtnl_unlock();
2666 @@ -3777,7 +3737,7 @@ static int __maybe_unused fec_resume(struct device *dev)
2667  {
2668         struct net_device *ndev = dev_get_drvdata(dev);
2669         struct fec_enet_private *fep = netdev_priv(ndev);
2670 -       int ret = 0;
2671 +       int ret;
2672         int val;
2673  
2674         if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
2675 @@ -3788,8 +3748,6 @@ static int __maybe_unused fec_resume(struct device *dev)
2676  
2677         rtnl_lock();
2678         if (netif_running(ndev)) {
2679 -               if (fep->active_in_suspend)
2680 -                       pm_runtime_force_resume(dev);
2681                 ret = fec_enet_clk_enable(ndev, true);
2682                 if (ret) {
2683                         rtnl_unlock();
2684 @@ -3812,15 +3770,16 @@ static int __maybe_unused fec_resume(struct device *dev)
2685                 netif_device_attach(ndev);
2686                 netif_tx_unlock_bh(ndev);
2687                 napi_enable(&fep->napi);
2688 -               phy_start(ndev->phydev);
2689 -       } else if (fep->mii_bus_share && !ndev->phydev) {
2690 +               phy_start(fep->phy_dev);
2691 +       } else if (fep->mii_bus_share && !fep->phy_dev) {
2692                 pinctrl_pm_select_default_state(&fep->pdev->dev);
2693 +               fep->miibus_up_failed = true;
2694                 /* And then recovery mii bus */
2695 -               ret = fec_restore_mii_bus(ndev);
2696 +               fec_restore_mii_bus(ndev);
2697         }
2698         rtnl_unlock();
2699  
2700 -       return ret;
2701 +       return 0;
2702  
2703  failed_clk:
2704         if (fep->reg_phy)
2705 @@ -3828,46 +3787,21 @@ static int __maybe_unused fec_resume(struct device *dev)
2706         return ret;
2707  }
2708  
2709 -static int __maybe_unused fec_runtime_suspend(struct device *dev)
2710 +static int fec_runtime_suspend(struct device *dev)
2711  {
2712 -       struct net_device *ndev = dev_get_drvdata(dev);
2713 -       struct fec_enet_private *fep = netdev_priv(ndev);
2714 -
2715 -       clk_disable_unprepare(fep->clk_ahb);
2716 -       clk_disable_unprepare(fep->clk_ipg);
2717 -#if !defined(CONFIG_ARM64)
2718         release_bus_freq(BUS_FREQ_HIGH);
2719 -#endif
2720 -
2721         return 0;
2722  }
2723  
2724 -static int __maybe_unused fec_runtime_resume(struct device *dev)
2725 +static int fec_runtime_resume(struct device *dev)
2726  {
2727 -       struct net_device *ndev = dev_get_drvdata(dev);
2728 -       struct fec_enet_private *fep = netdev_priv(ndev);
2729 -       int ret;
2730 -
2731 -#if !defined(CONFIG_ARM64)
2732         request_bus_freq(BUS_FREQ_HIGH);
2733 -#endif
2734 -       ret = clk_prepare_enable(fep->clk_ahb);
2735 -       if (ret)
2736 -               return ret;
2737 -       ret = clk_prepare_enable(fep->clk_ipg);
2738 -       if (ret)
2739 -               goto failed_clk_ipg;
2740 -
2741         return 0;
2742 -
2743 -failed_clk_ipg:
2744 -       clk_disable_unprepare(fep->clk_ahb);
2745 -       return ret;
2746  }
2747  
2748  static const struct dev_pm_ops fec_pm_ops = {
2749 -       SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
2750         SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
2751 +       SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
2752  };
2753  
2754  static struct platform_driver fec_driver = {
2755 diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
2756 index 446ae9d..afe7f39 100644
2757 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
2758 +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
2759 @@ -66,6 +66,7 @@ struct mpc52xx_fec_priv {
2760         /* MDIO link details */
2761         unsigned int mdio_speed;
2762         struct device_node *phy_node;
2763 +       struct phy_device *phydev;
2764         enum phy_state link;
2765         int seven_wire_mode;
2766  };
2767 @@ -164,7 +165,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
2768  static void mpc52xx_fec_adjust_link(struct net_device *dev)
2769  {
2770         struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2771 -       struct phy_device *phydev = dev->phydev;
2772 +       struct phy_device *phydev = priv->phydev;
2773         int new_state = 0;
2774  
2775         if (phydev->link != PHY_DOWN) {
2776 @@ -214,17 +215,16 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
2777  static int mpc52xx_fec_open(struct net_device *dev)
2778  {
2779         struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2780 -       struct phy_device *phydev = NULL;
2781         int err = -EBUSY;
2782  
2783         if (priv->phy_node) {
2784 -               phydev = of_phy_connect(priv->ndev, priv->phy_node,
2785 -                                       mpc52xx_fec_adjust_link, 0, 0);
2786 -               if (!phydev) {
2787 +               priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
2788 +                                             mpc52xx_fec_adjust_link, 0, 0);
2789 +               if (!priv->phydev) {
2790                         dev_err(&dev->dev, "of_phy_connect failed\n");
2791                         return -ENODEV;
2792                 }
2793 -               phy_start(phydev);
2794 +               phy_start(priv->phydev);
2795         }
2796  
2797         if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
2798 @@ -268,9 +268,10 @@ static int mpc52xx_fec_open(struct net_device *dev)
2799   free_ctrl_irq:
2800         free_irq(dev->irq, dev);
2801   free_phy:
2802 -       if (phydev) {
2803 -               phy_stop(phydev);
2804 -               phy_disconnect(phydev);
2805 +       if (priv->phydev) {
2806 +               phy_stop(priv->phydev);
2807 +               phy_disconnect(priv->phydev);
2808 +               priv->phydev = NULL;
2809         }
2810  
2811         return err;
2812 @@ -279,7 +280,6 @@ static int mpc52xx_fec_open(struct net_device *dev)
2813  static int mpc52xx_fec_close(struct net_device *dev)
2814  {
2815         struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2816 -       struct phy_device *phydev = dev->phydev;
2817  
2818         netif_stop_queue(dev);
2819  
2820 @@ -291,10 +291,11 @@ static int mpc52xx_fec_close(struct net_device *dev)
2821         free_irq(priv->r_irq, dev);
2822         free_irq(priv->t_irq, dev);
2823  
2824 -       if (phydev) {
2825 +       if (priv->phydev) {
2826                 /* power down phy */
2827 -               phy_stop(phydev);
2828 -               phy_disconnect(phydev);
2829 +               phy_stop(priv->phydev);
2830 +               phy_disconnect(priv->phydev);
2831 +               priv->phydev = NULL;
2832         }
2833  
2834         return 0;
2835 @@ -762,6 +763,26 @@ static void mpc52xx_fec_reset(struct net_device *dev)
2836  
2837  /* ethtool interface */
2838  
2839 +static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2840 +{
2841 +       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2842 +
2843 +       if (!priv->phydev)
2844 +               return -ENODEV;
2845 +
2846 +       return phy_ethtool_gset(priv->phydev, cmd);
2847 +}
2848 +
2849 +static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2850 +{
2851 +       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2852 +
2853 +       if (!priv->phydev)
2854 +               return -ENODEV;
2855 +
2856 +       return phy_ethtool_sset(priv->phydev, cmd);
2857 +}
2858 +
2859  static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
2860  {
2861         struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2862 @@ -775,23 +796,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
2863  }
2864  
2865  static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
2866 +       .get_settings = mpc52xx_fec_get_settings,
2867 +       .set_settings = mpc52xx_fec_set_settings,
2868         .get_link = ethtool_op_get_link,
2869         .get_msglevel = mpc52xx_fec_get_msglevel,
2870         .set_msglevel = mpc52xx_fec_set_msglevel,
2871         .get_ts_info = ethtool_op_get_ts_info,
2872 -       .get_link_ksettings = phy_ethtool_get_link_ksettings,
2873 -       .set_link_ksettings = phy_ethtool_set_link_ksettings,
2874  };
2875  
2876  
2877  static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2878  {
2879 -       struct phy_device *phydev = dev->phydev;
2880 +       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
2881  
2882 -       if (!phydev)
2883 +       if (!priv->phydev)
2884                 return -ENOTSUPP;
2885  
2886 -       return phy_mii_ioctl(phydev, rq, cmd);
2887 +       return phy_mii_ioctl(priv->phydev, rq, cmd);
2888  }
2889  
2890  static const struct net_device_ops mpc52xx_fec_netdev_ops = {
2891 @@ -1063,23 +1084,27 @@ static struct platform_driver mpc52xx_fec_driver = {
2892  /* Module                                                                   */
2893  /* ======================================================================== */
2894  
2895 -static struct platform_driver * const drivers[] = {
2896 -#ifdef CONFIG_FEC_MPC52xx_MDIO
2897 -       &mpc52xx_fec_mdio_driver,
2898 -#endif
2899 -       &mpc52xx_fec_driver,
2900 -};
2901 -
2902  static int __init
2903  mpc52xx_fec_init(void)
2904  {
2905 -       return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2906 +#ifdef CONFIG_FEC_MPC52xx_MDIO
2907 +       int ret;
2908 +       ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
2909 +       if (ret) {
2910 +               pr_err("failed to register mdio driver\n");
2911 +               return ret;
2912 +       }
2913 +#endif
2914 +       return platform_driver_register(&mpc52xx_fec_driver);
2915  }
2916  
2917  static void __exit
2918  mpc52xx_fec_exit(void)
2919  {
2920 -       platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2921 +       platform_driver_unregister(&mpc52xx_fec_driver);
2922 +#ifdef CONFIG_FEC_MPC52xx_MDIO
2923 +       platform_driver_unregister(&mpc52xx_fec_mdio_driver);
2924 +#endif
2925  }
2926  
2927  
2928 diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2929 index b5497e3..1e647be 100644
2930 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2931 +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
2932 @@ -22,6 +22,7 @@
2933  
2934  struct mpc52xx_fec_mdio_priv {
2935         struct mpc52xx_fec __iomem *regs;
2936 +       int mdio_irqs[PHY_MAX_ADDR];
2937  };
2938  
2939  static