linux-karo: Add initial version of linux-karo 4.9.11 based on NXP kernel tree
authorFlorian Boor <florian@kernelconcepts.de>
Sat, 2 Jun 2018 22:34:15 +0000 (00:34 +0200)
committerFlorian Boor <florian@kernelconcepts.de>
Sat, 2 Jun 2018 22:34:15 +0000 (00:34 +0200)
56 files changed:
recipes-kernel/linux/linux-imx.inc [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/0001-DTS-Add-Stratec-TX6-devicetree.patch [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/add-support-for-edt-m12-touch.patch [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/edt-ft5x06-fixes.patch [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/edt-ft5x06-make-distinction-between.patch [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/ethernet-update-driver.patch [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/gpu-viv_gcc6_indent_warning_error_fix.patch [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/reset-phy-minimal.patch [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/reset_ethernet_phy_whenever_the_enet_out_clock_is_being_enabled.patch [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/set-enet_ref_clk-to-50-mhz.patch [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/tx6/defconfig [new file with mode: 0644]
recipes-kernel/linux/linux-karo-4.9.11/tx6/imx6qdl-tx6-gpio.h [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/tx6/imx6qdl-tx6.dtsi [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis-lvds/imx6dl-tx6-emmc-aclavis-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis-lvds/imx6dl-tx6-nand-aclavis-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis-lvds/imx6q-tx6-emmc-aclavis-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis-lvds/imx6q-tx6-nand-aclavis-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis-lvds/imx6qp-tx6-emmc-aclavis-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis-lvds/imx6s-tx6-emmc-aclavis-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis-lvds/imx6s-tx6-nand-aclavis-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis-lvds/txbase-aclavis-lvds.dtsi [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis/imx6dl-tx6-emmc-aclavis.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis/imx6dl-tx6-nand-aclavis.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis/imx6q-tx6-emmc-aclavis.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis/imx6q-tx6-nand-aclavis.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis/imx6qp-tx6-emmc-aclavis.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis/imx6s-tx6-emmc-aclavis.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis/imx6s-tx6-nand-aclavis.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/aclavis/txbase-aclavis.dtsi [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/evalkit/imx6dl-tx6-emmc-evalkit.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/evalkit/imx6dl-tx6-nand-evalkit.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/evalkit/imx6q-tx6-emmc-evalkit.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/evalkit/imx6q-tx6-nand-evalkit.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/evalkit/imx6qp-tx6-emmc-evalkit.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/evalkit/imx6s-tx6-emmc-evalkit.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/evalkit/imx6s-tx6-nand-evalkit.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/evalkit/txbase-evalkit.dtsi [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7-lvds/imx6dl-tx6-emmc-mb7-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7-lvds/imx6dl-tx6-nand-mb7-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7-lvds/imx6q-tx6-emmc-mb7-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7-lvds/imx6q-tx6-nand-mb7-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7-lvds/imx6qp-tx6-emmc-mb7-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7-lvds/imx6s-tx6-emmc-mb7-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7-lvds/imx6s-tx6-nand-mb7-lvds.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7-lvds/txbase-mb7-lvds.dtsi [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7/imx6dl-tx6-emmc-mb7.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7/imx6dl-tx6-nand-mb7.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7/imx6q-tx6-emmc-mb7.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7/imx6q-tx6-nand-mb7.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7/imx6qp-tx6-emmc-mb7.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7/imx6s-tx6-emmc-mb7.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7/imx6s-tx6-nand-mb7.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/mb7/txbase-mb7.dtsi [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/myboard/imx6s-tx6-emmc-myboard.dts [new file with mode: 0755]
recipes-kernel/linux/linux-karo-4.9.11/txbase/myboard/txbase-myboard.dtsi [new file with mode: 0755]
recipes-kernel/linux/linux-karo_4.9.11.bb [new file with mode: 0644]

diff --git a/recipes-kernel/linux/linux-imx.inc b/recipes-kernel/linux/linux-imx.inc
new file mode 100644 (file)
index 0000000..9a8a035
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (C) 2012, 2015 O.S. Systems Software LTDA.
+# Released under the MIT license (see COPYING.MIT for the terms)
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=d7810fab7487fb0aad327b76f1be7cd7"
+
+inherit kernel fsl-kernel-localversion fsl-vivante-kernel-driver-handler
+
+# Put a local version until we have a true SRCREV to point to
+LOCALVERSION ?= ""
+SCMVERSION ?= "y"
+SRCBRANCH ?= ""
+
+SRC_URI = "git://source.codeaurora.org/external/imx/linux-imx;protocol=https;branch=${SRCBRANCH} \
+           file://defconfig \
+"
+
+S = "${WORKDIR}/git"
+
+# We need to pass it as param since kernel might support more than one
+# machine, with different entry points
+KERNEL_EXTRA_ARGS += "LOADADDR=${UBOOT_ENTRYPOINT}"
diff --git a/recipes-kernel/linux/linux-karo-4.9.11/0001-DTS-Add-Stratec-TX6-devicetree.patch b/recipes-kernel/linux/linux-karo-4.9.11/0001-DTS-Add-Stratec-TX6-devicetree.patch
new file mode 100644 (file)
index 0000000..f30ba1d
--- /dev/null
@@ -0,0 +1,160 @@
+From 52902ee8a80e2aec3135e47a9189811f5d209c3c Mon Sep 17 00:00:00 2001
+From: Florian Boor <florian.boor@kernelconcepts.de>
+Date: Tue, 6 Mar 2018 13:59:22 +0100
+Subject: [PATCH] DTS: Add Stratec TX6 devicetree
+
+---
+ arch/arm/boot/dts/Makefile                        |   1 +
+ arch/arm/boot/dts/imx6dl-tx6-emmc-mb7-stratec.dts | 128 ++++++++++++++++++++++
+ 2 files changed, 129 insertions(+)
+ create mode 100644 arch/arm/boot/dts/imx6dl-tx6-emmc-mb7-stratec.dts
+
+diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
+index f151ec7..e03cd14 100644
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -400,6 +400,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
+       imx6dl-tx6u-8033-mb7.dtb \
+       imx6dl-tx6u-811x.dtb \
+       imx6dl-tx6u-81xx-mb7.dtb \
++      imx6dl-tx6-emmc-mb7-stratec.dtb \
+       imx6dl-udoo.dtb \
+       imx6dl-wandboard.dtb \
+       imx6dl-wandboard-revb1.dtb \
+diff --git a/arch/arm/boot/dts/imx6dl-tx6-emmc-mb7-stratec.dts b/arch/arm/boot/dts/imx6dl-tx6-emmc-mb7-stratec.dts
+new file mode 100644
+index 0000000..0270e6c
+--- /dev/null
++++ b/arch/arm/boot/dts/imx6dl-tx6-emmc-mb7-stratec.dts
+@@ -0,0 +1,128 @@
++/*
++ * Copyright 2016 Lothar Waßmann <LW@KARO-electronics.de>
++ * Copyright 2016 Oliver Wendt <OW@KARO-electronics.de>
++ * Copyright 2016 Michael Vyskocil <MV@KARO-electronics.de>
++ *
++ * This file is dual-licensed: you can use it either under the terms
++ * of the GPL or the X11 license, at your option. Note that this dual
++ * licensing only applies to this file, and not this project as a
++ * whole.
++ *
++ *  a) This file is free software; you can redistribute it and/or
++ *     modify it under the terms of the GNU General Public License
++ *     version 2 as published by the Free Software Foundation.
++ *
++ *     This file is distributed in the hope that it will be useful,
++ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *     GNU General Public License for more details.
++ *
++ * Or, alternatively,
++ *
++ *  b) Permission is hereby granted, free of charge, to any person
++ *     obtaining a copy of this software and associated documentation
++ *     files (the "Software"), to deal in the Software without
++ *     restriction, including without limitation the rights to use,
++ *     copy, modify, merge, publish, distribute, sublicense, and/or
++ *     sell copies of the Software, and to permit persons to whom the
++ *     Software is furnished to do so, subject to the following
++ *     conditions:
++ *
++ *     The above copyright notice and this permission notice shall be
++ *     included in all copies or substantial portions of the Software.
++ *
++ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ *     OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/*
++ * Please be reminded that - in general - DTSI file(s) are include files that
++ * are for more than one purpose (usually bound to a SoC) and as such shouldn't
++ * be edited. For end-user products it should be the DTS file(s) that choose
++ * which devices and pins are active and setup.
++ *
++ * The setup of DT files for Ka-Ro TX COM Modules under Yocto follow a
++ * different, non-standard, implementation to the mainline Linux kernel.
++ */
++
++/*
++ * Please see the imx6*-tx6-gpio.h && imx6*-tx6.dtsi in "linux-karo-x.y.ab/tx6"
++ * for the definitions like TX_NAND or TX_EMMC, etc.
++ */
++
++/dts-v1/;
++
++#include "imx6dl.dtsi"
++#include "imx6qdl-tx6.dtsi"
++#include "txbase-mb7.dtsi"
++
++/ {
++      model = "Ka-Ro electronics TX6U Module (eMMC & RGB)";
++      compatible = "karo,imx6dl-tx6dl", "fsl,imx6dl";
++};
++
++/*
++ * Every VOU is multi parted: backlight, device, framebuffer, pwm
++ * Thus all parts have to be enabled before a display can be used on the
++ * specific port.
++ */
++
++&backlight0 {
++      status = "okay";
++};
++
++/*
++ * NVM setup
++ */
++
++TX_EMMC {
++      status = "okay";
++};
++
++/*
++ * Remaining device references in alphabetical order
++ */
++
++TX_LCD {
++      status = "okay";
++      /*
++       * Users should add their custom timing here, therewith
++       * overwriting the default/pre-defined timings. For more see:
++       *
++       * txbase/{BASEBOARD}-{DISPLAY-IF}/txbase-{BASEBOARD}-{DISPLAY-IF}.dtsi
++       *
++       * e.g.:
++       * txbase/mb7-lvds/txbase-aclavis.dtsi
++       */
++};
++
++TX_LCD_FB {
++      status = "okay";
++};
++
++TX_I2C {
++      status = "okay";
++
++      rtc@6f {
++              compatible = "microchip,mcp7940x";
++              reg = <0x6f>;
++      };
++
++      polytouch: edt-ft5x06@38 {
++              compatible = "edt,edt-ft5x06";
++              reg = <0x38>;
++              pinctrl-names = "default";
++              pinctrl-0 = <&pinctrl_edt_ft5x06>;
++              interrupts-extended = <TX_GPIO_PIN148 0>;
++              reset-gpios = <TX_GPIO_PIN149 GPIO_ACTIVE_LOW>;
++              wake-gpios = <TX_GPIO_PIN152 GPIO_ACTIVE_HIGH>;
++              linux,wakeup;
++      };
++};
++
+-- 
+2.1.4
+
diff --git a/recipes-kernel/linux/linux-karo-4.9.11/add-support-for-edt-m12-touch.patch b/recipes-kernel/linux/linux-karo-4.9.11/add-support-for-edt-m12-touch.patch
new file mode 100644 (file)
index 0000000..791eb55
--- /dev/null
@@ -0,0 +1,182 @@
+From patchwork Thu Oct  5 15:35:08 2017
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [3/3] input: edt-ft5x06: implement support for the EDT-M12 series.
+From: Simon Budig <simon.budig@kernelconcepts.de>
+X-Patchwork-Id: 9987471
+Message-Id: <20171005153508.32127-3-simon.budig@kernelconcepts.de>
+To: linux-input@vger.kernel.org
+Cc: dmitry.torokhov@gmail.com, Simon Budig <simon.budig@kernelconcepts.de>
+Date: Thu,  5 Oct 2017 17:35:08 +0200
+
+From: Simon Budig <simon.budig@kernelconcepts.de>
+
+This adds support for the EDT M12 series of touchscreens.
+
+Signed-off-by: Simon Budig <simon.budig@kernelconcepts.de>
+---
+ drivers/input/touchscreen/edt-ft5x06.c | 48 +++++++++++++++++++++++++++-------
+ 1 file changed, 38 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
+index d128445..8f5c2c0 100644
+--- a/drivers/input/touchscreen/edt-ft5x06.c
++++ b/drivers/input/touchscreen/edt-ft5x06.c
+@@ -72,6 +72,7 @@
+ enum edt_ver {
+       EDT_M06,
+       EDT_M09,
++      EDT_M12,
+       GENERIC_FT,
+ };
+@@ -188,6 +189,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
+               break;
+       case EDT_M09:
++      case EDT_M12:
+       case GENERIC_FT:
+               cmd = 0x0;
+               offset = 3;
+@@ -211,7 +213,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
+               goto out;
+       }
+-      /* M09 does not send header or CRC */
++      /* M09/M12 does not send header or CRC */
+       if (tsdata->version == EDT_M06) {
+               if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
+                       rdbuf[2] != datalen) {
+@@ -274,6 +276,7 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
+               return edt_ft5x06_ts_readwrite(tsdata->client, 4,
+                                       wrbuf, 0, NULL);
+       case EDT_M09:
++      case EDT_M12:
+       case GENERIC_FT:
+               wrbuf[0] = addr;
+               wrbuf[1] = value;
+@@ -313,6 +316,7 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
+               break;
+       case EDT_M09:
++      case EDT_M12:
+       case GENERIC_FT:
+               wrbuf[0] = addr;
+               error = edt_ft5x06_ts_readwrite(tsdata->client, 1,
+@@ -377,6 +381,7 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
+               break;
+       case EDT_M09:
++      case EDT_M12:
+       case GENERIC_FT:
+               addr = attr->addr_m09;
+               break;
+@@ -447,6 +452,7 @@ static ssize_t edt_ft5x06_setting_store(struct device *dev,
+               break;
+       case EDT_M09:
++      case EDT_M12:
+       case GENERIC_FT:
+               addr = attr->addr_m09;
+               break;
+@@ -472,14 +478,18 @@ static ssize_t edt_ft5x06_setting_store(struct device *dev,
+       return error ?: count;
+ }
++/* m06, m09: range 0-31, m12: range 0-5 */
+ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
+               M09_REGISTER_GAIN, 0, 31);
++/* m06, m09: range 0-31, m12: range 0-16 */
+ static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
+               M09_REGISTER_OFFSET, 0, 31);
++/* m06: range 20 to 80, m09: range 0 to 30, m12: range 1 to 255... */
+ static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
+-              M09_REGISTER_THRESHOLD, 0, 80);
++              M09_REGISTER_THRESHOLD, 0, 255);
++/* m06: range 3 to 14, m12: (0x64: 100Hz) */
+ static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
+-              NO_REGISTER, 3, 14);
++              NO_REGISTER, 0, 255);
+ static struct attribute *edt_ft5x06_attrs[] = {
+       &edt_ft5x06_attr_gain.dattr.attr,
+@@ -551,7 +561,7 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
+       return error;
+ m09_out:
+-      dev_err(&client->dev, "No factory mode support for M09/GENERIC_FT\n");
++      dev_err(&client->dev, "No factory mode support for M09/M12/GENERIC_FT\n");
+       return -EINVAL;
+ }
+@@ -776,13 +786,14 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
+        * to have garbage in there
+        */
+       memset(rdbuf, 0, sizeof(rdbuf));
+-      error = edt_ft5x06_ts_readwrite(client, 1, "\xbb",
++      error = edt_ft5x06_ts_readwrite(client, 1, "\xBB",
+                                       EDT_NAME_LEN - 1, rdbuf);
+       if (error)
+               return error;
+-      /* if we find something consistent, stay with that assumption
+-       * at least M09 won't send 3 bytes here
++      /* probe content for something consistent.
++       * M06 starts with a response byte, M12 gives the data directly.
++       * M09/Generic does not provide model number information.
+        */
+       if (!(strncasecmp(rdbuf + 1, "EP0", 3))) {
+               tsdata->version = EDT_M06;
+@@ -798,8 +809,22 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
+                       *p++ = '\0';
+               strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
+               strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
++      } else if (!(strncasecmp(rdbuf, "EP0", 3))) {
++              tsdata->version = EDT_M12;
++
++              /* remove last '$' end marker */
++              rdbuf[EDT_NAME_LEN - 2] = '\0';
++              if (rdbuf[EDT_NAME_LEN - 3] == '$')
++                      rdbuf[EDT_NAME_LEN - 3] = '\0';
++
++              /* look for Model/Version separator */
++              p = strchr(rdbuf, '*');
++              if (p)
++                      *p++ = '\0';
++              strlcpy(model_name, rdbuf, EDT_NAME_LEN);
++              strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+       } else {
+-              /* If it is not an EDT M06 touchscreen, then the model
++              /* if it is not an EDT M06/M12 touchscreen then the model
+                * detection is a bit hairy. The different ft5x06
+                * firmares around don't reliably implement the
+                * identification registers. Well, we'll take a shot.
+@@ -894,7 +919,8 @@ edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
+               tsdata->report_rate = edt_ft5x06_register_read(tsdata,
+                                               reg_addr->reg_report_rate);
+       if (tsdata->version == EDT_M06 ||
+-          tsdata->version == EDT_M09) {
++          tsdata->version == EDT_M09 ||
++          tsdata->version == EDT_M12) {
+               tsdata->num_x = edt_ft5x06_register_read(tsdata,
+                                                        reg_addr->reg_num_x);
+               tsdata->num_y = edt_ft5x06_register_read(tsdata,
+@@ -921,6 +947,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
+               break;
+       case EDT_M09:
++      case EDT_M12:
+               reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
+               reg_addr->reg_report_rate = NO_REGISTER;
+               reg_addr->reg_gain = M09_REGISTER_GAIN;
+@@ -1029,7 +1056,8 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
+       input->dev.parent = &client->dev;
+       if (tsdata->version == EDT_M06 ||
+-          tsdata->version == EDT_M09) {
++          tsdata->version == EDT_M09 ||
++          tsdata->version == EDT_M12) {
+               input_set_abs_params(input, ABS_MT_POSITION_X,
+                                    0, tsdata->num_x * 64 - 1, 0, 0);
+               input_set_abs_params(input, ABS_MT_POSITION_Y,
diff --git a/recipes-kernel/linux/linux-karo-4.9.11/edt-ft5x06-fixes.patch b/recipes-kernel/linux/linux-karo-4.9.11/edt-ft5x06-fixes.patch
new file mode 100644 (file)
index 0000000..3e9cb95
--- /dev/null
@@ -0,0 +1,95 @@
+From aa3d4409b664813ceb86a24bd09458cdd29cbb8a Mon Sep 17 00:00:00 2001
+From: Martin Kepplinger <martin.kepplinger@ginzinger.com>
+Date: Mon, 22 May 2017 17:19:45 -0700
+Subject: [PATCH] Input: edt-ft5x06 - increase allowed data range for threshold
+ parameter
+
+The datasheet and application note does not mention an allowed range for
+the M09_REGISTER_THRESHOLD parameter. One of our customers needs to set
+lower values than 20 and they seem to work just fine on EDT EP0xx0M09 with
+T5x06 touch.
+
+So, lacking a known lower limit, we increase the range for thresholds,
+and set the lower limit to 0. The documentation is updated accordingly.
+
+Signed-off-by: Schoefegger Stefan <stefan.schoefegger@ginzinger.com>
+Signed-off-by: Manfred Schlaegl <manfred.schlaegl@ginzinger.com>
+Signed-off-by: Martin Kepplinger <martin.kepplinger@ginzinger.com>
+Acked-by: Rob Herring <robh@kernel.org>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+---
+ Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt | 2 +-
+ drivers/input/touchscreen/edt-ft5x06.c                             | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+index 6db22103e2dd5..025cf8c9324ac 100644
+--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
++++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+@@ -36,7 +36,7 @@ Optional properties:
+                 control gpios
+  - threshold:   allows setting the "click"-threshold in the range
+-                from 20 to 80.
++                from 0 to 80.
+  - gain:        allows setting the sensitivity in the range from 0 to
+                 31. Note that lower values indicate higher
+diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
+index 8cf8d8d5d4ef4..f872817e81e46 100644
+--- a/drivers/input/touchscreen/edt-ft5x06.c
++++ b/drivers/input/touchscreen/edt-ft5x06.c
+@@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
+ static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
+               M09_REGISTER_OFFSET, 0, 31);
+ static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
+-              M09_REGISTER_THRESHOLD, 20, 80);
++              M09_REGISTER_THRESHOLD, 0, 80);
+ static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
+               NO_REGISTER, 3, 14);
+From 47014752829ed1b52fa6f876b381774ae530503c Mon Sep 17 00:00:00 2001
+From: Luca Ceresoli <luca@lucaceresoli.net>
+Date: Thu, 7 Sep 2017 14:28:28 -0700
+Subject: [PATCH] Input: edt-ft5x06 - fix access to non-existing register
+
+reg_addr->reg_report_rate is supposed to exist in M06, not M09.
+
+The driver is written to skip avoids access to non-existing registers
+when the register address is NO_REGISTER (0xff). But
+reg_addr->reg_report_rate is initialized to 0x00 by devm_kzalloc() (in
+edt_ft5x06_ts_probe()) and not changed thereafter. So the checks do
+not work and an access to register 0x00 is done.
+
+Fix by setting reg_addr->reg_report_rate to NO_REGISTER.
+
+Also fix the only place where reg_report_rate is checked against zero
+instead of NO_REGISTER.
+
+Signed-off-by: Luca Ceresoli <luca@lucaceresoli.net>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+---
+ drivers/input/touchscreen/edt-ft5x06.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
+index f872817e81e46..5bf63f76dddac 100644
+--- a/drivers/input/touchscreen/edt-ft5x06.c
++++ b/drivers/input/touchscreen/edt-ft5x06.c
+@@ -593,7 +593,7 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
+                                 tsdata->gain);
+       edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
+                                 tsdata->offset);
+-      if (reg_addr->reg_report_rate)
++      if (reg_addr->reg_report_rate != NO_REGISTER)
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
+                                 tsdata->report_rate);
+@@ -874,6 +874,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
+       case M09:
+               reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
++              reg_addr->reg_report_rate = NO_REGISTER;
+               reg_addr->reg_gain = M09_REGISTER_GAIN;
+               reg_addr->reg_offset = M09_REGISTER_OFFSET;
+               reg_addr->reg_num_x = M09_REGISTER_NUM_X;
diff --git a/recipes-kernel/linux/linux-karo-4.9.11/edt-ft5x06-make-distinction-between.patch b/recipes-kernel/linux/linux-karo-4.9.11/edt-ft5x06-make-distinction-between.patch
new file mode 100644 (file)
index 0000000..2cc16b8
--- /dev/null
@@ -0,0 +1,296 @@
+From 169110c3645621497ca0b1d0d7bc80dcea8ff9d4 Mon Sep 17 00:00:00 2001
+From: Simon Budig <simon.budig@kernelconcepts.de>
+Date: Mon, 9 Oct 2017 20:58:11 -0700
+Subject: [PATCH] Input: edt-ft5x06 - make distinction between m06/m09/generic
+ more clear
+
+Since the driver also is useful for some non-EDT touchscreens based on
+the focaltec chips introduce the concept of a "generic" focaltec based
+touch.
+
+Use a better heuristics for model detection and be more specific in the
+source.
+
+Signed-off-by: Simon Budig <simon.budig@kernelconcepts.de>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+---
+ drivers/input/touchscreen/edt-ft5x06.c | 122 +++++++++++++++++++++++++--------
+ 1 file changed, 93 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
+index f879d14f7ffcc..56bfd0c14c91a 100644
+--- a/drivers/input/touchscreen/edt-ft5x06.c
++++ b/drivers/input/touchscreen/edt-ft5x06.c
+@@ -70,8 +70,9 @@
+ #define EDT_RAW_DATA_DELAY            1000 /* usec */
+ enum edt_ver {
+-      M06,
+-      M09,
++      EDT_M06,
++      EDT_M09,
++      GENERIC_FT,
+ };
+ struct edt_reg_addr {
+@@ -179,14 +180,15 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
+       int error;
+       switch (tsdata->version) {
+-      case M06:
++      case EDT_M06:
+               cmd = 0xf9; /* tell the controller to send touch data */
+               offset = 5; /* where the actual touch data starts */
+               tplen = 4;  /* data comes in so called frames */
+               crclen = 1; /* length of the crc data */
+               break;
+-      case M09:
++      case EDT_M09:
++      case GENERIC_FT:
+               cmd = 0x0;
+               offset = 3;
+               tplen = 6;
+@@ -210,7 +212,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
+       }
+       /* M09 does not send header or CRC */
+-      if (tsdata->version == M06) {
++      if (tsdata->version == EDT_M06) {
+               if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
+                       rdbuf[2] != datalen) {
+                       dev_err_ratelimited(dev,
+@@ -233,7 +235,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
+                       continue;
+               /* M06 sometimes sends bogus coordinates in TOUCH_DOWN */
+-              if (tsdata->version == M06 && type == TOUCH_EVENT_DOWN)
++              if (tsdata->version == EDT_M06 && type == TOUCH_EVENT_DOWN)
+                       continue;
+               x = ((buf[0] << 8) | buf[1]) & 0x0fff;
+@@ -264,14 +266,15 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
+       u8 wrbuf[4];
+       switch (tsdata->version) {
+-      case M06:
++      case EDT_M06:
+               wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
+               wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
+               wrbuf[2] = value;
+               wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
+               return edt_ft5x06_ts_readwrite(tsdata->client, 4,
+                                       wrbuf, 0, NULL);
+-      case M09:
++      case EDT_M09:
++      case GENERIC_FT:
+               wrbuf[0] = addr;
+               wrbuf[1] = value;
+@@ -290,7 +293,7 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
+       int error;
+       switch (tsdata->version) {
+-      case M06:
++      case EDT_M06:
+               wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
+               wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
+               wrbuf[1] |= tsdata->factory_mode ? 0x80 : 0x40;
+@@ -309,7 +312,8 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
+               }
+               break;
+-      case M09:
++      case EDT_M09:
++      case GENERIC_FT:
+               wrbuf[0] = addr;
+               error = edt_ft5x06_ts_readwrite(tsdata->client, 1,
+                                               wrbuf, 1, rdbuf);
+@@ -368,11 +372,12 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
+       }
+       switch (tsdata->version) {
+-      case M06:
++      case EDT_M06:
+               addr = attr->addr_m06;
+               break;
+-      case M09:
++      case EDT_M09:
++      case GENERIC_FT:
+               addr = attr->addr_m09;
+               break;
+@@ -437,11 +442,12 @@ static ssize_t edt_ft5x06_setting_store(struct device *dev,
+       }
+       switch (tsdata->version) {
+-      case M06:
++      case EDT_M06:
+               addr = attr->addr_m06;
+               break;
+-      case M09:
++      case EDT_M09:
++      case GENERIC_FT:
+               addr = attr->addr_m09;
+               break;
+@@ -508,7 +514,7 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
+       }
+       /* mode register is 0x3c when in the work mode */
+-      if (tsdata->version == M09)
++      if (tsdata->version != EDT_M06)
+               goto m09_out;
+       error = edt_ft5x06_register_write(tsdata, WORK_REGISTER_OPMODE, 0x03);
+@@ -545,7 +551,7 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
+       return error;
+ m09_out:
+-      dev_err(&client->dev, "No factory mode support for M09\n");
++      dev_err(&client->dev, "No factory mode support for M09/GENERIC_FT\n");
+       return -EINVAL;
+ }
+@@ -779,7 +785,7 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
+        * at least M09 won't send 3 bytes here
+        */
+       if (!(strncasecmp(rdbuf + 1, "EP0", 3))) {
+-              tsdata->version = M06;
++              tsdata->version = EDT_M06;
+               /* remove last '$' end marker */
+               rdbuf[EDT_NAME_LEN - 1] = '\0';
+@@ -793,8 +799,16 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
+               strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
+               strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+       } else {
+-              /* since there are only two versions around (M06, M09) */
+-              tsdata->version = M09;
++              /* If it is not an EDT M06 touchscreen, then the model
++               * detection is a bit hairy. The different ft5x06
++               * firmwares around don't reliably implement the
++               * identification registers. Well, we'll take a shot.
++               *
++               * The main difference between generic focaltech based
++               * touches and EDT M09 is that we know how to retrieve
++               * the max coordinates for the latter.
++               */
++              tsdata->version = GENERIC_FT;
+               error = edt_ft5x06_ts_readwrite(client, 1, "\xA6",
+                                               2, rdbuf);
+@@ -808,8 +822,34 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
+               if (error)
+                       return error;
+-              snprintf(model_name, EDT_NAME_LEN, "EP0%i%i0M09",
+-                      rdbuf[0] >> 4, rdbuf[0] & 0x0F);
++              /* This "model identification" is not exact. Unfortunately
++               * not all firmwares for the ft5x06 put useful values in
++               * the identification registers.
++               */
++              switch (rdbuf[0]) {
++              case 0x35:   /* EDT EP0350M09 */
++              case 0x43:   /* EDT EP0430M09 */
++              case 0x50:   /* EDT EP0500M09 */
++              case 0x57:   /* EDT EP0570M09 */
++              case 0x70:   /* EDT EP0700M09 */
++                      tsdata->version = EDT_M09;
++                      snprintf(model_name, EDT_NAME_LEN, "EP0%i%i0M09",
++                              rdbuf[0] >> 4, rdbuf[0] & 0x0F);
++                      break;
++              case 0xa1:   /* EDT EP1010ML00 */
++                      tsdata->version = EDT_M09;
++                      snprintf(model_name, EDT_NAME_LEN, "EP%i%i0ML00",
++                              rdbuf[0] >> 4, rdbuf[0] & 0x0F);
++                      break;
++              case 0x5a:   /* Solomon Goldentek Display */
++                      snprintf(model_name, EDT_NAME_LEN, "GKTW50SCED1R0");
++                      break;
++              default:
++                      snprintf(model_name, EDT_NAME_LEN,
++                               "generic ft5x06 (%02x)",
++                               rdbuf[0]);
++                      break;
++              }
+       }
+       return 0;
+@@ -853,8 +893,16 @@ edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
+       if (reg_addr->reg_report_rate != NO_REGISTER)
+               tsdata->report_rate = edt_ft5x06_register_read(tsdata,
+                                               reg_addr->reg_report_rate);
+-      tsdata->num_x = edt_ft5x06_register_read(tsdata, reg_addr->reg_num_x);
+-      tsdata->num_y = edt_ft5x06_register_read(tsdata, reg_addr->reg_num_y);
++      if (tsdata->version == EDT_M06 ||
++          tsdata->version == EDT_M09) {
++              tsdata->num_x = edt_ft5x06_register_read(tsdata,
++                                                       reg_addr->reg_num_x);
++              tsdata->num_y = edt_ft5x06_register_read(tsdata,
++                                                       reg_addr->reg_num_y);
++      } else {
++              tsdata->num_x = -1;
++              tsdata->num_y = -1;
++      }
+ }
+ static void
+@@ -863,7 +911,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
+       struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
+       switch (tsdata->version) {
+-      case M06:
++      case EDT_M06:
+               reg_addr->reg_threshold = WORK_REGISTER_THRESHOLD;
+               reg_addr->reg_report_rate = WORK_REGISTER_REPORT_RATE;
+               reg_addr->reg_gain = WORK_REGISTER_GAIN;
+@@ -872,7 +920,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
+               reg_addr->reg_num_y = WORK_REGISTER_NUM_Y;
+               break;
+-      case M09:
++      case EDT_M09:
+               reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
+               reg_addr->reg_report_rate = NO_REGISTER;
+               reg_addr->reg_gain = M09_REGISTER_GAIN;
+@@ -880,6 +928,13 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
+               reg_addr->reg_num_x = M09_REGISTER_NUM_X;
+               reg_addr->reg_num_y = M09_REGISTER_NUM_Y;
+               break;
++
++      case GENERIC_FT:
++              /* this is guesswork */
++              reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
++              reg_addr->reg_gain = M09_REGISTER_GAIN;
++              reg_addr->reg_offset = M09_REGISTER_OFFSET;
++              break;
+       }
+ }
+@@ -969,10 +1024,19 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
+       input->id.bustype = BUS_I2C;
+       input->dev.parent = &client->dev;
+-      input_set_abs_params(input, ABS_MT_POSITION_X,
+-                           0, tsdata->num_x * 64 - 1, 0, 0);
+-      input_set_abs_params(input, ABS_MT_POSITION_Y,
+-                           0, tsdata->num_y * 64 - 1, 0, 0);
++      if (tsdata->version == EDT_M06 ||
++          tsdata->version == EDT_M09) {
++              input_set_abs_params(input, ABS_MT_POSITION_X,
++                                   0, tsdata->num_x * 64 - 1, 0, 0);
++              input_set_abs_params(input, ABS_MT_POSITION_Y,
++                                   0, tsdata->num_y * 64 - 1, 0, 0);
++      } else {
++              /* Unknown maximum values. Specify via devicetree */
++              input_set_abs_params(input, ABS_MT_POSITION_X,
++                                   0, 65535, 0, 0);
++              input_set_abs_params(input, ABS_MT_POSITION_Y,
++                                   0, 65535, 0, 0);
++      }
+       touchscreen_parse_properties(input, true, &tsdata->prop);
diff --git a/recipes-kernel/linux/linux-karo-4.9.11/ethernet-update-driver.patch b/recipes-kernel/linux/linux-karo-4.9.11/ethernet-update-driver.patch
new file mode 100644 (file)
index 0000000..854443c
--- /dev/null
@@ -0,0 +1,17985 @@
+diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
+index 2204c57..25e3425 100644
+--- a/drivers/net/ethernet/freescale/Kconfig
++++ b/drivers/net/ethernet/freescale/Kconfig
+@@ -7,10 +7,11 @@ config NET_VENDOR_FREESCALE
+       default y
+       depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
+                  M523x || M527x || M5272 || M528x || M520x || M532x || \
+-                 ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
+-                 ARCH_LAYERSCAPE
++                 ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
+       ---help---
+-        If you have a network (Ethernet) card belonging to this class, say Y.
++        If you have a network (Ethernet) card belonging to this class, say Y
++        and read the Ethernet-HOWTO, available from
++        <http://www.tldp.org/docs.html#howto>.
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+@@ -22,8 +23,8 @@ if NET_VENDOR_FREESCALE
+ config FEC
+       tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+       depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
+-                 ARM || ARM64)
+-      default y
++                 ARCH_MXC || SOC_IMX28)
++      default ARCH_MXC || SOC_IMX28 if ARM
+       select PHYLIB
+       select PTP_1588_CLOCK
+       ---help---
+@@ -54,7 +55,6 @@ config FEC_MPC52xx_MDIO
+         If compiled as module, it will be called fec_mpc52xx_phy.
+ source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
+-source "drivers/net/ethernet/freescale/fman/Kconfig"
+ config FSL_PQ_MDIO
+       tristate "Freescale PQ MDIO"
+@@ -85,12 +85,12 @@ config UGETH_TX_ON_DEMAND
+ config GIANFAR
+       tristate "Gianfar Ethernet"
++      depends on FSL_SOC
+       select FSL_PQ_MDIO
+       select PHYLIB
+       select CRC32
+       ---help---
+         This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
+-        and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
+-        on the 8540.
++        and MPC86xx family of chips, and the FEC on the 8540.
+ endif # NET_VENDOR_FREESCALE
+diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
+index 7f022dd..71debd1 100644
+--- a/drivers/net/ethernet/freescale/Makefile
++++ b/drivers/net/ethernet/freescale/Makefile
+@@ -3,10 +3,7 @@
+ #
+ obj-$(CONFIG_FEC) += fec.o
+-fec-objs :=fec_main.o fec_fixup.o fec_ptp.o
+-CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
+-CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
+-
++fec-objs :=fec_main.o fec_ptp.o
+ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
+ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
+       obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
+@@ -20,5 +17,3 @@ gianfar_driver-objs := gianfar.o \
+               gianfar_ethtool.o
+ obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
+-
+-obj-$(CONFIG_FSL_FMAN) += fman/
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index 1d7b3cc..ecdc711 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -20,8 +20,8 @@
+ #include <linux/timecounter.h>
+ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+-    defined(CONFIG_ARM64)
++    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
++    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+ /*
+  *    Just figures, Motorola would have to change the offsets for
+  *    registers in the same peripheral device on different models
+@@ -192,45 +192,28 @@
+ /*
+  *    Define the buffer descriptor structure.
+- *
+- *    Evidently, ARM SoCs have the FEC block generated in a
+- *    little endian mode so adjust endianness accordingly.
+  */
+-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+-#define fec32_to_cpu le32_to_cpu
+-#define fec16_to_cpu le16_to_cpu
+-#define cpu_to_fec32 cpu_to_le32
+-#define cpu_to_fec16 cpu_to_le16
+-#define __fec32 __le32
+-#define __fec16 __le16
+-
++#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+ struct bufdesc {
+-      __fec16 cbd_datlen;     /* Data length */
+-      __fec16 cbd_sc;         /* Control and status info */
+-      __fec32 cbd_bufaddr;    /* Buffer address */
++      unsigned short cbd_datlen;      /* Data length */
++      unsigned short cbd_sc;  /* Control and status info */
++      unsigned long cbd_bufaddr;      /* Buffer address */
+ };
+ #else
+-#define fec32_to_cpu be32_to_cpu
+-#define fec16_to_cpu be16_to_cpu
+-#define cpu_to_fec32 cpu_to_be32
+-#define cpu_to_fec16 cpu_to_be16
+-#define __fec32 __be32
+-#define __fec16 __be16
+-
+ struct bufdesc {
+-      __fec16 cbd_sc;         /* Control and status info */
+-      __fec16 cbd_datlen;     /* Data length */
+-      __fec32 cbd_bufaddr;    /* Buffer address */
++      unsigned short  cbd_sc;                 /* Control and status info */
++      unsigned short  cbd_datlen;             /* Data length */
++      unsigned long   cbd_bufaddr;            /* Buffer address */
+ };
+ #endif
+ struct bufdesc_ex {
+       struct bufdesc desc;
+-      __fec32 cbd_esc;
+-      __fec32 cbd_prot;
+-      __fec32 cbd_bdu;
+-      __fec32 ts;
+-      __fec16 res0[4];
++      unsigned long cbd_esc;
++      unsigned long cbd_prot;
++      unsigned long cbd_bdu;
++      unsigned long ts;
++      unsigned short res0[4];
+ };
+ /*
+@@ -294,7 +277,7 @@ struct bufdesc_ex {
+ /* This device has up to three irqs on some platforms */
+-#define FEC_IRQ_NUM           4
++#define FEC_IRQ_NUM           3
+ /* Maximum number of queues supported
+  * ENET with AVB IP can support up to 3 independent tx queues and rx queues.
+@@ -312,6 +295,12 @@ struct bufdesc_ex {
+ #define FEC_R_BUFF_SIZE(X)    (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
+                               (((X) == 2) ? \
+                                       FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
++#define FEC_R_DES_ACTIVE(X)   (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
++                              (((X) == 2) ? \
++                                 FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
++#define FEC_X_DES_ACTIVE(X)   (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
++                              (((X) == 2) ? \
++                                 FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
+ #define FEC_DMA_CFG(X)                (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+@@ -379,7 +368,6 @@ struct bufdesc_ex {
+ #define FEC_ENET_TS_TIMER       ((uint)0x00008000)
+ #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
+-#define FEC_NAPI_IMASK        (FEC_ENET_MII | FEC_ENET_TS_TIMER)
+ #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+ #define FEC_ENET_ETHEREN      ((uint)0x00000002)
+@@ -448,32 +436,12 @@ struct bufdesc_ex {
+ #define FEC_QUIRK_SINGLE_MDIO         (1 << 11)
+ /* Controller supports RACC register */
+ #define FEC_QUIRK_HAS_RACC            (1 << 12)
+-/* Controller supports interrupt coalesc */
+-#define FEC_QUIRK_HAS_COALESCE                (1 << 13)
+-/* Interrupt doesn't wake CPU from deep idle */
+-#define FEC_QUIRK_ERR006687           (1 << 14)
+ /*
+  * i.MX6Q/DL ENET cannot wake up system in wait mode because ENET tx & rx
+  * interrupt signal don't connect to GPC. So use pm qos to avoid cpu enter
+  * to wait mode.
+  */
+-#define FEC_QUIRK_BUG_WAITMODE                (1 << 15)
+-
+-/* PHY fixup flag define */
+-#define FEC_QUIRK_AR8031_FIXUP                (1 << 0)
+-
+-struct bufdesc_prop {
+-      int qid;
+-      /* Address of Rx and Tx buffers */
+-      struct bufdesc  *base;
+-      struct bufdesc  *last;
+-      struct bufdesc  *cur;
+-      void __iomem    *reg_desc_active;
+-      dma_addr_t      dma;
+-      unsigned short ring_size;
+-      unsigned char dsize;
+-      unsigned char dsize_log2;
+-};
++#define FEC_QUIRK_BUG_WAITMODE         (1 << 13)
+ struct fec_enet_stop_mode {
+       struct regmap *gpr;
+@@ -482,21 +450,32 @@ struct fec_enet_stop_mode {
+ };
+ struct fec_enet_priv_tx_q {
+-      struct bufdesc_prop bd;
++      int index;
+       unsigned char *tx_bounce[TX_RING_SIZE];
+       struct  sk_buff *tx_skbuff[TX_RING_SIZE];
++      dma_addr_t      bd_dma;
++      struct bufdesc  *tx_bd_base;
++      uint tx_ring_size;
++
+       unsigned short tx_stop_threshold;
+       unsigned short tx_wake_threshold;
++      struct bufdesc  *cur_tx;
+       struct bufdesc  *dirty_tx;
+       char *tso_hdrs;
+       dma_addr_t tso_hdrs_dma;
+ };
+ struct fec_enet_priv_rx_q {
+-      struct bufdesc_prop bd;
++      int index;
+       struct  sk_buff *rx_skbuff[RX_RING_SIZE];
++
++      dma_addr_t      bd_dma;
++      struct bufdesc  *rx_bd_base;
++      uint rx_ring_size;
++
++      struct bufdesc  *cur_rx;
+ };
+ /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
+@@ -536,20 +515,22 @@ struct fec_enet_private {
+       unsigned long work_ts;
+       unsigned long work_mdio;
++      unsigned short bufdesc_size;
++
+       struct  platform_device *pdev;
+       int     dev_id;
+       /* Phylib and MDIO interface */
+       struct  mii_bus *mii_bus;
++      struct  phy_device *phy_dev;
+       int     mii_timeout;
+       int     mii_bus_share;
+-      bool    active_in_suspend;
++      bool    miibus_up_failed;
+       uint    phy_speed;
+       phy_interface_t phy_interface;
+       struct device_node *phy_node;
+       int     link;
+-      bool    fixed_link;
+       int     full_duplex;
+       int     speed;
+       struct  completion mdio_done;
+@@ -559,7 +540,8 @@ struct fec_enet_private {
+       int     wol_flag;
+       int     wake_irq;
+       u32     quirks;
+-      u32     fixups;
++      int phy_reset_gpio;
++      int phy_reset_duration;
+       struct  napi_struct napi;
+       int     csum_flags;
+@@ -602,19 +584,14 @@ struct fec_enet_private {
+       int pps_enable;
+       unsigned int next_counter;
+-      u64 ethtool_stats[0];
+-
+       struct fec_enet_stop_mode gpr;
+ };
+ void fec_ptp_init(struct platform_device *pdev);
+-void fec_ptp_stop(struct platform_device *pdev);
+ void fec_ptp_start_cyclecounter(struct net_device *ndev);
+ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+ uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
+-void fec_enet_register_fixup(struct net_device *ndev);
+-int of_fec_enet_parse_fixup(struct device_node *np);
+ /****************************************************************************/
+ #endif /* FEC_H */
+diff --git a/drivers/net/ethernet/freescale/fec_fixup.c b/drivers/net/ethernet/freescale/fec_fixup.c
+deleted file mode 100644
+index 5a8497c..0000000
+--- a/drivers/net/ethernet/freescale/fec_fixup.c
++++ /dev/null
+@@ -1,74 +0,0 @@
+-/*
+- * Copyright 2017 NXP
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License
+- * as published by the Free Software Foundation; either version 2
+- * of the License, or (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- */
+-
+-#include <linux/netdevice.h>
+-#include <linux/phy.h>
+-#include "fec.h"
+-
+-#define PHY_ID_AR8031   0x004dd074
+-
+-static int ar8031_phy_fixup(struct phy_device *dev)
+-{
+-      u16 val;
+-
+-      /* Set RGMII IO voltage to 1.8V */
+-      phy_write(dev, 0x1d, 0x1f);
+-      phy_write(dev, 0x1e, 0x8);
+-
+-      /* Disable phy AR8031 SmartEEE function */
+-      phy_write(dev, 0xd, 0x3);
+-      phy_write(dev, 0xe, 0x805d);
+-      phy_write(dev, 0xd, 0x4003);
+-      val = phy_read(dev, 0xe);
+-      val &= ~(0x1 << 8);
+-      phy_write(dev, 0xe, val);
+-
+-      /* Introduce tx clock delay */
+-      phy_write(dev, 0x1d, 0x5);
+-      phy_write(dev, 0x1e, 0x100);
+-
+-      return 0;
+-}
+-
+-void fec_enet_register_fixup(struct net_device *ndev)
+-{
+-      struct fec_enet_private *fep = netdev_priv(ndev);
+-      static int registered = 0;
+-      int err;
+-
+-      if (!IS_BUILTIN(CONFIG_PHYLIB))
+-              return;
+-
+-      if (fep->fixups & FEC_QUIRK_AR8031_FIXUP) {
+-              static int ar8031_registered = 0;
+-
+-              if (ar8031_registered)
+-                      return;
+-              err = phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
+-                                      ar8031_phy_fixup);
+-              if (err)
+-                      netdev_info(ndev, "Cannot register PHY board fixup\n");
+-              registered = 1;
+-      }
+-}
+-
+-int of_fec_enet_parse_fixup(struct device_node *np)
+-{
+-      int fixups = 0;
+-
+-      if (of_get_property(np, "fsl,ar8031-phy-fixup", NULL))
+-              fixups |= FEC_QUIRK_AR8031_FIXUP;
+-
+-      return fixups;
+-}
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 41a31f2..15c06df 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -19,8 +19,6 @@
+  * Copyright (c) 2004-2006 Macq Electronique SA.
+  *
+  * Copyright (C) 2010-2014 Freescale Semiconductor, Inc.
+- *
+- * Copyright 2017 NXP
+  */
+ #include <linux/module.h>
+@@ -48,9 +46,7 @@
+ #include <linux/io.h>
+ #include <linux/irq.h>
+ #include <linux/clk.h>
+-#include <linux/clk/clk-conf.h>
+ #include <linux/platform_device.h>
+-#include <linux/mdio.h>
+ #include <linux/phy.h>
+ #include <linux/fec.h>
+ #include <linux/of.h>
+@@ -68,12 +64,12 @@
+ #include <linux/regmap.h>
+ #include <asm/cacheflush.h>
+-#include <soc/imx/cpuidle.h>
+ #include "fec.h"
+ static void set_multicast_list(struct net_device *ndev);
+ static void fec_enet_itr_coal_init(struct net_device *ndev);
++static void fec_reset_phy(struct platform_device *pdev);
+ #define DRIVER_NAME   "fec"
+@@ -87,7 +83,6 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {1, 1, 1, 1, 2, 2, 2, 2};
+ #define FEC_ENET_RAEM_V       0x8
+ #define FEC_ENET_RAFL_V       0x8
+ #define FEC_ENET_OPD_V        0xFFF0
+-#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
+ static struct platform_device_id fec_devtype[] = {
+       {
+@@ -96,10 +91,10 @@ static struct platform_device_id fec_devtype[] = {
+               .driver_data = 0,
+       }, {
+               .name = "imx25-fec",
+-              .driver_data = FEC_QUIRK_USE_GASKET,
++              .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
+       }, {
+               .name = "imx27-fec",
+-              .driver_data = 0,
++              .driver_data = FEC_QUIRK_HAS_RACC,
+       }, {
+               .name = "imx28-fec",
+               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+@@ -119,20 +114,12 @@ static struct platform_device_id fec_devtype[] = {
+                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+                               FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+                               FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+-                              FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
++                              FEC_QUIRK_HAS_RACC,
+       }, {
+               .name = "imx6ul-fec",
+               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+                               FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+-                              FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
+-                              FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+-      }, {
+-              .name = "imx8qm-fec",
+-              .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+-                              FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+-                              FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+-                              FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+-                              FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
++                              FEC_QUIRK_HAS_VLAN,
+       }, {
+               /* sentinel */
+       }
+@@ -147,7 +134,6 @@ enum imx_fec_type {
+       MVF600_FEC,
+       IMX6SX_FEC,
+       IMX6UL_FEC,
+-      IMX8QM_FEC,
+ };
+ static const struct of_device_id fec_dt_ids[] = {
+@@ -158,7 +144,6 @@ static const struct of_device_id fec_dt_ids[] = {
+       { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
+       { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
+       { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
+-      { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+       { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, fec_dt_ids);
+@@ -196,7 +181,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+ /* FEC receive acceleration */
+ #define FEC_RACC_IPDIS                (1 << 1)
+ #define FEC_RACC_PRODIS               (1 << 2)
+-#define FEC_RACC_SHIFT16      BIT(7)
+ #define FEC_RACC_OPTIONS      (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+ /*
+@@ -205,8 +189,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+  * account when setting it.
+  */
+ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+-    defined(CONFIG_ARM64)
++    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+ #define       OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
+ #else
+ #define       OPT_FRAME_SIZE  0
+@@ -244,38 +227,86 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+ #define IS_TSO_HEADER(txq, addr) \
+       ((addr >= txq->tso_hdrs_dma) && \
+-      (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
++      (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+ static int mii_cnt;
+-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+-                                           struct bufdesc_prop *bd)
+-{
+-      return (bdp >= bd->last) ? bd->base
+-                      : (struct bufdesc *)(((void *)bdp) + bd->dsize);
+-}
++static inline
++struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
++                                    struct fec_enet_private *fep,
++                                    int queue_id)
++{
++      struct bufdesc *new_bd = bdp + 1;
++      struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
++      struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
++      struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
++      struct bufdesc_ex *ex_base;
++      struct bufdesc *base;
++      int ring_size;
++
++      if (bdp >= txq->tx_bd_base) {
++              base = txq->tx_bd_base;
++              ring_size = txq->tx_ring_size;
++              ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
++      } else {
++              base = rxq->rx_bd_base;
++              ring_size = rxq->rx_ring_size;
++              ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
++      }
+-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+-                                           struct bufdesc_prop *bd)
+-{
+-      return (bdp <= bd->base) ? bd->last
+-                      : (struct bufdesc *)(((void *)bdp) - bd->dsize);
++      if (fep->bufdesc_ex)
++              return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
++                      ex_base : ex_new_bd);
++      else
++              return (new_bd >= (base + ring_size)) ?
++                      base : new_bd;
++}
++
++static inline
++struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
++                                    struct fec_enet_private *fep,
++                                    int queue_id)
++{
++      struct bufdesc *new_bd = bdp - 1;
++      struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
++      struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
++      struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
++      struct bufdesc_ex *ex_base;
++      struct bufdesc *base;
++      int ring_size;
++
++      if (bdp >= txq->tx_bd_base) {
++              base = txq->tx_bd_base;
++              ring_size = txq->tx_ring_size;
++              ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
++      } else {
++              base = rxq->rx_bd_base;
++              ring_size = rxq->rx_ring_size;
++              ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
++      }
++
++      if (fep->bufdesc_ex)
++              return (struct bufdesc *)((ex_new_bd < ex_base) ?
++                      (ex_new_bd + ring_size) : ex_new_bd);
++      else
++              return (new_bd < base) ? (new_bd + ring_size) : new_bd;
+ }
+-static int fec_enet_get_bd_index(struct bufdesc *bdp,
+-                               struct bufdesc_prop *bd)
++static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
++                              struct fec_enet_private *fep)
+ {
+-      return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
++      return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
+ }
+-static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
++static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
++                                      struct fec_enet_priv_tx_q *txq)
+ {
+       int entries;
+-      entries = (((const char *)txq->dirty_tx -
+-                      (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
++      entries = ((const char *)txq->dirty_tx -
++                      (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
+-      return entries >= 0 ? entries : entries + txq->bd.ring_size;
++      return entries >= 0 ? entries : entries + txq->tx_ring_size;
+ }
+ static void swap_buffer(void *bufaddr, int len)
+@@ -308,20 +339,18 @@ static void fec_dump(struct net_device *ndev)
+       pr_info("Nr     SC     addr       len  SKB\n");
+       txq = fep->tx_queue[0];
+-      bdp = txq->bd.base;
++      bdp = txq->tx_bd_base;
+       do {
+-              pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
++              pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+                       index,
+-                      bdp == txq->bd.cur ? 'S' : ' ',
++                      bdp == txq->cur_tx ? 'S' : ' ',
+                       bdp == txq->dirty_tx ? 'H' : ' ',
+-                      fec16_to_cpu(bdp->cbd_sc),
+-                      fec32_to_cpu(bdp->cbd_bufaddr),
+-                      fec16_to_cpu(bdp->cbd_datlen),
++                      bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+                       txq->tx_skbuff[index]);
+-              bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
++              bdp = fec_enet_get_nextdesc(bdp, fep, 0);
+               index++;
+-      } while (bdp != txq->bd.base);
++      } while (bdp != txq->tx_bd_base);
+ }
+ static inline bool is_ipv4_pkt(struct sk_buff *skb)
+@@ -352,9 +381,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+                            struct net_device *ndev)
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      struct bufdesc *bdp = txq->bd.cur;
++      struct bufdesc *bdp = txq->cur_tx;
+       struct bufdesc_ex *ebdp;
+       int nr_frags = skb_shinfo(skb)->nr_frags;
++      unsigned short queue = skb_get_queue_mapping(skb);
+       int frag, frag_len;
+       unsigned short status;
+       unsigned int estatus = 0;
+@@ -366,10 +396,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+       for (frag = 0; frag < nr_frags; frag++) {
+               this_frag = &skb_shinfo(skb)->frags[frag];
+-              bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
++              bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+               ebdp = (struct bufdesc_ex *)bdp;
+-              status = fec16_to_cpu(bdp->cbd_sc);
++              status = bdp->cbd_sc;
+               status &= ~BD_ENET_TX_STATS;
+               status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+               frag_len = skb_shinfo(skb)->frags[frag].size;
+@@ -387,16 +417,16 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+               if (fep->bufdesc_ex) {
+                       if (fep->quirks & FEC_QUIRK_HAS_AVB)
+-                              estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
++                              estatus |= FEC_TX_BD_FTYPE(queue);
+                       if (skb->ip_summed == CHECKSUM_PARTIAL)
+                               estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+                       ebdp->cbd_bdu = 0;
+-                      ebdp->cbd_esc = cpu_to_fec32(estatus);
++                      ebdp->cbd_esc = estatus;
+               }
+               bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+-              index = fec_enet_get_bd_index(bdp, &txq->bd);
++              index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+               if (((unsigned long) bufaddr) & fep->tx_align ||
+                       fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+                       memcpy(txq->tx_bounce[index], bufaddr, frag_len);
+@@ -409,27 +439,24 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+               addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
+                                     DMA_TO_DEVICE);
+               if (dma_mapping_error(&fep->pdev->dev, addr)) {
++                      dev_kfree_skb_any(skb);
+                       if (net_ratelimit())
+                               netdev_err(ndev, "Tx DMA memory map failed\n");
+                       goto dma_mapping_error;
+               }
+-              bdp->cbd_bufaddr = cpu_to_fec32(addr);
+-              bdp->cbd_datlen = cpu_to_fec16(frag_len);
+-              /* Make sure the updates to rest of the descriptor are
+-               * performed before transferring ownership.
+-               */
+-              wmb();
+-              bdp->cbd_sc = cpu_to_fec16(status);
++              bdp->cbd_bufaddr = addr;
++              bdp->cbd_datlen = frag_len;
++              bdp->cbd_sc = status;
+       }
+       return bdp;
+ dma_mapping_error:
+-      bdp = txq->bd.cur;
++      bdp = txq->cur_tx;
+       for (i = 0; i < frag; i++) {
+-              bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+-              dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
+-                               fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
++              bdp = fec_enet_get_nextdesc(bdp, fep, queue);
++              dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
++                              bdp->cbd_datlen, DMA_TO_DEVICE);
+       }
+       return ERR_PTR(-ENOMEM);
+ }
+@@ -444,11 +471,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+       dma_addr_t addr;
+       unsigned short status;
+       unsigned short buflen;
++      unsigned short queue;
+       unsigned int estatus = 0;
+       unsigned int index;
+       int entries_free;
+-      entries_free = fec_enet_get_free_txdesc_num(txq);
++      entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+       if (entries_free < MAX_SKB_FRAGS + 1) {
+               dev_kfree_skb_any(skb);
+               if (net_ratelimit())
+@@ -463,16 +491,17 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+       }
+       /* Fill in a Tx ring entry */
+-      bdp = txq->bd.cur;
++      bdp = txq->cur_tx;
+       last_bdp = bdp;
+-      status = fec16_to_cpu(bdp->cbd_sc);
++      status = bdp->cbd_sc;
+       status &= ~BD_ENET_TX_STATS;
+       /* Set buffer length and buffer pointer */
+       bufaddr = skb->data;
+       buflen = skb_headlen(skb);
+-      index = fec_enet_get_bd_index(bdp, &txq->bd);
++      queue = skb_get_queue_mapping(skb);
++      index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+       if (((unsigned long) bufaddr) & fep->tx_align ||
+               fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+               memcpy(txq->tx_bounce[index], skb->data, buflen);
+@@ -493,12 +522,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+       if (nr_frags) {
+               last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+-              if (IS_ERR(last_bdp)) {
+-                      dma_unmap_single(&fep->pdev->dev, addr,
+-                                       buflen, DMA_TO_DEVICE);
+-                      dev_kfree_skb_any(skb);
++              if (IS_ERR(last_bdp))
+                       return NETDEV_TX_OK;
+-              }
+       } else {
+               status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+               if (fep->bufdesc_ex) {
+@@ -508,8 +533,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+                               estatus |= BD_ENET_TX_TS;
+               }
+       }
+-      bdp->cbd_bufaddr = cpu_to_fec32(addr);
+-      bdp->cbd_datlen = cpu_to_fec16(buflen);
+       if (fep->bufdesc_ex) {
+@@ -520,43 +543,41 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+                       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               if (fep->quirks & FEC_QUIRK_HAS_AVB)
+-                      estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
++                      estatus |= FEC_TX_BD_FTYPE(queue);
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+               ebdp->cbd_bdu = 0;
+-              ebdp->cbd_esc = cpu_to_fec32(estatus);
++              ebdp->cbd_esc = estatus;
+       }
+-      index = fec_enet_get_bd_index(last_bdp, &txq->bd);
++      index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
+       /* Save skb pointer */
+       txq->tx_skbuff[index] = skb;
+-      /* Make sure the updates to rest of the descriptor are performed before
+-       * transferring ownership.
+-       */
+-      wmb();
++      bdp->cbd_datlen = buflen;
++      bdp->cbd_bufaddr = addr;
+       /* Send it on its way.  Tell FEC it's ready, interrupt when done,
+        * it's the last BD of the frame, and to put the CRC on the end.
+        */
+       status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+-      bdp->cbd_sc = cpu_to_fec16(status);
++      bdp->cbd_sc = status;
+       /* If this was the last BD in the ring, start at the beginning again. */
+-      bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
++      bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
+       skb_tx_timestamp(skb);
+       /* Make sure the update to bdp and tx_skbuff are performed before
+-       * txq->bd.cur.
++       * cur_tx.
+        */
+       wmb();
+-      txq->bd.cur = bdp;
++      txq->cur_tx = bdp;
+       /* Trigger transmission start */
+-      writel(0, txq->bd.reg_desc_active);
++      writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+       return 0;
+ }
+@@ -569,11 +590,12 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
++      unsigned short queue = skb_get_queue_mapping(skb);
+       unsigned short status;
+       unsigned int estatus = 0;
+       dma_addr_t addr;
+-      status = fec16_to_cpu(bdp->cbd_sc);
++      status = bdp->cbd_sc;
+       status &= ~BD_ENET_TX_STATS;
+       status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+@@ -595,16 +617,16 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+               return NETDEV_TX_BUSY;
+       }
+-      bdp->cbd_datlen = cpu_to_fec16(size);
+-      bdp->cbd_bufaddr = cpu_to_fec32(addr);
++      bdp->cbd_datlen = size;
++      bdp->cbd_bufaddr = addr;
+       if (fep->bufdesc_ex) {
+               if (fep->quirks & FEC_QUIRK_HAS_AVB)
+-                      estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
++                      estatus |= FEC_TX_BD_FTYPE(queue);
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+               ebdp->cbd_bdu = 0;
+-              ebdp->cbd_esc = cpu_to_fec32(estatus);
++              ebdp->cbd_esc = estatus;
+       }
+       /* Handle the last BD specially */
+@@ -613,10 +635,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+       if (is_last) {
+               status |= BD_ENET_TX_INTR;
+               if (fep->bufdesc_ex)
+-                      ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
++                      ebdp->cbd_esc |= BD_ENET_TX_INT;
+       }
+-      bdp->cbd_sc = cpu_to_fec16(status);
++      bdp->cbd_sc = status;
+       return 0;
+ }
+@@ -629,12 +651,13 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
++      unsigned short queue = skb_get_queue_mapping(skb);
+       void *bufaddr;
+       unsigned long dmabuf;
+       unsigned short status;
+       unsigned int estatus = 0;
+-      status = fec16_to_cpu(bdp->cbd_sc);
++      status = bdp->cbd_sc;
+       status &= ~BD_ENET_TX_STATS;
+       status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+@@ -658,19 +681,19 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+               }
+       }
+-      bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
+-      bdp->cbd_datlen = cpu_to_fec16(hdr_len);
++      bdp->cbd_bufaddr = dmabuf;
++      bdp->cbd_datlen = hdr_len;
+       if (fep->bufdesc_ex) {
+               if (fep->quirks & FEC_QUIRK_HAS_AVB)
+-                      estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
++                      estatus |= FEC_TX_BD_FTYPE(queue);
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+                       estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+               ebdp->cbd_bdu = 0;
+-              ebdp->cbd_esc = cpu_to_fec32(estatus);
++              ebdp->cbd_esc = estatus;
+       }
+-      bdp->cbd_sc = cpu_to_fec16(status);
++      bdp->cbd_sc = status;
+       return 0;
+ }
+@@ -682,12 +705,13 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int total_len, data_left;
+-      struct bufdesc *bdp = txq->bd.cur;
++      struct bufdesc *bdp = txq->cur_tx;
++      unsigned short queue = skb_get_queue_mapping(skb);
+       struct tso_t tso;
+       unsigned int index = 0;
+       int ret;
+-      if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
++      if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
+               dev_kfree_skb_any(skb);
+               if (net_ratelimit())
+                       netdev_err(ndev, "NOT enough BD for TSO!\n");
+@@ -707,7 +731,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+       while (total_len > 0) {
+               char *hdr;
+-              index = fec_enet_get_bd_index(bdp, &txq->bd);
++              index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+@@ -722,8 +746,9 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+                       int size;
+                       size = min_t(int, tso.size, data_left);
+-                      bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+-                      index = fec_enet_get_bd_index(bdp, &txq->bd);
++                      bdp = fec_enet_get_nextdesc(bdp, fep, queue);
++                      index = fec_enet_get_bd_index(txq->tx_bd_base,
++                                                    bdp, fep);
+                       ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
+                                                       bdp, index,
+                                                       tso.data, size,
+@@ -736,22 +761,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+                       tso_build_data(skb, &tso, size);
+               }
+-              bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
++              bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+       }
+       /* Save skb pointer */
+       txq->tx_skbuff[index] = skb;
+       skb_tx_timestamp(skb);
+-      txq->bd.cur = bdp;
++      txq->cur_tx = bdp;
+       /* Trigger transmission start */
+       if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+-          !readl(txq->bd.reg_desc_active) ||
+-          !readl(txq->bd.reg_desc_active) ||
+-          !readl(txq->bd.reg_desc_active) ||
+-          !readl(txq->bd.reg_desc_active))
+-              writel(0, txq->bd.reg_desc_active);
++          !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
++          !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
++          !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
++          !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
++              writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+       return 0;
+@@ -781,7 +806,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+       if (ret)
+               return ret;
+-      entries_free = fec_enet_get_free_txdesc_num(txq);
++      entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+       if (entries_free <= txq->tx_stop_threshold)
+               netif_tx_stop_queue(nq);
+@@ -802,45 +827,45 @@ static void fec_enet_bd_init(struct net_device *dev)
+       for (q = 0; q < fep->num_rx_queues; q++) {
+               /* Initialize the receive buffer descriptors. */
+               rxq = fep->rx_queue[q];
+-              bdp = rxq->bd.base;
++              bdp = rxq->rx_bd_base;
+-              for (i = 0; i < rxq->bd.ring_size; i++) {
++              for (i = 0; i < rxq->rx_ring_size; i++) {
+                       /* Initialize the BD for every fragment in the page. */
+                       if (bdp->cbd_bufaddr)
+-                              bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
++                              bdp->cbd_sc = BD_ENET_RX_EMPTY;
+                       else
+-                              bdp->cbd_sc = cpu_to_fec16(0);
+-                      bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
++                              bdp->cbd_sc = 0;
++                      bdp = fec_enet_get_nextdesc(bdp, fep, q);
+               }
+               /* Set the last buffer to wrap */
+-              bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
+-              bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
++              bdp = fec_enet_get_prevdesc(bdp, fep, q);
++              bdp->cbd_sc |= BD_SC_WRAP;
+-              rxq->bd.cur = rxq->bd.base;
++              rxq->cur_rx = rxq->rx_bd_base;
+       }
+       for (q = 0; q < fep->num_tx_queues; q++) {
+               /* ...and the same for transmit */
+               txq = fep->tx_queue[q];
+-              bdp = txq->bd.base;
+-              txq->bd.cur = bdp;
++              bdp = txq->tx_bd_base;
++              txq->cur_tx = bdp;
+-              for (i = 0; i < txq->bd.ring_size; i++) {
++              for (i = 0; i < txq->tx_ring_size; i++) {
+                       /* Initialize the BD for every fragment in the page. */
+-                      bdp->cbd_sc = cpu_to_fec16(0);
++                      bdp->cbd_sc = 0;
+                       if (txq->tx_skbuff[i]) {
+                               dev_kfree_skb_any(txq->tx_skbuff[i]);
+                               txq->tx_skbuff[i] = NULL;
+                       }
+-                      bdp->cbd_bufaddr = cpu_to_fec32(0);
+-                      bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
++                      bdp->cbd_bufaddr = 0;
++                      bdp = fec_enet_get_nextdesc(bdp, fep, q);
+               }
+               /* Set the last buffer to wrap */
+-              bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
+-              bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
++              bdp = fec_enet_get_prevdesc(bdp, fep, q);
++              bdp->cbd_sc |= BD_SC_WRAP;
+               txq->dirty_tx = bdp;
+       }
+ }
+@@ -851,7 +876,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
+       int i;
+       for (i = 0; i < fep->num_rx_queues; i++)
+-              writel(0, fep->rx_queue[i]->bd.reg_desc_active);
++              writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+ }
+ static void fec_enet_enable_ring(struct net_device *ndev)
+@@ -863,7 +888,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
+       for (i = 0; i < fep->num_rx_queues; i++) {
+               rxq = fep->rx_queue[i];
+-              writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
++              writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
+               writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+               /* enable DMA1/2 */
+@@ -874,7 +899,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
+       for (i = 0; i < fep->num_tx_queues; i++) {
+               txq = fep->tx_queue[i];
+-              writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
++              writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
+               /* enable DMA1/2 */
+               if (i)
+@@ -892,7 +917,7 @@ static void fec_enet_reset_skb(struct net_device *ndev)
+       for (i = 0; i < fep->num_tx_queues; i++) {
+               txq = fep->tx_queue[i];
+-              for (j = 0; j < txq->bd.ring_size; j++) {
++              for (j = 0; j < txq->tx_ring_size; j++) {
+                       if (txq->tx_skbuff[j]) {
+                               dev_kfree_skb_any(txq->tx_skbuff[j]);
+                               txq->tx_skbuff[j] = NULL;
+@@ -930,11 +955,11 @@ fec_restart(struct net_device *ndev)
+        * enet-mac reset will reset mac address registers too,
+        * so need to reconfigure it.
+        */
+-      memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+-      writel((__force u32)cpu_to_be32(temp_mac[0]),
+-             fep->hwp + FEC_ADDR_LOW);
+-      writel((__force u32)cpu_to_be32(temp_mac[1]),
+-             fep->hwp + FEC_ADDR_HIGH);
++      if (fep->quirks & FEC_QUIRK_ENET_MAC) {
++              memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
++              writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
++              writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
++      }
+       /* Clear any outstanding interrupt. */
+       writel(0xffffffff, fep->hwp + FEC_IEVENT);
+@@ -961,16 +986,13 @@ fec_restart(struct net_device *ndev)
+ #if !defined(CONFIG_M5272)
+       if (fep->quirks & FEC_QUIRK_HAS_RACC) {
++              /* set RX checksum */
+               val = readl(fep->hwp + FEC_RACC);
+-              /* align IP header */
+-              val |= FEC_RACC_SHIFT16;
+               if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+-                      /* set RX checksum */
+                       val |= FEC_RACC_OPTIONS;
+               else
+                       val &= ~FEC_RACC_OPTIONS;
+               writel(val, fep->hwp + FEC_RACC);
+-              writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+       }
+       writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+ #endif
+@@ -995,10 +1017,10 @@ fec_restart(struct net_device *ndev)
+                       rcntl &= ~(1 << 8);
+               /* 1G, 100M or 10M */
+-              if (ndev->phydev) {
+-                      if (ndev->phydev->speed == SPEED_1000)
++              if (fep->phy_dev) {
++                      if (fep->phy_dev->speed == SPEED_1000)
+                               ecntl |= (1 << 5);
+-                      else if (ndev->phydev->speed == SPEED_100)
++                      else if (fep->phy_dev->speed == SPEED_100)
+                               rcntl &= ~(1 << 9);
+                       else
+                               rcntl |= (1 << 9);
+@@ -1019,7 +1041,7 @@ fec_restart(struct net_device *ndev)
+                        */
+                       cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+                               ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+-                      if (ndev->phydev && ndev->phydev->speed == SPEED_10)
++                      if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+                               cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+                       writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
+@@ -1033,7 +1055,7 @@ fec_restart(struct net_device *ndev)
+       /* enable pause frame*/
+       if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
+           ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
+-           ndev->phydev && ndev->phydev->pause)) {
++           fep->phy_dev && fep->phy_dev->pause)) {
+               rcntl |= FEC_ENET_FCE;
+               /* set FIFO threshold parameter to reduce overrun */
+@@ -1213,12 +1235,13 @@ static void
+ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+ {
+       struct  fec_enet_private *fep;
+-      struct bufdesc *bdp;
++      struct bufdesc *bdp, *bdp_t;
+       unsigned short status;
+       struct  sk_buff *skb;
+       struct fec_enet_priv_tx_q *txq;
+       struct netdev_queue *nq;
+       int     index = 0;
++      int     i, bdnum;
+       int     entries_free;
+       fep = netdev_priv(ndev);
+@@ -1231,27 +1254,37 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+       bdp = txq->dirty_tx;
+       /* get next bdp of dirty_tx */
+-      bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
++      bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+-      while (bdp != READ_ONCE(txq->bd.cur)) {
+-              /* Order the load of bd.cur and cbd_sc */
++      while (bdp != READ_ONCE(txq->cur_tx)) {
++              /* Order the load of cur_tx and cbd_sc */
+               rmb();
+-              status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
++              status = READ_ONCE(bdp->cbd_sc);
+               if (status & BD_ENET_TX_READY)
+                       break;
+-              index = fec_enet_get_bd_index(bdp, &txq->bd);
+-
++              bdp_t = bdp;
++              bdnum = 1;
++              index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
+               skb = txq->tx_skbuff[index];
++              while (!skb) {
++                      bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
++                      index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
++                      skb = txq->tx_skbuff[index];
++                      bdnum++;
++              }
++              if ((status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
++                      break;
++
++              for (i = 0; i < bdnum; i++) {
++                      if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
++                              dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
++                                               bdp->cbd_datlen, DMA_TO_DEVICE);
++                      bdp->cbd_bufaddr = 0;
++                      if (i < bdnum - 1)
++                              bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
++              }
+               txq->tx_skbuff[index] = NULL;
+-              if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+-                      dma_unmap_single(&fep->pdev->dev,
+-                                       fec32_to_cpu(bdp->cbd_bufaddr),
+-                                       fec16_to_cpu(bdp->cbd_datlen),
+-                                       DMA_TO_DEVICE);
+-              bdp->cbd_bufaddr = cpu_to_fec32(0);
+-              if (!skb)
+-                      goto skb_done;
+               /* Check for errors. */
+               if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+@@ -1278,7 +1311,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+                       struct skb_shared_hwtstamps shhwtstamps;
+                       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+-                      fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
++                      fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+                       skb_tstamp_tx(skb, &shhwtstamps);
+               }
+@@ -1290,7 +1323,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+               /* Free the sk buffer associated with this last transmit */
+               dev_kfree_skb_any(skb);
+-skb_done:
++
+               /* Make sure the update to bdp and tx_skbuff are performed
+                * before dirty_tx
+                */
+@@ -1298,21 +1331,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+               txq->dirty_tx = bdp;
+               /* Update pointer to next buffer descriptor to be transmitted */
+-              bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
++              bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+               /* Since we have freed up a buffer, the ring is no longer full
+                */
+               if (netif_queue_stopped(ndev)) {
+-                      entries_free = fec_enet_get_free_txdesc_num(txq);
++                      entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+                       if (entries_free >= txq->tx_wake_threshold)
+                               netif_tx_wake_queue(nq);
+               }
+       }
+       /* ERR006538: Keep the transmitter going */
+-      if (bdp != txq->bd.cur &&
+-          readl(txq->bd.reg_desc_active) == 0)
+-              writel(0, txq->bd.reg_desc_active);
++      if (bdp != txq->cur_tx &&
++          readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
++              writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+ }
+ static void
+@@ -1338,8 +1371,10 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
+       if (off)
+               skb_reserve(skb, fep->rx_align + 1 - off);
+-      bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
+-      if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
++      bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
++                                        FEC_ENET_RX_FRSIZE - fep->rx_align,
++                                        DMA_FROM_DEVICE);
++      if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+               if (net_ratelimit())
+                       netdev_err(ndev, "Rx DMA memory map failed\n");
+               return -ENOMEM;
+@@ -1361,8 +1396,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+       if (!new_skb)
+               return false;
+-      dma_sync_single_for_cpu(&fep->pdev->dev,
+-                              fec32_to_cpu(bdp->cbd_bufaddr),
++      dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+                               FEC_ENET_RX_FRSIZE - fep->rx_align,
+                               DMA_FROM_DEVICE);
+       if (!swap)
+@@ -1374,7 +1408,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+       return true;
+ }
+-/* During a receive, the bd_rx.cur points to the current incoming buffer.
++/* During a receive, the cur_rx points to the current incoming buffer.
+  * When we update through the ring, if the next incoming buffer has
+  * not been given to the system, we just set the empty indicator,
+  * effectively tossing the packet.
+@@ -1407,9 +1441,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+       /* First, grab all of the stats for the incoming packet.
+        * These get messed up if we get called due to a busy condition.
+        */
+-      bdp = rxq->bd.cur;
++      bdp = rxq->cur_rx;
+-      while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
++      while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+               if (pkt_received >= budget)
+                       break;
+@@ -1445,10 +1479,10 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+               /* Process the incoming frame. */
+               ndev->stats.rx_packets++;
+-              pkt_len = fec16_to_cpu(bdp->cbd_datlen);
++              pkt_len = bdp->cbd_datlen;
+               ndev->stats.rx_bytes += pkt_len;
+-              index = fec_enet_get_bd_index(bdp, &rxq->bd);
++              index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
+               skb = rxq->rx_skbuff[index];
+               /* The packet length includes FCS, but we don't want to
+@@ -1463,8 +1497,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+                               ndev->stats.rx_dropped++;
+                               goto rx_processing_done;
+                       }
+-                      dma_unmap_single(&fep->pdev->dev,
+-                                       fec32_to_cpu(bdp->cbd_bufaddr),
++                      dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                                        FEC_ENET_RX_FRSIZE - fep->rx_align,
+                                        DMA_FROM_DEVICE);
+               }
+@@ -1472,15 +1505,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+               prefetch(skb->data - NET_IP_ALIGN);
+               skb_put(skb, pkt_len - 4);
+               data = skb->data;
+-
+               if (!is_copybreak && need_swap)
+                       swap_buffer(data, pkt_len);
+-#if !defined(CONFIG_M5272)
+-              if (fep->quirks & FEC_QUIRK_HAS_RACC)
+-                      data = skb_pull_inline(skb, 2);
+-#endif
+-
+               /* Extract the enhanced buffer descriptor */
+               ebdp = NULL;
+               if (fep->bufdesc_ex)
+@@ -1489,8 +1516,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+               /* If this is a VLAN packet remove the VLAN Tag */
+               vlan_packet_rcvd = false;
+               if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+-                  fep->bufdesc_ex &&
+-                  (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
++                      fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+                       /* Push and remove the vlan tag */
+                       struct vlan_hdr *vlan_header =
+                                       (struct vlan_hdr *) (data + ETH_HLEN);
+@@ -1506,12 +1532,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+               /* Get receive timestamp from the skb */
+               if (fep->hwts_rx_en && fep->bufdesc_ex)
+-                      fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
++                      fec_enet_hwtstamp(fep, ebdp->ts,
+                                         skb_hwtstamps(skb));
+               if (fep->bufdesc_ex &&
+                   (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+-                      if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
++                      if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+                               /* don't check it */
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       } else {
+@@ -1528,8 +1554,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+               napi_gro_receive(&fep->napi, skb);
+               if (is_copybreak) {
+-                      dma_sync_single_for_device(&fep->pdev->dev,
+-                                                 fec32_to_cpu(bdp->cbd_bufaddr),
++                      dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+                                                  FEC_ENET_RX_FRSIZE - fep->rx_align,
+                                                  DMA_FROM_DEVICE);
+               } else {
+@@ -1543,30 +1568,26 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+               /* Mark the buffer empty */
+               status |= BD_ENET_RX_EMPTY;
++              bdp->cbd_sc = status;
+               if (fep->bufdesc_ex) {
+                       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+-                      ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
++                      ebdp->cbd_esc = BD_ENET_RX_INT;
+                       ebdp->cbd_prot = 0;
+                       ebdp->cbd_bdu = 0;
+               }
+-              /* Make sure the updates to rest of the descriptor are
+-               * performed before transferring ownership.
+-               */
+-              wmb();
+-              bdp->cbd_sc = cpu_to_fec16(status);
+               /* Update BD pointer to next entry */
+-              bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
++              bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+               /* Doing this here will keep the FEC running while we process
+                * incoming frames.  On a heavily loaded network, we should be
+                * able to keep up at the expense of system resources.
+                */
+-              writel(0, rxq->bd.reg_desc_active);
++              writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
+       }
+-      rxq->bd.cur = bdp;
++      rxq->cur_rx = bdp;
+       return pkt_received;
+ }
+@@ -1578,15 +1599,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
+-              int ret;
+-
+-              ret = fec_enet_rx_queue(ndev,
++              clear_bit(queue_id, &fep->work_rx);
++              pkt_received += fec_enet_rx_queue(ndev,
+                                       budget - pkt_received, queue_id);
+-
+-              if (ret < budget - pkt_received)
+-                      clear_bit(queue_id, &fep->work_rx);
+-
+-              pkt_received += ret;
+       }
+       return pkt_received;
+ }
+@@ -1631,7 +1646,7 @@ fec_enet_interrupt(int irq, void *dev_id)
+               if (napi_schedule_prep(&fep->napi)) {
+                       /* Disable the NAPI interrupts */
+-                      writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
++                      writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+                       __napi_schedule(&fep->napi);
+               }
+       }
+@@ -1742,7 +1757,7 @@ static void fec_get_mac(struct net_device *ndev)
+ static void fec_enet_adjust_link(struct net_device *ndev)
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      struct phy_device *phy_dev = ndev->phydev;
++      struct phy_device *phy_dev = fep->phy_dev;
+       int status_change = 0;
+       /* Prevent a state halted on mii error */
+@@ -1802,16 +1817,10 @@ static void fec_enet_adjust_link(struct net_device *ndev)
+ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+ {
+       struct fec_enet_private *fep = bus->priv;
+-      struct device *dev = &fep->pdev->dev;
+       unsigned long time_left;
+-      int ret = 0;
+-
+-      ret = pm_runtime_get_sync(dev);
+-      if (ret < 0)
+-              return ret;
+       fep->mii_timeout = 0;
+-      reinit_completion(&fep->mdio_done);
++      init_completion(&fep->mdio_done);
+       /* start a read op */
+       writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+@@ -1824,35 +1833,21 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+       if (time_left == 0) {
+               fep->mii_timeout = 1;
+               netdev_err(fep->netdev, "MDIO read timeout\n");
+-              ret = -ETIMEDOUT;
+-              goto out;
++              return -ETIMEDOUT;
+       }
+-      ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+-
+-out:
+-      pm_runtime_mark_last_busy(dev);
+-      pm_runtime_put_autosuspend(dev);
+-
+-      return ret;
++      /* return value */
++      return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+ }
+ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+                          u16 value)
+ {
+       struct fec_enet_private *fep = bus->priv;
+-      struct device *dev = &fep->pdev->dev;
+       unsigned long time_left;
+-      int ret;
+-
+-      ret = pm_runtime_get_sync(dev);
+-      if (ret < 0)
+-              return ret;
+-      else
+-              ret = 0;
+       fep->mii_timeout = 0;
+-      reinit_completion(&fep->mdio_done);
++      init_completion(&fep->mdio_done);
+       /* start a write op */
+       writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
+@@ -1866,13 +1861,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+       if (time_left == 0) {
+               fep->mii_timeout = 1;
+               netdev_err(fep->netdev, "MDIO write timeout\n");
+-              ret  = -ETIMEDOUT;
++              return -ETIMEDOUT;
+       }
+-      pm_runtime_mark_last_busy(dev);
+-      pm_runtime_put_autosuspend(dev);
+-
+-      return ret;
++      return 0;
+ }
+ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+@@ -1881,10 +1873,18 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+       int ret;
+       if (enable) {
++              ret = clk_prepare_enable(fep->clk_ahb);
++              if (ret)
++                      return ret;
++              ret = clk_prepare_enable(fep->clk_ipg);
++              if (ret)
++                      goto failed_clk_ipg;
+               if (fep->clk_enet_out) {
+                       ret = clk_prepare_enable(fep->clk_enet_out);
+                       if (ret)
+-                              return ret;
++                              goto failed_clk_enet_out;
++
++                      fec_reset_phy(fep->pdev);
+               }
+               if (fep->clk_ptp) {
+                       mutex_lock(&fep->ptp_clk_mutex);
+@@ -1903,6 +1903,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+                               goto failed_clk_ref;
+               }
+       } else {
++              clk_disable_unprepare(fep->clk_ahb);
++              clk_disable_unprepare(fep->clk_ipg);
+               if (fep->clk_enet_out)
+                       clk_disable_unprepare(fep->clk_enet_out);
+               if (fep->clk_ptp) {
+@@ -1923,27 +1925,23 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ failed_clk_ptp:
+       if (fep->clk_enet_out)
+               clk_disable_unprepare(fep->clk_enet_out);
++failed_clk_enet_out:
++              clk_disable_unprepare(fep->clk_ipg);
++failed_clk_ipg:
++              clk_disable_unprepare(fep->clk_ahb);
+       return ret;
+ }
+-static int fec_restore_mii_bus(struct net_device *ndev)
++static void fec_restore_mii_bus(struct net_device *ndev)
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      int ret;
+-
+-      ret = pm_runtime_get_sync(&fep->pdev->dev);
+-      if (ret < 0)
+-              return ret;
++      fec_enet_clk_enable(ndev, true);
+       writel(0xffc00000, fep->hwp + FEC_IEVENT);
+       writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+       writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+       writel(FEC_ENET_ETHEREN, fep->hwp + FEC_ECNTRL);
+-
+-      pm_runtime_mark_last_busy(&fep->pdev->dev);
+-      pm_runtime_put_autosuspend(&fep->pdev->dev);
+-      return 0;
+ }
+ static int fec_enet_mii_probe(struct net_device *ndev)
+@@ -1955,6 +1953,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
+       int phy_id;
+       int dev_id = fep->dev_id;
++      fep->phy_dev = NULL;
++
+       if (fep->phy_node) {
+               phy_dev = of_phy_connect(ndev, fep->phy_node,
+                                        &fec_enet_adjust_link, 0,
+@@ -1964,7 +1964,11 @@ static int fec_enet_mii_probe(struct net_device *ndev)
+       } else {
+               /* check for attached phy */
+               for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
+-                      if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
++                      if ((fep->mii_bus->phy_mask & (1 << phy_id)))
++                              continue;
++                      if (fep->mii_bus->mdio_map[phy_id] == NULL)
++                              continue;
++                      if (fep->mii_bus->mdio_map[phy_id]->addr == 0)
+                               continue;
+                       if (dev_id--)
+                               continue;
+@@ -2002,10 +2006,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
+       phy_dev->advertising = phy_dev->supported;
++      fep->phy_dev = phy_dev;
+       fep->link = 0;
+       fep->full_duplex = 0;
+-      phy_attached_info(phy_dev);
++      netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
++                  fep->phy_dev->drv->name, NULL,
++                  fep->phy_dev->irq);
+       return 0;
+ }
+@@ -2017,7 +2024,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       struct device_node *node;
+-      int err = -ENXIO;
++      int err = -ENXIO, i;
+       u32 mii_speed, holdtime;
+       /*
+@@ -2036,7 +2043,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
+        * mdio interface in board design, and need to be configured by
+        * fec0 mii_bus.
+        */
+-      if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
++      if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+               /* fec1 uses fec0 mii_bus */
+               if (mii_cnt && fec0_mii_bus) {
+                       fep->mii_bus = fec0_mii_bus;
+@@ -2100,29 +2107,38 @@ static int fec_enet_mii_init(struct platform_device *pdev)
+       fep->mii_bus->priv = fep;
+       fep->mii_bus->parent = &pdev->dev;
++/*    fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
++      if (!fep->mii_bus->irq) {
++              err = -ENOMEM;
++              goto err_out_free_mdiobus;
++      }
++*/
++      for (i = 0; i < PHY_MAX_ADDR; i++)
++              fep->mii_bus->irq[i] = PHY_POLL;
++
+       node = of_get_child_by_name(pdev->dev.of_node, "mdio");
+       if (node) {
+               err = of_mdiobus_register(fep->mii_bus, node);
+               of_node_put(node);
+-      } else if (fep->phy_node && !fep->fixed_link) {
+-              err = -EPROBE_DEFER;
+       } else {
+               err = mdiobus_register(fep->mii_bus);
+       }
+       if (err)
+-              goto err_out_free_mdiobus;
++              goto err_out_free_mdio_irq;
+       mii_cnt++;
+       /* save fec0 mii_bus */
+-      if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) {
++      if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+               fec0_mii_bus = fep->mii_bus;
+               fec_mii_bus_share = &fep->mii_bus_share;
+       }
+       return 0;
++err_out_free_mdio_irq:
++      kfree(fep->mii_bus->irq);
+ err_out_free_mdiobus:
+       mdiobus_free(fep->mii_bus);
+ err_out:
+@@ -2133,10 +2149,35 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
+ {
+       if (--mii_cnt == 0) {
+               mdiobus_unregister(fep->mii_bus);
++              kfree(fep->mii_bus->irq);
+               mdiobus_free(fep->mii_bus);
+       }
+ }
++static int fec_enet_get_settings(struct net_device *ndev,
++                                struct ethtool_cmd *cmd)
++{
++      struct fec_enet_private *fep = netdev_priv(ndev);
++      struct phy_device *phydev = fep->phy_dev;
++
++      if (!phydev)
++              return -ENODEV;
++
++      return phy_ethtool_gset(phydev, cmd);
++}
++
++static int fec_enet_set_settings(struct net_device *ndev,
++                               struct ethtool_cmd *cmd)
++{
++      struct fec_enet_private *fep = netdev_priv(ndev);
++      struct phy_device *phydev = fep->phy_dev;
++
++      if (!phydev)
++              return -ENODEV;
++
++      return phy_ethtool_sset(phydev, cmd);
++}
++
+ static void fec_enet_get_drvinfo(struct net_device *ndev,
+                                struct ethtool_drvinfo *info)
+ {
+@@ -2163,8 +2204,8 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
+ /* List of registers that can be safety be read to dump them with ethtool */
+ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+-      defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+-      defined(CONFIG_ARM64)
++      defined(CONFIG_M520x) || defined(CONFIG_M532x) ||               \
++      defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+ static u32 fec_enet_register_offset[] = {
+       FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+       FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+@@ -2270,7 +2311,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      if (!ndev->phydev)
++      if (!fep->phy_dev)
+               return -ENODEV;
+       if (pause->tx_pause != pause->rx_pause) {
+@@ -2286,17 +2327,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
+       fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
+       if (pause->rx_pause || pause->autoneg) {
+-              ndev->phydev->supported |= ADVERTISED_Pause;
+-              ndev->phydev->advertising |= ADVERTISED_Pause;
++              fep->phy_dev->supported |= ADVERTISED_Pause;
++              fep->phy_dev->advertising |= ADVERTISED_Pause;
+       } else {
+-              ndev->phydev->supported &= ~ADVERTISED_Pause;
+-              ndev->phydev->advertising &= ~ADVERTISED_Pause;
++              fep->phy_dev->supported &= ~ADVERTISED_Pause;
++              fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+       }
+       if (pause->autoneg) {
+               if (netif_running(ndev))
+                       fec_stop(ndev);
+-              phy_start_aneg(ndev->phydev);
++              phy_start_aneg(fep->phy_dev);
+       }
+       if (netif_running(ndev)) {
+               napi_disable(&fep->napi);
+@@ -2376,26 +2417,14 @@ static const struct fec_stat {
+       { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
+ };
+-#define FEC_STATS_SIZE                (ARRAY_SIZE(fec_stats) * sizeof(u64))
+-
+-static void fec_enet_update_ethtool_stats(struct net_device *dev)
++static void fec_enet_get_ethtool_stats(struct net_device *dev,
++      struct ethtool_stats *stats, u64 *data)
+ {
+       struct fec_enet_private *fep = netdev_priv(dev);
+       int i;
+       for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+-              fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
+-}
+-
+-static void fec_enet_get_ethtool_stats(struct net_device *dev,
+-                                     struct ethtool_stats *stats, u64 *data)
+-{
+-      struct fec_enet_private *fep = netdev_priv(dev);
+-
+-      if (netif_running(dev))
+-              fec_enet_update_ethtool_stats(dev);
+-
+-      memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
++              data[i] = readl(fep->hwp + fec_stats[i].offset);
+ }
+ static void fec_enet_get_strings(struct net_device *netdev,
+@@ -2420,17 +2449,12 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
+               return -EOPNOTSUPP;
+       }
+ }
+-
+-#else /* !defined(CONFIG_M5272) */
+-#define FEC_STATS_SIZE        0
+-static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
+-{
+-}
+ #endif /* !defined(CONFIG_M5272) */
+ static int fec_enet_nway_reset(struct net_device *dev)
+ {
+-      struct phy_device *phydev = dev->phydev;
++      struct fec_enet_private *fep = netdev_priv(dev);
++      struct phy_device *phydev = fep->phy_dev;
+       if (!phydev)
+               return -ENODEV;
+@@ -2455,6 +2479,9 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       int rx_itr, tx_itr;
++      if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
++              return;
++
+       /* Must be greater than zero to avoid unpredictable behavior */
+       if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+           !fep->tx_time_itr || !fep->tx_pkts_itr)
+@@ -2477,12 +2504,10 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
+       writel(tx_itr, fep->hwp + FEC_TXIC0);
+       writel(rx_itr, fep->hwp + FEC_RXIC0);
+-      if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+-              writel(tx_itr, fep->hwp + FEC_TXIC1);
+-              writel(rx_itr, fep->hwp + FEC_RXIC1);
+-              writel(tx_itr, fep->hwp + FEC_TXIC2);
+-              writel(rx_itr, fep->hwp + FEC_RXIC2);
+-      }
++      writel(tx_itr, fep->hwp + FEC_TXIC1);
++      writel(rx_itr, fep->hwp + FEC_RXIC1);
++      writel(tx_itr, fep->hwp + FEC_TXIC2);
++      writel(rx_itr, fep->hwp + FEC_RXIC2);
+ }
+ static int
+@@ -2490,7 +2515,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
++      if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+               return -EOPNOTSUPP;
+       ec->rx_coalesce_usecs = fep->rx_time_itr;
+@@ -2508,28 +2533,28 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       unsigned int cycle;
+-      if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
++      if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+               return -EOPNOTSUPP;
+       if (ec->rx_max_coalesced_frames > 255) {
+-              pr_err("Rx coalesced frames exceed hardware limitation\n");
++              pr_err("Rx coalesced frames exceed hardware limiation");
+               return -EINVAL;
+       }
+       if (ec->tx_max_coalesced_frames > 255) {
+-              pr_err("Tx coalesced frame exceed hardware limitation\n");
++              pr_err("Tx coalesced frame exceed hardware limiation");
+               return -EINVAL;
+       }
+       cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+       if (cycle > 0xFFFF) {
+-              pr_err("Rx coalesced usec exceed hardware limitation\n");
++              pr_err("Rx coalesed usec exceeed hardware limiation");
+               return -EINVAL;
+       }
+       cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+       if (cycle > 0xFFFF) {
+-              pr_err("Rx coalesced usec exceed hardware limitation\n");
++              pr_err("Rx coalesed usec exceeed hardware limiation");
+               return -EINVAL;
+       }
+@@ -2629,6 +2654,8 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+ }
+ static const struct ethtool_ops fec_enet_ethtool_ops = {
++      .get_settings           = fec_enet_get_settings,
++      .set_settings           = fec_enet_set_settings,
+       .get_drvinfo            = fec_enet_get_drvinfo,
+       .get_regs_len           = fec_enet_get_regs_len,
+       .get_regs               = fec_enet_get_regs,
+@@ -2648,14 +2675,12 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
+       .set_tunable            = fec_enet_set_tunable,
+       .get_wol                = fec_enet_get_wol,
+       .set_wol                = fec_enet_set_wol,
+-      .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+-      .set_link_ksettings     = phy_ethtool_set_link_ksettings,
+ };
+ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      struct phy_device *phydev = ndev->phydev;
++      struct phy_device *phydev = fep->phy_dev;
+       if (!netif_running(ndev))
+               return -EINVAL;
+@@ -2685,25 +2710,25 @@ static void fec_enet_free_buffers(struct net_device *ndev)
+       for (q = 0; q < fep->num_rx_queues; q++) {
+               rxq = fep->rx_queue[q];
+-              bdp = rxq->bd.base;
+-              for (i = 0; i < rxq->bd.ring_size; i++) {
++              bdp = rxq->rx_bd_base;
++              for (i = 0; i < rxq->rx_ring_size; i++) {
+                       skb = rxq->rx_skbuff[i];
+                       rxq->rx_skbuff[i] = NULL;
+                       if (skb) {
+                               dma_unmap_single(&fep->pdev->dev,
+-                                               fec32_to_cpu(bdp->cbd_bufaddr),
++                                               bdp->cbd_bufaddr,
+                                                FEC_ENET_RX_FRSIZE - fep->rx_align,
+                                                DMA_FROM_DEVICE);
+                               dev_kfree_skb(skb);
+                       }
+-                      bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
++                      bdp = fec_enet_get_nextdesc(bdp, fep, q);
+               }
+       }
+       for (q = 0; q < fep->num_tx_queues; q++) {
+               txq = fep->tx_queue[q];
+-              bdp = txq->bd.base;
+-              for (i = 0; i < txq->bd.ring_size; i++) {
++              bdp = txq->tx_bd_base;
++              for (i = 0; i < txq->tx_ring_size; i++) {
+                       kfree(txq->tx_bounce[i]);
+                       txq->tx_bounce[i] = NULL;
+                       skb = txq->tx_skbuff[i];
+@@ -2722,8 +2747,8 @@ static void fec_enet_free_queue(struct net_device *ndev)
+       for (i = 0; i < fep->num_tx_queues; i++)
+               if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
+                       txq = fep->tx_queue[i];
+-                      dma_free_coherent(&fep->pdev->dev,
+-                                        txq->bd.ring_size * TSO_HEADER_SIZE,
++                      dma_free_coherent(NULL,
++                                        txq->tx_ring_size * TSO_HEADER_SIZE,
+                                         txq->tso_hdrs,
+                                         txq->tso_hdrs_dma);
+               }
+@@ -2749,15 +2774,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
+               }
+               fep->tx_queue[i] = txq;
+-              txq->bd.ring_size = TX_RING_SIZE;
+-              fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
++              txq->tx_ring_size = TX_RING_SIZE;
++              fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
+               txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+               txq->tx_wake_threshold =
+-                      (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
++                              (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
+-              txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
+-                                      txq->bd.ring_size * TSO_HEADER_SIZE,
++              txq->tso_hdrs = dma_alloc_coherent(NULL,
++                                      txq->tx_ring_size * TSO_HEADER_SIZE,
+                                       &txq->tso_hdrs_dma,
+                                       GFP_KERNEL);
+               if (!txq->tso_hdrs) {
+@@ -2774,8 +2799,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
+                       goto alloc_failed;
+               }
+-              fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
+-              fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
++              fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
++              fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
+       }
+       return ret;
+@@ -2794,8 +2819,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+       struct fec_enet_priv_rx_q *rxq;
+       rxq = fep->rx_queue[queue];
+-      bdp = rxq->bd.base;
+-      for (i = 0; i < rxq->bd.ring_size; i++) {
++      bdp = rxq->rx_bd_base;
++      for (i = 0; i < rxq->rx_ring_size; i++) {
+               skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+               if (!skb)
+                       goto err_alloc;
+@@ -2806,19 +2831,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+               }
+               rxq->rx_skbuff[i] = skb;
+-              bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
++              bdp->cbd_sc = BD_ENET_RX_EMPTY;
+               if (fep->bufdesc_ex) {
+                       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+-                      ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
++                      ebdp->cbd_esc = BD_ENET_RX_INT;
+               }
+-              bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
++              bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+       }
+       /* Set the last buffer to wrap. */
+-      bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
+-      bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
++      bdp = fec_enet_get_prevdesc(bdp, fep, queue);
++      bdp->cbd_sc |= BD_SC_WRAP;
+       return 0;
+  err_alloc:
+@@ -2835,26 +2860,26 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
+       struct fec_enet_priv_tx_q *txq;
+       txq = fep->tx_queue[queue];
+-      bdp = txq->bd.base;
+-      for (i = 0; i < txq->bd.ring_size; i++) {
++      bdp = txq->tx_bd_base;
++      for (i = 0; i < txq->tx_ring_size; i++) {
+               txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+               if (!txq->tx_bounce[i])
+                       goto err_alloc;
+-              bdp->cbd_sc = cpu_to_fec16(0);
+-              bdp->cbd_bufaddr = cpu_to_fec32(0);
++              bdp->cbd_sc = 0;
++              bdp->cbd_bufaddr = 0;
+               if (fep->bufdesc_ex) {
+                       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+-                      ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
++                      ebdp->cbd_esc = BD_ENET_TX_INT;
+               }
+-              bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
++              bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+       }
+       /* Set the last buffer to wrap. */
+-      bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
+-      bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
++      bdp = fec_enet_get_prevdesc(bdp, fep, queue);
++      bdp->cbd_sc |= BD_SC_WRAP;
+       return 0;
+@@ -2903,14 +2928,10 @@ fec_enet_open(struct net_device *ndev)
+                               platform_get_device_id(fep->pdev);
+       int ret;
+-      ret = pm_runtime_get_sync(&fep->pdev->dev);
+-      if (ret < 0)
+-              return ret;
+-
+       pinctrl_pm_select_default_state(&fep->pdev->dev);
+       ret = fec_enet_clk_enable(ndev, true);
+       if (ret)
+-              goto clk_enable;
++              return ret;
+       /* I should reset the ring buffers here, but I don't yet know
+        * a simple way to do that.
+@@ -2928,13 +2949,11 @@ fec_enet_open(struct net_device *ndev)
+       if (ret)
+               goto err_enet_mii_probe;
+-      if (fep->quirks & FEC_QUIRK_ERR006687)
+-              imx6q_cpuidle_fec_irqs_used();
+-
+       napi_enable(&fep->napi);
+-      phy_start(ndev->phydev);
++      phy_start(fep->phy_dev);
+       netif_tx_start_all_queues(ndev);
++      pm_runtime_get_sync(ndev->dev.parent);
+       if ((id_entry->driver_data & FEC_QUIRK_BUG_WAITMODE) &&
+           !fec_enet_irq_workaround(fep))
+               pm_qos_add_request(&fep->pm_qos_req,
+@@ -2947,16 +2966,14 @@ fec_enet_open(struct net_device *ndev)
+       device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
+                                FEC_WOL_FLAG_ENABLE);
++      fep->miibus_up_failed = false;
+       return 0;
+ err_enet_mii_probe:
+       fec_enet_free_buffers(ndev);
+ err_enet_alloc:
+-      fec_enet_clk_enable(ndev, false);
+-clk_enable:
+-      pm_runtime_mark_last_busy(&fep->pdev->dev);
+-      pm_runtime_put_autosuspend(&fep->pdev->dev);
++      fep->miibus_up_failed = true;
+       if (!fep->mii_bus_share)
+               pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+       return ret;
+@@ -2967,7 +2984,7 @@ fec_enet_close(struct net_device *ndev)
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      phy_stop(ndev->phydev);
++      phy_stop(fep->phy_dev);
+       if (netif_device_present(ndev)) {
+               napi_disable(&fep->napi);
+@@ -2975,21 +2992,13 @@ fec_enet_close(struct net_device *ndev)
+               fec_stop(ndev);
+       }
+-      phy_disconnect(ndev->phydev);
+-      ndev->phydev = NULL;
+-
+-      if (fep->quirks & FEC_QUIRK_ERR006687)
+-              imx6q_cpuidle_fec_irqs_unused();
+-
+-      fec_enet_update_ethtool_stats(ndev);
++      phy_disconnect(fep->phy_dev);
++      fep->phy_dev = NULL;
+       fec_enet_clk_enable(ndev, false);
+       pm_qos_remove_request(&fep->pm_qos_req);
+-      if (!fep->mii_bus_share)
+-              pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+-      pm_runtime_mark_last_busy(&fep->pdev->dev);
+-      pm_runtime_put_autosuspend(&fep->pdev->dev);
+-
++      pinctrl_pm_select_sleep_state(&fep->pdev->dev);
++      pm_runtime_put_sync_suspend(ndev->dev.parent);
+       fec_enet_free_buffers(ndev);
+       return 0;
+@@ -3005,7 +3014,7 @@ fec_enet_close(struct net_device *ndev)
+  * this kind of feature?).
+  */
+-#define FEC_HASH_BITS 6               /* #bits in hash */
++#define HASH_BITS     6               /* #bits in hash */
+ #define CRC32_POLY    0xEDB88320
+ static void set_multicast_list(struct net_device *ndev)
+@@ -3014,7 +3023,6 @@ static void set_multicast_list(struct net_device *ndev)
+       struct netdev_hw_addr *ha;
+       unsigned int i, bit, data, crc, tmp;
+       unsigned char hash;
+-      unsigned int hash_high, hash_low;
+       if (ndev->flags & IFF_PROMISC) {
+               tmp = readl(fep->hwp + FEC_R_CNTRL);
+@@ -3037,10 +3045,10 @@ static void set_multicast_list(struct net_device *ndev)
+               return;
+       }
+-      /* Add the addresses in hash register
++      /* Clear filter and add the addresses in hash register
+        */
+-      hash_high = 0;
+-      hash_low = 0;
++      writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
++      writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+       netdev_for_each_mc_addr(ha, ndev) {
+               /* calculate crc32 value of mac address */
+@@ -3054,20 +3062,21 @@ static void set_multicast_list(struct net_device *ndev)
+                       }
+               }
+-              /* only upper 6 bits (FEC_HASH_BITS) are used
++              /* only upper 6 bits (HASH_BITS) are used
+                * which point to specific bit in he hash registers
+                */
+-              hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
++              hash = (crc >> (32 - HASH_BITS)) & 0x3f;
+               if (hash > 31) {
+-                      hash_high |= 1 << (hash - 32);
++                      tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
++                      tmp |= 1 << (hash - 32);
++                      writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+               } else {
+-                      hash_low |= 1 << hash;
++                      tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
++                      tmp |= 1 << hash;
++                      writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+               }
+       }
+-
+-      writel_relaxed(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+-      writel_relaxed(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ }
+ /* Set a MAC change in hardware. */
+@@ -3122,6 +3131,7 @@ static void fec_poll_controller(struct net_device *dev)
+ }
+ #endif
++#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
+ static inline void fec_enet_set_netdev_features(struct net_device *netdev,
+       netdev_features_t features)
+ {
+@@ -3145,7 +3155,7 @@ static int fec_set_features(struct net_device *netdev,
+       struct fec_enet_private *fep = netdev_priv(netdev);
+       netdev_features_t changed = features ^ netdev->features;
+-      if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
++      if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(netdev);
+               fec_stop(netdev);
+@@ -3209,14 +3219,6 @@ static const struct net_device_ops fec_netdev_ops = {
+       .ndo_set_features       = fec_set_features,
+ };
+-static const unsigned short offset_des_active_rxq[] = {
+-      FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
+-};
+-
+-static const unsigned short offset_des_active_txq[] = {
+-      FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
+-};
+-
+  /*
+   * XXX:  We need to clean up on failure exits here.
+   *
+@@ -3224,16 +3226,14 @@ static const unsigned short offset_des_active_txq[] = {
+ static int fec_enet_init(struct net_device *ndev)
+ {
+       struct fec_enet_private *fep = netdev_priv(ndev);
++      struct fec_enet_priv_tx_q *txq;
++      struct fec_enet_priv_rx_q *rxq;
+       struct bufdesc *cbd_base;
+       dma_addr_t bd_dma;
+       int bd_size;
+       unsigned int i;
+-      unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+-                      sizeof(struct bufdesc);
+-      unsigned dsize_log2 = __fls(dsize);
+-      WARN_ON(dsize != (1 << dsize_log2));
+-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++#if defined(CONFIG_ARM)
+       fep->rx_align = 0xf;
+       fep->tx_align = 0xf;
+ #else
+@@ -3243,11 +3243,16 @@ static int fec_enet_init(struct net_device *ndev)
+       fec_enet_alloc_queue(ndev);
+-      bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
++      if (fep->bufdesc_ex)
++              fep->bufdesc_size = sizeof(struct bufdesc_ex);
++      else
++              fep->bufdesc_size = sizeof(struct bufdesc);
++      bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
++                      fep->bufdesc_size;
+       /* Allocate memory for buffer descriptors. */
+-      cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+-                                     GFP_KERNEL);
++      cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
++                                    GFP_KERNEL);
+       if (!cbd_base) {
+               return -ENOMEM;
+       }
+@@ -3261,35 +3266,33 @@ static int fec_enet_init(struct net_device *ndev)
+       /* Set receive and transmit descriptor base. */
+       for (i = 0; i < fep->num_rx_queues; i++) {
+-              struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
+-              unsigned size = dsize * rxq->bd.ring_size;
+-
+-              rxq->bd.qid = i;
+-              rxq->bd.base = cbd_base;
+-              rxq->bd.cur = cbd_base;
+-              rxq->bd.dma = bd_dma;
+-              rxq->bd.dsize = dsize;
+-              rxq->bd.dsize_log2 = dsize_log2;
+-              rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
+-              bd_dma += size;
+-              cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+-              rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
++              rxq = fep->rx_queue[i];
++              rxq->index = i;
++              rxq->rx_bd_base = (struct bufdesc *)cbd_base;
++              rxq->bd_dma = bd_dma;
++              if (fep->bufdesc_ex) {
++                      bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
++                      cbd_base = (struct bufdesc *)
++                              (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
++              } else {
++                      bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
++                      cbd_base += rxq->rx_ring_size;
++              }
+       }
+       for (i = 0; i < fep->num_tx_queues; i++) {
+-              struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
+-              unsigned size = dsize * txq->bd.ring_size;
+-
+-              txq->bd.qid = i;
+-              txq->bd.base = cbd_base;
+-              txq->bd.cur = cbd_base;
+-              txq->bd.dma = bd_dma;
+-              txq->bd.dsize = dsize;
+-              txq->bd.dsize_log2 = dsize_log2;
+-              txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
+-              bd_dma += size;
+-              cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+-              txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
++              txq = fep->tx_queue[i];
++              txq->index = i;
++              txq->tx_bd_base = (struct bufdesc *)cbd_base;
++              txq->bd_dma = bd_dma;
++              if (fep->bufdesc_ex) {
++                      bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
++                      cbd_base = (struct bufdesc *)
++                       (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
++              } else {
++                      bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
++                      cbd_base += txq->tx_ring_size;
++              }
+       }
+@@ -3323,60 +3326,62 @@ static int fec_enet_init(struct net_device *ndev)
+       fec_restart(ndev);
+-      fec_enet_update_ethtool_stats(ndev);
+-
+       return 0;
+ }
+ #ifdef CONFIG_OF
+-static int fec_reset_phy(struct platform_device *pdev)
++static void fec_reset_phy(struct platform_device *pdev)
++{
++      struct net_device *ndev = platform_get_drvdata(pdev);
++      struct fec_enet_private *fep = netdev_priv(ndev);
++
++      if (!gpio_is_valid(fep->phy_reset_gpio))
++              return;
++
++      gpio_set_value_cansleep(fep->phy_reset_gpio, 0);
++      msleep(fep->phy_reset_duration);
++      gpio_set_value_cansleep(fep->phy_reset_gpio, 1);
++}
++
++static int fec_get_reset_gpio(struct platform_device *pdev)
+ {
+       int err, phy_reset;
+-      bool active_high = false;
+       int msec = 1;
+       struct device_node *np = pdev->dev.of_node;
+-
+-      if (!np)
+-              return 0;
+-
+-      err = of_property_read_u32(np, "phy-reset-duration", &msec);
+-      /* A sane reset duration should not be longer than 1s */
+-      if (!err && msec > 1000)
+-              msec = 1;
++      struct net_device *ndev = platform_get_drvdata(pdev);
++      struct fec_enet_private *fep = netdev_priv(ndev);
+       phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+-      if (phy_reset == -EPROBE_DEFER)
++      if (!gpio_is_valid(phy_reset))
+               return phy_reset;
+-      else if (!gpio_is_valid(phy_reset))
+-              return 0;
+-
+-      active_high = of_property_read_bool(np, "phy-reset-active-high");
+       err = devm_gpio_request_one(&pdev->dev, phy_reset,
+-                      active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+-                      "phy-reset");
++                                  GPIOF_OUT_INIT_LOW, "phy-reset");
+       if (err) {
+               dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
+               return err;
+       }
+-
+-      if (msec > 20)
+-              msleep(msec);
+-      else
+-              usleep_range(msec * 1000, msec * 1000 + 1000);
+-
+-      gpio_set_value_cansleep(phy_reset, !active_high);
+-
+-      return 0;
++
++      of_property_read_u32(np, "phy-reset-duration", &msec);
++      /* A sane reset duration should not be longer than 1s */
++      if (msec > 1000)
++              msec = 1;
++      fep->phy_reset_duration = msec;
++
++      return phy_reset;
+ }
+ #else /* CONFIG_OF */
+-static int fec_reset_phy(struct platform_device *pdev)
++static void fec_reset_phy(struct platform_device *pdev)
+ {
+       /*
+        * In case of platform probe, the reset has been done
+        * by machine code.
+        */
+-      return 0;
++}
++
++static inline int fec_get_reset_gpio(struct platform_device *pdev)
++{
++      return -EINVAL;
+ }
+ #endif /* CONFIG_OF */
+@@ -3384,6 +3389,7 @@ static void
+ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
+ {
+       struct device_node *np = pdev->dev.of_node;
++      int err;
+       *num_tx = *num_rx = 1;
+@@ -3391,9 +3397,13 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
+               return;
+       /* parse the num of tx and rx queues */
+-      of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
++      err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
++      if (err)
++              *num_tx = 1;
+-      of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
++      err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
++      if (err)
++              *num_rx = 1;
+       if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
+               dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
+@@ -3460,13 +3470,11 @@ fec_probe(struct platform_device *pdev)
+       int num_tx_qs;
+       int num_rx_qs;
+-      of_dma_configure(&pdev->dev, np);
+-
+       fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
+       /* Init network device */
+-      ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
+-                                FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
++      ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
++                                num_tx_qs, num_rx_qs);
+       if (!ndev)
+               return -ENOMEM;
+@@ -3505,13 +3513,14 @@ fec_probe(struct platform_device *pdev)
+       platform_set_drvdata(pdev, ndev);
+-      if ((of_machine_is_compatible("fsl,imx6q") ||
+-           of_machine_is_compatible("fsl,imx6dl")) &&
+-          !of_property_read_bool(np, "fsl,err006687-workaround-present"))
+-              fep->quirks |= FEC_QUIRK_ERR006687;
+-
+       fec_enet_of_parse_stop_mode(pdev);
++      ret = fec_get_reset_gpio(pdev);
++      if (ret == -EPROBE_DEFER)
++              goto gpio_defer;
++      fep->phy_reset_gpio = ret;
++
++
+       if (of_get_property(np, "fsl,magic-packet", NULL))
+               fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+@@ -3524,7 +3533,6 @@ fec_probe(struct platform_device *pdev)
+                       goto failed_phy;
+               }
+               phy_node = of_node_get(np);
+-              fep->fixed_link = true;
+       }
+       fep->phy_node = phy_node;
+@@ -3539,10 +3547,6 @@ fec_probe(struct platform_device *pdev)
+               fep->phy_interface = ret;
+       }
+-#if !defined(CONFIG_ARM64)
+-      request_bus_freq(BUS_FREQ_HIGH);
+-#endif
+-
+       fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(fep->clk_ipg)) {
+               ret = PTR_ERR(fep->clk_ipg);
+@@ -3577,39 +3581,24 @@ fec_probe(struct platform_device *pdev)
+               fep->bufdesc_ex = false;
+       }
++      pm_runtime_enable(&pdev->dev);
+       ret = fec_enet_clk_enable(ndev, true);
+       if (ret)
+               goto failed_clk;
+-      ret = clk_prepare_enable(fep->clk_ipg);
+-      if (ret)
+-              goto failed_clk_ipg;
+-      ret = clk_prepare_enable(fep->clk_ahb);
+-      if (ret)
+-              goto failed_clk_ahb;
+-
+       fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+       if (!IS_ERR(fep->reg_phy)) {
+               ret = regulator_enable(fep->reg_phy);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "Failed to enable phy regulator: %d\n", ret);
+-                      clk_disable_unprepare(fep->clk_ipg);
+                       goto failed_regulator;
+               }
+       } else {
+               fep->reg_phy = NULL;
+       }
+-      pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+-      pm_runtime_use_autosuspend(&pdev->dev);
+-      pm_runtime_get_noresume(&pdev->dev);
+-      pm_runtime_set_active(&pdev->dev);
+-      pm_runtime_enable(&pdev->dev);
+-
+-      ret = fec_reset_phy(pdev);
+-      if (ret)
+-              goto failed_reset;
++      fec_reset_phy(pdev);
+       if (fep->bufdesc_ex)
+               fec_ptp_init(pdev);
+@@ -3641,15 +3630,9 @@ fec_probe(struct platform_device *pdev)
+               fep->wake_irq = fep->irq[0];
+       init_completion(&fep->mdio_done);
+-
+-      /* board only enable one mii bus in default */
+-      if (!of_get_property(np, "fsl,mii-exclusive", NULL))
+-              fep->quirks |= FEC_QUIRK_SINGLE_MDIO;
+       ret = fec_enet_mii_init(pdev);
+-      if (ret) {
+-              dev_id = 0;
++      if (ret)
+               goto failed_mii_init;
+-      }
+       /* Carrier starts down, phylib will bring it up */
+       netif_carrier_off(ndev);
+@@ -3660,11 +3643,6 @@ fec_probe(struct platform_device *pdev)
+       if (ret)
+               goto failed_register;
+-      if (!fep->fixed_link) {
+-              fep->fixups = of_fec_enet_parse_fixup(np);
+-              fec_enet_register_fixup(ndev);
+-      }
+-
+       device_init_wakeup(&ndev->dev, fep->wol_flag &
+                          FEC_WOL_HAS_MAGIC_PACKET);
+@@ -3673,10 +3651,6 @@ fec_probe(struct platform_device *pdev)
+       fep->rx_copybreak = COPYBREAK_DEFAULT;
+       INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+-
+-      pm_runtime_mark_last_busy(&pdev->dev);
+-      pm_runtime_put_autosuspend(&pdev->dev);
+-
+       return 0;
+ failed_register:
+@@ -3684,22 +3658,14 @@ fec_probe(struct platform_device *pdev)
+ failed_mii_init:
+ failed_irq:
+ failed_init:
+-      fec_ptp_stop(pdev);
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
+-failed_reset:
+-      pm_runtime_put(&pdev->dev);
+-      pm_runtime_disable(&pdev->dev);
+ failed_regulator:
+-failed_clk_ahb:
+-      clk_disable_unprepare(fep->clk_ipg);
+-failed_clk_ipg:
+       fec_enet_clk_enable(ndev, false);
+ failed_clk:
+-      if (of_phy_is_fixed_link(np))
+-              of_phy_deregister_fixed_link(np);
+ failed_phy:
+       of_node_put(phy_node);
++gpio_defer:
+ failed_ioremap:
+       free_netdev(ndev);
+@@ -3711,16 +3677,15 @@ fec_drv_remove(struct platform_device *pdev)
+ {
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      struct device_node *np = pdev->dev.of_node;
++      cancel_delayed_work_sync(&fep->time_keep);
+       cancel_work_sync(&fep->tx_timeout_work);
+-      fec_ptp_stop(pdev);
+       unregister_netdev(ndev);
+       fec_enet_mii_remove(fep);
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
+-      if (of_phy_is_fixed_link(np))
+-              of_phy_deregister_fixed_link(np);
++      if (fep->ptp_clock)
++              ptp_clock_unregister(fep->ptp_clock);
+       of_node_put(fep->phy_node);
+       free_netdev(ndev);
+@@ -3731,13 +3696,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
+ {
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      int ret = 0;
+       rtnl_lock();
+       if (netif_running(ndev)) {
+               if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
+                       fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
+-              phy_stop(ndev->phydev);
++              phy_stop(fep->phy_dev);
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(ndev);
+               netif_device_detach(ndev);
+@@ -3751,12 +3715,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
+                       enable_irq_wake(fep->wake_irq);
+               }
+               fec_enet_clk_enable(ndev, false);
+-              fep->active_in_suspend = !pm_runtime_status_suspended(dev);
+-              if (fep->active_in_suspend)
+-                      ret = pm_runtime_force_suspend(dev);
+-              if (ret < 0)
+-                      return ret;
+-      } else if (fep->mii_bus_share && !ndev->phydev) {
++      } else if (fep->mii_bus_share && fep->miibus_up_failed && !fep->phy_dev) {
++              fec_enet_clk_enable(ndev, false);
+               pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+       }
+       rtnl_unlock();
+@@ -3777,7 +3737,7 @@ static int __maybe_unused fec_resume(struct device *dev)
+ {
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+-      int ret = 0;
++      int ret;
+       int val;
+       if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+@@ -3788,8 +3748,6 @@ static int __maybe_unused fec_resume(struct device *dev)
+       rtnl_lock();
+       if (netif_running(ndev)) {
+-              if (fep->active_in_suspend)
+-                      pm_runtime_force_resume(dev);
+               ret = fec_enet_clk_enable(ndev, true);
+               if (ret) {
+                       rtnl_unlock();
+@@ -3812,15 +3770,16 @@ static int __maybe_unused fec_resume(struct device *dev)
+               netif_device_attach(ndev);
+               netif_tx_unlock_bh(ndev);
+               napi_enable(&fep->napi);
+-              phy_start(ndev->phydev);
+-      } else if (fep->mii_bus_share && !ndev->phydev) {
++              phy_start(fep->phy_dev);
++      } else if (fep->mii_bus_share && !fep->phy_dev) {
+               pinctrl_pm_select_default_state(&fep->pdev->dev);
++              fep->miibus_up_failed = true;
+               /* And then recovery mii bus */
+-              ret = fec_restore_mii_bus(ndev);
++              fec_restore_mii_bus(ndev);
+       }
+       rtnl_unlock();
+-      return ret;
++      return 0;
+ failed_clk:
+       if (fep->reg_phy)
+@@ -3828,46 +3787,21 @@ static int __maybe_unused fec_resume(struct device *dev)
+       return ret;
+ }
+-static int __maybe_unused fec_runtime_suspend(struct device *dev)
++static int fec_runtime_suspend(struct device *dev)
+ {
+-      struct net_device *ndev = dev_get_drvdata(dev);
+-      struct fec_enet_private *fep = netdev_priv(ndev);
+-
+-      clk_disable_unprepare(fep->clk_ahb);
+-      clk_disable_unprepare(fep->clk_ipg);
+-#if !defined(CONFIG_ARM64)
+       release_bus_freq(BUS_FREQ_HIGH);
+-#endif
+-
+       return 0;
+ }
+-static int __maybe_unused fec_runtime_resume(struct device *dev)
++static int fec_runtime_resume(struct device *dev)
+ {
+-      struct net_device *ndev = dev_get_drvdata(dev);
+-      struct fec_enet_private *fep = netdev_priv(ndev);
+-      int ret;
+-
+-#if !defined(CONFIG_ARM64)
+       request_bus_freq(BUS_FREQ_HIGH);
+-#endif
+-      ret = clk_prepare_enable(fep->clk_ahb);
+-      if (ret)
+-              return ret;
+-      ret = clk_prepare_enable(fep->clk_ipg);
+-      if (ret)
+-              goto failed_clk_ipg;
+-
+       return 0;
+-
+-failed_clk_ipg:
+-      clk_disable_unprepare(fep->clk_ahb);
+-      return ret;
+ }
+ static const struct dev_pm_ops fec_pm_ops = {
+-      SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+       SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
++      SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ };
+ static struct platform_driver fec_driver = {
+diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
+index 446ae9d..afe7f39 100644
+--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
++++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
+@@ -66,6 +66,7 @@ struct mpc52xx_fec_priv {
+       /* MDIO link details */
+       unsigned int mdio_speed;
+       struct device_node *phy_node;
++      struct phy_device *phydev;
+       enum phy_state link;
+       int seven_wire_mode;
+ };
+@@ -164,7 +165,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
+ static void mpc52xx_fec_adjust_link(struct net_device *dev)
+ {
+       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+-      struct phy_device *phydev = dev->phydev;
++      struct phy_device *phydev = priv->phydev;
+       int new_state = 0;
+       if (phydev->link != PHY_DOWN) {
+@@ -214,17 +215,16 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
+ static int mpc52xx_fec_open(struct net_device *dev)
+ {
+       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+-      struct phy_device *phydev = NULL;
+       int err = -EBUSY;
+       if (priv->phy_node) {
+-              phydev = of_phy_connect(priv->ndev, priv->phy_node,
+-                                      mpc52xx_fec_adjust_link, 0, 0);
+-              if (!phydev) {
++              priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
++                                            mpc52xx_fec_adjust_link, 0, 0);
++              if (!priv->phydev) {
+                       dev_err(&dev->dev, "of_phy_connect failed\n");
+                       return -ENODEV;
+               }
+-              phy_start(phydev);
++              phy_start(priv->phydev);
+       }
+       if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
+@@ -268,9 +268,10 @@ static int mpc52xx_fec_open(struct net_device *dev)
+  free_ctrl_irq:
+       free_irq(dev->irq, dev);
+  free_phy:
+-      if (phydev) {
+-              phy_stop(phydev);
+-              phy_disconnect(phydev);
++      if (priv->phydev) {
++              phy_stop(priv->phydev);
++              phy_disconnect(priv->phydev);
++              priv->phydev = NULL;
+       }
+       return err;
+@@ -279,7 +280,6 @@ static int mpc52xx_fec_open(struct net_device *dev)
+ static int mpc52xx_fec_close(struct net_device *dev)
+ {
+       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+-      struct phy_device *phydev = dev->phydev;
+       netif_stop_queue(dev);
+@@ -291,10 +291,11 @@ static int mpc52xx_fec_close(struct net_device *dev)
+       free_irq(priv->r_irq, dev);
+       free_irq(priv->t_irq, dev);
+-      if (phydev) {
++      if (priv->phydev) {
+               /* power down phy */
+-              phy_stop(phydev);
+-              phy_disconnect(phydev);
++              phy_stop(priv->phydev);
++              phy_disconnect(priv->phydev);
++              priv->phydev = NULL;
+       }
+       return 0;
+@@ -762,6 +763,26 @@ static void mpc52xx_fec_reset(struct net_device *dev)
+ /* ethtool interface */
++static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++      struct mpc52xx_fec_priv *priv = netdev_priv(dev);
++
++      if (!priv->phydev)
++              return -ENODEV;
++
++      return phy_ethtool_gset(priv->phydev, cmd);
++}
++
++static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
++{
++      struct mpc52xx_fec_priv *priv = netdev_priv(dev);
++
++      if (!priv->phydev)
++              return -ENODEV;
++
++      return phy_ethtool_sset(priv->phydev, cmd);
++}
++
+ static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
+ {
+       struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+@@ -775,23 +796,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
+ }
+ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
++      .get_settings = mpc52xx_fec_get_settings,
++      .set_settings = mpc52xx_fec_set_settings,
+       .get_link = ethtool_op_get_link,
+       .get_msglevel = mpc52xx_fec_get_msglevel,
+       .set_msglevel = mpc52xx_fec_set_msglevel,
+       .get_ts_info = ethtool_op_get_ts_info,
+-      .get_link_ksettings = phy_ethtool_get_link_ksettings,
+-      .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ };
+ static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ {
+-      struct phy_device *phydev = dev->phydev;
++      struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+-      if (!phydev)
++      if (!priv->phydev)
+               return -ENOTSUPP;
+-      return phy_mii_ioctl(phydev, rq, cmd);
++      return phy_mii_ioctl(priv->phydev, rq, cmd);
+ }
+ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
+@@ -1063,23 +1084,27 @@ static struct platform_driver mpc52xx_fec_driver = {
+ /* Module                                                                   */
+ /* ======================================================================== */
+-static struct platform_driver * const drivers[] = {
+-#ifdef CONFIG_FEC_MPC52xx_MDIO
+-      &mpc52xx_fec_mdio_driver,
+-#endif
+-      &mpc52xx_fec_driver,
+-};
+-
+ static int __init
+ mpc52xx_fec_init(void)
+ {
+-      return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
++#ifdef CONFIG_FEC_MPC52xx_MDIO
++      int ret;
++      ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
++      if (ret) {
++              pr_err("failed to register mdio driver\n");
++              return ret;
++      }
++#endif
++      return platform_driver_register(&mpc52xx_fec_driver);
+ }
+ static void __exit
+ mpc52xx_fec_exit(void)
+ {
+-      platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
++      platform_driver_unregister(&mpc52xx_fec_driver);
++#ifdef CONFIG_FEC_MPC52xx_MDIO
++      platform_driver_unregister(&mpc52xx_fec_mdio_driver);
++#endif
+ }
+diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+index b5497e3..1e647be 100644
+--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
++++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+@@ -22,6 +22,7 @@
+ struct mpc52xx_fec_mdio_priv {
+       struct mpc52xx_fec __iomem *regs;
++      int mdio_irqs[PHY_MAX_ADDR];
+ };
+ static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
+@@ -82,6 +83,9 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
+       bus->read = mpc52xx_fec_mdio_read;
+       bus->write = mpc52xx_fec_mdio_write;
++      /* setup irqs */
++      bus->irq = priv->mdio_irqs;
++
+       /* setup registers */
+       err = of_address_to_resource(np, 0, &res);
+       if (err)
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index f9e7446..7a8386a 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -112,8 +112,9 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+       unsigned long flags;
+       u32 val, tempval;
+       int inc;
+-      struct timespec64 ts;
++      struct timespec ts;
+       u64 ns;
++      u32 remainder;
+       val = 0;
+       if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
+@@ -162,7 +163,8 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+               tempval = readl(fep->hwp + FEC_ATIME);
+               /* Convert the ptp local counter to 1588 timestamp */
+               ns = timecounter_cyc2time(&fep->tc, tempval);
+-              ts = ns_to_timespec64(ns);
++              ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
++              ts.tv_nsec = remainder;
+               /* The tempval is  less than 3 seconds, and  so val is less than
+                * 4 seconds. No overflow for 32bit calculation.
+@@ -596,16 +598,6 @@ void fec_ptp_init(struct platform_device *pdev)
+       schedule_delayed_work(&fep->time_keep, HZ);
+ }
+-void fec_ptp_stop(struct platform_device *pdev)
+-{
+-      struct net_device *ndev = platform_get_drvdata(pdev);
+-      struct fec_enet_private *fep = netdev_priv(ndev);
+-
+-      cancel_delayed_work_sync(&fep->time_keep);
+-      if (fep->ptp_clock)
+-              ptp_clock_unregister(fep->ptp_clock);
+-}
+-
+ /**
+  * fec_ptp_check_pps_event
+  * @fep: the fec_enet_private structure handle
+diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
+deleted file mode 100644
+index 79b7c84..0000000
+--- a/drivers/net/ethernet/freescale/fman/Kconfig
++++ /dev/null
+@@ -1,9 +0,0 @@
+-config FSL_FMAN
+-      tristate "FMan support"
+-      depends on FSL_SOC || COMPILE_TEST
+-      select GENERIC_ALLOCATOR
+-      select PHYLIB
+-      default n
+-      help
+-              Freescale Data-Path Acceleration Architecture Frame Manager
+-              (FMan) support
+diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
+deleted file mode 100644
+index 6049177..0000000
+--- a/drivers/net/ethernet/freescale/fman/Makefile
++++ /dev/null
+@@ -1,9 +0,0 @@
+-subdir-ccflags-y +=  -I$(srctree)/drivers/net/ethernet/freescale/fman
+-
+-obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
+-obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
+-obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
+-
+-fsl_fman-objs := fman_muram.o fman.o fman_sp.o
+-fsl_fman_port-objs := fman_port.o
+-fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
+diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
+deleted file mode 100644
+index dafd9e1..0000000
+--- a/drivers/net/ethernet/freescale/fman/fman.c
++++ /dev/null
+@@ -1,2967 +0,0 @@
+-/*
+- * Copyright 2008-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- *     * Redistributions of source code must retain the above copyright
+- *       notice, this list of conditions and the following disclaimer.
+- *     * Redistributions in binary form must reproduce the above copyright
+- *       notice, this list of conditions and the following disclaimer in the
+- *       documentation and/or other materials provided with the distribution.
+- *     * Neither the name of Freescale Semiconductor nor the
+- *       names of its contributors may be used to endorse or promote products
+- *       derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- */
+-
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-
+-#include "fman.h"
+-#include "fman_muram.h"
+-
+-#include <linux/fsl/guts.h>
+-#include <linux/slab.h>
+-#include <linux/delay.h>
+-#include <linux/module.h>
+-#include <linux/of_platform.h>
+-#include <linux/clk.h>
+-#include <linux/of_address.h>
+-#include <linux/of_irq.h>
+-#include <linux/interrupt.h>
+-#include <linux/libfdt_env.h>
+-
+-/* General defines */
+-#define FMAN_LIODN_TBL                        64      /* size of LIODN table */
+-#define MAX_NUM_OF_MACS                       10
+-#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS        4
+-#define BASE_RX_PORTID                        0x08
+-#define BASE_TX_PORTID                        0x28
+-
+-/* Modules registers offsets */
+-#define BMI_OFFSET            0x00080000
+-#define QMI_OFFSET            0x00080400
+-#define DMA_OFFSET            0x000C2000
+-#define FPM_OFFSET            0x000C3000
+-#define IMEM_OFFSET           0x000C4000
+-#define CGP_OFFSET            0x000DB000
+-
+-/* Exceptions bit map */
+-#define EX_DMA_BUS_ERROR              0x80000000
+-#define EX_DMA_READ_ECC                       0x40000000
+-#define EX_DMA_SYSTEM_WRITE_ECC       0x20000000
+-#define EX_DMA_FM_WRITE_ECC           0x10000000
+-#define EX_FPM_STALL_ON_TASKS         0x08000000
+-#define EX_FPM_SINGLE_ECC             0x04000000
+-#define EX_FPM_DOUBLE_ECC             0x02000000
+-#define EX_QMI_SINGLE_ECC             0x01000000
+-#define EX_QMI_DEQ_FROM_UNKNOWN_PORTID        0x00800000
+-#define EX_QMI_DOUBLE_ECC             0x00400000
+-#define EX_BMI_LIST_RAM_ECC           0x00200000
+-#define EX_BMI_STORAGE_PROFILE_ECC    0x00100000
+-#define EX_BMI_STATISTICS_RAM_ECC     0x00080000
+-#define EX_IRAM_ECC                   0x00040000
+-#define EX_MURAM_ECC                  0x00020000
+-#define EX_BMI_DISPATCH_RAM_ECC       0x00010000
+-#define EX_DMA_SINGLE_PORT_ECC                0x00008000
+-
+-/* DMA defines */
+-/* masks */
+-#define DMA_MODE_BER                  0x00200000
+-#define DMA_MODE_ECC                  0x00000020
+-#define DMA_MODE_SECURE_PROT          0x00000800
+-#define DMA_MODE_AXI_DBG_MASK         0x0F000000
+-
+-#define DMA_TRANSFER_PORTID_MASK      0xFF000000
+-#define DMA_TRANSFER_TNUM_MASK                0x00FF0000
+-#define DMA_TRANSFER_LIODN_MASK       0x00000FFF
+-
+-#define DMA_STATUS_BUS_ERR            0x08000000
+-#define DMA_STATUS_READ_ECC           0x04000000
+-#define DMA_STATUS_SYSTEM_WRITE_ECC   0x02000000
+-#define DMA_STATUS_FM_WRITE_ECC       0x01000000
+-#define DMA_STATUS_FM_SPDAT_ECC       0x00080000
+-
+-#define DMA_MODE_CACHE_OR_SHIFT               30
+-#define DMA_MODE_AXI_DBG_SHIFT                        24
+-#define DMA_MODE_CEN_SHIFT                    13
+-#define DMA_MODE_CEN_MASK                     0x00000007
+-#define DMA_MODE_DBG_SHIFT                    7
+-#define DMA_MODE_AID_MODE_SHIFT               4
+-
+-#define DMA_THRESH_COMMQ_SHIFT                        24
+-#define DMA_THRESH_READ_INT_BUF_SHIFT         16
+-#define DMA_THRESH_READ_INT_BUF_MASK          0x0000003f
+-#define DMA_THRESH_WRITE_INT_BUF_MASK         0x0000003f
+-
+-#define DMA_TRANSFER_PORTID_SHIFT             24
+-#define DMA_TRANSFER_TNUM_SHIFT               16
+-
+-#define DMA_CAM_SIZEOF_ENTRY                  0x40
+-#define DMA_CAM_UNITS                         8
+-
+-#define DMA_LIODN_SHIFT               16
+-#define DMA_LIODN_BASE_MASK   0x00000FFF
+-
+-/* FPM defines */
+-#define FPM_EV_MASK_DOUBLE_ECC                0x80000000
+-#define FPM_EV_MASK_STALL             0x40000000
+-#define FPM_EV_MASK_SINGLE_ECC                0x20000000
+-#define FPM_EV_MASK_RELEASE_FM                0x00010000
+-#define FPM_EV_MASK_DOUBLE_ECC_EN     0x00008000
+-#define FPM_EV_MASK_STALL_EN          0x00004000
+-#define FPM_EV_MASK_SINGLE_ECC_EN     0x00002000
+-#define FPM_EV_MASK_EXTERNAL_HALT     0x00000008
+-#define FPM_EV_MASK_ECC_ERR_HALT      0x00000004
+-
+-#define FPM_RAM_MURAM_ECC             0x00008000
+-#define FPM_RAM_IRAM_ECC              0x00004000
+-#define FPM_IRAM_ECC_ERR_EX_EN                0x00020000
+-#define FPM_MURAM_ECC_ERR_EX_EN       0x00040000
+-#define FPM_RAM_IRAM_ECC_EN           0x40000000
+-#define FPM_RAM_RAMS_ECC_EN           0x80000000
+-#define FPM_RAM_RAMS_ECC_EN_SRC_SEL   0x08000000
+-
+-#define FPM_REV1_MAJOR_MASK           0x0000FF00
+-#define FPM_REV1_MINOR_MASK           0x000000FF
+-
+-#define FPM_DISP_LIMIT_SHIFT          24
+-
+-#define FPM_PRT_FM_CTL1                       0x00000001
+-#define FPM_PRT_FM_CTL2                       0x00000002
+-#define FPM_PORT_FM_CTL_PORTID_SHIFT  24
+-#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT  16
+-
+-#define FPM_THR1_PRS_SHIFT            24
+-#define FPM_THR1_KG_SHIFT             16
+-#define FPM_THR1_PLCR_SHIFT           8
+-#define FPM_THR1_BMI_SHIFT            0
+-
+-#define FPM_THR2_QMI_ENQ_SHIFT                24
+-#define FPM_THR2_QMI_DEQ_SHIFT                0
+-#define FPM_THR2_FM_CTL1_SHIFT                16
+-#define FPM_THR2_FM_CTL2_SHIFT                8
+-
+-#define FPM_EV_MASK_CAT_ERR_SHIFT     1
+-#define FPM_EV_MASK_DMA_ERR_SHIFT     0
+-
+-#define FPM_REV1_MAJOR_SHIFT          8
+-
+-#define FPM_RSTC_FM_RESET             0x80000000
+-#define FPM_RSTC_MAC0_RESET           0x40000000
+-#define FPM_RSTC_MAC1_RESET           0x20000000
+-#define FPM_RSTC_MAC2_RESET           0x10000000
+-#define FPM_RSTC_MAC3_RESET           0x08000000
+-#define FPM_RSTC_MAC8_RESET           0x04000000
+-#define FPM_RSTC_MAC4_RESET           0x02000000
+-#define FPM_RSTC_MAC5_RESET           0x01000000
+-#define FPM_RSTC_MAC6_RESET           0x00800000
+-#define FPM_RSTC_MAC7_RESET           0x00400000
+-#define FPM_RSTC_MAC9_RESET           0x00200000
+-
+-#define FPM_TS_INT_SHIFT              16
+-#define FPM_TS_CTL_EN                 0x80000000
+-
+-/* BMI defines */
+-#define BMI_INIT_START                                0x80000000
+-#define BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC   0x80000000
+-#define BMI_ERR_INTR_EN_LIST_RAM_ECC          0x40000000
+-#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC    0x20000000
+-#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC      0x10000000
+-#define BMI_NUM_OF_TASKS_MASK                 0x3F000000
+-#define BMI_NUM_OF_EXTRA_TASKS_MASK           0x000F0000
+-#define BMI_NUM_OF_DMAS_MASK                  0x00000F00
+-#define BMI_NUM_OF_EXTRA_DMAS_MASK            0x0000000F
+-#define BMI_FIFO_SIZE_MASK                    0x000003FF
+-#define BMI_EXTRA_FIFO_SIZE_MASK              0x03FF0000
+-#define BMI_CFG2_DMAS_MASK                    0x0000003F
+-#define BMI_CFG2_TASKS_MASK                   0x0000003F
+-
+-#define BMI_CFG2_TASKS_SHIFT          16
+-#define BMI_CFG2_DMAS_SHIFT           0
+-#define BMI_CFG1_FIFO_SIZE_SHIFT      16
+-#define BMI_NUM_OF_TASKS_SHIFT                24
+-#define BMI_EXTRA_NUM_OF_TASKS_SHIFT  16
+-#define BMI_NUM_OF_DMAS_SHIFT         8
+-#define BMI_EXTRA_NUM_OF_DMAS_SHIFT   0
+-
+-#define BMI_FIFO_ALIGN                        0x100
+-
+-#define BMI_EXTRA_FIFO_SIZE_SHIFT     16
+-
+-/* QMI defines */
+-#define QMI_CFG_ENQ_EN                        0x80000000
+-#define QMI_CFG_DEQ_EN                        0x40000000
+-#define QMI_CFG_EN_COUNTERS           0x10000000
+-#define QMI_CFG_DEQ_MASK              0x0000003F
+-#define QMI_CFG_ENQ_MASK              0x00003F00
+-#define QMI_CFG_ENQ_SHIFT             8
+-
+-#define QMI_ERR_INTR_EN_DOUBLE_ECC    0x80000000
+-#define QMI_ERR_INTR_EN_DEQ_FROM_DEF  0x40000000
+-#define QMI_INTR_EN_SINGLE_ECC                0x80000000
+-
+-#define QMI_GS_HALT_NOT_BUSY          0x00000002
+-
+-/* IRAM defines */
+-#define IRAM_IADD_AIE                 0x80000000
+-#define IRAM_READY                    0x80000000
+-
+-/* Default values */
+-#define DEFAULT_CATASTROPHIC_ERR              0
+-#define DEFAULT_DMA_ERR                               0
+-#define DEFAULT_AID_MODE                      FMAN_DMA_AID_OUT_TNUM
+-#define DEFAULT_DMA_COMM_Q_LOW                        0x2A
+-#define DEFAULT_DMA_COMM_Q_HIGH               0x3F
+-#define DEFAULT_CACHE_OVERRIDE                        0
+-#define DEFAULT_DMA_CAM_NUM_OF_ENTRIES                64
+-#define DEFAULT_DMA_DBG_CNT_MODE              0
+-#define DEFAULT_DMA_SOS_EMERGENCY             0
+-#define DEFAULT_DMA_WATCHDOG                  0
+-#define DEFAULT_DISP_LIMIT                    0
+-#define DEFAULT_PRS_DISP_TH                   16
+-#define DEFAULT_PLCR_DISP_TH                  16
+-#define DEFAULT_KG_DISP_TH                    16
+-#define DEFAULT_BMI_DISP_TH                   16
+-#define DEFAULT_QMI_ENQ_DISP_TH               16
+-#define DEFAULT_QMI_DEQ_DISP_TH               16
+-#define DEFAULT_FM_CTL1_DISP_TH               16
+-#define DEFAULT_FM_CTL2_DISP_TH               16
+-
+-#define DFLT_AXI_DBG_NUM_OF_BEATS             1
+-
+-#define DFLT_DMA_READ_INT_BUF_LOW(dma_thresh_max_buf) \
+-      ((dma_thresh_max_buf + 1) / 2)
+-#define DFLT_DMA_READ_INT_BUF_HIGH(dma_thresh_max_buf)        \
+-      ((dma_thresh_max_buf + 1) * 3 / 4)
+-#define DFLT_DMA_WRITE_INT_BUF_LOW(dma_thresh_max_buf)        \
+-      ((dma_thresh_max_buf + 1) / 2)
+-#define DFLT_DMA_WRITE_INT_BUF_HIGH(dma_thresh_max_buf)\
+-      ((dma_thresh_max_buf + 1) * 3 / 4)
+-
+-#define DMA_COMM_Q_LOW_FMAN_V3                0x2A
+-#define DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq)          \
+-      ((dma_thresh_max_commq + 1) / 2)
+-#define DFLT_DMA_COMM_Q_LOW(major, dma_thresh_max_commq)      \
+-      ((major == 6) ? DMA_COMM_Q_LOW_FMAN_V3 :                \
+-      DMA_COMM_Q_LOW_FMAN_V2(dma_thresh_max_commq))
+-
+-#define DMA_COMM_Q_HIGH_FMAN_V3       0x3f
+-#define DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq)         \
+-      ((dma_thresh_max_commq + 1) * 3 / 4)
+-#define DFLT_DMA_COMM_Q_HIGH(major, dma_thresh_max_commq)     \
+-      ((major == 6) ? DMA_COMM_Q_HIGH_FMAN_V3 :               \
+-      DMA_COMM_Q_HIGH_FMAN_V2(dma_thresh_max_commq))
+-
+-#define TOTAL_NUM_OF_TASKS_FMAN_V3L   59
+-#define TOTAL_NUM_OF_TASKS_FMAN_V3H   124
+-#define DFLT_TOTAL_NUM_OF_TASKS(major, minor, bmi_max_num_of_tasks)   \
+-      ((major == 6) ? ((minor == 1 || minor == 4) ?                   \
+-      TOTAL_NUM_OF_TASKS_FMAN_V3L : TOTAL_NUM_OF_TASKS_FMAN_V3H) :    \
+-      bmi_max_num_of_tasks)
+-
+-#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V3                64
+-#define DMA_CAM_NUM_OF_ENTRIES_FMAN_V2                32
+-#define DFLT_DMA_CAM_NUM_OF_ENTRIES(major)                    \
+-      (major == 6 ? DMA_CAM_NUM_OF_ENTRIES_FMAN_V3 :          \
+-      DMA_CAM_NUM_OF_ENTRIES_FMAN_V2)
+-
+-#define FM_TIMESTAMP_1_USEC_BIT             8
+-
+-/* Defines used for enabling/disabling FMan interrupts */
+-#define ERR_INTR_EN_DMA         0x00010000
+-#define ERR_INTR_EN_FPM         0x80000000
+-#define ERR_INTR_EN_BMI         0x00800000
+-#define ERR_INTR_EN_QMI         0x00400000
+-#define ERR_INTR_EN_MURAM       0x00040000
+-#define ERR_INTR_EN_MAC0        0x00004000
+-#define ERR_INTR_EN_MAC1        0x00002000
+-#define ERR_INTR_EN_MAC2        0x00001000
+-#define ERR_INTR_EN_MAC3        0x00000800
+-#define ERR_INTR_EN_MAC4        0x00000400
+-#define ERR_INTR_EN_MAC5        0x00000200
+-#define ERR_INTR_EN_MAC6        0x00000100
+-#define ERR_INTR_EN_MAC7        0x00000080
+-#define ERR_INTR_EN_MAC8        0x00008000
+-#define ERR_INTR_EN_MAC9        0x00000040
+-
+-#define INTR_EN_QMI             0x40000000
+-#define INTR_EN_MAC0            0x00080000
+-#define INTR_EN_MAC1            0x00040000
+-#define INTR_EN_MAC2            0x00020000
+-#define INTR_EN_MAC3            0x00010000
+-#define INTR_EN_MAC4            0x00000040
+-#define INTR_EN_MAC5            0x00000020
+-#define INTR_EN_MAC6            0x00000008
+-#define INTR_EN_MAC7            0x00000002
+-#define INTR_EN_MAC8            0x00200000
+-#define INTR_EN_MAC9            0x00100000
+-#define INTR_EN_REV0            0x00008000
+-#define INTR_EN_REV1            0x00004000
+-#define INTR_EN_REV2            0x00002000
+-#define INTR_EN_REV3            0x00001000
+-#define INTR_EN_TMR             0x01000000
+-
+-enum fman_dma_aid_mode {
+-      FMAN_DMA_AID_OUT_PORT_ID = 0,             /* 4 LSB of PORT_ID */
+-      FMAN_DMA_AID_OUT_TNUM                     /* 4 LSB of TNUM */
+-};
+-
+-struct fman_iram_regs {
+-      u32 iadd;       /* FM IRAM instruction address register */
+-      u32 idata;      /* FM IRAM instruction data register */
+-      u32 itcfg;      /* FM IRAM timing config register */
+-      u32 iready;     /* FM IRAM ready register */
+-};
+-
+-struct fman_fpm_regs {
+-      u32 fmfp_tnc;           /* FPM TNUM Control 0x00 */
+-      u32 fmfp_prc;           /* FPM Port_ID FmCtl Association 0x04 */
+-      u32 fmfp_brkc;          /* FPM Breakpoint Control 0x08 */
+-      u32 fmfp_mxd;           /* FPM Flush Control 0x0c */
+-      u32 fmfp_dist1;         /* FPM Dispatch Thresholds1 0x10 */
+-      u32 fmfp_dist2;         /* FPM Dispatch Thresholds2 0x14 */
+-      u32 fm_epi;             /* FM Error Pending Interrupts 0x18 */
+-      u32 fm_rie;             /* FM Error Interrupt Enable 0x1c */
+-      u32 fmfp_fcev[4];       /* FPM FMan-Controller Event 1-4 0x20-0x2f */
+-      u32 res0030[4];         /* res 0x30 - 0x3f */
+-      u32 fmfp_cee[4];        /* PM FMan-Controller Event 1-4 0x40-0x4f */
+-      u32 res0050[4];         /* res 0x50-0x5f */
+-      u32 fmfp_tsc1;          /* FPM TimeStamp Control1 0x60 */
+-      u32 fmfp_tsc2;          /* FPM TimeStamp Control2 0x64 */
+-      u32 fmfp_tsp;           /* FPM Time Stamp 0x68 */
+-      u32 fmfp_tsf;           /* FPM Time Stamp Fraction 0x6c */
+-      u32 fm_rcr;             /* FM Rams Control 0x70 */
+-      u32 fmfp_extc;          /* FPM External Requests Control 0x74 */
+-      u32 fmfp_ext1;          /* FPM External Requests Config1 0x78 */
+-      u32 fmfp_ext2;          /* FPM External Requests Config2 0x7c */
+-      u32 fmfp_drd[16];       /* FPM Data_Ram Data 0-15 0x80 - 0xbf */
+-      u32 fmfp_dra;           /* FPM Data Ram Access 0xc0 */
+-      u32 fm_ip_rev_1;        /* FM IP Block Revision 1 0xc4 */
+-      u32 fm_ip_rev_2;        /* FM IP Block Revision 2 0xc8 */
+-      u32 fm_rstc;            /* FM Reset Command 0xcc */
+-      u32 fm_cld;             /* FM Classifier Debug 0xd0 */
+-      u32 fm_npi;             /* FM Normal Pending Interrupts 0xd4 */
+-      u32 fmfp_exte;          /* FPM External Requests Enable 0xd8 */
+-      u32 fmfp_ee;            /* FPM Event&Mask 0xdc */
+-      u32 fmfp_cev[4];        /* FPM CPU Event 1-4 0xe0-0xef */
+-      u32 res00f0[4];         /* res 0xf0-0xff */
+-      u32 fmfp_ps[50];        /* FPM Port Status 0x100-0x1c7 */
+-      u32 res01c8[14];        /* res 0x1c8-0x1ff */
+-      u32 fmfp_clfabc;        /* FPM CLFABC 0x200 */
+-      u32 fmfp_clfcc;         /* FPM CLFCC 0x204 */
+-      u32 fmfp_clfaval;       /* FPM CLFAVAL 0x208 */
+-      u32 fmfp_clfbval;       /* FPM CLFBVAL 0x20c */
+-      u32 fmfp_clfcval;       /* FPM CLFCVAL 0x210 */
+-      u32 fmfp_clfamsk;       /* FPM CLFAMSK 0x214 */
+-      u32 fmfp_clfbmsk;       /* FPM CLFBMSK 0x218 */
+-      u32 fmfp_clfcmsk;       /* FPM CLFCMSK 0x21c */
+-      u32 fmfp_clfamc;        /* FPM CLFAMC 0x220 */
+-      u32 fmfp_clfbmc;        /* FPM CLFBMC 0x224 */
+-      u32 fmfp_clfcmc;        /* FPM CLFCMC 0x228 */
+-      u32 fmfp_decceh;        /* FPM DECCEH 0x22c */
+-      u32 res0230[116];       /* res 0x230 - 0x3ff */
+-      u32 fmfp_ts[128];       /* 0x400: FPM Task Status 0x400 - 0x5ff */
+-      u32 res0600[0x400 - 384];
+-};
+-
+-struct fman_bmi_regs {
+-      u32 fmbm_init;          /* BMI Initialization 0x00 */
+-      u32 fmbm_cfg1;          /* BMI Configuration 1 0x04 */
+-      u32 fmbm_cfg2;          /* BMI Configuration 2 0x08 */
+-      u32 res000c[5];         /* 0x0c - 0x1f */
+-      u32 fmbm_ievr;          /* Interrupt Event Register 0x20 */
+-      u32 fmbm_ier;           /* Interrupt Enable Register 0x24 */
+-      u32 fmbm_ifr;           /* Interrupt Force Register 0x28 */
+-      u32 res002c[5];         /* 0x2c - 0x3f */
+-      u32 fmbm_arb[8];        /* BMI Arbitration 0x40 - 0x5f */
+-      u32 res0060[12];        /* 0x60 - 0x8f */
+-      u32 fmbm_dtc[3];        /* Debug Trap Counter 0x90 - 0x9b */
+-      u32 res009c;            /* 0x9c */
+-      u32 fmbm_dcv[3][4];     /* Debug Compare val 0xa0-0xcf */
+-      u32 fmbm_dcm[3][4];     /* Debug Compare Mask 0xd0-0xff */
+-      u32 fmbm_gde;           /* BMI Global Debug Enable 0x100 */
+-      u32 fmbm_pp[63];        /* BMI Port Parameters 0x104 - 0x1ff */
+-      u32 res0200;            /* 0x200 */
+-      u32 fmbm_pfs[63];       /* BMI Port FIFO Size 0x204 - 0x2ff */
+-      u32 res0300;            /* 0x300 */
+-      u32 fmbm_spliodn[63];   /* Port Partition ID 0x304 - 0x3ff */
+-};
+-
+-struct fman_qmi_regs {
+-      u32 fmqm_gc;            /* General Configuration Register 0x00 */
+-      u32 res0004;            /* 0x04 */
+-      u32 fmqm_eie;           /* Error Interrupt Event Register 0x08 */
+-      u32 fmqm_eien;          /* Error Interrupt Enable Register 0x0c */
+-      u32 fmqm_eif;           /* Error Interrupt Force Register 0x10 */
+-      u32 fmqm_ie;            /* Interrupt Event Register 0x14 */
+-      u32 fmqm_ien;           /* Interrupt Enable Register 0x18 */
+-      u32 fmqm_if;            /* Interrupt Force Register 0x1c */
+-      u32 fmqm_gs;            /* Global Status Register 0x20 */
+-      u32 fmqm_ts;            /* Task Status Register 0x24 */
+-      u32 fmqm_etfc;          /* Enqueue Total Frame Counter 0x28 */
+-      u32 fmqm_dtfc;          /* Dequeue Total Frame Counter 0x2c */
+-      u32 fmqm_dc0;           /* Dequeue Counter 0 0x30 */
+-      u32 fmqm_dc1;           /* Dequeue Counter 1 0x34 */
+-      u32 fmqm_dc2;           /* Dequeue Counter 2 0x38 */
+-      u32 fmqm_dc3;           /* Dequeue Counter 3 0x3c */
+-      u32 fmqm_dfdc;          /* Dequeue FQID from Default Counter 0x40 */
+-      u32 fmqm_dfcc;          /* Dequeue FQID from Context Counter 0x44 */
+-      u32 fmqm_dffc;          /* Dequeue FQID from FD Counter 0x48 */
+-      u32 fmqm_dcc;           /* Dequeue Confirm Counter 0x4c */
+-      u32 res0050[7];         /* 0x50 - 0x6b */
+-      u32 fmqm_tapc;          /* Tnum Aging Period Control 0x6c */
+-      u32 fmqm_dmcvc;         /* Dequeue MAC Command Valid Counter 0x70 */
+-      u32 fmqm_difdcc;        /* Dequeue Invalid FD Command Counter 0x74 */
+-      u32 fmqm_da1v;          /* Dequeue A1 Valid Counter 0x78 */
+-      u32 res007c;            /* 0x7c */
+-      u32 fmqm_dtc;           /* 0x80 Debug Trap Counter 0x80 */
+-      u32 fmqm_efddd;         /* 0x84 Enqueue Frame desc Dynamic dbg 0x84 */
+-      u32 res0088[2];         /* 0x88 - 0x8f */
+-      struct {
+-              u32 fmqm_dtcfg1;        /* 0x90 dbg trap cfg 1 Register 0x00 */
+-              u32 fmqm_dtval1;        /* Debug Trap Value 1 Register 0x04 */
+-              u32 fmqm_dtm1;          /* Debug Trap Mask 1 Register 0x08 */
+-              u32 fmqm_dtc1;          /* Debug Trap Counter 1 Register 0x0c */
+-              u32 fmqm_dtcfg2;        /* dbg Trap cfg 2 Register 0x10 */
+-              u32 fmqm_dtval2;        /* Debug Trap Value 2 Register 0x14 */
+-              u32 fmqm_dtm2;          /* Debug Trap Mask 2 Register 0x18 */
+-              u32 res001c;            /* 0x1c */
+-      } dbg_traps[3];                 /* 0x90 - 0xef */
+-      u8 res00f0[0x400 - 0xf0];       /* 0xf0 - 0x3ff */
+-};
+-
+-struct fman_dma_regs {
+-      u32 fmdmsr;     /* FM DMA status register 0x00 */
+-      u32 fmdmmr;     /* FM DMA mode register 0x04 */
+-      u32 fmdmtr;     /* FM DMA bus threshold register 0x08 */
+-      u32 fmdmhy;     /* FM DMA bus hysteresis register 0x0c */
+-      u32 fmdmsetr;   /* FM DMA SOS emergency Threshold Register 0x10 */
+-      u32 fmdmtah;    /* FM DMA transfer bus address high reg 0x14 */
+-      u32 fmdmtal;    /* FM DMA transfer bus address low reg 0x18 */
+-      u32 fmdmtcid;   /* FM DMA transfer bus communication ID reg 0x1c */
+-      u32 fmdmra;     /* FM DMA bus internal ram address register 0x20 */
+-      u32 fmdmrd;     /* FM DMA bus internal ram data register 0x24 */
+-      u32 fmdmwcr;    /* FM DMA CAM watchdog counter value 0x28 */
+-      u32 fmdmebcr;   /* FM DMA CAM base in MURAM register 0x2c */
+-      u32 fmdmccqdr;  /* FM DMA CAM and CMD Queue Debug reg 0x30 */
+-      u32 fmdmccqvr1; /* FM DMA CAM and CMD Queue Value reg #1 0x34 */
+-      u32 fmdmccqvr2; /* FM DMA CAM and CMD Queue Value reg #2 0x38 */
+-      u32 fmdmcqvr3;  /* FM DMA CMD Queue Value register #3 0x3c */
+-      u32 fmdmcqvr4;  /* FM DMA CMD Queue Value register #4 0x40 */
+-      u32 fmdmcqvr5;  /* FM DMA CMD Queue Value register #5 0x44 */
+-      u32 fmdmsefrc;  /* FM DMA Semaphore Entry Full Reject Cntr 0x48 */
+-      u32 fmdmsqfrc;  /* FM DMA Semaphore Queue Full Reject Cntr 0x4c */
+-      u32 fmdmssrc;   /* FM DMA Semaphore SYNC Reject Counter 0x50 */
+-      u32 fmdmdcr;    /* FM DMA Debug Counter 0x54 */
+-      u32 fmdmemsr;   /* FM DMA Emergency Smoother Register 0x58 */
+-      u32 res005c;    /* 0x5c */
+-      u32 fmdmplr[FMAN_LIODN_TBL / 2];        /* DMA LIODN regs 0x60-0xdf */
+-      u32 res00e0[0x400 - 56];
+-};
+-
+-/* Structure that holds current FMan state.
+- * Used for saving run time information.
+- */
+-struct fman_state_struct {
+-      u8 fm_id;
+-      u16 fm_clk_freq;
+-      struct fman_rev_info rev_info;
+-      bool enabled_time_stamp;
+-      u8 count1_micro_bit;
+-      u8 total_num_of_tasks;
+-      u8 accumulated_num_of_tasks;
+-      u32 accumulated_fifo_size;
+-      u8 accumulated_num_of_open_dmas;
+-      u8 accumulated_num_of_deq_tnums;
+-      u32 exceptions;
+-      u32 extra_fifo_pool_size;
+-      u8 extra_tasks_pool_size;
+-      u8 extra_open_dmas_pool_size;
+-      u16 port_mfl[MAX_NUM_OF_MACS];
+-      u16 mac_mfl[MAX_NUM_OF_MACS];
+-
+-      /* SOC specific */
+-      u32 fm_iram_size;
+-      /* DMA */
+-      u32 dma_thresh_max_commq;
+-      u32 dma_thresh_max_buf;
+-      u32 max_num_of_open_dmas;
+-      /* QMI */
+-      u32 qmi_max_num_of_tnums;
+-      u32 qmi_def_tnums_thresh;
+-      /* BMI */
+-      u32 bmi_max_num_of_tasks;
+-      u32 bmi_max_fifo_size;
+-      /* General */
+-      u32 fm_port_num_of_cg;
+-      u32 num_of_rx_ports;
+-      u32 total_fifo_size;
+-
+-      u32 qman_channel_base;
+-      u32 num_of_qman_channels;
+-
+-      struct resource *res;
+-};
+-
+-/* Structure that holds FMan initial configuration */
+-struct fman_cfg {
+-      u8 disp_limit_tsh;
+-      u8 prs_disp_tsh;
+-      u8 plcr_disp_tsh;
+-      u8 kg_disp_tsh;
+-      u8 bmi_disp_tsh;
+-      u8 qmi_enq_disp_tsh;
+-      u8 qmi_deq_disp_tsh;
+-      u8 fm_ctl1_disp_tsh;
+-      u8 fm_ctl2_disp_tsh;
+-      int dma_cache_override;
+-      enum fman_dma_aid_mode dma_aid_mode;
+-      u32 dma_axi_dbg_num_of_beats;
+-      u32 dma_cam_num_of_entries;
+-      u32 dma_watchdog;
+-      u8 dma_comm_qtsh_asrt_emer;
+-      u32 dma_write_buf_tsh_asrt_emer;
+-      u32 dma_read_buf_tsh_asrt_emer;
+-      u8 dma_comm_qtsh_clr_emer;
+-      u32 dma_write_buf_tsh_clr_emer;
+-      u32 dma_read_buf_tsh_clr_emer;
+-      u32 dma_sos_emergency;
+-      int dma_dbg_cnt_mode;
+-      int catastrophic_err;
+-      int dma_err;
+-      u32 exceptions;
+-      u16 clk_freq;
+-      u32 cam_base_addr;
+-      u32 fifo_base_addr;
+-      u32 total_fifo_size;
+-      u32 total_num_of_tasks;
+-      u32 qmi_def_tnums_thresh;
+-};
+-
+-/* Structure that holds information received from device tree */
+-struct fman_dts_params {
+-      void __iomem *base_addr;                /* FMan virtual address */
+-      struct resource *res;                   /* FMan memory resource */
+-      u8 id;                                  /* FMan ID */
+-
+-      int err_irq;                            /* FMan Error IRQ */
+-
+-      u16 clk_freq;                           /* FMan clock freq (In Mhz) */
+-
+-      u32 qman_channel_base;                  /* QMan channels base */
+-      u32 num_of_qman_channels;               /* Number of QMan channels */
+-
+-      struct resource muram_res;              /* MURAM resource */
+-};
+-
+-/** fman_exceptions_cb
+- * fman               - Pointer to FMan
+- * exception  - The exception.
+- *
+- * Exceptions user callback routine, will be called upon an exception
+- * passing the exception identification.
+- *
+- * Return: irq status
+- */
+-typedef irqreturn_t (fman_exceptions_cb)(struct fman *fman,
+-                                       enum fman_exceptions exception);
+-
+-/** fman_bus_error_cb
+- * fman               - Pointer to FMan
+- * port_id    - Port id
+- * addr               - Address that caused the error
+- * tnum               - Owner of error
+- * liodn      - Logical IO device number
+- *
+- * Bus error user callback routine, will be called upon bus error,
+- * passing parameters describing the errors and the owner.
+- *
+- * Return: IRQ status
+- */
+-typedef irqreturn_t (fman_bus_error_cb)(struct fman *fman, u8 port_id,
+-                                      u64 addr, u8 tnum, u16 liodn);
+-
+-struct fman {
+-      struct device *dev;
+-      void __iomem *base_addr;
+-      struct fman_intr_src intr_mng[FMAN_EV_CNT];
+-
+-      struct fman_fpm_regs __iomem *fpm_regs;
+-      struct fman_bmi_regs __iomem *bmi_regs;
+-      struct fman_qmi_regs __iomem *qmi_regs;
+-      struct fman_dma_regs __iomem *dma_regs;
+-      fman_exceptions_cb *exception_cb;
+-      fman_bus_error_cb *bus_error_cb;
+-      /* Spinlock for FMan use */
+-      spinlock_t spinlock;
+-      struct fman_state_struct *state;
+-
+-      struct fman_cfg *cfg;
+-      struct muram_info *muram;
+-      /* cam section in muram */
+-      unsigned long cam_offset;
+-      size_t cam_size;
+-      /* Fifo in MURAM */
+-      unsigned long fifo_offset;
+-      size_t fifo_size;
+-
+-      u32 liodn_base[64];
+-      u32 liodn_offset[64];
+-
+-      struct fman_dts_params dts_params;
+-};
+-
+-static irqreturn_t fman_exceptions(struct fman *fman,
+-                                 enum fman_exceptions exception)
+-{
+-      dev_dbg(fman->dev, "%s: FMan[%d] exception %d\n",
+-              __func__, fman->state->fm_id, exception);
+-
+-      return IRQ_HANDLED;
+-}
+-
+-static irqreturn_t fman_bus_error(struct fman *fman, u8 __maybe_unused port_id,
+-                                u64 __maybe_unused addr,
+-                                u8 __maybe_unused tnum,
+-                                u16 __maybe_unused liodn)
+-{
+-      dev_dbg(fman->dev, "%s: FMan[%d] bus error: port_id[%d]\n",
+-              __func__, fman->state->fm_id, port_id);
+-
+-      return IRQ_HANDLED;
+-}
+-
+-static inline irqreturn_t call_mac_isr(struct fman *fman, u8 id)
+-{
+-      if (fman->intr_mng[id].isr_cb) {
+-              fman->intr_mng[id].isr_cb(fman->intr_mng[id].src_handle);
+-
+-              return IRQ_HANDLED;
+-      }
+-
+-      return IRQ_NONE;
+-}
+-
+-static inline u8 hw_port_id_to_sw_port_id(u8 major, u8 hw_port_id)
+-{
+-      u8 sw_port_id = 0;
+-
+-      if (hw_port_id >= BASE_TX_PORTID)
+-              sw_port_id = hw_port_id - BASE_TX_PORTID;
+-      else if (hw_port_id >= BASE_RX_PORTID)
+-              sw_port_id = hw_port_id - BASE_RX_PORTID;
+-      else
+-              sw_port_id = 0;
+-
+-      return sw_port_id;
+-}
+-
+-static void set_port_order_restoration(struct fman_fpm_regs __iomem *fpm_rg,
+-                                     u8 port_id)
+-{
+-      u32 tmp = 0;
+-
+-      tmp = port_id << FPM_PORT_FM_CTL_PORTID_SHIFT;
+-
+-      tmp |= FPM_PRT_FM_CTL2 | FPM_PRT_FM_CTL1;
+-
+-      /* order restoration */
+-      if (port_id % 2)
+-              tmp |= FPM_PRT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
+-      else
+-              tmp |= FPM_PRT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT;
+-
+-      iowrite32be(tmp, &fpm_rg->fmfp_prc);
+-}
+-
+-static void set_port_liodn(struct fman *fman, u8 port_id,
+-                         u32 liodn_base, u32 liodn_ofst)
+-{
+-      u32 tmp;
+-
+-      /* set LIODN base for this port */
+-      tmp = ioread32be(&fman->dma_regs->fmdmplr[port_id / 2]);
+-      if (port_id % 2) {
+-              tmp &= ~DMA_LIODN_BASE_MASK;
+-              tmp |= liodn_base;
+-      } else {
+-              tmp &= ~(DMA_LIODN_BASE_MASK << DMA_LIODN_SHIFT);
+-              tmp |= liodn_base << DMA_LIODN_SHIFT;
+-      }
+-      iowrite32be(tmp, &fman->dma_regs->fmdmplr[port_id / 2]);
+-      iowrite32be(liodn_ofst, &fman->bmi_regs->fmbm_spliodn[port_id - 1]);
+-}
+-
+-static void enable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+-{
+-      u32 tmp;
+-
+-      tmp = ioread32be(&fpm_rg->fm_rcr);
+-      if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+-              iowrite32be(tmp | FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+-      else
+-              iowrite32be(tmp | FPM_RAM_RAMS_ECC_EN |
+-                          FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+-}
+-
+-static void disable_rams_ecc(struct fman_fpm_regs __iomem *fpm_rg)
+-{
+-      u32 tmp;
+-
+-      tmp = ioread32be(&fpm_rg->fm_rcr);
+-      if (tmp & FPM_RAM_RAMS_ECC_EN_SRC_SEL)
+-              iowrite32be(tmp & ~FPM_RAM_IRAM_ECC_EN, &fpm_rg->fm_rcr);
+-      else
+-              iowrite32be(tmp & ~(FPM_RAM_RAMS_ECC_EN | FPM_RAM_IRAM_ECC_EN),
+-                          &fpm_rg->fm_rcr);
+-}
+-
+-static void fman_defconfig(struct fman_cfg *cfg)
+-{
+-      memset(cfg, 0, sizeof(struct fman_cfg));
+-
+-      cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
+-      cfg->dma_err = DEFAULT_DMA_ERR;
+-      cfg->dma_aid_mode = DEFAULT_AID_MODE;
+-      cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
+-      cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
+-      cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
+-      cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
+-      cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
+-      cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
+-      cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
+-      cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
+-      cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
+-      cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
+-      cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
+-      cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
+-      cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
+-      cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
+-      cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
+-      cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
+-}
+-
+-static int dma_init(struct fman *fman)
+-{
+-      struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+-      struct fman_cfg *cfg = fman->cfg;
+-      u32 tmp_reg;
+-
+-      /* Init DMA Registers */
+-
+-      /* clear status reg events */
+-      tmp_reg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC |
+-                 DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
+-      iowrite32be(ioread32be(&dma_rg->fmdmsr) | tmp_reg, &dma_rg->fmdmsr);
+-
+-      /* configure mode register */
+-      tmp_reg = 0;
+-      tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
+-      if (cfg->exceptions & EX_DMA_BUS_ERROR)
+-              tmp_reg |= DMA_MODE_BER;
+-      if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
+-          (cfg->exceptions & EX_DMA_READ_ECC) |
+-          (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
+-              tmp_reg |= DMA_MODE_ECC;
+-      if (cfg->dma_axi_dbg_num_of_beats)
+-              tmp_reg |= (DMA_MODE_AXI_DBG_MASK &
+-                      ((cfg->dma_axi_dbg_num_of_beats - 1)
+-                      << DMA_MODE_AXI_DBG_SHIFT));
+-
+-      tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
+-              DMA_MODE_CEN_MASK) << DMA_MODE_CEN_SHIFT;
+-      tmp_reg |= DMA_MODE_SECURE_PROT;
+-      tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
+-      tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
+-
+-      iowrite32be(tmp_reg, &dma_rg->fmdmmr);
+-
+-      /* configure thresholds register */
+-      tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
+-              DMA_THRESH_COMMQ_SHIFT);
+-      tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
+-              DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+-      tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
+-              DMA_THRESH_WRITE_INT_BUF_MASK;
+-
+-      iowrite32be(tmp_reg, &dma_rg->fmdmtr);
+-
+-      /* configure hysteresis register */
+-      tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
+-              DMA_THRESH_COMMQ_SHIFT);
+-      tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
+-              DMA_THRESH_READ_INT_BUF_MASK) << DMA_THRESH_READ_INT_BUF_SHIFT;
+-      tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
+-              DMA_THRESH_WRITE_INT_BUF_MASK;
+-
+-      iowrite32be(tmp_reg, &dma_rg->fmdmhy);
+-
+-      /* configure emergency threshold */
+-      iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
+-
+-      /* configure Watchdog */
+-      iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
+-
+-      iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
+-
+-      /* Allocate MURAM for CAM */
+-      fman->cam_size =
+-              (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
+-      fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
+-      if (IS_ERR_VALUE(fman->cam_offset)) {
+-              dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+-                      __func__);
+-              return -ENOMEM;
+-      }
+-
+-      if (fman->state->rev_info.major == 2) {
+-              u32 __iomem *cam_base_addr;
+-
+-              fman_muram_free_mem(fman->muram, fman->cam_offset,
+-                                  fman->cam_size);
+-
+-              fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
+-              fman->cam_offset = fman_muram_alloc(fman->muram,
+-                                                  fman->cam_size);
+-              if (IS_ERR_VALUE(fman->cam_offset)) {
+-                      dev_err(fman->dev, "%s: MURAM alloc for DMA CAM failed\n",
+-                              __func__);
+-                      return -ENOMEM;
+-              }
+-
+-              if (fman->cfg->dma_cam_num_of_entries % 8 ||
+-                  fman->cfg->dma_cam_num_of_entries > 32) {
+-                      dev_err(fman->dev, "%s: wrong dma_cam_num_of_entries\n",
+-                              __func__);
+-                      return -EINVAL;
+-              }
+-
+-              cam_base_addr = (u32 __iomem *)
+-                      fman_muram_offset_to_vbase(fman->muram,
+-                                                 fman->cam_offset);
+-              iowrite32be(~((1 <<
+-                          (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
+-                          cam_base_addr);
+-      }
+-
+-      fman->cfg->cam_base_addr = fman->cam_offset;
+-
+-      return 0;
+-}
+-
+-static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
+-{
+-      u32 tmp_reg;
+-      int i;
+-
+-      /* Init FPM Registers */
+-
+-      tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
+-      iowrite32be(tmp_reg, &fpm_rg->fmfp_mxd);
+-
+-      tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
+-                 ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
+-                 ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
+-                 ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
+-      iowrite32be(tmp_reg, &fpm_rg->fmfp_dist1);
+-
+-      tmp_reg =
+-              (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
+-               ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
+-               ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
+-               ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
+-      iowrite32be(tmp_reg, &fpm_rg->fmfp_dist2);
+-
+-      /* define exceptions and error behavior */
+-      tmp_reg = 0;
+-      /* Clear events */
+-      tmp_reg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC |
+-                  FPM_EV_MASK_SINGLE_ECC);
+-      /* enable interrupts */
+-      if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
+-              tmp_reg |= FPM_EV_MASK_STALL_EN;
+-      if (cfg->exceptions & EX_FPM_SINGLE_ECC)
+-              tmp_reg |= FPM_EV_MASK_SINGLE_ECC_EN;
+-      if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
+-              tmp_reg |= FPM_EV_MASK_DOUBLE_ECC_EN;
+-      tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
+-      tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
+-      /* FMan is not halted upon external halt activation */
+-      tmp_reg |= FPM_EV_MASK_EXTERNAL_HALT;
+-      /* Man is not halted upon  Unrecoverable ECC error behavior */
+-      tmp_reg |= FPM_EV_MASK_ECC_ERR_HALT;
+-      iowrite32be(tmp_reg, &fpm_rg->fmfp_ee);
+-
+-      /* clear all fmCtls event registers */
+-      for (i = 0; i < FM_NUM_OF_FMAN_CTRL_EVENT_REGS; i++)
+-              iowrite32be(0xFFFFFFFF, &fpm_rg->fmfp_cev[i]);
+-
+-      /* RAM ECC -  enable and clear events */
+-      /* first we need to clear all parser memory,
+-       * as it is uninitialized and may cause ECC errors
+-       */
+-      /* event bits */
+-      tmp_reg = (FPM_RAM_MURAM_ECC | FPM_RAM_IRAM_ECC);
+-
+-      iowrite32be(tmp_reg, &fpm_rg->fm_rcr);
+-
+-      tmp_reg = 0;
+-      if (cfg->exceptions & EX_IRAM_ECC) {
+-              tmp_reg |= FPM_IRAM_ECC_ERR_EX_EN;
+-              enable_rams_ecc(fpm_rg);
+-      }
+-      if (cfg->exceptions & EX_MURAM_ECC) {
+-              tmp_reg |= FPM_MURAM_ECC_ERR_EX_EN;
+-              enable_rams_ecc(fpm_rg);
+-      }
+-      iowrite32be(tmp_reg, &fpm_rg->fm_rie);
+-}
+-
+-static void bmi_init(struct fman_bmi_regs __iomem *bmi_rg,
+-                   struct fman_cfg *cfg)
+-{
+-      u32 tmp_reg;
+-
+-      /* Init BMI Registers */
+-
+-      /* define common resources */
+-      tmp_reg = cfg->fifo_base_addr;
+-      tmp_reg = tmp_reg / BMI_FIFO_ALIGN;
+-
+-      tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
+-                  BMI_CFG1_FIFO_SIZE_SHIFT);
+-      iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg1);
+-
+-      tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
+-                 BMI_CFG2_TASKS_SHIFT;
+-      /* num of DMA's will be dynamically updated when each port is set */
+-      iowrite32be(tmp_reg, &bmi_rg->fmbm_cfg2);
+-
+-      /* define unmaskable exceptions, enable and clear events */
+-      tmp_reg = 0;
+-      iowrite32be(BMI_ERR_INTR_EN_LIST_RAM_ECC |
+-                  BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC |
+-                  BMI_ERR_INTR_EN_STATISTICS_RAM_ECC |
+-                  BMI_ERR_INTR_EN_DISPATCH_RAM_ECC, &bmi_rg->fmbm_ievr);
+-
+-      if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
+-              tmp_reg |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+-      if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
+-              tmp_reg |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+-      if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
+-              tmp_reg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+-      if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
+-              tmp_reg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+-      iowrite32be(tmp_reg, &bmi_rg->fmbm_ier);
+-}
+-
+-static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
+-                   struct fman_cfg *cfg)
+-{
+-      u32 tmp_reg;
+-
+-      /* Init QMI Registers */
+-
+-      /* Clear error interrupt events */
+-
+-      iowrite32be(QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF,
+-                  &qmi_rg->fmqm_eie);
+-      tmp_reg = 0;
+-      if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
+-              tmp_reg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+-      if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
+-              tmp_reg |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+-      /* enable events */
+-      iowrite32be(tmp_reg, &qmi_rg->fmqm_eien);
+-
+-      tmp_reg = 0;
+-      /* Clear interrupt events */
+-      iowrite32be(QMI_INTR_EN_SINGLE_ECC, &qmi_rg->fmqm_ie);
+-      if (cfg->exceptions & EX_QMI_SINGLE_ECC)
+-              tmp_reg |= QMI_INTR_EN_SINGLE_ECC;
+-      /* enable events */
+-      iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
+-}
+-
+-static int enable(struct fman *fman, struct fman_cfg *cfg)
+-{
+-      u32 cfg_reg = 0;
+-
+-      /* Enable all modules */
+-
+-      /* clear&enable global counters - calculate reg and save for later,
+-       * because it's the same reg for QMI enable
+-       */
+-      cfg_reg = QMI_CFG_EN_COUNTERS;
+-
+-      /* Set enqueue and dequeue thresholds */
+-      cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
+-
+-      iowrite32be(BMI_INIT_START, &fman->bmi_regs->fmbm_init);
+-      iowrite32be(cfg_reg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN,
+-                  &fman->qmi_regs->fmqm_gc);
+-
+-      return 0;
+-}
+-
+-static int set_exception(struct fman *fman,
+-                       enum fman_exceptions exception, bool enable)
+-{
+-      u32 tmp;
+-
+-      switch (exception) {
+-      case FMAN_EX_DMA_BUS_ERROR:
+-              tmp = ioread32be(&fman->dma_regs->fmdmmr);
+-              if (enable)
+-                      tmp |= DMA_MODE_BER;
+-              else
+-                      tmp &= ~DMA_MODE_BER;
+-              /* disable bus error */
+-              iowrite32be(tmp, &fman->dma_regs->fmdmmr);
+-              break;
+-      case FMAN_EX_DMA_READ_ECC:
+-      case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+-      case FMAN_EX_DMA_FM_WRITE_ECC:
+-              tmp = ioread32be(&fman->dma_regs->fmdmmr);
+-              if (enable)
+-                      tmp |= DMA_MODE_ECC;
+-              else
+-                      tmp &= ~DMA_MODE_ECC;
+-              iowrite32be(tmp, &fman->dma_regs->fmdmmr);
+-              break;
+-      case FMAN_EX_FPM_STALL_ON_TASKS:
+-              tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+-              if (enable)
+-                      tmp |= FPM_EV_MASK_STALL_EN;
+-              else
+-                      tmp &= ~FPM_EV_MASK_STALL_EN;
+-              iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+-              break;
+-      case FMAN_EX_FPM_SINGLE_ECC:
+-              tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+-              if (enable)
+-                      tmp |= FPM_EV_MASK_SINGLE_ECC_EN;
+-              else
+-                      tmp &= ~FPM_EV_MASK_SINGLE_ECC_EN;
+-              iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+-              break;
+-      case FMAN_EX_FPM_DOUBLE_ECC:
+-              tmp = ioread32be(&fman->fpm_regs->fmfp_ee);
+-              if (enable)
+-                      tmp |= FPM_EV_MASK_DOUBLE_ECC_EN;
+-              else
+-                      tmp &= ~FPM_EV_MASK_DOUBLE_ECC_EN;
+-              iowrite32be(tmp, &fman->fpm_regs->fmfp_ee);
+-              break;
+-      case FMAN_EX_QMI_SINGLE_ECC:
+-              tmp = ioread32be(&fman->qmi_regs->fmqm_ien);
+-              if (enable)
+-                      tmp |= QMI_INTR_EN_SINGLE_ECC;
+-              else
+-                      tmp &= ~QMI_INTR_EN_SINGLE_ECC;
+-              iowrite32be(tmp, &fman->qmi_regs->fmqm_ien);
+-              break;
+-      case FMAN_EX_QMI_DOUBLE_ECC:
+-              tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
+-              if (enable)
+-                      tmp |= QMI_ERR_INTR_EN_DOUBLE_ECC;
+-              else
+-                      tmp &= ~QMI_ERR_INTR_EN_DOUBLE_ECC;
+-              iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
+-              break;
+-      case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+-              tmp = ioread32be(&fman->qmi_regs->fmqm_eien);
+-              if (enable)
+-                      tmp |= QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+-              else
+-                      tmp &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF;
+-              iowrite32be(tmp, &fman->qmi_regs->fmqm_eien);
+-              break;
+-      case FMAN_EX_BMI_LIST_RAM_ECC:
+-              tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+-              if (enable)
+-                      tmp |= BMI_ERR_INTR_EN_LIST_RAM_ECC;
+-              else
+-                      tmp &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC;
+-              iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+-              break;
+-      case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+-              tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+-              if (enable)
+-                      tmp |= BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+-              else
+-                      tmp &= ~BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC;
+-              iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+-              break;
+-      case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+-              tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+-              if (enable)
+-                      tmp |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+-              else
+-                      tmp &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC;
+-              iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+-              break;
+-      case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+-              tmp = ioread32be(&fman->bmi_regs->fmbm_ier);
+-              if (enable)
+-                      tmp |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+-              else
+-                      tmp &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC;
+-              iowrite32be(tmp, &fman->bmi_regs->fmbm_ier);
+-              break;
+-      case FMAN_EX_IRAM_ECC:
+-              tmp = ioread32be(&fman->fpm_regs->fm_rie);
+-              if (enable) {
+-                      /* enable ECC if not enabled */
+-                      enable_rams_ecc(fman->fpm_regs);
+-                      /* enable ECC interrupts */
+-                      tmp |= FPM_IRAM_ECC_ERR_EX_EN;
+-              } else {
+-                      /* ECC mechanism may be disabled,
+-                       * depending on driver status
+-                       */
+-                      disable_rams_ecc(fman->fpm_regs);
+-                      tmp &= ~FPM_IRAM_ECC_ERR_EX_EN;
+-              }
+-              iowrite32be(tmp, &fman->fpm_regs->fm_rie);
+-              break;
+-      case FMAN_EX_MURAM_ECC:
+-              tmp = ioread32be(&fman->fpm_regs->fm_rie);
+-              if (enable) {
+-                      /* enable ECC if not enabled */
+-                      enable_rams_ecc(fman->fpm_regs);
+-                      /* enable ECC interrupts */
+-                      tmp |= FPM_MURAM_ECC_ERR_EX_EN;
+-              } else {
+-                      /* ECC mechanism may be disabled,
+-                       * depending on driver status
+-                       */
+-                      disable_rams_ecc(fman->fpm_regs);
+-                      tmp &= ~FPM_MURAM_ECC_ERR_EX_EN;
+-              }
+-              iowrite32be(tmp, &fman->fpm_regs->fm_rie);
+-              break;
+-      default:
+-              return -EINVAL;
+-      }
+-      return 0;
+-}
+-
+-static void resume(struct fman_fpm_regs __iomem *fpm_rg)
+-{
+-      u32 tmp;
+-
+-      tmp = ioread32be(&fpm_rg->fmfp_ee);
+-      /* clear tmp_reg event bits in order not to clear standing events */
+-      tmp &= ~(FPM_EV_MASK_DOUBLE_ECC |
+-               FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC);
+-      tmp |= FPM_EV_MASK_RELEASE_FM;
+-
+-      iowrite32be(tmp, &fpm_rg->fmfp_ee);
+-}
+-
+-static int fill_soc_specific_params(struct fman_state_struct *state)
+-{
+-      u8 minor = state->rev_info.minor;
+-      /* P4080 - Major 2
+-       * P2041/P3041/P5020/P5040 - Major 3
+-       * Tx/Bx - Major 6
+-       */
+-      switch (state->rev_info.major) {
+-      case 3:
+-              state->bmi_max_fifo_size        = 160 * 1024;
+-              state->fm_iram_size             = 64 * 1024;
+-              state->dma_thresh_max_commq     = 31;
+-              state->dma_thresh_max_buf       = 127;
+-              state->qmi_max_num_of_tnums     = 64;
+-              state->qmi_def_tnums_thresh     = 48;
+-              state->bmi_max_num_of_tasks     = 128;
+-              state->max_num_of_open_dmas     = 32;
+-              state->fm_port_num_of_cg        = 256;
+-              state->num_of_rx_ports  = 6;
+-              state->total_fifo_size  = 122 * 1024;
+-              break;
+-
+-      case 2:
+-              state->bmi_max_fifo_size        = 160 * 1024;
+-              state->fm_iram_size             = 64 * 1024;
+-              state->dma_thresh_max_commq     = 31;
+-              state->dma_thresh_max_buf       = 127;
+-              state->qmi_max_num_of_tnums     = 64;
+-              state->qmi_def_tnums_thresh     = 48;
+-              state->bmi_max_num_of_tasks     = 128;
+-              state->max_num_of_open_dmas     = 32;
+-              state->fm_port_num_of_cg        = 256;
+-              state->num_of_rx_ports  = 5;
+-              state->total_fifo_size  = 100 * 1024;
+-              break;
+-
+-      case 6:
+-              state->dma_thresh_max_commq     = 83;
+-              state->dma_thresh_max_buf       = 127;
+-              state->qmi_max_num_of_tnums     = 64;
+-              state->qmi_def_tnums_thresh     = 32;
+-              state->fm_port_num_of_cg        = 256;
+-
+-              /* FManV3L */
+-              if (minor == 1 || minor == 4) {
+-                      state->bmi_max_fifo_size        = 192 * 1024;
+-                      state->bmi_max_num_of_tasks     = 64;
+-                      state->max_num_of_open_dmas     = 32;
+-                      state->num_of_rx_ports          = 5;
+-                      if (minor == 1)
+-                              state->fm_iram_size     = 32 * 1024;
+-                      else
+-                              state->fm_iram_size     = 64 * 1024;
+-                      state->total_fifo_size          = 156 * 1024;
+-              }
+-              /* FManV3H */
+-              else if (minor == 0 || minor == 2 || minor == 3) {
+-                      state->bmi_max_fifo_size        = 384 * 1024;
+-                      state->fm_iram_size             = 64 * 1024;
+-                      state->bmi_max_num_of_tasks     = 128;
+-                      state->max_num_of_open_dmas     = 84;
+-                      state->num_of_rx_ports          = 8;
+-                      state->total_fifo_size          = 295 * 1024;
+-              } else {
+-                      pr_err("Unsupported FManv3 version\n");
+-                      return -EINVAL;
+-              }
+-
+-              break;
+-      default:
+-              pr_err("Unsupported FMan version\n");
+-              return -EINVAL;
+-      }
+-
+-      return 0;
+-}
+-
+-static bool is_init_done(struct fman_cfg *cfg)
+-{
+-      /* Checks if FMan driver parameters were initialized */
+-      if (!cfg)
+-              return true;
+-
+-      return false;
+-}
+-
+-static void free_init_resources(struct fman *fman)
+-{
+-      if (fman->cam_offset)
+-              fman_muram_free_mem(fman->muram, fman->cam_offset,
+-                                  fman->cam_size);
+-      if (fman->fifo_offset)
+-              fman_muram_free_mem(fman->muram, fman->fifo_offset,
+-                                  fman->fifo_size);
+-}
+-
+-static irqreturn_t bmi_err_event(struct fman *fman)
+-{
+-      u32 event, mask, force;
+-      struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+-      irqreturn_t ret = IRQ_NONE;
+-
+-      event = ioread32be(&bmi_rg->fmbm_ievr);
+-      mask = ioread32be(&bmi_rg->fmbm_ier);
+-      event &= mask;
+-      /* clear the forced events */
+-      force = ioread32be(&bmi_rg->fmbm_ifr);
+-      if (force & event)
+-              iowrite32be(force & ~event, &bmi_rg->fmbm_ifr);
+-      /* clear the acknowledged events */
+-      iowrite32be(event, &bmi_rg->fmbm_ievr);
+-
+-      if (event & BMI_ERR_INTR_EN_STORAGE_PROFILE_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC);
+-      if (event & BMI_ERR_INTR_EN_LIST_RAM_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_BMI_LIST_RAM_ECC);
+-      if (event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC);
+-      if (event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC);
+-
+-      return ret;
+-}
+-
+-static irqreturn_t qmi_err_event(struct fman *fman)
+-{
+-      u32 event, mask, force;
+-      struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+-      irqreturn_t ret = IRQ_NONE;
+-
+-      event = ioread32be(&qmi_rg->fmqm_eie);
+-      mask = ioread32be(&qmi_rg->fmqm_eien);
+-      event &= mask;
+-
+-      /* clear the forced events */
+-      force = ioread32be(&qmi_rg->fmqm_eif);
+-      if (force & event)
+-              iowrite32be(force & ~event, &qmi_rg->fmqm_eif);
+-      /* clear the acknowledged events */
+-      iowrite32be(event, &qmi_rg->fmqm_eie);
+-
+-      if (event & QMI_ERR_INTR_EN_DOUBLE_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_QMI_DOUBLE_ECC);
+-      if (event & QMI_ERR_INTR_EN_DEQ_FROM_DEF)
+-              ret = fman->exception_cb(fman,
+-                                       FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID);
+-
+-      return ret;
+-}
+-
+-static irqreturn_t dma_err_event(struct fman *fman)
+-{
+-      u32 status, mask, com_id;
+-      u8 tnum, port_id, relative_port_id;
+-      u16 liodn;
+-      struct fman_dma_regs __iomem *dma_rg = fman->dma_regs;
+-      irqreturn_t ret = IRQ_NONE;
+-
+-      status = ioread32be(&dma_rg->fmdmsr);
+-      mask = ioread32be(&dma_rg->fmdmmr);
+-
+-      /* clear DMA_STATUS_BUS_ERR if mask has no DMA_MODE_BER */
+-      if ((mask & DMA_MODE_BER) != DMA_MODE_BER)
+-              status &= ~DMA_STATUS_BUS_ERR;
+-
+-      /* clear relevant bits if mask has no DMA_MODE_ECC */
+-      if ((mask & DMA_MODE_ECC) != DMA_MODE_ECC)
+-              status &= ~(DMA_STATUS_FM_SPDAT_ECC |
+-                          DMA_STATUS_READ_ECC |
+-                          DMA_STATUS_SYSTEM_WRITE_ECC |
+-                          DMA_STATUS_FM_WRITE_ECC);
+-
+-      /* clear set events */
+-      iowrite32be(status, &dma_rg->fmdmsr);
+-
+-      if (status & DMA_STATUS_BUS_ERR) {
+-              u64 addr;
+-
+-              addr = (u64)ioread32be(&dma_rg->fmdmtal);
+-              addr |= ((u64)(ioread32be(&dma_rg->fmdmtah)) << 32);
+-
+-              com_id = ioread32be(&dma_rg->fmdmtcid);
+-              port_id = (u8)(((com_id & DMA_TRANSFER_PORTID_MASK) >>
+-                             DMA_TRANSFER_PORTID_SHIFT));
+-              relative_port_id =
+-              hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+-              tnum = (u8)((com_id & DMA_TRANSFER_TNUM_MASK) >>
+-                          DMA_TRANSFER_TNUM_SHIFT);
+-              liodn = (u16)(com_id & DMA_TRANSFER_LIODN_MASK);
+-              ret = fman->bus_error_cb(fman, relative_port_id, addr, tnum,
+-                                       liodn);
+-      }
+-      if (status & DMA_STATUS_FM_SPDAT_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_DMA_SINGLE_PORT_ECC);
+-      if (status & DMA_STATUS_READ_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_DMA_READ_ECC);
+-      if (status & DMA_STATUS_SYSTEM_WRITE_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC);
+-      if (status & DMA_STATUS_FM_WRITE_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_DMA_FM_WRITE_ECC);
+-
+-      return ret;
+-}
+-
+-static irqreturn_t fpm_err_event(struct fman *fman)
+-{
+-      u32 event;
+-      struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+-      irqreturn_t ret = IRQ_NONE;
+-
+-      event = ioread32be(&fpm_rg->fmfp_ee);
+-      /* clear the all occurred events */
+-      iowrite32be(event, &fpm_rg->fmfp_ee);
+-
+-      if ((event & FPM_EV_MASK_DOUBLE_ECC) &&
+-          (event & FPM_EV_MASK_DOUBLE_ECC_EN))
+-              ret = fman->exception_cb(fman, FMAN_EX_FPM_DOUBLE_ECC);
+-      if ((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN))
+-              ret = fman->exception_cb(fman, FMAN_EX_FPM_STALL_ON_TASKS);
+-      if ((event & FPM_EV_MASK_SINGLE_ECC) &&
+-          (event & FPM_EV_MASK_SINGLE_ECC_EN))
+-              ret = fman->exception_cb(fman, FMAN_EX_FPM_SINGLE_ECC);
+-
+-      return ret;
+-}
+-
+-static irqreturn_t muram_err_intr(struct fman *fman)
+-{
+-      u32 event, mask;
+-      struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+-      irqreturn_t ret = IRQ_NONE;
+-
+-      event = ioread32be(&fpm_rg->fm_rcr);
+-      mask = ioread32be(&fpm_rg->fm_rie);
+-
+-      /* clear MURAM event bit (do not clear IRAM event) */
+-      iowrite32be(event & ~FPM_RAM_IRAM_ECC, &fpm_rg->fm_rcr);
+-
+-      if ((mask & FPM_MURAM_ECC_ERR_EX_EN) && (event & FPM_RAM_MURAM_ECC))
+-              ret = fman->exception_cb(fman, FMAN_EX_MURAM_ECC);
+-
+-      return ret;
+-}
+-
+-static irqreturn_t qmi_event(struct fman *fman)
+-{
+-      u32 event, mask, force;
+-      struct fman_qmi_regs __iomem *qmi_rg = fman->qmi_regs;
+-      irqreturn_t ret = IRQ_NONE;
+-
+-      event = ioread32be(&qmi_rg->fmqm_ie);
+-      mask = ioread32be(&qmi_rg->fmqm_ien);
+-      event &= mask;
+-      /* clear the forced events */
+-      force = ioread32be(&qmi_rg->fmqm_if);
+-      if (force & event)
+-              iowrite32be(force & ~event, &qmi_rg->fmqm_if);
+-      /* clear the acknowledged events */
+-      iowrite32be(event, &qmi_rg->fmqm_ie);
+-
+-      if (event & QMI_INTR_EN_SINGLE_ECC)
+-              ret = fman->exception_cb(fman, FMAN_EX_QMI_SINGLE_ECC);
+-
+-      return ret;
+-}
+-
+-static void enable_time_stamp(struct fman *fman)
+-{
+-      struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+-      u16 fm_clk_freq = fman->state->fm_clk_freq;
+-      u32 tmp, intgr, ts_freq;
+-      u64 frac;
+-
+-      ts_freq = (u32)(1 << fman->state->count1_micro_bit);
+-      /* configure timestamp so that bit 8 will count 1 microsecond
+-       * Find effective count rate at TIMESTAMP least significant bits:
+-       * Effective_Count_Rate = 1MHz x 2^8 = 256MHz
+-       * Find frequency ratio between effective count rate and the clock:
+-       * Effective_Count_Rate / CLK e.g. for 600 MHz clock:
+-       * 256/600 = 0.4266666...
+-       */
+-
+-      intgr = ts_freq / fm_clk_freq;
+-      /* we multiply by 2^16 to keep the fraction of the division
+-       * we do not div back, since we write this value as a fraction
+-       * see spec
+-       */
+-
+-      frac = ((ts_freq << 16) - (intgr << 16) * fm_clk_freq) / fm_clk_freq;
+-      /* we check remainder of the division in order to round up if not int */
+-      if (((ts_freq << 16) - (intgr << 16) * fm_clk_freq) % fm_clk_freq)
+-              frac++;
+-
+-      tmp = (intgr << FPM_TS_INT_SHIFT) | (u16)frac;
+-      iowrite32be(tmp, &fpm_rg->fmfp_tsc2);
+-
+-      /* enable timestamp with original clock */
+-      iowrite32be(FPM_TS_CTL_EN, &fpm_rg->fmfp_tsc1);
+-      fman->state->enabled_time_stamp = true;
+-}
+-
+-static int clear_iram(struct fman *fman)
+-{
+-      struct fman_iram_regs __iomem *iram;
+-      int i, count;
+-
+-      iram = fman->base_addr + IMEM_OFFSET;
+-
+-      /* Enable the auto-increment */
+-      iowrite32be(IRAM_IADD_AIE, &iram->iadd);
+-      count = 100;
+-      do {
+-              udelay(1);
+-      } while ((ioread32be(&iram->iadd) != IRAM_IADD_AIE) && --count);
+-      if (count == 0)
+-              return -EBUSY;
+-
+-      for (i = 0; i < (fman->state->fm_iram_size / 4); i++)
+-              iowrite32be(0xffffffff, &iram->idata);
+-
+-      iowrite32be(fman->state->fm_iram_size - 4, &iram->iadd);
+-      count = 100;
+-      do {
+-              udelay(1);
+-      } while ((ioread32be(&iram->idata) != 0xffffffff) && --count);
+-      if (count == 0)
+-              return -EBUSY;
+-
+-      return 0;
+-}
+-
+-static u32 get_exception_flag(enum fman_exceptions exception)
+-{
+-      u32 bit_mask;
+-
+-      switch (exception) {
+-      case FMAN_EX_DMA_BUS_ERROR:
+-              bit_mask = EX_DMA_BUS_ERROR;
+-              break;
+-      case FMAN_EX_DMA_SINGLE_PORT_ECC:
+-              bit_mask = EX_DMA_SINGLE_PORT_ECC;
+-              break;
+-      case FMAN_EX_DMA_READ_ECC:
+-              bit_mask = EX_DMA_READ_ECC;
+-              break;
+-      case FMAN_EX_DMA_SYSTEM_WRITE_ECC:
+-              bit_mask = EX_DMA_SYSTEM_WRITE_ECC;
+-              break;
+-      case FMAN_EX_DMA_FM_WRITE_ECC:
+-              bit_mask = EX_DMA_FM_WRITE_ECC;
+-              break;
+-      case FMAN_EX_FPM_STALL_ON_TASKS:
+-              bit_mask = EX_FPM_STALL_ON_TASKS;
+-              break;
+-      case FMAN_EX_FPM_SINGLE_ECC:
+-              bit_mask = EX_FPM_SINGLE_ECC;
+-              break;
+-      case FMAN_EX_FPM_DOUBLE_ECC:
+-              bit_mask = EX_FPM_DOUBLE_ECC;
+-              break;
+-      case FMAN_EX_QMI_SINGLE_ECC:
+-              bit_mask = EX_QMI_SINGLE_ECC;
+-              break;
+-      case FMAN_EX_QMI_DOUBLE_ECC:
+-              bit_mask = EX_QMI_DOUBLE_ECC;
+-              break;
+-      case FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID:
+-              bit_mask = EX_QMI_DEQ_FROM_UNKNOWN_PORTID;
+-              break;
+-      case FMAN_EX_BMI_LIST_RAM_ECC:
+-              bit_mask = EX_BMI_LIST_RAM_ECC;
+-              break;
+-      case FMAN_EX_BMI_STORAGE_PROFILE_ECC:
+-              bit_mask = EX_BMI_STORAGE_PROFILE_ECC;
+-              break;
+-      case FMAN_EX_BMI_STATISTICS_RAM_ECC:
+-              bit_mask = EX_BMI_STATISTICS_RAM_ECC;
+-              break;
+-      case FMAN_EX_BMI_DISPATCH_RAM_ECC:
+-              bit_mask = EX_BMI_DISPATCH_RAM_ECC;
+-              break;
+-      case FMAN_EX_MURAM_ECC:
+-              bit_mask = EX_MURAM_ECC;
+-              break;
+-      default:
+-              bit_mask = 0;
+-              break;
+-      }
+-
+-      return bit_mask;
+-}
+-
+-static int get_module_event(enum fman_event_modules module, u8 mod_id,
+-                          enum fman_intr_type intr_type)
+-{
+-      int event;
+-
+-      switch (module) {
+-      case FMAN_MOD_MAC:
+-              if (intr_type == FMAN_INTR_TYPE_ERR)
+-                      event = FMAN_EV_ERR_MAC0 + mod_id;
+-              else
+-                      event = FMAN_EV_MAC0 + mod_id;
+-              break;
+-      case FMAN_MOD_FMAN_CTRL:
+-              if (intr_type == FMAN_INTR_TYPE_ERR)
+-                      event = FMAN_EV_CNT;
+-              else
+-                      event = (FMAN_EV_FMAN_CTRL_0 + mod_id);
+-              break;
+-      case FMAN_MOD_DUMMY_LAST:
+-              event = FMAN_EV_CNT;
+-              break;
+-      default:
+-              event = FMAN_EV_CNT;
+-              break;
+-      }
+-
+-      return event;
+-}
+-
+-static int set_size_of_fifo(struct fman *fman, u8 port_id, u32 *size_of_fifo,
+-                          u32 *extra_size_of_fifo)
+-{
+-      struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+-      u32 fifo = *size_of_fifo;
+-      u32 extra_fifo = *extra_size_of_fifo;
+-      u32 tmp;
+-
+-      /* if this is the first time a port requires extra_fifo_pool_size,
+-       * the total extra_fifo_pool_size must be initialized to 1 buffer per
+-       * port
+-       */
+-      if (extra_fifo && !fman->state->extra_fifo_pool_size)
+-              fman->state->extra_fifo_pool_size =
+-                      fman->state->num_of_rx_ports * FMAN_BMI_FIFO_UNITS;
+-
+-      fman->state->extra_fifo_pool_size =
+-              max(fman->state->extra_fifo_pool_size, extra_fifo);
+-
+-      /* check that there are enough uncommitted fifo size */
+-      if ((fman->state->accumulated_fifo_size + fifo) >
+-          (fman->state->total_fifo_size -
+-          fman->state->extra_fifo_pool_size)) {
+-              dev_err(fman->dev, "%s: Requested fifo size and extra size exceed total FIFO size.\n",
+-                      __func__);
+-              return -EAGAIN;
+-      }
+-
+-      /* Read, modify and write to HW */
+-      tmp = (fifo / FMAN_BMI_FIFO_UNITS - 1) |
+-             ((extra_fifo / FMAN_BMI_FIFO_UNITS) <<
+-             BMI_EXTRA_FIFO_SIZE_SHIFT);
+-      iowrite32be(tmp, &bmi_rg->fmbm_pfs[port_id - 1]);
+-
+-      /* update accumulated */
+-      fman->state->accumulated_fifo_size += fifo;
+-
+-      return 0;
+-}
+-
+-static int set_num_of_tasks(struct fman *fman, u8 port_id, u8 *num_of_tasks,
+-                          u8 *num_of_extra_tasks)
+-{
+-      struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+-      u8 tasks = *num_of_tasks;
+-      u8 extra_tasks = *num_of_extra_tasks;
+-      u32 tmp;
+-
+-      if (extra_tasks)
+-              fman->state->extra_tasks_pool_size =
+-              max(fman->state->extra_tasks_pool_size, extra_tasks);
+-
+-      /* check that there are enough uncommitted tasks */
+-      if ((fman->state->accumulated_num_of_tasks + tasks) >
+-          (fman->state->total_num_of_tasks -
+-           fman->state->extra_tasks_pool_size)) {
+-              dev_err(fman->dev, "%s: Requested num_of_tasks and extra tasks pool for fm%d exceed total num_of_tasks.\n",
+-                      __func__, fman->state->fm_id);
+-              return -EAGAIN;
+-      }
+-      /* update accumulated */
+-      fman->state->accumulated_num_of_tasks += tasks;
+-
+-      /* Write to HW */
+-      tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+-          ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK);
+-      tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
+-              (u32)(extra_tasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT));
+-      iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+-
+-      return 0;
+-}
+-
+-static int set_num_of_open_dmas(struct fman *fman, u8 port_id,
+-                              u8 *num_of_open_dmas,
+-                              u8 *num_of_extra_open_dmas)
+-{
+-      struct fman_bmi_regs __iomem *bmi_rg = fman->bmi_regs;
+-      u8 open_dmas = *num_of_open_dmas;
+-      u8 extra_open_dmas = *num_of_extra_open_dmas;
+-      u8 total_num_dmas = 0, current_val = 0, current_extra_val = 0;
+-      u32 tmp;
+-
+-      if (!open_dmas) {
+-              /* Configuration according to values in the HW.
+-               * read the current number of open Dma's
+-               */
+-              tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+-              current_extra_val = (u8)((tmp & BMI_NUM_OF_EXTRA_DMAS_MASK) >>
+-                                       BMI_EXTRA_NUM_OF_DMAS_SHIFT);
+-
+-              tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]);
+-              current_val = (u8)(((tmp & BMI_NUM_OF_DMAS_MASK) >>
+-                                 BMI_NUM_OF_DMAS_SHIFT) + 1);
+-
+-              /* This is the first configuration and user did not
+-               * specify value (!open_dmas), reset values will be used
+-               * and we just save these values for resource management
+-               */
+-              fman->state->extra_open_dmas_pool_size =
+-                      (u8)max(fman->state->extra_open_dmas_pool_size,
+-                              current_extra_val);
+-              fman->state->accumulated_num_of_open_dmas += current_val;
+-              *num_of_open_dmas = current_val;
+-              *num_of_extra_open_dmas = current_extra_val;
+-              return 0;
+-      }
+-
+-      if (extra_open_dmas > current_extra_val)
+-              fman->state->extra_open_dmas_pool_size =
+-                  (u8)max(fman->state->extra_open_dmas_pool_size,
+-                          extra_open_dmas);
+-
+-      if ((fman->state->rev_info.major < 6) &&
+-          (fman->state->accumulated_num_of_open_dmas - current_val +
+-           open_dmas > fman->state->max_num_of_open_dmas)) {
+-              dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds total num_of_open_dmas.\n",
+-                      __func__, fman->state->fm_id);
+-              return -EAGAIN;
+-      } else if ((fman->state->rev_info.major >= 6) &&
+-                 !((fman->state->rev_info.major == 6) &&
+-                 (fman->state->rev_info.minor == 0)) &&
+-                 (fman->state->accumulated_num_of_open_dmas -
+-                 current_val + open_dmas >
+-                 fman->state->dma_thresh_max_commq + 1)) {
+-              dev_err(fman->dev, "%s: Requested num_of_open_dmas for fm%d exceeds DMA Command queue (%d)\n",
+-                      __func__, fman->state->fm_id,
+-                     fman->state->dma_thresh_max_commq + 1);
+-              return -EAGAIN;
+-      }
+-
+-      WARN_ON(fman->state->accumulated_num_of_open_dmas < current_val);
+-      /* update acummulated */
+-      fman->state->accumulated_num_of_open_dmas -= current_val;
+-      fman->state->accumulated_num_of_open_dmas += open_dmas;
+-
+-      if (fman->state->rev_info.major < 6)
+-              total_num_dmas =
+-                  (u8)(fman->state->accumulated_num_of_open_dmas +
+-                  fman->state->extra_open_dmas_pool_size);
+-
+-      /* calculate reg */
+-      tmp = ioread32be(&bmi_rg->fmbm_pp[port_id - 1]) &
+-          ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
+-      tmp |= (u32)(((open_dmas - 1) << BMI_NUM_OF_DMAS_SHIFT) |
+-                         (extra_open_dmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
+-      iowrite32be(tmp, &bmi_rg->fmbm_pp[port_id - 1]);
+-
+-      /* update total num of DMA's with committed number of open DMAS,
+-       * and max uncommitted pool.
+-       */
+-      if (total_num_dmas) {
+-              tmp = ioread32be(&bmi_rg->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
+-              tmp |= (u32)(total_num_dmas - 1) << BMI_CFG2_DMAS_SHIFT;
+-              iowrite32be(tmp, &bmi_rg->fmbm_cfg2);
+-      }
+-
+-      return 0;
+-}
+-
+-static int fman_config(struct fman *fman)
+-{
+-      void __iomem *base_addr;
+-      int err;
+-
+-      base_addr = fman->dts_params.base_addr;
+-
+-      fman->state = kzalloc(sizeof(*fman->state), GFP_KERNEL);
+-      if (!fman->state)
+-              goto err_fm_state;
+-
+-      /* Allocate the FM driver's parameters structure */
+-      fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
+-      if (!fman->cfg)
+-              goto err_fm_drv;
+-
+-      /* Initialize MURAM block */
+-      fman->muram =
+-              fman_muram_init(fman->dts_params.muram_res.start,
+-                              resource_size(&fman->dts_params.muram_res));
+-      if (!fman->muram)
+-              goto err_fm_soc_specific;
+-
+-      /* Initialize FM parameters which will be kept by the driver */
+-      fman->state->fm_id = fman->dts_params.id;
+-      fman->state->fm_clk_freq = fman->dts_params.clk_freq;
+-      fman->state->qman_channel_base = fman->dts_params.qman_channel_base;
+-      fman->state->num_of_qman_channels =
+-              fman->dts_params.num_of_qman_channels;
+-      fman->state->res = fman->dts_params.res;
+-      fman->exception_cb = fman_exceptions;
+-      fman->bus_error_cb = fman_bus_error;
+-      fman->fpm_regs = base_addr + FPM_OFFSET;
+-      fman->bmi_regs = base_addr + BMI_OFFSET;
+-      fman->qmi_regs = base_addr + QMI_OFFSET;
+-      fman->dma_regs = base_addr + DMA_OFFSET;
+-      fman->base_addr = base_addr;
+-
+-      spin_lock_init(&fman->spinlock);
+-      fman_defconfig(fman->cfg);
+-
+-      fman->state->extra_fifo_pool_size = 0;
+-      fman->state->exceptions = (EX_DMA_BUS_ERROR                 |
+-                                      EX_DMA_READ_ECC              |
+-                                      EX_DMA_SYSTEM_WRITE_ECC      |
+-                                      EX_DMA_FM_WRITE_ECC          |
+-                                      EX_FPM_STALL_ON_TASKS        |
+-                                      EX_FPM_SINGLE_ECC            |
+-                                      EX_FPM_DOUBLE_ECC            |
+-                                      EX_QMI_DEQ_FROM_UNKNOWN_PORTID |
+-                                      EX_BMI_LIST_RAM_ECC          |
+-                                      EX_BMI_STORAGE_PROFILE_ECC   |
+-                                      EX_BMI_STATISTICS_RAM_ECC    |
+-                                      EX_MURAM_ECC                 |
+-                                      EX_BMI_DISPATCH_RAM_ECC      |
+-                                      EX_QMI_DOUBLE_ECC            |
+-                                      EX_QMI_SINGLE_ECC);
+-
+-      /* Read FMan revision for future use*/
+-      fman_get_revision(fman, &fman->state->rev_info);
+-
+-      err = fill_soc_specific_params(fman->state);
+-      if (err)
+-              goto err_fm_soc_specific;
+-
+-      /* FM_AID_MODE_NO_TNUM_SW005 Errata workaround */
+-      if (fman->state->rev_info.major >= 6)
+-              fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
+-
+-      fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
+-
+-      fman->state->total_num_of_tasks =
+-      (u8)DFLT_TOTAL_NUM_OF_TASKS(fman->state->rev_info.major,
+-                                  fman->state->rev_info.minor,
+-                                  fman->state->bmi_max_num_of_tasks);
+-
+-      if (fman->state->rev_info.major < 6) {
+-              fman->cfg->dma_comm_qtsh_clr_emer =
+-              (u8)DFLT_DMA_COMM_Q_LOW(fman->state->rev_info.major,
+-                                      fman->state->dma_thresh_max_commq);
+-
+-              fman->cfg->dma_comm_qtsh_asrt_emer =
+-              (u8)DFLT_DMA_COMM_Q_HIGH(fman->state->rev_info.major,
+-                                       fman->state->dma_thresh_max_commq);
+-
+-              fman->cfg->dma_cam_num_of_entries =
+-              DFLT_DMA_CAM_NUM_OF_ENTRIES(fman->state->rev_info.major);
+-
+-              fman->cfg->dma_read_buf_tsh_clr_emer =
+-              DFLT_DMA_READ_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+-
+-              fman->cfg->dma_read_buf_tsh_asrt_emer =
+-              DFLT_DMA_READ_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+-
+-              fman->cfg->dma_write_buf_tsh_clr_emer =
+-              DFLT_DMA_WRITE_INT_BUF_LOW(fman->state->dma_thresh_max_buf);
+-
+-              fman->cfg->dma_write_buf_tsh_asrt_emer =
+-              DFLT_DMA_WRITE_INT_BUF_HIGH(fman->state->dma_thresh_max_buf);
+-
+-              fman->cfg->dma_axi_dbg_num_of_beats =
+-              DFLT_AXI_DBG_NUM_OF_BEATS;
+-      }
+-
+-      return 0;
+-
+-err_fm_soc_specific:
+-      kfree(fman->cfg);
+-err_fm_drv:
+-      kfree(fman->state);
+-err_fm_state:
+-      kfree(fman);
+-      return -EINVAL;
+-}
+-
+-static int fman_reset(struct fman *fman)
+-{
+-      u32 count;
+-      int err = 0;
+-
+-      if (fman->state->rev_info.major < 6) {
+-              iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+-              /* Wait for reset completion */
+-              count = 100;
+-              do {
+-                      udelay(1);
+-              } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+-                       FPM_RSTC_FM_RESET) && --count);
+-              if (count == 0)
+-                      err = -EBUSY;
+-
+-              goto _return;
+-      } else {
+-              struct device_node *guts_node;
+-              struct ccsr_guts __iomem *guts_regs;
+-              u32 devdisr2, reg;
+-
+-              /* Errata A007273 */
+-              guts_node =
+-                      of_find_compatible_node(NULL, NULL,
+-                                              "fsl,qoriq-device-config-2.0");
+-              if (!guts_node) {
+-                      dev_err(fman->dev, "%s: Couldn't find guts node\n",
+-                              __func__);
+-                      goto guts_node;
+-              }
+-
+-              guts_regs = of_iomap(guts_node, 0);
+-              if (!guts_regs) {
+-                      dev_err(fman->dev, "%s: Couldn't map %s regs\n",
+-                              __func__, guts_node->full_name);
+-                      goto guts_regs;
+-              }
+-#define FMAN1_ALL_MACS_MASK   0xFCC00000
+-#define FMAN2_ALL_MACS_MASK   0x000FCC00
+-              /* Read current state */
+-              devdisr2 = ioread32be(&guts_regs->devdisr2);
+-              if (fman->dts_params.id == 0)
+-                      reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
+-              else
+-                      reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
+-
+-              /* Enable all MACs */
+-              iowrite32be(reg, &guts_regs->devdisr2);
+-
+-              /* Perform FMan reset */
+-              iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+-
+-              /* Wait for reset completion */
+-              count = 100;
+-              do {
+-                      udelay(1);
+-              } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+-                       FPM_RSTC_FM_RESET) && --count);
+-              if (count == 0) {
+-                      iounmap(guts_regs);
+-                      of_node_put(guts_node);
+-                      err = -EBUSY;
+-                      goto _return;
+-              }
+-
+-              /* Restore devdisr2 value */
+-              iowrite32be(devdisr2, &guts_regs->devdisr2);
+-
+-              iounmap(guts_regs);
+-              of_node_put(guts_node);
+-
+-              goto _return;
+-
+-guts_regs:
+-              of_node_put(guts_node);
+-guts_node:
+-              dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
+-                      __func__);
+-      }
+-_return:
+-      return err;
+-}
+-
+-static int fman_init(struct fman *fman)
+-{
+-      struct fman_cfg *cfg = NULL;
+-      int err = 0, i, count;
+-
+-      if (is_init_done(fman->cfg))
+-              return -EINVAL;
+-
+-      fman->state->count1_micro_bit = FM_TIMESTAMP_1_USEC_BIT;
+-
+-      cfg = fman->cfg;
+-
+-      /* clear revision-dependent non existing exception */
+-      if (fman->state->rev_info.major < 6)
+-              fman->state->exceptions &= ~FMAN_EX_BMI_DISPATCH_RAM_ECC;
+-
+-      if (fman->state->rev_info.major >= 6)
+-              fman->state->exceptions &= ~FMAN_EX_QMI_SINGLE_ECC;
+-
+-      /* clear CPG */
+-      memset_io((void __iomem *)(fman->base_addr + CGP_OFFSET), 0,
+-                fman->state->fm_port_num_of_cg);
+-
+-      /* Save LIODN info before FMan reset
+-       * Skipping non-existent port 0 (i = 1)
+-       */
+-      for (i = 1; i < FMAN_LIODN_TBL; i++) {
+-              u32 liodn_base;
+-
+-              fman->liodn_offset[i] =
+-                      ioread32be(&fman->bmi_regs->fmbm_spliodn[i - 1]);
+-              liodn_base = ioread32be(&fman->dma_regs->fmdmplr[i / 2]);
+-              if (i % 2) {
+-                      /* FMDM_PLR LSB holds LIODN base for odd ports */
+-                      liodn_base &= DMA_LIODN_BASE_MASK;
+-              } else {
+-                      /* FMDM_PLR MSB holds LIODN base for even ports */
+-                      liodn_base >>= DMA_LIODN_SHIFT;
+-                      liodn_base &= DMA_LIODN_BASE_MASK;
+-              }
+-              fman->liodn_base[i] = liodn_base;
+-      }
+-
+-      err = fman_reset(fman);
+-      if (err)
+-              return err;
+-
+-      if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
+-              resume(fman->fpm_regs);
+-              /* Wait until QMI is not in halt not busy state */
+-              count = 100;
+-              do {
+-                      udelay(1);
+-              } while (((ioread32be(&fman->qmi_regs->fmqm_gs)) &
+-                       QMI_GS_HALT_NOT_BUSY) && --count);
+-              if (count == 0)
+-                      dev_warn(fman->dev, "%s: QMI is in halt not busy state\n",
+-                               __func__);
+-      }
+-
+-      if (clear_iram(fman) != 0)
+-              return -EINVAL;
+-
+-      cfg->exceptions = fman->state->exceptions;
+-
+-      /* Init DMA Registers */
+-
+-      err = dma_init(fman);
+-      if (err != 0) {
+-              free_init_resources(fman);
+-              return err;
+-      }
+-
+-      /* Init FPM Registers */
+-      fpm_init(fman->fpm_regs, fman->cfg);
+-
+-      /* define common resources */
+-      /* allocate MURAM for FIFO according to total size */
+-      fman->fifo_offset = fman_muram_alloc(fman->muram,
+-                                           fman->state->total_fifo_size);
+-      if (IS_ERR_VALUE(fman->fifo_offset)) {
+-              free_init_resources(fman);
+-              dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
+-                      __func__);
+-              return -ENOMEM;
+-      }
+-
+-      cfg->fifo_base_addr = fman->fifo_offset;
+-      cfg->total_fifo_size = fman->state->total_fifo_size;
+-      cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
+-      cfg->clk_freq = fman->state->fm_clk_freq;
+-
+-      /* Init BMI Registers */
+-      bmi_init(fman->bmi_regs, fman->cfg);
+-
+-      /* Init QMI Registers */
+-      qmi_init(fman->qmi_regs, fman->cfg);
+-
+-      err = enable(fman, cfg);
+-      if (err != 0)
+-              return err;
+-
+-      enable_time_stamp(fman);
+-
+-      kfree(fman->cfg);
+-      fman->cfg = NULL;
+-
+-      return 0;
+-}
+-
+-static int fman_set_exception(struct fman *fman,
+-                            enum fman_exceptions exception, bool enable)
+-{
+-      u32 bit_mask = 0;
+-
+-      if (!is_init_done(fman->cfg))
+-              return -EINVAL;
+-
+-      bit_mask = get_exception_flag(exception);
+-      if (bit_mask) {
+-              if (enable)
+-                      fman->state->exceptions |= bit_mask;
+-              else
+-                      fman->state->exceptions &= ~bit_mask;
+-      } else {
+-              dev_err(fman->dev, "%s: Undefined exception (%d)\n",
+-                      __func__, exception);
+-              return -EINVAL;
+-      }
+-
+-      return set_exception(fman, exception, enable);
+-}
+-
+-/**
+- * fman_register_intr
+- * @fman:     A Pointer to FMan device
+- * @mod:      Calling module
+- * @mod_id:   Module id (if more than 1 exists, '0' if not)
+- * @intr_type:        Interrupt type (error/normal) selection.
+- * @f_isr:    The interrupt service routine.
+- * @h_src_arg:        Argument to be passed to f_isr.
+- *
+- * Used to register an event handler to be processed by FMan
+- *
+- * Return: 0 on success; Error code otherwise.
+- */
+-void fman_register_intr(struct fman *fman, enum fman_event_modules module,
+-                      u8 mod_id, enum fman_intr_type intr_type,
+-                      void (*isr_cb)(void *src_arg), void *src_arg)
+-{
+-      int event = 0;
+-
+-      event = get_module_event(module, mod_id, intr_type);
+-      WARN_ON(event >= FMAN_EV_CNT);
+-
+-      /* register in local FM structure */
+-      fman->intr_mng[event].isr_cb = isr_cb;
+-      fman->intr_mng[event].src_handle = src_arg;
+-}
+-EXPORT_SYMBOL(fman_register_intr);
+-
+-/**
+- * fman_unregister_intr
+- * @fman:     A Pointer to FMan device
+- * @mod:      Calling module
+- * @mod_id:   Module id (if more than 1 exists, '0' if not)
+- * @intr_type:        Interrupt type (error/normal) selection.
+- *
+- * Used to unregister an event handler to be processed by FMan
+- *
+- * Return: 0 on success; Error code otherwise.
+- */
+-void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
+-                        u8 mod_id, enum fman_intr_type intr_type)
+-{
+-      int event = 0;
+-
+-      event = get_module_event(module, mod_id, intr_type);
+-      WARN_ON(event >= FMAN_EV_CNT);
+-
+-      fman->intr_mng[event].isr_cb = NULL;
+-      fman->intr_mng[event].src_handle = NULL;
+-}
+-EXPORT_SYMBOL(fman_unregister_intr);
+-
+-/**
+- * fman_set_port_params
+- * @fman:             A Pointer to FMan device
+- * @port_params:      Port parameters
+- *
+- * Used by FMan Port to pass parameters to the FMan
+- *
+- * Return: 0 on success; Error code otherwise.
+- */
+-int fman_set_port_params(struct fman *fman,
+-                       struct fman_port_init_params *port_params)
+-{
+-      int err;
+-      unsigned long flags;
+-      u8 port_id = port_params->port_id, mac_id;
+-
+-      spin_lock_irqsave(&fman->spinlock, flags);
+-
+-      err = set_num_of_tasks(fman, port_params->port_id,
+-                             &port_params->num_of_tasks,
+-                             &port_params->num_of_extra_tasks);
+-      if (err)
+-              goto return_err;
+-
+-      /* TX Ports */
+-      if (port_params->port_type != FMAN_PORT_TYPE_RX) {
+-              u32 enq_th, deq_th, reg;
+-
+-              /* update qmi ENQ/DEQ threshold */
+-              fman->state->accumulated_num_of_deq_tnums +=
+-                      port_params->deq_pipeline_depth;
+-              enq_th = (ioread32be(&fman->qmi_regs->fmqm_gc) &
+-                        QMI_CFG_ENQ_MASK) >> QMI_CFG_ENQ_SHIFT;
+-              /* if enq_th is too big, we reduce it to the max value
+-               * that is still 0
+-               */
+-              if (enq_th >= (fman->state->qmi_max_num_of_tnums -
+-                  fman->state->accumulated_num_of_deq_tnums)) {
+-                      enq_th =
+-                      fman->state->qmi_max_num_of_tnums -
+-                      fman->state->accumulated_num_of_deq_tnums - 1;
+-
+-                      reg = ioread32be(&fman->qmi_regs->fmqm_gc);
+-                      reg &= ~QMI_CFG_ENQ_MASK;
+-                      reg |= (enq_th << QMI_CFG_ENQ_SHIFT);
+-                      iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
+-              }
+-
+-              deq_th = ioread32be(&fman->qmi_regs->fmqm_gc) &
+-                                  QMI_CFG_DEQ_MASK;
+-              /* if deq_th is too small, we enlarge it to the min
+-               * value that is still 0.
+-               * depTh may not be larger than 63
+-               * (fman->state->qmi_max_num_of_tnums-1).
+-               */
+-              if ((deq_th <= fman->state->accumulated_num_of_deq_tnums) &&
+-                  (deq_th < fman->state->qmi_max_num_of_tnums - 1)) {
+-                      deq_th = fman->state->accumulated_num_of_deq_tnums + 1;
+-                      reg = ioread32be(&fman->qmi_regs->fmqm_gc);
+-                      reg &= ~QMI_CFG_DEQ_MASK;
+-                      reg |= deq_th;
+-                      iowrite32be(reg, &fman->qmi_regs->fmqm_gc);
+-              }
+-      }
+-
+-      err = set_size_of_fifo(fman, port_params->port_id,
+-                             &port_params->size_of_fifo,
+-                             &port_params->extra_size_of_fifo);
+-      if (err)
+-              goto return_err;
+-
+-      err = set_num_of_open_dmas(fman, port_params->port_id,
+-                                 &port_params->num_of_open_dmas,
+-                                 &port_params->num_of_extra_open_dmas);
+-      if (err)
+-              goto return_err;
+-
+-      set_port_liodn(fman, port_id, fman->liodn_base[port_id],
+-                     fman->liodn_offset[port_id]);
+-
+-      if (fman->state->rev_info.major < 6)
+-              set_port_order_restoration(fman->fpm_regs, port_id);
+-
+-      mac_id = hw_port_id_to_sw_port_id(fman->state->rev_info.major, port_id);
+-
+-      if (port_params->max_frame_length >= fman->state->mac_mfl[mac_id]) {
+-              fman->state->port_mfl[mac_id] = port_params->max_frame_length;
+-      } else {
+-              dev_warn(fman->dev, "%s: Port (%d) max_frame_length is smaller than MAC (%d) current MTU\n",
+-                       __func__, port_id, mac_id);
+-              err = -EINVAL;
+-              goto return_err;
+-      }
+-
+-      spin_unlock_irqrestore(&fman->spinlock, flags);
+-
+-      return 0;
+-
+-return_err:
+-      spin_unlock_irqrestore(&fman->spinlock, flags);
+-      return err;
+-}
+-EXPORT_SYMBOL(fman_set_port_params);
+-
+-/**
+- * fman_reset_mac
+- * @fman:     A Pointer to FMan device
+- * @mac_id:   MAC id to be reset
+- *
+- * Reset a specific MAC
+- *
+- * Return: 0 on success; Error code otherwise.
+- */
+-int fman_reset_mac(struct fman *fman, u8 mac_id)
+-{
+-      struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+-      u32 msk, timeout = 100;
+-
+-      if (fman->state->rev_info.major >= 6) {
+-              dev_err(fman->dev, "%s: FMan MAC reset no available for FMan V3!\n",
+-                      __func__);
+-              return -EINVAL;
+-      }
+-
+-      /* Get the relevant bit mask */
+-      switch (mac_id) {
+-      case 0:
+-              msk = FPM_RSTC_MAC0_RESET;
+-              break;
+-      case 1:
+-              msk = FPM_RSTC_MAC1_RESET;
+-              break;
+-      case 2:
+-              msk = FPM_RSTC_MAC2_RESET;
+-              break;
+-      case 3:
+-              msk = FPM_RSTC_MAC3_RESET;
+-              break;
+-      case 4:
+-              msk = FPM_RSTC_MAC4_RESET;
+-              break;
+-      case 5:
+-              msk = FPM_RSTC_MAC5_RESET;
+-              break;
+-      case 6:
+-              msk = FPM_RSTC_MAC6_RESET;
+-              break;
+-      case 7:
+-              msk = FPM_RSTC_MAC7_RESET;
+-              break;
+-      case 8:
+-              msk = FPM_RSTC_MAC8_RESET;
+-              break;
+-      case 9:
+-              msk = FPM_RSTC_MAC9_RESET;
+-              break;
+-      default:
+-              dev_warn(fman->dev, "%s: Illegal MAC Id [%d]\n",
+-                       __func__, mac_id);
+-              return -EINVAL;
+-      }
+-
+-      /* reset */
+-      iowrite32be(msk, &fpm_rg->fm_rstc);
+-      while ((ioread32be(&fpm_rg->fm_rstc) & msk) && --timeout)
+-              udelay(10);
+-
+-      if (!timeout)
+-              return -EIO;
+-
+-      return 0;
+-}
+-EXPORT_SYMBOL(fman_reset_mac);
+-
+-/**
+- * fman_set_mac_max_frame
+- * @fman:     A Pointer to FMan device
+- * @mac_id:   MAC id
+- * @mfl:      Maximum frame length
+- *
+- * Set maximum frame length of specific MAC in FMan driver
+- *
+- * Return: 0 on success; Error code otherwise.
+- */
+-int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
+-{
+-      /* if port is already initialized, check that MaxFrameLength is smaller
+-       * or equal to the port's max
+-       */
+-      if ((!fman->state->port_mfl[mac_id]) ||
+-          (mfl <= fman->state->port_mfl[mac_id])) {
+-              fman->state->mac_mfl[mac_id] = mfl;
+-      } else {
+-              dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
+-                       __func__);
+-              return -EINVAL;
+-      }
+-      return 0;
+-}
+-EXPORT_SYMBOL(fman_set_mac_max_frame);
+-
+-/**
+- * fman_get_clock_freq
+- * @fman:     A Pointer to FMan device
+- *
+- * Get FMan clock frequency
+- *
+- * Return: FMan clock frequency
+- */
+-u16 fman_get_clock_freq(struct fman *fman)
+-{
+-      return fman->state->fm_clk_freq;
+-}
+-
+-/**
+- * fman_get_bmi_max_fifo_size
+- * @fman:     A Pointer to FMan device
+- *
+- * Get FMan maximum FIFO size
+- *
+- * Return: FMan Maximum FIFO size
+- */
+-u32 fman_get_bmi_max_fifo_size(struct fman *fman)
+-{
+-      return fman->state->bmi_max_fifo_size;
+-}
+-EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
+-
+-/**
+- * fman_get_revision
+- * @fman              - Pointer to the FMan module
+- * @rev_info          - A structure of revision information parameters.
+- *
+- * Returns the FM revision
+- *
+- * Allowed only following fman_init().
+- *
+- * Return: 0 on success; Error code otherwise.
+- */
+-void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
+-{
+-      u32 tmp;
+-
+-      tmp = ioread32be(&fman->fpm_regs->fm_ip_rev_1);
+-      rev_info->major = (u8)((tmp & FPM_REV1_MAJOR_MASK) >>
+-                              FPM_REV1_MAJOR_SHIFT);
+-      rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
+-}
+-EXPORT_SYMBOL(fman_get_revision);
+-
+-/**
+- * fman_get_qman_channel_id
+- * @fman:     A Pointer to FMan device
+- * @port_id:  Port id
+- *
+- * Get QMan channel ID associated to the Port id
+- *
+- * Return: QMan channel ID
+- */
+-u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
+-{
+-      int i;
+-
+-      if (fman->state->rev_info.major >= 6) {
+-              u32 port_ids[] = {0x30, 0x31, 0x28, 0x29, 0x2a, 0x2b,
+-                                0x2c, 0x2d, 0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+-              for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+-                      if (port_ids[i] == port_id)
+-                              break;
+-              }
+-      } else {
+-              u32 port_ids[] = {0x30, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x1,
+-                                0x2, 0x3, 0x4, 0x5, 0x7, 0x7};
+-              for (i = 0; i < fman->state->num_of_qman_channels; i++) {
+-                      if (port_ids[i] == port_id)
+-                              break;
+-              }
+-      }
+-
+-      if (i == fman->state->num_of_qman_channels)
+-              return 0;
+-
+-      return fman->state->qman_channel_base + i;
+-}
+-EXPORT_SYMBOL(fman_get_qman_channel_id);
+-
+-/**
+- * fman_get_mem_region
+- * @fman:     A Pointer to FMan device
+- *
+- * Get FMan memory region
+- *
+- * Return: A structure with FMan memory region information
+- */
+-struct resource *fman_get_mem_region(struct fman *fman)
+-{
+-      return fman->state->res;
+-}
+-EXPORT_SYMBOL(fman_get_mem_region);
+-
+-/* Bootargs defines */
+-/* Extra headroom for RX buffers - Default, min and max */
+-#define FSL_FM_RX_EXTRA_HEADROOM      64
+-#define FSL_FM_RX_EXTRA_HEADROOM_MIN  16
+-#define FSL_FM_RX_EXTRA_HEADROOM_MAX  384
+-
+-/* Maximum frame length */
+-#define FSL_FM_MAX_FRAME_SIZE                 1522
+-#define FSL_FM_MAX_POSSIBLE_FRAME_SIZE                9600
+-#define FSL_FM_MIN_POSSIBLE_FRAME_SIZE                64
+-
+-/* Extra headroom for Rx buffers.
+- * FMan is instructed to allocate, on the Rx path, this amount of
+- * space at the beginning of a data buffer, beside the DPA private
+- * data area and the IC fields.
+- * Does not impact Tx buffer layout.
+- * Configurable from bootargs. 64 by default, it's needed on
+- * particular forwarding scenarios that add extra headers to the
+- * forwarded frame.
+- */
+-static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+-module_param(fsl_fm_rx_extra_headroom, int, 0);
+-MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
+-
+-/* Max frame size, across all interfaces.
+- * Configurable from bootargs, to avoid allocating oversized (socket)
+- * buffers when not using jumbo frames.
+- * Must be large enough to accommodate the network MTU, but small enough
+- * to avoid wasting skb memory.
+- *
+- * Could be overridden once, at boot-time, via the
+- * fm_set_max_frm() callback.
+- */
+-static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+-module_param(fsl_fm_max_frm, int, 0);
+-MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
+-
+-/**
+- * fman_get_max_frm
+- *
+- * Return: Max frame length configured in the FM driver
+- */
+-u16 fman_get_max_frm(void)
+-{
+-      static bool fm_check_mfl;
+-
+-      if (!fm_check_mfl) {
+-              if (fsl_fm_max_frm > FSL_FM_MAX_POSSIBLE_FRAME_SIZE ||
+-                  fsl_fm_max_frm < FSL_FM_MIN_POSSIBLE_FRAME_SIZE) {
+-                      pr_warn("Invalid fsl_fm_max_frm value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+-                              fsl_fm_max_frm,
+-                              FSL_FM_MIN_POSSIBLE_FRAME_SIZE,
+-                              FSL_FM_MAX_POSSIBLE_FRAME_SIZE,
+-                              FSL_FM_MAX_FRAME_SIZE);
+-                      fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+-              }
+-              fm_check_mfl = true;
+-      }
+-
+-      return fsl_fm_max_frm;
+-}
+-EXPORT_SYMBOL(fman_get_max_frm);
+-
+-/**
+- * fman_get_rx_extra_headroom
+- *
+- * Return: Extra headroom size configured in the FM driver
+- */
+-int fman_get_rx_extra_headroom(void)
+-{
+-      static bool fm_check_rx_extra_headroom;
+-
+-      if (!fm_check_rx_extra_headroom) {
+-              if (fsl_fm_rx_extra_headroom > FSL_FM_RX_EXTRA_HEADROOM_MAX ||
+-                  fsl_fm_rx_extra_headroom < FSL_FM_RX_EXTRA_HEADROOM_MIN) {
+-                      pr_warn("Invalid fsl_fm_rx_extra_headroom value (%d) in bootargs, valid range is %d-%d. Falling back to the default (%d)\n",
+-                              fsl_fm_rx_extra_headroom,
+-                              FSL_FM_RX_EXTRA_HEADROOM_MIN,
+-                              FSL_FM_RX_EXTRA_HEADROOM_MAX,
+-                              FSL_FM_RX_EXTRA_HEADROOM);
+-                      fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+-              }
+-
+-              fm_check_rx_extra_headroom = true;
+-              fsl_fm_rx_extra_headroom = ALIGN(fsl_fm_rx_extra_headroom, 16);
+-      }
+-
+-      return fsl_fm_rx_extra_headroom;
+-}
+-EXPORT_SYMBOL(fman_get_rx_extra_headroom);
+-
+-/**
+- * fman_bind
+- * @dev:      FMan OF device pointer
+- *
+- * Bind to a specific FMan device.
+- *
+- * Allowed only after the port was created.
+- *
+- * Return: A pointer to the FMan device
+- */
+-struct fman *fman_bind(struct device *fm_dev)
+-{
+-      return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
+-}
+-EXPORT_SYMBOL(fman_bind);
+-
+-static irqreturn_t fman_err_irq(int irq, void *handle)
+-{
+-      struct fman *fman = (struct fman *)handle;
+-      u32 pending;
+-      struct fman_fpm_regs __iomem *fpm_rg;
+-      irqreturn_t single_ret, ret = IRQ_NONE;
+-
+-      if (!is_init_done(fman->cfg))
+-              return IRQ_NONE;
+-
+-      fpm_rg = fman->fpm_regs;
+-
+-      /* error interrupts */
+-      pending = ioread32be(&fpm_rg->fm_epi);
+-      if (!pending)
+-              return IRQ_NONE;
+-
+-      if (pending & ERR_INTR_EN_BMI) {
+-              single_ret = bmi_err_event(fman);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_QMI) {
+-              single_ret = qmi_err_event(fman);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_FPM) {
+-              single_ret = fpm_err_event(fman);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_DMA) {
+-              single_ret = dma_err_event(fman);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MURAM) {
+-              single_ret = muram_err_intr(fman);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-
+-      /* MAC error interrupts */
+-      if (pending & ERR_INTR_EN_MAC0) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 0);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC1) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 1);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC2) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 2);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC3) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 3);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC4) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 4);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC5) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 5);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC6) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 6);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC7) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 7);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC8) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 8);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & ERR_INTR_EN_MAC9) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_ERR_MAC0 + 9);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-
+-      return ret;
+-}
+-
+-static irqreturn_t fman_irq(int irq, void *handle)
+-{
+-      struct fman *fman = (struct fman *)handle;
+-      u32 pending;
+-      struct fman_fpm_regs __iomem *fpm_rg;
+-      irqreturn_t single_ret, ret = IRQ_NONE;
+-
+-      if (!is_init_done(fman->cfg))
+-              return IRQ_NONE;
+-
+-      fpm_rg = fman->fpm_regs;
+-
+-      /* normal interrupts */
+-      pending = ioread32be(&fpm_rg->fm_npi);
+-      if (!pending)
+-              return IRQ_NONE;
+-
+-      if (pending & INTR_EN_QMI) {
+-              single_ret = qmi_event(fman);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-
+-      /* MAC interrupts */
+-      if (pending & INTR_EN_MAC0) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 0);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC1) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 1);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC2) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 2);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC3) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 3);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC4) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 4);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC5) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 5);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC6) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 6);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC7) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 7);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC8) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 8);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-      if (pending & INTR_EN_MAC9) {
+-              single_ret = call_mac_isr(fman, FMAN_EV_MAC0 + 9);
+-              if (single_ret == IRQ_HANDLED)
+-                      ret = IRQ_HANDLED;
+-      }
+-
+-      return ret;
+-}
+-
+-static const struct of_device_id fman_muram_match[] = {
+-      {
+-              .compatible = "fsl,fman-muram"},
+-      {}
+-};
+-MODULE_DEVICE_TABLE(of, fman_muram_match);
+-
+-static struct fman *read_dts_node(struct platform_device *of_dev)
+-{
+-      struct fman *fman;
+-      struct device_node *fm_node, *muram_node;
+-      struct resource *res;
+-      u32 val, range[2];
+-      int err, irq;
+-      struct clk *clk;
+-      u32 clk_rate;
+-      phys_addr_t phys_base_addr;
+-      resource_size_t mem_size;
+-
+-      fman = kzalloc(sizeof(*fman), GFP_KERNEL);
+-      if (!fman)
+-              return NULL;
+-
+-      fm_node = of_node_get(of_dev->dev.of_node);
+-
+-      err = of_property_read_u32(fm_node, "cell-index", &val);
+-      if (err) {
+-              dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
+-                      __func__, fm_node->full_name);
+-              goto fman_node_put;
+-      }
+-      fman->dts_params.id = (u8)val;
+-
+-      /* Get the FM interrupt */
+-      res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
+-      if (!res) {
+-              dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
+-                      __func__);
+-              goto fman_node_put;
+-      }
+-      irq = res->start;
+-
+-      /* Get the FM error interrupt */
+-      res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
+-      if (!res) {
+-              dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
+-                      __func__);
+-              goto fman_node_put;
+-      }
+-      fman->dts_params.err_irq = res->start;
+-
+-      /* Get the FM address */
+-      res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
+-      if (!res) {
+-              dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
+-                      __func__);
+-              goto fman_node_put;
+-      }
+-
+-      phys_base_addr = res->start;
+-      mem_size = resource_size(res);
+-
+-      clk = of_clk_get(fm_node, 0);
+-      if (IS_ERR(clk)) {
+-              dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
+-                      __func__, fman->dts_params.id);
+-              goto fman_node_put;
+-      }
+-
+-      clk_rate = clk_get_rate(clk);
+-      if (!clk_rate) {
+-              dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
+-                      __func__, fman->dts_params.id);
+-              goto fman_node_put;
+-      }
+-      /* Rounding to MHz */
+-      fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
+-
+-      err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
+-                                       &range[0], 2);
+-      if (err) {
+-              dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
+-                      __func__, fm_node->full_name);
+-              goto fman_node_put;
+-      }
+-      fman->dts_params.qman_channel_base = range[0];
+-      fman->dts_params.num_of_qman_channels = range[1];
+-
+-      /* Get the MURAM base address and size */
+-      muram_node = of_find_matching_node(fm_node, fman_muram_match);
+-      if (!muram_node) {
+-              dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
+-                      __func__);
+-              goto fman_node_put;
+-      }
+-
+-      err = of_address_to_resource(muram_node, 0,
+-                                   &fman->dts_params.muram_res);
+-      if (err) {
+-              of_node_put(muram_node);
+-              dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
+-                      __func__, err);
+-              goto fman_node_put;
+-      }
+-
+-      of_node_put(muram_node);
+-      of_node_put(fm_node);
+-
+-      err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+-      if (err < 0) {
+-              dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+-                      __func__, irq, err);
+-              goto fman_free;
+-      }
+-
+-      if (fman->dts_params.err_irq != 0) {
+-              err = devm_request_irq(&of_dev->dev, fman->dts_params.err_irq,
+-                                     fman_err_irq, IRQF_SHARED,
+-                                     "fman-err", fman);
+-              if (err < 0) {
+-                      dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
+-                              __func__, fman->dts_params.err_irq, err);
+-                      goto fman_free;
+-              }
+-      }
+-
+-      fman->dts_params.res =
+-              devm_request_mem_region(&of_dev->dev, phys_base_addr,
+-                                      mem_size, "fman");
+-      if (!fman->dts_params.res) {
+-              dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
+-                      __func__);
+-              goto fman_free;
+-      }
+-
+-      fman->dts_params.base_addr =
+-              devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
+-      if (!fman->dts_params.base_addr) {
+-              dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
+-              goto fman_free;
+-      }
+-
+-      fman->dev = &of_dev->dev;
+-
+-      return fman;
+-
+-fman_node_put:
+-      of_node_put(fm_node);
+-fman_free:
+-      kfree(fman);
+-      return NULL;
+-}
+-
+-static int fman_probe(struct platform_device *of_dev)
+-{
+-      struct fman *fman;
+-      struct device *dev;
+-      int err;
+-
+-      dev = &of_dev->dev;
+-
+-      fman = read_dts_node(of_dev);
+-      if (!fman)
+-              return -EIO;
+-
+-      err = fman_config(fman);
+-      if (err) {
+-              dev_err(dev, "%s: FMan config failed\n", __func__);
+-              return -EINVAL;
+-      }
+-
+-      if (fman_init(fman) != 0) {
+-              dev_err(dev, "%s: FMan init failed\n", __func__);
+-              return -EINVAL;
+-      }
+-
+-      if (fman->dts_params.err_irq == 0) {
+-              fman_set_exception(fman, FMAN_EX_DMA_BUS_ERROR, false);
+-              fman_set_exception(fman, FMAN_EX_DMA_READ_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_DMA_SYSTEM_WRITE_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_DMA_FM_WRITE_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_DMA_SINGLE_PORT_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_FPM_STALL_ON_TASKS, false);
+-              fman_set_exception(fman, FMAN_EX_FPM_SINGLE_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_FPM_DOUBLE_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_QMI_SINGLE_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_QMI_DOUBLE_ECC, false);
+-              fman_set_exception(fman,
+-                                 FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID, false);
+-              fman_set_exception(fman, FMAN_EX_BMI_LIST_RAM_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_BMI_STORAGE_PROFILE_ECC,
+-                                 false);
+-              fman_set_exception(fman, FMAN_EX_BMI_STATISTICS_RAM_ECC, false);
+-              fman_set_exception(fman, FMAN_EX_BMI_DISPATCH_RAM_ECC, false);
+-      }
+-
+-      dev_set_drvdata(dev, fman);
+-
+-      dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
+-
+-      return 0;
+-}
+-
+-static const struct of_device_id fman_match[] = {
+-      {
+-              .compatible = "fsl,fman"},
+-      {}
+-};
+-
+-MODULE_DEVICE_TABLE(of, fman_match);
+-
+-static struct platform_driver fman_driver = {
+-      .driver = {
+-              .name = "fsl-fman",
+-              .of_match_table = fman_match,
+-      },
+-      .probe = fman_probe,
+-};
+-
+-static int __init fman_load(void)
+-{
+-      int err;
+-
+-      pr_debug("FSL DPAA FMan driver\n");
+-
+-      err = platform_driver_register(&fman_driver);
+-      if (err < 0)
+-              pr_err("Error, platform_driver_register() = %d\n", err);
+-
+-      return err;
+-}
+-module_init(fman_load);
+-
+-static void __exit fman_unload(void)
+-{
+-      platform_driver_unregister(&fman_driver);
+-}
+-module_exit(fman_unload);
+-
+-MODULE_LICENSE("Dual BSD/GPL");
+-MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
+diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
+deleted file mode 100644
+index 57aae8d..0000000
+--- a/drivers/net/ethernet/freescale/fman/fman.h
++++ /dev/null
+@@ -1,325 +0,0 @@
+-/*
+- * Copyright 2008-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- *     * Redistributions of source code must retain the above copyright
+- *       notice, this list of conditions and the following disclaimer.
+- *     * Redistributions in binary form must reproduce the above copyright
+- *       notice, this list of conditions and the following disclaimer in the
+- *       documentation and/or other materials provided with the distribution.
+- *     * Neither the name of Freescale Semiconductor nor the
+- *       names of its contributors may be used to endorse or promote products
+- *       derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- */
+-
+-#ifndef __FM_H
+-#define __FM_H
+-
+-#include <linux/io.h>
+-
+-/* FM Frame descriptor macros  */
+-/* Frame queue Context Override */
+-#define FM_FD_CMD_FCO                   0x80000000
+-#define FM_FD_CMD_RPD                   0x40000000  /* Read Prepended Data */
+-#define FM_FD_CMD_DTC                   0x10000000  /* Do L4 Checksum */
+-
+-/* TX-Port: Unsupported Format */
+-#define FM_FD_ERR_UNSUPPORTED_FORMAT    0x04000000
+-/* TX Port: Length Error */
+-#define FM_FD_ERR_LENGTH                0x02000000
+-#define FM_FD_ERR_DMA                   0x01000000  /* DMA Data error */
+-
+-/* IPR frame (not error) */
+-#define FM_FD_IPR                       0x00000001
+-/* IPR non-consistent-sp */
+-#define FM_FD_ERR_IPR_NCSP              (0x00100000 | FM_FD_IPR)
+-/* IPR error */
+-#define FM_FD_ERR_IPR                   (0x00200000 | FM_FD_IPR)
+-/* IPR timeout */
+-#define FM_FD_ERR_IPR_TO                (0x00300000 | FM_FD_IPR)
+-/* TX Port: Length Error */
+-#define FM_FD_ERR_IPRE                  (FM_FD_ERR_IPR & ~FM_FD_IPR)
+-
+-/* Rx FIFO overflow, FCS error, code error, running disparity error
+- * (SGMII and TBI modes), FIFO parity error. PHY Sequence error,
+- * PHY error control character detected.
+- */
+-#define FM_FD_ERR_PHYSICAL              0x00080000
+-/* Frame too long OR Frame size exceeds max_length_frame  */
+-#define FM_FD_ERR_SIZE                  0x00040000
+-/* classification discard */
+-#define FM_FD_ERR_CLS_DISCARD           0x00020000
+-/* Extract Out of Frame */
+-#define FM_FD_ERR_EXTRACTION            0x00008000
+-/* No Scheme Selected */
+-#define FM_FD_ERR_NO_SCHEME             0x00004000
+-/* Keysize Overflow */
+-#define FM_FD_ERR_KEYSIZE_OVERFLOW      0x00002000
+-/* Frame color is red */
+-#define FM_FD_ERR_COLOR_RED             0x00000800
+-/* Frame color is yellow */
+-#define FM_FD_ERR_COLOR_YELLOW          0x00000400
+-/* Parser Time out Exceed */
+-#define FM_FD_ERR_PRS_TIMEOUT           0x00000080
+-/* Invalid Soft Parser instruction */
+-#define FM_FD_ERR_PRS_ILL_INSTRUCT      0x00000040
+-/* Header error was identified during parsing */
+-#define FM_FD_ERR_PRS_HDR_ERR           0x00000020
+-/* Frame parsed beyind 256 first bytes */
+-#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED  0x00000008
+-
+-/* non Frame-Manager error */
+-#define FM_FD_RX_STATUS_ERR_NON_FM      0x00400000
+-
+-/* FMan driver defines */
+-#define FMAN_BMI_FIFO_UNITS           0x100
+-#define OFFSET_UNITS                  16
+-
+-/* BMan defines */
+-#define BM_MAX_NUM_OF_POOLS           64 /* Buffers pools */
+-#define FMAN_PORT_MAX_EXT_POOLS_NUM   8  /* External BM pools per Rx port */
+-
+-struct fman; /* FMan data */
+-
+-/* Enum for defining port types */
+-enum fman_port_type {
+-      FMAN_PORT_TYPE_TX = 0,  /* TX Port */
+-      FMAN_PORT_TYPE_RX,      /* RX Port */
+-};
+-
+-struct fman_rev_info {
+-      u8 major;                       /* Major revision */
+-      u8 minor;                       /* Minor revision */
+-};
+-
+-enum fman_exceptions {
+-      FMAN_EX_DMA_BUS_ERROR = 0,      /* DMA bus error. */
+-      FMAN_EX_DMA_READ_ECC,           /* Read Buffer ECC error */
+-      FMAN_EX_DMA_SYSTEM_WRITE_ECC,   /* Write Buffer ECC err on sys side */
+-      FMAN_EX_DMA_FM_WRITE_ECC,       /* Write Buffer ECC error on FM side */
+-      FMAN_EX_DMA_SINGLE_PORT_ECC,    /* Single Port ECC error on FM side */
+-      FMAN_EX_FPM_STALL_ON_TASKS,     /* Stall of tasks on FPM */
+-      FMAN_EX_FPM_SINGLE_ECC,         /* Single ECC on FPM. */
+-      FMAN_EX_FPM_DOUBLE_ECC,         /* Double ECC error on FPM ram access */
+-      FMAN_EX_QMI_SINGLE_ECC, /* Single ECC on QMI. */
+-      FMAN_EX_QMI_DOUBLE_ECC, /* Double bit ECC occurred on QMI */
+-      FMAN_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/* DeQ from unknown port id */
+-      FMAN_EX_BMI_LIST_RAM_ECC,       /* Linked List RAM ECC error */
+-      FMAN_EX_BMI_STORAGE_PROFILE_ECC,/* storage profile */
+-      FMAN_EX_BMI_STATISTICS_RAM_ECC,/* Statistics RAM ECC Err Enable */
+-      FMAN_EX_BMI_DISPATCH_RAM_ECC,   /* Dispatch RAM ECC Error Enable */
+-      FMAN_EX_IRAM_ECC,               /* Double bit ECC occurred on IRAM */
+-      FMAN_EX_MURAM_ECC               /* Double bit ECC occurred on MURAM */
+-};
+-
+-/* Parse results memory layout */
+-struct fman_prs_result {
+-      u8 lpid;                /* Logical port id */
+-      u8 shimr;               /* Shim header result  */
+-      u16 l2r;                /* Layer 2 result */
+-      u16 l3r;                /* Layer 3 result */
+-      u8 l4r;         /* Layer 4 result */
+-      u8 cplan;               /* Classification plan id */
+-      u16 nxthdr;             /* Next Header  */
+-      u16 cksum;              /* Running-sum */
+-      /* Flags&fragment-offset field of the last IP-header */
+-      u16 flags_frag_off;
+-      /* Routing type field of a IPV6 routing extension header */
+-      u8 route_type;
+-      /* Routing Extension Header Present; last bit is IP valid */
+-      u8 rhp_ip_valid;
+-      u8 shim_off[2];         /* Shim offset */
+-      u8 ip_pid_off;          /* IP PID (last IP-proto) offset */
+-      u8 eth_off;             /* ETH offset */
+-      u8 llc_snap_off;        /* LLC_SNAP offset */
+-      u8 vlan_off[2];         /* VLAN offset */
+-      u8 etype_off;           /* ETYPE offset */
+-      u8 pppoe_off;           /* PPP offset */
+-      u8 mpls_off[2];         /* MPLS offset */
+-      u8 ip_off[2];           /* IP offset */
+-      u8 gre_off;             /* GRE offset */
+-      u8 l4_off;              /* Layer 4 offset */
+-      u8 nxthdr_off;          /* Parser end point */
+-};
+-
+-/* A structure for defining buffer prefix area content. */
+-struct fman_buffer_prefix_content {
+-      /* Number of bytes to be left at the beginning of the external
+-       * buffer; Note that the private-area will start from the base
+-       * of the buffer address.
+-       */
+-      u16 priv_data_size;
+-      /* true to pass the parse result to/from the FM;
+-       * User may use FM_PORT_GetBufferPrsResult() in
+-       * order to get the parser-result from a buffer.
+-       */
+-      bool pass_prs_result;
+-      /* true to pass the timeStamp to/from the FM User */
+-      bool pass_time_stamp;
+-      /* true to pass the KG hash result to/from the FM User may
+-       * use FM_PORT_GetBufferHashResult() in order to get the
+-       * parser-result from a buffer.
+-       */
+-      bool pass_hash_result;
+-      /* Add all other Internal-Context information: AD,
+-       * hash-result, key, etc.
+-       */
+-      u16 data_align;
+-};
+-
+-/* A structure of information about each of the external
+- * buffer pools used by a port or storage-profile.
+- */
+-struct fman_ext_pool_params {
+-      u8 id;              /* External buffer pool id */
+-      u16 size;                   /* External buffer pool buffer size */
+-};
+-
+-/* A structure for informing the driver about the external
+- * buffer pools allocated in the BM and used by a port or a
+- * storage-profile.
+- */
+-struct fman_ext_pools {
+-      u8 num_of_pools_used; /* Number of pools use by this port */
+-      struct fman_ext_pool_params ext_buf_pool[FMAN_PORT_MAX_EXT_POOLS_NUM];
+-                                      /* Parameters for each port */
+-};
+-
+-/* A structure for defining BM pool depletion criteria */
+-struct fman_buf_pool_depletion {
+-      /* select mode in which pause frames will be sent after a
+-       * number of pools (all together!) are depleted
+-       */
+-      bool pools_grp_mode_enable;
+-      /* the number of depleted pools that will invoke pause
+-       * frames transmission.
+-       */
+-      u8 num_of_pools;
+-      /* For each pool, true if it should be considered for
+-       * depletion (Note - this pool must be used by this port!).
+-       */
+-      bool pools_to_consider[BM_MAX_NUM_OF_POOLS];
+-      /* select mode in which pause frames will be sent
+-       * after a single-pool is depleted;
+-       */
+-      bool single_pool_mode_enable;
+-      /* For each pool, true if it should be considered
+-       * for depletion (Note - this pool must be used by this port!)
+-       */
+-      bool pools_to_consider_for_single_mode[BM_MAX_NUM_OF_POOLS];
+-};
+-
+-/* Enum for inter-module interrupts registration */
+-enum fman_event_modules {
+-      FMAN_MOD_MAC = 0,               /* MAC event */
+-      FMAN_MOD_FMAN_CTRL,     /* FMAN Controller */
+-      FMAN_MOD_DUMMY_LAST
+-};
+-
+-/* Enum for interrupts types */
+-enum fman_intr_type {
+-      FMAN_INTR_TYPE_ERR,
+-      FMAN_INTR_TYPE_NORMAL
+-};
+-
+-/* Enum for inter-module interrupts registration */
+-enum fman_inter_module_event {
+-      FMAN_EV_ERR_MAC0 = 0,   /* MAC 0 error event */
+-      FMAN_EV_ERR_MAC1,               /* MAC 1 error event */
+-      FMAN_EV_ERR_MAC2,               /* MAC 2 error event */
+-      FMAN_EV_ERR_MAC3,               /* MAC 3 error event */
+-      FMAN_EV_ERR_MAC4,               /* MAC 4 error event */
+-      FMAN_EV_ERR_MAC5,               /* MAC 5 error event */
+-      FMAN_EV_ERR_MAC6,               /* MAC 6 error event */
+-      FMAN_EV_ERR_MAC7,               /* MAC 7 error event */
+-      FMAN_EV_ERR_MAC8,               /* MAC 8 error event */
+-      FMAN_EV_ERR_MAC9,               /* MAC 9 error event */
+-      FMAN_EV_MAC0,           /* MAC 0 event (Magic packet detection) */
+-      FMAN_EV_MAC1,           /* MAC 1 event (Magic packet detection) */
+-      FMAN_EV_MAC2,           /* MAC 2 (Magic packet detection) */
+-      FMAN_EV_MAC3,           /* MAC 3 (Magic packet detection) */
+-      FMAN_EV_MAC4,           /* MAC 4 (Magic packet detection) */
+-      FMAN_EV_MAC5,           /* MAC 5 (Magic packet detection) */
+-      FMAN_EV_MAC6,           /* MAC 6 (Magic packet detection) */
+-      FMAN_EV_MAC7,           /* MAC 7 (Magic packet detection) */
+-      FMAN_EV_MAC8,           /* MAC 8 event (Magic packet detection) */
+-      FMAN_EV_MAC9,           /* MAC 9 event (Magic packet detection) */
+-      FMAN_EV_FMAN_CTRL_0,    /* Fman controller event 0 */
+-      FMAN_EV_FMAN_CTRL_1,    /* Fman controller event 1 */
+-      FMAN_EV_FMAN_CTRL_2,    /* Fman controller event 2 */
+-      FMAN_EV_FMAN_CTRL_3,    /* Fman controller event 3 */
+-      FMAN_EV_CNT
+-};
+-
+-struct fman_intr_src {
+-      void (*isr_cb)(void *src_arg);
+-      void *src_handle;
+-};
+-
+-/* Structure for port-FM communication during fman_port_init. */
+-struct fman_port_init_params {
+-      u8 port_id;                     /* port Id */
+-      enum fman_port_type port_type;  /* Port type */
+-      u16 port_speed;                 /* Port speed */
+-      u16 liodn_offset;               /* Port's requested resource */
+-      u8 num_of_tasks;                /* Port's requested resource */
+-      u8 num_of_extra_tasks;          /* Port's requested resource */
+-      u8 num_of_open_dmas;            /* Port's requested resource */
+-      u8 num_of_extra_open_dmas;      /* Port's requested resource */
+-      u32 size_of_fifo;               /* Port's requested resource */
+-      u32 extra_size_of_fifo;         /* Port's requested resource */
+-      u8 deq_pipeline_depth;          /* Port's requested resource */
+-      u16 max_frame_length;           /* Port's max frame length. */
+-      u16 liodn_base;
+-      /* LIODN base for this port, to be used together with LIODN offset. */
+-};
+-
+-void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info);
+-
+-void fman_register_intr(struct fman *fman, enum fman_event_modules mod,
+-                      u8 mod_id, enum fman_intr_type intr_type,
+-                      void (*f_isr)(void *h_src_arg), void *h_src_arg);
+-
+-void fman_unregister_intr(struct fman *fman, enum fman_event_modules mod,
+-                        u8 mod_id, enum fman_intr_type intr_type);
+-
+-int fman_set_port_params(struct fman *fman,
+-                       struct fman_port_init_params *port_params);
+-
+-int fman_reset_mac(struct fman *fman, u8 mac_id);
+-
+-u16 fman_get_clock_freq(struct fman *fman);
+-
+-u32 fman_get_bmi_max_fifo_size(struct fman *fman);
+-
+-int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl);
+-
+-u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id);
+-
+-struct resource *fman_get_mem_region(struct fman *fman);
+-
+-u16 fman_get_max_frm(void);
+-
+-int fman_get_rx_extra_headroom(void);
+-
+-struct fman *fman_bind(struct device *dev);
+-
+-#endif /* __FM_H */
+diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+deleted file mode 100644
+index c88918c..0000000
+--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
++++ /dev/null
+@@ -1,1451 +0,0 @@
+-/*
+- * Copyright 2008-2015 Freescale Semiconductor Inc.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions are met:
+- *     * Redistributions of source code must retain the above copyright
+- *       notice, this list of conditions and the following disclaimer.
+- *     * Redistributions in binary form must reproduce the above copyright
+- *       notice, this list of conditions and the following disclaimer in the
+- *       documentation and/or other materials provided with the distribution.
+- *     * Neither the name of Freescale Semiconductor nor the
+- *       names of its contributors may be used to endorse or promote products
+- *       derived from this software without specific prior written permission.
+- *
+- *
+- * ALTERNATIVELY, this software may be distributed under the terms of the
+- * GNU General Public License ("GPL") as published by the Free Software
+- * Foundation, either version 2 of that License or (at your option) any
+- * later version.
+- *
+- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- */
+-
+-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-
+-#include "fman_dtsec.h"
+-#include "fman.h"
+-
+-#include <linux/slab.h>
+-#include <linux/bitrev.h>
+-#include <linux/io.h>
+-#include <linux/delay.h>
+-#include <linux/phy.h>
+-#include <linux/crc32.h>
+-#include <linux/of_mdio.h>
+-#include <linux/mii.h>
+-
+-/* TBI register addresses */
+-#define MII_TBICON            0x11
+-
+-/* TBICON register bit fields */
+-#define TBICON_SOFT_RESET     0x8000  /* Soft reset */
+-#define TBICON_DISABLE_RX_DIS 0x2000  /* Disable receive disparity */
+-#define TBICON_DISABLE_TX_DIS 0x1000  /* Disable transmit disparity */
+-#define TBICON_AN_SENSE               0x0100  /* Auto-negotiation sense enable */
+-#define TBICON_CLK_SELECT     0x0020  /* Clock select */
+-#define TBICON_MI_MODE                0x0010  /* GMII mode (TBI if not set) */
+-
+-#define TBIANA_SGMII          0x4001
+-#define TBIANA_1000X          0x01a0
+-
+-/* Interrupt Mask Register (IMASK) */
+-#define DTSEC_IMASK_BREN      0x80000000
+-#define DTSEC_IMASK_RXCEN     0x40000000
+-#define DTSEC_IMASK_MSROEN    0x04000000
+-#define DTSEC_IMASK_GTSCEN    0x02000000
+-#define DTSEC_IMASK_BTEN      0x01000000
+-#define DTSEC_IMASK_TXCEN     0x00800000
+-#define DTSEC_IMASK_TXEEN     0x00400000
+-#define DTSEC_IMASK_LCEN      0x00040000
+-#define DTSEC_IMASK_CRLEN     0x00020000
+-#define DTSEC_IMASK_XFUNEN    0x00010000
+-#define DTSEC_IMASK_ABRTEN    0x00008000
+-#define DTSEC_IMASK_IFERREN   0x00004000
+-#define DTSEC_IMASK_MAGEN     0x00000800
+-#define DTSEC_IMASK_MMRDEN    0x00000400
+-#define DTSEC_IMASK_MMWREN    0x00000200
+-#define DTSEC_IMASK_GRSCEN    0x00000100
+-#define DTSEC_IMASK_TDPEEN    0x00000002
+-#define DTSEC_IMASK_RDPEEN    0x00000001
+-
+-#define DTSEC_EVENTS_MASK             \
+-       ((u32)(DTSEC_IMASK_BREN    |   \
+-              DTSEC_IMASK_RXCEN   |   \
+-              DTSEC_IMASK_BTEN    |   \
+-              DTSEC_IMASK_TXCEN   |   \
+-              DTSEC_IMASK_TXEEN   |   \
+-              DTSEC_IMASK_ABRTEN  |   \
+-              DTSEC_IMASK_LCEN    |   \
+-              DTSEC_IMASK_CRLEN   |   \
+-              DTSEC_IMASK_XFUNEN  |   \
+-              DTSEC_IMASK_IFERREN |   \
+-              DTSEC_IMASK_MAGEN   |   \
+-              DTSEC_IMASK_TDPEEN  |   \
+-              DTSEC_IMASK_RDPEEN))
+-
+-/* dtsec timestamp event bits */
+-#define TMR_PEMASK_TSREEN     0x00010000
+-#define TMR_PEVENT_TSRE               0x00010000
+-
+-/* Group address bit indication */
+-#define MAC_GROUP_ADDRESS     0x0000010000000000ULL
+-
+-/* Defaults */
+-#define DEFAULT_HALFDUP_RETRANSMIT            0xf
+-#define DEFAULT_HALFDUP_COLL_WINDOW           0x37
+-#define DEFAULT_TX_PAUSE_TIME                 0xf000
+-#define DEFAULT_RX_PREPEND                    0
+-#define DEFAULT_PREAMBLE_LEN                  7
+-#define DEFAULT_TX_PAUSE_TIME_EXTD            0
+-#define DEFAULT_NON_BACK_TO_BACK_IPG1         0x40
+-#define DEFAULT_NON_BACK_TO_BACK_IPG2         0x60
+-#define DEFAULT_MIN_IFG_ENFORCEMENT           0x50
+-#define DEFAULT_BACK_TO_BACK_IPG              0x60
+-#define DEFAULT_MAXIMUM_FRAME                 0x600
+-
+-/* register related defines (bits, field offsets..) */
+-#define DTSEC_ID2_INT_REDUCED_OFF     0x00010000
+-
+-#define DTSEC_ECNTRL_GMIIM            0x00000040
+-#define DTSEC_ECNTRL_TBIM             0x00000020
+-#define DTSEC_ECNTRL_SGMIIM           0x00000002
+-#define DTSEC_ECNTRL_RPM              0x00000010
+-#define DTSEC_ECNTRL_R100M            0x00000008
+-#define DTSEC_ECNTRL_QSGMIIM          0x00000001
+-
+-#define DTSEC_TCTRL_GTS                       0x00000020
+-
+-#define RCTRL_PAL_MASK                        0x001f0000
+-#define RCTRL_PAL_SHIFT                       16
+-#define RCTRL_GHTX                    0x00000400
+-#define RCTRL_GRS                     0x00000020
+-#define RCTRL_MPROM                   0x00000008
+-#define RCTRL_RSF                     0x00000004
+-#define RCTRL_UPROM                   0x00000001
+-
+-#define MACCFG1_SOFT_RESET            0x80000000
+-#define MACCFG1_RX_FLOW                       0x00000020
+-#define MACCFG1_TX_FLOW                       0x00000010
+-#define MACCFG1_TX_EN                 0x00000001
+-#define MACCFG1_RX_EN                 0x00000004
+-
+-#define MACCFG2_NIBBLE_MODE           0x00000100
+-#define MACCFG2_BYTE_MODE             0x00000200
+-#define MACCFG2_PAD_CRC_EN            0x00000004
+-#define MACCFG2_FULL_DUPLEX           0x00000001
+-#define MACCFG2_PREAMBLE_LENGTH_MASK  0x0000f000
+-#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12
+-
+-#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT   24
+-#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT   16
+-#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT      8
+-
+-#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
+-#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
+-#define IPGIFG_MIN_IFG_ENFORCEMENT    0x0000FF00
+-#define IPGIFG_BACK_TO_BACK_IPG       0x0000007F
+-
+-#define HAFDUP_EXCESS_DEFER                   0x00010000
+-#define HAFDUP_COLLISION_WINDOW               0x000003ff
+-#define HAFDUP_RETRANSMISSION_MAX_SHIFT       12
+-#define HAFDUP_RETRANSMISSION_MAX             0x0000f000
+-
+-#define NUM_OF_HASH_REGS      8       /* Number of hash table registers */
+-
+-#define PTV_PTE_MASK          0xffff0000
+-#define PTV_PT_MASK           0x0000ffff
+-#define PTV_PTE_SHIFT         16
+-
+-#define MAX_PACKET_ALIGNMENT          31
+-#define MAX_INTER_PACKET_GAP          0x7f
+-#define MAX_RETRANSMISSION            0x0f
+-#define MAX_COLLISION_WINDOW          0x03ff
+-
+-/* Hash table size (32 bits*8 regs) */
+-#define DTSEC_HASH_TABLE_SIZE         256
+-/* Extended Hash table size (32 bits*16 regs) */
+-#define EXTENDED_HASH_TABLE_SIZE      512
+-
+-/* dTSEC Memory Map registers */
+-struct dtsec_regs {
+-      /* dTSEC General Control and Status Registers */
+-      u32 tsec_id;            /* 0x000 ETSEC_ID register */
+-      u32 tsec_id2;           /* 0x004 ETSEC_ID2 register */
+-      u32 ievent;             /* 0x008 Interrupt event register */
+-      u32 imask;              /* 0x00C Interrupt mask register */
+-      u32 reserved0010[1];
+-      u32 ecntrl;             /* 0x014 E control register */
+-      u32 ptv;                /* 0x018 Pause time value register */
+-      u32 tbipa;              /* 0x01C TBI PHY address register */
+-      u32 tmr_ctrl;           /* 0x020 Time-stamp Control register */
+-      u32 tmr_pevent;         /* 0x024 Time-stamp event register */
+-      u32 tmr_pemask;         /* 0x028 Timer event mask register */
+-      u32 reserved002c[5];
+-      u32 tctrl;              /* 0x040 Transmit control register */
+-      u32 reserved0044[3];
+-      u32 rctrl;              /* 0x050 Receive control register */
+-      u32 reserved0054[11];
+-      u32 igaddr[8];          /* 0x080-0x09C Individual/group address */
+-      u32 gaddr[8];           /* 0x0A0-0x0BC Group address registers 0-7 */
+-      u32 reserved00c0[16];
+-      u32 maccfg1;            /* 0x100 MAC configuration #1 */
+-      u32 maccfg2;            /* 0x104 MAC configuration #2 */
+-      u32 ipgifg;             /* 0x108 IPG/IFG */
+-      u32 hafdup;             /* 0x10C Half-duplex */
+-      u32 maxfrm;             /* 0x110 Maximum frame */
+-      u32 reserved0114[10];
+-      u32 ifstat;             /* 0x13C Interface status */
+-      u32 macstnaddr1;        /* 0x140 Station Address,part 1 */
+-      u32 macstnaddr2;        /* 0x144 Station Address,part 2 */
+-      struct {
+-              u32 exact_match1;       /* octets 1-4 */
+-              u32 exact_match2;       /* octets 5-6 */
+-      } macaddr[15];          /* 0x148-0x1BC mac exact match addresses 1-15 */
+-      u32 reserved01c0[16];
+-      u32 tr64;       /* 0x200 Tx and Rx 64 byte frame counter */
+-      u32 tr127;      /* 0x204 Tx and Rx 65 to 127 byte frame counter */
+-      u32 tr255;      /* 0x208 Tx and Rx 128 to 255 byte frame counter */
+-      u32 tr511;      /* 0x20C Tx and Rx 256 to 511 byte frame counter */
+-      u32 tr1k;       /* 0x210 Tx and Rx 512 to 1023 byte frame counter */
+-      u32 trmax;      /* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
+-      u32 trmgv;
+-      /* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
+-      u32 rbyt;       /* 0x21C receive byte counter */
+-      u32 rpkt;       /* 0x220 receive packet counter */
+-      u32 rfcs;       /* 0x224 receive FCS error counter */
+-      u32 rmca;       /* 0x228 RMCA Rx multicast packet counter */
+-      u32 rbca;       /* 0x22C Rx broadcast packet counter */
+-      u32 rxcf;       /* 0x230 Rx control frame packet counter */
+-      u32 rxpf;       /* 0x234 Rx pause frame packet counter */
+-      u32 rxuo;       /* 0x238 Rx unknown OP code counter */
+-      u32 raln;       /* 0x23C Rx alignment error counter */
+-      u32 rflr;       /* 0x240 Rx frame length error counter */
+-      u32 rcde;       /* 0x244 Rx code error counter */
+-      u32 rcse;       /* 0x248 Rx carrier sense error counter */
+-      u32 rund;       /* 0x24C Rx undersize packet counter */
+-      u32 rovr;       /* 0x250 Rx oversize packet counter */
+-      u32 rfrg;       /* 0x254 Rx fragments counter */
+-      u32 rjbr;       /* 0x258 Rx jabber counter */
+-      u32 rdrp;       /* 0x25C Rx drop */
+-      u32 tbyt;       /* 0x260 Tx byte counter */
+-      u32 tpkt;       /* 0x264 Tx packet counter */
+-      u32 tmca;       /* 0x268 Tx multicast packet counter */
+-      u32 tbca;       /* 0x26C Tx broadcast packet counter */
+-      u32 txpf;       /* 0x270 Tx pause control frame counter */
+-      u32 tdfr;       /* 0x274 Tx deferral packet counter */
+-      u32 tedf;       /* 0x278 Tx excessive deferral packet counter */
+-      u32 tscl;       /* 0x27C Tx single collision packet counter */
+-      u32 tmcl;       /* 0x280 Tx multiple collision packet counter */
+-      u32 tlcl;       /* 0x284 Tx late collision packet counter */
+-      u32 txcl;       /* 0x288 Tx excessive collision packet counter */
+-      u32 tncl;       /* 0x28C Tx total collision counter */
+-      u32 reserved0290[1];
+-      u32 tdrp;       /* 0x294 Tx drop frame counter */
+-      u32 tjbr;       /* 0x298 Tx jabber frame counter */
+-      u32 tfcs;       /* 0x29C Tx FCS error counter */
+-      u32 txcf;       /* 0x2A0 Tx control frame counter */
+-      u32 tovr;       /* 0x2A4 Tx oversize frame counter */
+-      u32 tund;       /* 0x2A8 Tx undersize frame counter */
+-      u32 tfrg;       /* 0x2AC Tx fragments frame counter */
+-      u32 car1;       /* 0x2B0 carry register one register* */
+-      u32 car2;       /* 0x2B4 carry register two register* */
+-      u32 cam1;       /* 0x2B8 carry register one mask register */
+-      u32 cam2;       /* 0x2BC carry register two mask register */
+-      u32 reserved02c0[848];
+-};
+-
+-/* struct dtsec_cfg - dTSEC configuration
+- * Transmit half-duplex flow control, under software control for 10/100-Mbps
+- * half-duplex media. If set, back pressure is applied to media by raising
+- * carrier.
+- * halfdup_retransmit:
+- * Number of retransmission attempts following a collision.
+- * If this is exceeded dTSEC aborts transmission due to excessive collisions.
+- * The standard specifies the attempt limit to be 15.
+- * halfdup_coll_window:
+- * The number of bytes of the frame during which collisions may occur.
+- * The default value of 55 corresponds to the frame byte at the end of the
+- * standard 512-bit slot time window. If collisions are detected after this
+- * byte, the late collision event is asserted and transmission of current
+- * frame is aborted.
+- * tx_pad_crc:
+- * Pad and append CRC. If set, the MAC pads all ransmitted short frames and
+- * appends a CRC to every frame regardless of padding requirement.
+- * tx_pause_time:
+- * Transmit pause time value. This pause value is used as part of the pause
+- * frame to be sent when a transmit pause frame is initiated.
+- * If set to 0 this disables transmission of pause frames.
+- * preamble_len:
+- * Length, in bytes, of the preamble field preceding each Ethernet
+- * start-of-frame delimiter byte. The default value of 0x7 should be used in
+- * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
+- * rx_prepend:
+- * Packet alignment padding length. The specified number of bytes (1-31)
+- * of zero padding are inserted before the start of each received frame.
+- * For Ethernet, where optional preamble extraction is enabled, the padding
+- * appears before the preamble, otherwise the padding precedes the
+- * layer 2 header.
+- *
+- * This structure contains basic dTSEC configuration and must be passed to
+- * init() function. A default set of configuration values can be
+- * obtained by calling set_dflts().
+- */
+-struct dtsec_cfg {
+-      u16 halfdup_retransmit;
+-      u16 halfdup_coll_window;
+-      bool tx_pad_crc;
+-      u16 tx_pause_time;
+-      bool ptp_tsu_en;
+-      bool ptp_exception_en;
+-      u32 preamble_len;
+-      u32 rx_prepend;
+-      u16 tx_pause_time_extd;
+-      u16 maximum_frame;
+-      u32 non_back_to_back_ipg1;
+-      u32 non_back_to_back_ipg2;
+-      u32 min_ifg_enforcement;
+-      u32 back_to_back_ipg;
+-};
+-
+-struct fman_mac {
+-      /* pointer to dTSEC memory mapped registers */
+-      struct dtsec_regs __iomem *regs;
+-      /* MAC address of device */
+-      u64 addr;
+-      /* Ethernet physical interface */
+-      phy_interface_t phy_if;
+-      u16 max_speed;
+-      void *dev_id; /* device cookie used by the exception cbs */
+-      fman_mac_exception_cb *exception_cb;
+-      fman_mac_exception_cb *event_cb;
+-      /* Number of individual addresses in registers for this station */
+-      u8 num_of_ind_addr_in_regs;
+-      /* pointer to driver's global address hash table */
+-      struct eth_hash_t *multicast_addr_hash;
+-      /* pointer to driver's individual address hash table */
+-      struct eth_hash_t *unicast_addr_hash;
+-      u8 mac_id;
+-      u32 exceptions;
+-      bool ptp_tsu_enabled;
+-      bool en_tsu_err_exeption;
+-      struct dtsec_cfg *dtsec_drv_param;
+-      void *fm;
+-      struct fman_rev_info fm_rev_info;
+-      bool basex_if;
+-      struct phy_device *tbiphy;
+-};
+-
+-static void set_dflts(struct dtsec_cfg *cfg)
+-{
+-      cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
+-      cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
+-      cfg->tx_pad_crc = true;
+-      cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
+-      /* PHY address 0 is reserved (DPAA RM) */
+-      cfg->rx_prepend = DEFAULT_RX_PREPEND;
+-      cfg->ptp_tsu_en = true;
+-      cfg->ptp_exception_en = true;
+-      cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
+-      cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
+-      cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
+-      cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
+-      cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
+-      cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
+-      cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
+-}
+-
+-static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
+-              phy_interface_t iface, u16 iface_speed, u8 *macaddr,
+-              u32 exception_mask, u8 tbi_addr)
+-{
+-      bool is_rgmii, is_sgmii, is_qsgmii;
+-      int i;
+-      u32 tmp;
+-
+-      /* Soft reset */
+-      iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
+-      iowrite32be(0, &regs->maccfg1);
+-
+-      /* dtsec_id2 */
+-      tmp = ioread32be(&regs->tsec_id2);
+-
+-      /* check RGMII support */
+-      if (iface == PHY_INTERFACE_MODE_RGMII ||
+-          iface == PHY_INTERFACE_MODE_RMII)
+-              if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+-                      return -EINVAL;
+-
+-      if (iface == PHY_INTERFACE_MODE_SGMII ||
+-          iface == PHY_INTERFACE_MODE_MII)
+-              if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
+-                      return -EINVAL;
+-
+-      is_rgmii = iface == PHY_INTERFACE_MODE_RGMII;
+-      is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
+-      is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
+-
+-      tmp = 0;
+-      if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
+-              tmp |= DTSEC_ECNTRL_GMIIM;
+-      if (is_sgmii)
+-              tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
+-      if (is_qsgmii)
+-              tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
+-                      DTSEC_ECNTRL_QSGMIIM);
+-      if (is_rgmii)
+-              tmp |= DTSEC_ECNTRL_RPM;
+-      if (iface_speed == SPEED_100)
+-              tmp |= DTSEC_ECNTRL_R100M;
+-
+-      iowrite32be(tmp, &regs->ecntrl);
+-
+-      tmp = 0;
+-
+-      if (cfg->tx_pause_time)
+-              tmp |= cfg->tx_pause_time;
+-      if (cfg->tx_pause_time_extd)
+-              tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
+-      iowrite32be(tmp, &regs->ptv);
+-
+-      tmp = 0;
+-      tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
+-      /* Accept short frames */
+-      tmp |= RCTRL_RSF;
+-
+-      iowrite32be(tmp, &regs->rctrl);
+-
+-      /* Assign a Phy Address to the TBI (TBIPA).
+-       * Done also in cases where TBI is not selected to avoid conflict with
+-       * the external PHY's Physical address
+-       */
+-      iowrite32be(tbi_addr, &regs->tbipa);
+-
+-      iowrite32be(0, &regs->tmr_ctrl);
+-
+-      if (cfg->ptp_tsu_en) {
+-              tmp = 0;
+-              tmp |= TMR_PEVENT_TSRE;
+-              iowrite32be(tmp, &regs->tmr_pevent);
+-
+-              if (cfg->ptp_exception_en) {
+-                      tmp = 0;
+-                      tmp |= TMR_PEMASK_TSREEN;
+-                      iowrite32be(tmp, &regs->tmr_pemask);
+-              }
+-      }
+-
+-      tmp = 0;
+-      tmp |= MACCFG1_RX_FLOW;
+-      tmp |= MACCFG1_TX_FLOW;
+-      iowrite32be(tmp, &regs->maccfg1);
+-
+-      tmp = 0;
+-
+-      if (iface_speed < SPEED_1000)
+-              tmp |= MACCFG2_NIBBLE_MODE;
+-      else if (iface_speed == SPEED_1000)
+-              tmp |= MACCFG2_BYTE_MODE;
+-
+-      tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
+-              MACCFG2_PREAMBLE_LENGTH_MASK;
+-      if (cfg->tx_pad_crc)
+-              tmp |= MACCFG2_PAD_CRC_EN;
+-      /* Full Duplex */
+-      tmp |= MACCFG2_FULL_DUPLEX;
+-      iowrite32be(tmp, &regs->maccfg2);
+-
+-      tmp = (((cfg->non_back_to_back_ipg1 <<
+-               IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
+-              & IPGIFG_NON_BACK_TO_BACK_IPG_1)
+-             | ((cfg->non_back_to_back_ipg2 <<
+-                 IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
+-               & IPGIFG_NON_BACK_TO_BACK_IPG_2)
+-             | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
+-               & IPGIFG_MIN_IFG_ENFORCEMENT)
+-             | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
+-      iowrite32be(tmp, &regs->ipgifg);
+-
+-      tmp = 0;
+-      tmp |= HAFDUP_EXCESS_DEFER;
+-      tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
+-              & HAFDUP_RETRANSMISSION_MAX);
+-      tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
+-
+-      iowrite32be(tmp, &regs->hafdup);
+-
+-      /* Initialize Maximum frame length */
+-      iowrite32be(cfg->maximum_frame, &regs->maxfrm);
+-
+-      iowrite32be(0xffffffff, &regs->cam1);
+-      iowrite32be(0xffffffff, &regs->cam2);
+-
+-      iowrite32be(exception_mask, &regs->imask);
+-
+-      iowrite32be(0xffffffff, &regs->ievent);
+-
+-      tmp = (u32)((macaddr[5] << 24) |
+-                  (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
+-      iowrite32be(tmp, &regs->macstnaddr1);
+-
+-      tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
+-      iowrite32be(tmp, &regs->macstnaddr2);
+-
+-      /* HASH */
+-      for (i = 0; i < NUM_OF_HASH_REGS; i++) {
+-              /* Initialize IADDRx */
+-              iowrite32be(0, &regs->igaddr[i]);
+-              /* Initialize GADDRx */
+-              iowrite32be(0, &regs->gaddr[i]);
+-      }
+-
+-      return 0;
+-}
+-
+-static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+-{
+-      u32 tmp;
+-
+-      tmp = (u32)((adr[5] << 24) |
+-                  (adr[4] << 16) | (adr[3] << 8) | adr[2]);
+-      iowrite32be(tmp, &regs->macstnaddr1);
+-
+-      tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
+-      iowrite32be(tmp, &regs->macstnaddr2);
+-}
+-
+-static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
+-                     bool enable)
+-{
+-      int reg_idx = (bucket >> 5) & 0xf;
+-      int bit_idx = bucket & 0x1f;
+-      u32 bit_mask = 0x80000000 >> bit_idx;
+-      u32 __iomem *reg;
+-
+-      if (reg_idx > 7)
+-              reg = &regs->gaddr[reg_idx - 8];
+-      else
+-              reg = &regs->igaddr[reg_idx];
+-
+-      if (enable)
+-              iowrite32be(ioread32be(reg) | bit_mask, reg);
+-      else
+-              iowrite32be(ioread32be(reg) & (~bit_mask), reg);
+-}
+-
+-static int check_init_parameters(struct fman_mac *dtsec)
+-{
+-      if (dtsec->max_speed >= SPEED_10000) {
+-              pr_err("1G MAC driver supports 1G or lower speeds\n");
+-              return -EINVAL;
+-      }
+-      if (dtsec->addr == 0) {
+-              pr_err("Ethernet MAC Must have a valid MAC Address\n");
+-              return -EINVAL;
+-      }
+-      if ((dtsec->dtsec_drv_param)->rx_prepend >
+-          MAX_PACKET_ALIGNMENT) {
+-              pr_err("packetAlignmentPadding can't be > than %d\n",
+-                     MAX_PACKET_ALIGNMENT);
+-              return -EINVAL;
+-      }
+-      if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
+-           MAX_INTER_PACKET_GAP) ||
+-          ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
+-           MAX_INTER_PACKET_GAP) ||
+-           ((dtsec->dtsec_drv_param)->back_to_back_ipg >
+-            MAX_INTER_PACKET_GAP)) {
+-              pr_err("Inter packet gap can't be greater than %d\n",
+-                     MAX_INTER_PACKET_GAP);
+-              return -EINVAL;
+-      }
+-      if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
+-          MAX_RETRANSMISSION) {
+-              pr_err("maxRetransmission can't be greater than %d\n",
+-                     MAX_RETRANSMISSION);
+-              return -EINVAL;
+-      }
+-      if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
+-          MAX_COLLISION_WINDOW) {
+-              pr_err("collisionWindow can't be greater than %d\n",
+-                     MAX_COLLISION_WINDOW);
+-              return -EINVAL;
+-      /* If Auto negotiation process is disabled, need to set up the PHY
+-       * using the MII Management Interface
+-       */
+-      }
+-      if (!dtsec->exception_cb) {
+-              pr_err("uninitialized exception_cb\n");
+-              return -EINVAL;
+-      }
+-      if (!dtsec->event_cb) {
+-              pr_err("uninitialized event_cb\n");
+-              return -EINVAL;
+-      }
+-
+-      return 0;
+-}
+-
+-static int get_exception_flag(enum fman_mac_exceptions exception)
+-{
+-      u32 bit_mask;
+-
+-      switch (exception) {
+-      case FM_MAC_EX_1G_BAB_RX:
+-              bit_mask = DTSEC_IMASK_BREN;
+-              break;
+-      case FM_MAC_EX_1G_RX_CTL:
+-              bit_mask = DTSEC_IMASK_RXCEN;
+-              break;
+-      case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
+-              bit_mask = DTSEC_IMASK_GTSCEN;
+-              break;
+-      case FM_MAC_EX_1G_BAB_TX:
+-              bit_mask = DTSEC_IMASK_BTEN;
+-              break;
+-      case FM_MAC_EX_1G_TX_CTL:
+-              bit_mask = DTSEC_IMASK_TXCEN;
+-              break;
+-      case FM_MAC_EX_1G_TX_ERR:
+-              bit_mask = DTSEC_IMASK_TXEEN;
+-              break;
+-      case FM_MAC_EX_1G_LATE_COL:
+-              bit_mask = DTSEC_IMASK_LCEN;
+-              break;
+-      case FM_MAC_EX_1G_COL_RET_LMT:
+-              bit_mask = DTSEC_IMASK_CRLEN;
+-              break;
+-      case FM_MAC_EX_1G_TX_FIFO_UNDRN:
+-              bit_mask = DTSEC_IMASK_XFUNEN;
+-              break;
+-      case FM_MAC_EX_1G_MAG_PCKT:
+-              bit_mask = DTSEC_IMASK_MAGEN;
+-              break;
+-      case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
+-              bit_mask = DTSEC_IMASK_MMRDEN;
+-              break;
+-      case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
+-              bit_mask = DTSEC_IMASK_MMWREN;
+-              break;
+-      case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
+-              bit_mask = DTSEC_IMASK_GRSCEN;
+-              break;
+-      case FM_MAC_EX_1G_DATA_ERR:
+-              bit_mask = DTSEC_IMASK_TDPEEN;
+-              break;
+-      case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
+-              bit_mask = DTSEC_IMASK_MSROEN;
+-              break;
+-      default:
+-              bit_mask = 0;
+-              break;
+-      }
+-
+-      return bit_mask;
+-}
+-
+-static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
+-{
+-      /* Checks if dTSEC driver parameters were initialized */
+-      if (!dtsec_drv_params)
+-              return true;
+-
+-      return false;
+-}
+-
+-static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
+-{
+-      struct dtsec_regs __iomem *regs = dtsec->regs;
+-
+-      if (is_init_done(dtsec->dtsec_drv_param))
+-              return 0;
+-
+-      return (u16)ioread32be(&regs->maxfrm);
+-}
+-
+-static void dtsec_isr(void *handle)
+-{
+-      struct fman_mac *dtsec = (struct fman_mac *)handle;
+-      struct dtsec_regs __iomem *regs = dtsec->regs;
+-      u32 event;
+-
+-      /* do not handle MDIO events */
+-      event = ioread32be(&regs->ievent) &
+-              (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
+-
+-      event &= ioread32be(&regs->imask);
+-
+-      iowrite32be(event, &regs->ievent);
+-
+-      if (event & DTSEC_IMASK_BREN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
+-      if (event & DTSEC_IMASK_RXCEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
+-      if (event & DTSEC_IMASK_GTSCEN)
+-              dtsec->exception_cb(dtsec->dev_id,
+-                                  FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
+-      if (event & DTSEC_IMASK_BTEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
+-      if (event & DTSEC_IMASK_TXCEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
+-      if (event & DTSEC_IMASK_TXEEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
+-      if (event & DTSEC_IMASK_LCEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
+-      if (event & DTSEC_IMASK_CRLEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
+-      if (event & DTSEC_IMASK_XFUNEN) {
+-              /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
+-              if (dtsec->fm_rev_info.major == 2) {
+-                      u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
+-                      /* a. Write 0x00E0_0C00 to DTSEC_ID
+-                       *      This is a read only register
+-                       * b. Read and save the value of TPKT
+-                       */
+-                      tpkt1 = ioread32be(&regs->tpkt);
+-
+-                      /* c. Read the register at dTSEC address offset 0x32C */
+-                      tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
+-
+-                      /* d. Compare bits [9:15] to bits [25:31] of the
+-                       * register at address offset 0x32C.
+-                       */
+-                      if ((tmp_reg1 & 0x007F0000) !=
+-                              (tmp_reg1 & 0x0000007F)) {
+-                              /* If they are not equal, save the value of
+-                               * this register and wait for at least
+-                               * MAXFRM*16 ns
+-                               */
+-                              usleep_range((u32)(min
+-                                      (dtsec_get_max_frame_length(dtsec) *
+-                                      16 / 1000, 1)), (u32)
+-                                      (min(dtsec_get_max_frame_length
+-                                      (dtsec) * 16 / 1000, 1) + 1));
+-                      }
+-
+-                      /* e. Read and save TPKT again and read the register
+-                       * at dTSEC address offset 0x32C again
+-                       */
+-                      tpkt2 = ioread32be(&regs->tpkt);
+-                      tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
+-
+-                      /* f. Compare the value of TPKT saved in step b to
+-                       * value read in step e. Also compare bits [9:15] of
+-                       * the register at offset 0x32C saved in step d to the
+-                       * value of bits [9:15] saved in step e. If the two
+-                       * registers values are unchanged, then the transmit
+-                       * portion of the dTSEC controller is locked up and
+-                       * the user should proceed to the recover sequence.
+-                       */
+-                      if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
+-                              (tmp_reg2 & 0x007F0000))) {
+-                              /* recover sequence */
+-
+-                              /* a.Write a 1 to RCTRL[GRS] */
+-
+-                              iowrite32be(ioread32be(&regs->rctrl) |
+-                                          RCTRL_GRS, &regs->rctrl);
+-
+-                              /* b.Wait until IEVENT[GRSC]=1, or at least
+-                               * 100 us has elapsed.
+-                               */
+-                              for (i = 0; i < 100; i++) {
+-                                      if (ioread32be(&regs->ievent) &
+-                                          DTSEC_IMASK_GRSCEN)
+-                                              break;
+-                                      udelay(1);
+-                              }
+-                              if (ioread32be(&regs->ievent) &
+-                                  DTSEC_IMASK_GRSCEN)
+-                                      iowrite32be(DTSEC_IMASK_GRSCEN,
+-                                                  &regs->ievent);
+-                              else
+-                                      pr_debug("Rx lockup due to Tx lockup\n");
+-
+-                              /* c.Write a 1 to bit n of FM_RSTC
+-                               * (offset 0x0CC of FPM)
+-                               */
+-                              fman_reset_mac(dtsec->fm, dtsec->mac_id);
+-
+-                              /* d.Wait 4 Tx clocks (32 ns) */
+-                              udelay(1);
+-
+-                              /* e.Write a 0 to bit n of FM_RSTC. */
+-                              /* cleared by FMAN
+-                               */
+-                      }
+-              }
+-
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
+-      }
+-      if (event & DTSEC_IMASK_MAGEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
+-      if (event & DTSEC_IMASK_GRSCEN)
+-              dtsec->exception_cb(dtsec->dev_id,
+-                                  FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
+-      if (event & DTSEC_IMASK_TDPEEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
+-      if (event & DTSEC_IMASK_RDPEEN)
+-              dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
+-
+-      /* masked interrupts */
+-      WARN_ON(event & DTSEC_IMASK_ABRTEN);
+-      WARN_ON(event & DTSEC_IMASK_IFERREN);
+-}
+-
+-static void dtsec_1588_isr(void *handle)
+-{
+-      struct fman_mac *dtsec = (struct fman_mac *)handle;
+-      struct dtsec_regs __iomem *regs = dtsec->regs;
+-      u32 event;
+-
+-      if (dtsec->ptp_tsu_enabled) {
+-              event = ioread32be(&regs->tmr_pevent);
+-              event &= ioread32be(&regs->tmr_pemask);
+-
+-              if (event) {
+-                      iowrite32be(event, &regs->tmr_pevent);
+-                      WARN_ON(event & TMR_PEVENT_TSRE);
+-                      dtsec->exception_cb(dtsec->dev_id,
+-                                          FM_MAC_EX_1G_1588_TS_RX_ERR);
+-              }
+-      }
+-}
+-
+-static void free_init_resources(struct fman_mac *dtsec)
+-{
+-      fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+-                           FMAN_INTR_TYPE_ERR);
+-      fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
+-                           FMAN_INTR_TYPE_NORMAL);
+-
+-      /* release the driver's group hash table */
+-      free_hash_table(dtsec->multicast_addr_hash);
+-      dtsec->multicast_addr_hash = NULL;
+-
+-      /* release the driver's individual hash table */
+-      free_hash_table(dtsec->unicast_addr_hash);
+-      dtsec->unicast_addr_hash = NULL;
+-}
+-
+-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
+-{
+-      if (is_init_done(dtsec->dtsec_drv_param))
+-              return -EINVAL;
+-
+-      dtsec->dtsec_drv_param->maximum_frame = new_val;
+-
+-      return 0;
+-}
+-
+-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
+-{
+-      if (is_init_done(dtsec->dtsec_drv_param))
+-              return -EINVAL;
+-
+-      dtsec->dtsec_drv_param->tx_pad_crc = new_val;
+-
+-      return 0;
+-}
+-
+-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+-{
+-      struct dtsec_regs __iomem *regs = dtsec->regs;
+-      u32 tmp;
+-
+-      if (!is_init_done(dtsec->dtsec_drv_param))
+-              return -EINVAL;
+-
+-      /* Enable */
+-      tmp = ioread32be(&regs->maccfg1);
+-      if (mode & COMM_MODE_RX)
+-              tmp |= MACCFG1_RX_EN;
+-      if (mode & COMM_MODE_TX)
+-              tmp |= MACCFG1_TX_EN;
+-
+-      iowrite32be(tmp, &regs->maccfg1);
+-
+-      /* Graceful start - clear the graceful receive stop bit */
+-      if (mode & COMM_MODE_TX)
+-              iowrite32be(ioread32be(&regs->tctrl) & ~DTSEC_TCTRL_GTS,
+-                          &regs->tctrl);
+-      if (mode & COMM_MODE_RX)
+-              iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS,
+-                          &regs->rctrl);
+-
+-      return 0;
+-}
+-
+-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+-{
+-      struct dtsec_regs __iomem *regs = dtsec->regs;
+-      u32 tmp;
+-
+-      if (!is_init_done(dtsec->dtsec_drv_param))
+-              return -EINVAL;
+-
+-      /* Gracefull stop - Assert the graceful transmit stop bit */
+-      if (mode & COMM_MODE_RX) {
+-              tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+-              iowrite32be(tmp, &regs->rctrl);
+-
+-              if (dtsec->fm_rev_info.major == 2)
+-                      usleep_range(100, 200);
+-              else
+-                      udelay(10);
+-      }
+-
+-      if (mode & COMM_MODE_TX) {
+-              if (dtsec->fm_rev_info.major == 2)
+-                      pr_debug("GTS not supported due to DTSEC_A004 errata.\n");
+-              else
+-                      pr_debug("GTS not supported due to DTSEC_A0014 errata.\n");
+-      }
+-
+-      tmp = ioread32be(&regs->maccfg1);
+-      if (mode & COMM_MODE_RX)
+-              tmp &= ~MACCFG1_RX_EN;
+-      if (mode & COMM_MODE_TX)
+-              tmp &= ~MACCFG1_TX_EN;
+-
+-      iowrite32be(tmp, &regs->maccfg1);
+-
+-      return 0;
+-}
+-
+-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+-                            u8 __maybe_unused priority,
+-                            u16 pause_time, u16 __maybe_unused thresh_time)
+-{
+-      struct dtsec_regs __iomem *regs = dtsec->regs;
+-      u32 ptv = 0;
+-
+-      if (!is_init_done(dtsec->dtsec_drv_param))
+-              return -EINVAL;
+-
+-      if (pause_time) {
+-              /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
+-              if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
+-                      pr_warn("pause-time: %d illegal.Should be > 320\n",
+-                              pause_time);
+-                      return -EINVAL;
+-              }
+-
+-              ptv = ioread32be(&regs->ptv);
+-              ptv &= PTV_PTE_MASK;
+-              ptv |= pause_time & PTV_PT_MASK;
+-              iowrite32be(ptv, &regs->ptv);
+-
+-              /* trigger the transmission of a flow-control pause frame */
+-              iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
+-                          &regs->maccfg1);
+-      } else
+-              iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
+-           &n