Merge tag 'mmc-2020-10-14' of https://gitlab.denx.de/u-boot/custodians/u-boot-mmc

- fsl_esdhc_imx cleanup
- Do not send CMD13 if send_status is 0.
- Add reinit API
- Add mmc HS400 for fsl_esdhc
- Several cleanup for fsl_esdhc
- Add ADMA2 for sdhci
diff --git a/Makefile b/Makefile
index 1de1f38..28c9f31 100644
--- a/Makefile
+++ b/Makefile
@@ -1574,21 +1574,30 @@
 	$(call if_changed,pad_cat)
 
 ifneq ($(CONFIG_ARCH_SOCFPGA),)
+quiet_cmd_gensplx4 = GENSPLX4 $@
+cmd_gensplx4 = cat	spl/u-boot-spl.sfp spl/u-boot-spl.sfp	\
+			spl/u-boot-spl.sfp spl/u-boot-spl.sfp > $@ || rm -f $@
+spl/u-boot-splx4.sfp: spl/u-boot-spl.sfp FORCE
+	$(call if_changed,gensplx4)
+
 quiet_cmd_socboot = SOCBOOT $@
-cmd_socboot = cat	spl/u-boot-spl.sfp spl/u-boot-spl.sfp	\
-			spl/u-boot-spl.sfp spl/u-boot-spl.sfp	\
-			u-boot.img > $@ || rm -f $@
-u-boot-with-spl.sfp: spl/u-boot-spl.sfp u-boot.img FORCE
+cmd_socboot = cat	spl/u-boot-splx4.sfp u-boot.img > $@ || rm -f $@
+u-boot-with-spl.sfp: spl/u-boot-splx4.sfp u-boot.img FORCE
 	$(call if_changed,socboot)
 
-quiet_cmd_socnandboot = SOCNANDBOOT $@
-cmd_socnandboot =  dd if=/dev/zero of=spl/u-boot-spl.pad bs=64 count=1024 ; \
+quiet_cmd_gensplpadx4 = GENSPLPADX4 $@
+cmd_gensplpadx4 =  dd if=/dev/zero of=spl/u-boot-spl.pad bs=64 count=1024 ; \
 		   cat	spl/u-boot-spl.sfp spl/u-boot-spl.pad \
 			spl/u-boot-spl.sfp spl/u-boot-spl.pad \
 			spl/u-boot-spl.sfp spl/u-boot-spl.pad \
-			spl/u-boot-spl.sfp spl/u-boot-spl.pad \
-			u-boot.img > $@ || rm -f $@ spl/u-boot-spl.pad
-u-boot-with-nand-spl.sfp: spl/u-boot-spl.sfp u-boot.img FORCE
+			spl/u-boot-spl.sfp spl/u-boot-spl.pad > $@ || \
+			rm -f $@ spl/u-boot-spl.pad
+u-boot-spl-padx4.sfp: spl/u-boot-spl.sfp FORCE
+	$(call if_changed,gensplpadx4)
+
+quiet_cmd_socnandboot = SOCNANDBOOT $@
+cmd_socnandboot = cat	u-boot-spl-padx4.sfp u-boot.img > $@ || rm -f $@
+u-boot-with-nand-spl.sfp: u-boot-spl-padx4.sfp u-boot.img FORCE
 	$(call if_changed,socnandboot)
 
 endif
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a067a07..b885b9e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -938,6 +938,7 @@
 	select OF_CONTROL
 	select PL01X_SERIAL
 	imply CMD_DM
+	imply DM_RNG
 	imply DM_RTC
 	imply RTC_PL031
 
@@ -994,7 +995,7 @@
 	select SYS_THUMB_BUILD if TARGET_SOCFPGA_GEN5 || TARGET_SOCFPGA_ARRIA10
 	select SYSRESET
 	select SYSRESET_SOCFPGA if TARGET_SOCFPGA_GEN5 || TARGET_SOCFPGA_ARRIA10
-	select SYSRESET_SOCFPGA_S10 if TARGET_SOCFPGA_STRATIX10
+	select SYSRESET_SOCFPGA_SOC64 if TARGET_SOCFPGA_STRATIX10 || TARGET_SOCFPGA_AGILEX
 	imply CMD_DM
 	imply CMD_MTDPARTS
 	imply CRC32_VERIFY
diff --git a/arch/arm/dts/Makefile b/arch/arm/dts/Makefile
index 72b6fe1..b195723 100644
--- a/arch/arm/dts/Makefile
+++ b/arch/arm/dts/Makefile
@@ -202,6 +202,7 @@
 dtb-$(CONFIG_ARCH_MVEBU) +=			\
 	armada-3720-db.dtb			\
 	armada-3720-espressobin.dtb		\
+	armada-3720-espressobin-emmc.dtb	\
 	armada-3720-turris-mox.dtb		\
 	armada-3720-uDPU.dtb			\
 	armada-375-db.dtb			\
@@ -218,6 +219,7 @@
 	armada-8040-clearfog-gt-8k.dtb		\
 	armada-8040-db.dtb			\
 	armada-8040-mcbin.dtb			\
+	armada-8040-puzzle-m801.dtb		\
 	armada-xp-crs305-1g-4s.dtb		\
 	armada-xp-crs305-1g-4s-bit.dtb		\
 	armada-xp-crs326-24g-2s.dtb		\
diff --git a/arch/arm/dts/armada-3720-espressobin-emmc.dts b/arch/arm/dts/armada-3720-espressobin-emmc.dts
new file mode 100644
index 0000000..29ccb6a
--- /dev/null
+++ b/arch/arm/dts/armada-3720-espressobin-emmc.dts
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree file for Globalscale Marvell ESPRESSOBin Board with eMMC
+ * Copyright (C) 2018 Marvell
+ *
+ * Romain Perier <romain.perier@free-electrons.com>
+ * Konstantin Porotchkin <kostap@marvell.com>
+ *
+ */
+/*
+ * Schematic available at http://espressobin.net/wp-content/uploads/2017/08/ESPRESSObin_V5_Schematics.pdf
+ */
+
+/dts-v1/;
+
+#include "armada-3720-espressobin.dtsi"
+
+/ {
+	model = "Globalscale Marvell ESPRESSOBin Board (eMMC)";
+	compatible = "globalscale,espressobin-emmc", "globalscale,espressobin",
+		     "marvell,armada3720", "marvell,armada3710";
+};
+
+/* U11 */
+&sdhci1 {
+	non-removable;
+	bus-width = <8>;
+	mmc-ddr-1_8v;
+	mmc-hs400-1_8v;
+	marvell,xenon-emmc;
+	marvell,xenon-tun-count = <9>;
+	marvell,pad-type = "fixed-1-8v";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc_pins>;
+	status = "okay";
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	mmccard: mmccard@0 {
+		compatible = "mmc-card";
+		reg = <0>;
+	};
+};
diff --git a/arch/arm/dts/armada-3720-espressobin.dts b/arch/arm/dts/armada-3720-espressobin.dts
index 4534f5f..1542d83 100644
--- a/arch/arm/dts/armada-3720-espressobin.dts
+++ b/arch/arm/dts/armada-3720-espressobin.dts
@@ -1,170 +1,20 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
 /*
- * Device Tree file for Marvell Armada 3720 community board
- * (ESPRESSOBin)
+ * Device Tree file for Globalscale Marvell ESPRESSOBin Board
  * Copyright (C) 2016 Marvell
  *
- * Gregory CLEMENT <gregory.clement@free-electrons.com>
- * Konstantin Porotchkin <kostap@marvell.com>
+ * Romain Perier <romain.perier@free-electrons.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Schematic available at http://espressobin.net/wp-content/uploads/2017/08/ESPRESSObin_V5_Schematics.pdf
  */
 
 /dts-v1/;
 
-#include "armada-372x.dtsi"
+#include "armada-3720-espressobin.dtsi"
 
 / {
-	model = "Marvell Armada 3720 Community Board ESPRESSOBin";
-	compatible = "marvell,armada-3720-espressobin", "marvell,armada3720", "marvell,armada3710";
-
-	chosen {
-		stdout-path = "serial0:115200n8";
-	};
-
-	aliases {
-		ethernet0 = &eth0;
-		i2c0 = &i2c0;
-		spi0 = &spi0;
-	};
-
-	memory {
-		device_type = "memory";
-		reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
-	};
-
-	vcc_sd_reg0: regulator@0 {
-		compatible = "regulator-gpio";
-		regulator-name = "vcc_sd0";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-		regulator-type = "voltage";
-		states = <1800000 0x1
-			  3300000 0x0>;
-		gpios = <&gpionb 4 GPIO_ACTIVE_HIGH>;
-	};
-};
-
-&comphy {
-	max-lanes = <3>;
-	phy0 {
-		phy-type = <PHY_TYPE_USB3_HOST0>;
-		phy-speed = <PHY_SPEED_5G>;
-	};
-
-	phy1 {
-		phy-type = <PHY_TYPE_PEX0>;
-		phy-speed = <PHY_SPEED_2_5G>;
-	};
-
-	phy2 {
-		phy-type = <PHY_TYPE_SATA0>;
-		phy-speed = <PHY_SPEED_5G>;
-	};
-};
-
-&eth0 {
-	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&rgmii_pins>, <&smi_pins>;
-	phy-mode = "rgmii";
-	phy_addr = <0x1>;
-	fixed-link {
-		speed = <1000>;
-		full-duplex;
-	};
-};
-
-&i2c0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c1_pins>;
-	status = "okay";
-};
-
-/* CON3 */
-&sata {
-	status = "okay";
-};
-
-&sdhci0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&sdio_pins>;
-	bus-width = <4>;
-	cd-gpios = <&gpionb 3 GPIO_ACTIVE_LOW>;
-	vqmmc-supply = <&vcc_sd_reg0>;
-	status = "okay";
-};
-
-&spi0 {
-	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&spi_quad_pins>;
-
-	spi-flash@0 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "st,m25p128", "jedec,spi-nor";
-		reg = <0>; /* Chip select 0 */
-		spi-max-frequency = <50000000>;
-		m25p,fast-read;
-	};
-};
-
-/* Exported on the micro USB connector CON32 through an FTDI */
-&uart0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart1_pins>;
-	status = "okay";
-};
-
-/* CON29 */
-&usb2 {
-	status = "okay";
-};
-
-/* CON31 */
-&usb3 {
-	status = "okay";
-};
-
-&pcie0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&pcie_pins>;
-	reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
-	status = "okay";
+	model = "Globalscale Marvell ESPRESSOBin Board";
+	compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
 };
diff --git a/arch/arm/dts/armada-3720-espressobin.dtsi b/arch/arm/dts/armada-3720-espressobin.dtsi
new file mode 100644
index 0000000..05dec89
--- /dev/null
+++ b/arch/arm/dts/armada-3720-espressobin.dtsi
@@ -0,0 +1,167 @@
+/*
+ * Device Tree file for Marvell Armada 3720 community board
+ * (ESPRESSOBin)
+ * Copyright (C) 2016 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Konstantin Porotchkin <kostap@marvell.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "armada-372x.dtsi"
+
+/ {
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	aliases {
+		ethernet0 = &eth0;
+		i2c0 = &i2c0;
+		spi0 = &spi0;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
+	};
+
+	vcc_sd_reg0: regulator@0 {
+		compatible = "regulator-gpio";
+		regulator-name = "vcc_sd0";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-type = "voltage";
+		states = <1800000 0x1
+			  3300000 0x0>;
+		gpios = <&gpionb 4 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&comphy {
+	max-lanes = <3>;
+	phy0 {
+		phy-type = <PHY_TYPE_USB3_HOST0>;
+		phy-speed = <PHY_SPEED_5G>;
+	};
+
+	phy1 {
+		phy-type = <PHY_TYPE_PEX0>;
+		phy-speed = <PHY_SPEED_2_5G>;
+	};
+
+	phy2 {
+		phy-type = <PHY_TYPE_SATA0>;
+		phy-speed = <PHY_SPEED_5G>;
+	};
+};
+
+&eth0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>, <&smi_pins>;
+	phy-mode = "rgmii";
+	phy_addr = <0x1>;
+	fixed-link {
+		speed = <1000>;
+		full-duplex;
+	};
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
+	status = "okay";
+};
+
+/* CON3 */
+&sata {
+	status = "okay";
+};
+
+&sdhci0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdio_pins>;
+	bus-width = <4>;
+	cd-gpios = <&gpionb 3 GPIO_ACTIVE_LOW>;
+	vqmmc-supply = <&vcc_sd_reg0>;
+	status = "okay";
+};
+
+&spi0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi_quad_pins>;
+
+	spi-flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "st,m25p128", "jedec,spi-nor";
+		reg = <0>; /* Chip select 0 */
+		spi-max-frequency = <50000000>;
+		m25p,fast-read;
+	};
+};
+
+/* Exported on the micro USB connector CON32 through an FTDI */
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>;
+	status = "okay";
+};
+
+/* CON29 */
+&usb2 {
+	status = "okay";
+};
+
+/* CON31 */
+&usb3 {
+	status = "okay";
+};
+
+&pcie0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_pins>;
+	reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
diff --git a/arch/arm/dts/armada-8040-puzzle-m801.dts b/arch/arm/dts/armada-8040-puzzle-m801.dts
new file mode 100644
index 0000000..58edb5b
--- /dev/null
+++ b/arch/arm/dts/armada-8040-puzzle-m801.dts
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Marvell International Ltd.
+ * Copyright (C) 2020 Sartura Ltd.
+ */
+
+#include "armada-8040.dtsi" /* include SoC device tree */
+
+/ {
+	model = "iEi-Puzzle-M801";
+	compatible = "marvell,armada8040-puzzle-m801",
+		     "marvell,armada8040";
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &cpm_i2c0;
+		i2c2 = &cpm_i2c1;
+		i2c3 = &i2c_switch;
+		spi0 = &spi0;
+		gpio0 = &ap_gpio0;
+		gpio1 = &cpm_gpio0;
+		gpio2 = &cpm_gpio1;
+		gpio3 = &sfpplus_gpio;
+	};
+
+	memory@00000000 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	simple-bus {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		reg_usb3h0_vbus: usb3-vbus0 {
+			compatible = "regulator-fixed";
+			pinctrl-names = "default";
+			pinctrl-0 = <&cpm_xhci_vbus_pins>;
+			regulator-name = "reg-usb3h0-vbus";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			startup-delay-us = <500000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&cpm_gpio1 15 GPIO_ACTIVE_HIGH>; /* GPIO[47] */
+		};
+	};
+};
+
+&i2c0 {
+	status = "okay";
+	clock-frequency = <100000>;
+
+	rtc@32 {
+		compatible = "epson,rx8010";
+		reg = <0x32>;
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&ap_pinctl {
+	/*
+	 * MPP Bus:
+	 * AP SPI0 [0-3]
+	 * AP I2C [4-5]
+	 * AP GPIO [6]
+	 * AP UART 1 RX/TX [7-8]
+	 * AP GPIO [9-10]
+	 * AP GPIO [12]
+	 * UART0 [11,19]
+	 */
+		  /* 0 1 2 3 4 5 6 7 8 9 */
+	pin-func = < 3 3 3 3 3 3 3 3 3 0
+		     0 3 0 0 0 0 0 0 0 3 >;
+};
+
+&cpm_pinctl {
+	/*
+	 * MPP Bus:
+	 * [0-31] = 0xff: Keep default CP0_shared_pins:
+	 * [11] CLKOUT_MPP_11 (out)
+	 * [23] LINK_RD_IN_CP2CP (in)
+	 * [25] CLKOUT_MPP_25 (out)
+	 * [29] AVS_FB_IN_CP2CP (in)
+	 * [32,34] SMI
+	 * [33]    MSS power down
+	 * [35-38] CP0 I2C1 and I2C0
+	 * [39] MSS CKE Enable
+	 * [40,41] CP0 UART1 TX/RX
+	 * [42,43] XSMI (controls two 10G phys)
+	 * [47] USB VBUS EN
+	 * [48] FAN PWM
+	 * [49] 10G port 1 interrupt
+	 * [50] 10G port 0 interrupt
+	 * [51] 2.5G SFP TX fault
+	 * [52] PCIe reset out
+	 * [53] 2.5G SFP mode
+	 * [54] 2.5G SFP LOS
+	 * [55] Micro SD card detect
+	 * [56-61] Micro SD
+	 * [62] CP1 SFI SFP FAULT
+	 */
+		/*   0    1    2    3    4    5    6    7    8    9 */
+	pin-func = < 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
+		     0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
+		     0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
+		     0xff 0    7    0xa  7    2    2    2    2    0xa
+		     7    7    8    8    0    0    0    0    0    0
+		     0    0    0    0    0    0    0xe  0xe  0xe  0xe
+		     0xe  0xe  0 >;
+
+	cpm_xhci_vbus_pins: cpm-xhci-vbus-pins {
+		marvell,pins = < 47 >;
+		marvell,function = <0>;
+	};
+
+	cpm_pcie_reset_pins: cpm-pcie-reset-pins {
+		marvell,pins = < 52 >;
+		marvell,function = <0>;
+	};
+};
+
+&cpm_sdhci0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&cpm_sdhci_pins>;
+	bus-width= <4>;
+	status = "okay";
+};
+
+&cpm_pcie0 {
+	num-lanes = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&cpm_pcie_reset_pins>;
+	marvell,reset-gpio = <&cpm_gpio1 20 GPIO_ACTIVE_LOW>; /* GPIO[52] */
+	status = "okay";
+};
+
+&cpm_i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&cpm_i2c0_pins>;
+	status = "okay";
+	clock-frequency = <100000>;
+
+	sfpplus_gpio: gpio@21 {
+		compatible = "nxp,pca9555";
+		reg = <0x21>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+};
+
+&cpm_i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&cpm_i2c1_pins>;
+	status = "okay";
+	clock-frequency = <100000>;
+
+	i2c_switch: i2c-switch@70 {
+		compatible = "nxp,pca9544";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x70>;
+	};
+};
+
+&cpm_sata0 {
+	status = "okay";
+};
+
+&cpm_ethernet {
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&cpm_mdio {
+	status = "okay";
+	cpm_ge_phy0: ethernet-phy@1 {
+		reg = <0>;
+	};
+
+	cpm_ge_phy1: ethernet-phy@2 {
+		reg = <1>;
+	};
+};
+
+&cpm_eth0 {
+	status = "okay";
+	phy-mode = "sfi";
+};
+
+&cpm_eth1 {
+	status = "okay";
+	phy-mode = "sgmii";
+	phy = <&cpm_ge_phy0>;
+};
+
+&cpm_eth2 {
+	status = "okay";
+	phy-mode = "sgmii";
+	phy = <&cpm_ge_phy1>;
+};
+
+&cpm_comphy {
+	/*
+	 * CP0 Serdes Configuration:
+	 * Lane 0: PCIe0 (x1)
+	 * Lane 1: SGMII2
+	 * Lane 2: SATA0
+	 * Lane 3: SGMII1
+	 * Lane 4: SFI (10G)
+	 * Lane 5: SATA1
+	 */
+	phy0 {
+		phy-type = <PHY_TYPE_PEX0>;
+	};
+	phy1 {
+		phy-type = <PHY_TYPE_SGMII2>;
+		phy-speed = <PHY_SPEED_1_25G>;
+	};
+	phy2 {
+		phy-type = <PHY_TYPE_SATA0>;
+	};
+	phy3 {
+		phy-type = <PHY_TYPE_SGMII1>;
+		phy-speed = <PHY_SPEED_1_25G>;
+	};
+	phy4 {
+		phy-type = <PHY_TYPE_SFI>;
+	};
+	phy5 {
+		phy-type = <PHY_TYPE_SATA1>;
+	};
+};
+
+&cps_mdio {
+	status = "okay";
+	cps_ge_phy0: ethernet-phy@3 {
+		reg = <1>;
+	};
+
+	cps_ge_phy1: ethernet-phy@4 {
+		reg = <0>;
+	};
+};
+
+&cps_pcie0 {
+	num-lanes = <2>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&cps_usb3_0 {
+	vbus-supply = <&reg_usb3h0_vbus>;
+	status = "okay";
+};
+
+&cps_utmi0 {
+	status = "okay";
+};
+
+&cps_ethernet {
+	status = "okay";
+};
+
+&cps_eth0 {
+	status = "okay";
+	phy-mode = "sfi";
+};
+
+&cps_eth1 {
+	status = "okay";
+	phy = <&cps_ge_phy0>;
+	phy-mode = "sgmii";
+};
+
+&cps_eth2 {
+	status = "okay";
+	phy = <&cps_ge_phy1>;
+	phy-mode = "sgmii";
+};
+
+&cps_pinctl {
+	/*
+	 * MPP Bus:
+	 * [0-5] TDM
+	 * [6,7] CP1_UART 0
+	 * [8]   CP1 10G SFP LOS
+	 * [9]   CP1 10G PHY RESET
+	 * [10]  CP1 10G SFP TX Disable
+	 * [11]  CP1 10G SFP Mode
+	 * [12]  SPI1 CS1n
+	 * [13]  SPI1 MISO (TDM and SPI ROM shared)
+	 * [14]  SPI1 CS0n
+	 * [15]  SPI1 MOSI (TDM and SPI ROM shared)
+	 * [16]  SPI1 CLK (TDM and SPI ROM shared)
+	 * [24]  CP1 2.5G SFP TX Disable
+	 * [26]  CP0 10G SFP TX Fault
+	 * [27]  CP0 10G SFP Mode
+	 * [28]  CP0 10G SFP LOS
+	 * [29]  CP0 10G SFP TX Disable
+	 * [30]  USB Over current indication
+	 * [31]  10G Port 0 phy reset
+	 * [32-62] = 0xff: Keep default CP1_shared_pins:
+	 */
+		/*   0    1    2    3    4    5    6    7    8    9 */
+	pin-func = < 0x4  0x4  0x4  0x4  0x4  0x4  0x8  0x8  0x0  0x0
+		     0x0  0x0  0x3  0x3  0x3  0x3  0x3  0xff 0xff 0xff
+		     0xff 0xff 0xff 0xff 0x0  0xff 0x0  0x0  0x0 0x0
+		     0x0  0x0  0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
+		     0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
+		     0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
+		     0xff 0xff 0xff>;
+};
+
+&spi0 {
+	status = "okay";
+
+	spi-flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <10000000>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			partition@u-boot {
+				reg = <0x00000000 0x001f0000>;
+				label = "u-boot";
+			};
+			partition@u-boot-env {
+				reg = <0x001f0000 0x00010000>;
+				label = "u-boot-env";
+			};
+			partition@ubi1 {
+				reg = <0x00200000 0x03f00000>;
+				label = "ubi1";
+			};
+			partition@ubi2 {
+				reg = <0x04100000 0x03f00000>;
+				label = "ubi2";
+			};
+		};
+	};
+};
+
+&cps_comphy {
+	/*
+	 * CP1 Serdes Configuration:
+	 * Lane 0: PCIe0 (x2)
+	 * Lane 1: PCIe0 (x2)
+	 * Lane 2: USB HOST 0
+	 * Lane 3: SGMII1
+	 * Lane 4: SFI (10G)
+	 * Lane 5: SGMII2
+	 */
+	phy0 {
+		phy-type = <PHY_TYPE_PEX0>;
+	};
+	phy1 {
+		phy-type = <PHY_TYPE_PEX0>;
+	};
+	phy2 {
+		phy-type = <PHY_TYPE_USB3_HOST0>;
+	};
+	phy3 {
+		phy-type = <PHY_TYPE_SGMII1>;
+		phy-speed = <PHY_SPEED_1_25G>;
+	};
+	phy4 {
+		phy-type = <PHY_TYPE_SFI>;
+	};
+	phy5 {
+		phy-type = <PHY_TYPE_SGMII2>;
+		phy-speed = <PHY_SPEED_1_25G>;
+	};
+};
diff --git a/arch/arm/dts/socfpga_agilex_socdk-u-boot.dtsi b/arch/arm/dts/socfpga_agilex_socdk-u-boot.dtsi
index debeb8b..6cac36a 100644
--- a/arch/arm/dts/socfpga_agilex_socdk-u-boot.dtsi
+++ b/arch/arm/dts/socfpga_agilex_socdk-u-boot.dtsi
@@ -40,3 +40,7 @@
 &qspi {
 	status = "okay";
 };
+
+&watchdog0 {
+	u-boot,dm-pre-reloc;
+};
diff --git a/arch/arm/dts/socfpga_arria10-handoff.dtsi b/arch/arm/dts/socfpga_arria10-handoff.dtsi
new file mode 100644
index 0000000..c083716
--- /dev/null
+++ b/arch/arm/dts/socfpga_arria10-handoff.dtsi
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0+ OR X11
+
+/ {
+	clocks {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		u-boot,dm-pre-reloc;
+
+		altera_arria10_hps_eosc1: altera_arria10_hps_eosc1 {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <EOSC1_CLK_HZ>;
+			clock-output-names = "altera_arria10_hps_eosc1-clk";
+			u-boot,dm-pre-reloc;
+		};
+
+		altera_arria10_hps_cb_intosc_ls: altera_arria10_hps_cb_intosc_ls {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <CB_INTOSC_LS_CLK_HZ>;
+			clock-output-names = "altera_arria10_hps_cb_intosc_ls-clk";
+			u-boot,dm-pre-reloc;
+		};
+
+		/* Clock source: altera_arria10_hps_f2h_free */
+		altera_arria10_hps_f2h_free: altera_arria10_hps_f2h_free {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <F2H_FREE_CLK_HZ>;
+			clock-output-names = "altera_arria10_hps_f2h_free-clk";
+			u-boot,dm-pre-reloc;
+		};
+	};
+
+	clkmgr@0xffd04000 {
+		compatible = "altr,socfpga-a10-clk-init";
+		reg = <0xffd04000 0x00000200>;
+		reg-names = "soc_clock_manager_OCP_SLV";
+		u-boot,dm-pre-reloc;
+
+		mainpll {
+			vco0-psrc = <MAINPLLGRP_VCO0_PSRC>;
+			vco1-denom = <MAINPLLGRP_VCO1_DENOM>;
+			vco1-numer = <MAINPLLGRP_VCO1_NUMER>;
+			mpuclk-cnt = <MAINPLLGRP_MPUCLK_CNT>;
+			mpuclk-src = <MAINPLLGRP_MPUCLK_SRC>;
+			nocclk-cnt = <MAINPLLGRP_NOCCLK_CNT>;
+			nocclk-src = <MAINPLLGRP_NOCCLK_SRC>;
+			cntr2clk-cnt = <MAINPLLGRP_CNTR2CLK_CNT>;
+			cntr3clk-cnt = <MAINPLLGRP_CNTR3CLK_CNT>;
+			cntr4clk-cnt = <MAINPLLGRP_CNTR4CLK_CNT>;
+			cntr5clk-cnt = <MAINPLLGRP_CNTR5CLK_CNT>;
+			cntr6clk-cnt = <MAINPLLGRP_CNTR6CLK_CNT>;
+			cntr7clk-cnt = <MAINPLLGRP_CNTR7CLK_CNT>;
+			cntr7clk-src = <MAINPLLGRP_CNTR7CLK_SRC>;
+			cntr8clk-cnt = <MAINPLLGRP_CNTR8CLK_CNT>;
+			cntr9clk-cnt = <MAINPLLGRP_CNTR9CLK_CNT>;
+			cntr9clk-src = <MAINPLLGRP_CNTR9CLK_SRC>;
+			cntr15clk-cnt = <MAINPLLGRP_CNTR15CLK_CNT>;
+			nocdiv-l4mainclk = <MAINPLLGRP_NOCDIV_L4MAINCLK>;
+			nocdiv-l4mpclk = <MAINPLLGRP_NOCDIV_L4MPCLK>;
+			nocdiv-l4spclk = <MAINPLLGRP_NOCDIV_L4SPCLK>;
+			nocdiv-csatclk = <MAINPLLGRP_NOCDIV_CSATCLK>;
+			nocdiv-cstraceclk = <MAINPLLGRP_NOCDIV_CSTRACECLK>;
+			nocdiv-cspdbgclk = <MAINPLLGRP_NOCDIV_CSPDBGCLK>;
+			u-boot,dm-pre-reloc;
+		};
+
+		perpll {
+			vco0-psrc = <PERPLLGRP_VCO0_PSRC>;
+			vco1-denom = <PERPLLGRP_VCO1_DENOM>;
+			vco1-numer = <PERPLLGRP_VCO1_NUMER>;
+			cntr2clk-cnt = <PERPLLGRP_CNTR2CLK_CNT>;
+			cntr2clk-src = <PERPLLGRP_CNTR2CLK_SRC>;
+			cntr3clk-cnt = <PERPLLGRP_CNTR3CLK_CNT>;
+			cntr3clk-src = <PERPLLGRP_CNTR3CLK_SRC>;
+			cntr4clk-cnt = <PERPLLGRP_CNTR4CLK_CNT>;
+			cntr4clk-src = <PERPLLGRP_CNTR4CLK_SRC>;
+			cntr5clk-cnt = <PERPLLGRP_CNTR5CLK_CNT>;
+			cntr5clk-src = <PERPLLGRP_CNTR5CLK_SRC>;
+			cntr6clk-cnt = <PERPLLGRP_CNTR6CLK_CNT>;
+			cntr6clk-src = <PERPLLGRP_CNTR6CLK_SRC>;
+			cntr7clk-cnt = <PERPLLGRP_CNTR7CLK_CNT>;
+			cntr8clk-cnt = <PERPLLGRP_CNTR8CLK_CNT>;
+			cntr8clk-src = <PERPLLGRP_CNTR8CLK_SRC>;
+			cntr9clk-cnt = <PERPLLGRP_CNTR9CLK_CNT>;
+			emacctl-emac0sel = <PERPLLGRP_EMACCTL_EMAC0SEL>;
+			emacctl-emac1sel = <PERPLLGRP_EMACCTL_EMAC1SEL>;
+			emacctl-emac2sel = <PERPLLGRP_EMACCTL_EMAC2SEL>;
+			gpiodiv-gpiodbclk = <PERPLLGRP_GPIODIV_GPIODBCLK>;
+			u-boot,dm-pre-reloc;
+		};
+
+		alteragrp {
+			nocclk = <ALTERAGRP_NOCCLK>;
+			mpuclk = <ALTERAGRP_MPUCLK>;
+			u-boot,dm-pre-reloc;
+		};
+	};
+
+	i_io48_pin_mux: pinmux@0xffd07000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "pinctrl-single";
+		reg = <0xffd07000 0x00000800>;
+		reg-names = "soc_3v_io48_pin_mux_OCP_SLV";
+		u-boot,dm-pre-reloc;
+
+		shared {
+			reg = <0xffd07000 0x00000200>;
+			pinctrl-single,register-width = <32>;
+			pinctrl-single,function-mask = <0x0000000f>;
+			pinctrl-single,pins =
+				<0x00000000 PINMUX_SHARED_IO_Q1_1_SEL>,
+				<0x00000004 PINMUX_SHARED_IO_Q1_2_SEL>,
+				<0x00000008 PINMUX_SHARED_IO_Q1_3_SEL>,
+				<0x0000000c PINMUX_SHARED_IO_Q1_4_SEL>,
+				<0x00000010 PINMUX_SHARED_IO_Q1_5_SEL>,
+				<0x00000014 PINMUX_SHARED_IO_Q1_6_SEL>,
+				<0x00000018 PINMUX_SHARED_IO_Q1_7_SEL>,
+				<0x0000001c PINMUX_SHARED_IO_Q1_8_SEL>,
+				<0x00000020 PINMUX_SHARED_IO_Q1_9_SEL>,
+				<0x00000024 PINMUX_SHARED_IO_Q1_10_SEL>,
+				<0x00000028 PINMUX_SHARED_IO_Q1_11_SEL>,
+				<0x0000002c PINMUX_SHARED_IO_Q1_12_SEL>,
+				<0x00000030 PINMUX_SHARED_IO_Q2_1_SEL>,
+				<0x00000034 PINMUX_SHARED_IO_Q2_2_SEL>,
+				<0x00000038 PINMUX_SHARED_IO_Q2_3_SEL>,
+				<0x0000003c PINMUX_SHARED_IO_Q2_4_SEL>,
+				<0x00000040 PINMUX_SHARED_IO_Q2_5_SEL>,
+				<0x00000044 PINMUX_SHARED_IO_Q2_6_SEL>,
+				<0x00000048 PINMUX_SHARED_IO_Q2_7_SEL>,
+				<0x0000004c PINMUX_SHARED_IO_Q2_8_SEL>,
+				<0x00000050 PINMUX_SHARED_IO_Q2_9_SEL>,
+				<0x00000054 PINMUX_SHARED_IO_Q2_10_SEL>,
+				<0x00000058 PINMUX_SHARED_IO_Q2_11_SEL>,
+				<0x0000005c PINMUX_SHARED_IO_Q2_12_SEL>,
+				<0x00000060 PINMUX_SHARED_IO_Q3_1_SEL>,
+				<0x00000064 PINMUX_SHARED_IO_Q3_2_SEL>,
+				<0x00000068 PINMUX_SHARED_IO_Q3_3_SEL>,
+				<0x0000006c PINMUX_SHARED_IO_Q3_4_SEL>,
+				<0x00000070 PINMUX_SHARED_IO_Q3_5_SEL>,
+				<0x00000074 PINMUX_SHARED_IO_Q3_6_SEL>,
+				<0x00000078 PINMUX_SHARED_IO_Q3_7_SEL>,
+				<0x0000007c PINMUX_SHARED_IO_Q3_8_SEL>,
+				<0x00000080 PINMUX_SHARED_IO_Q3_9_SEL>,
+				<0x00000084 PINMUX_SHARED_IO_Q3_10_SEL>,
+				<0x00000088 PINMUX_SHARED_IO_Q3_11_SEL>,
+				<0x0000008c PINMUX_SHARED_IO_Q3_12_SEL>,
+				<0x00000090 PINMUX_SHARED_IO_Q4_1_SEL>,
+				<0x00000094 PINMUX_SHARED_IO_Q4_2_SEL>,
+				<0x00000098 PINMUX_SHARED_IO_Q4_3_SEL>,
+				<0x0000009c PINMUX_SHARED_IO_Q4_4_SEL>,
+				<0x000000a0 PINMUX_SHARED_IO_Q4_5_SEL>,
+				<0x000000a4 PINMUX_SHARED_IO_Q4_6_SEL>,
+				<0x000000a8 PINMUX_SHARED_IO_Q4_7_SEL>,
+				<0x000000ac PINMUX_SHARED_IO_Q4_8_SEL>,
+				<0x000000b0 PINMUX_SHARED_IO_Q4_9_SEL>,
+				<0x000000b4 PINMUX_SHARED_IO_Q4_10_SEL>,
+				<0x000000b8 PINMUX_SHARED_IO_Q4_11_SEL>,
+				<0x000000bc PINMUX_SHARED_IO_Q4_12_SEL>;
+			u-boot,dm-pre-reloc;
+		};
+
+		dedicated {
+			reg = <0xffd07200 0x00000200>;
+			pinctrl-single,register-width = <32>;
+			pinctrl-single,function-mask = <0x0000000f>;
+			pinctrl-single,pins =
+				<0x0000000c PINMUX_DEDICATED_IO_4_SEL>,
+				<0x00000010 PINMUX_DEDICATED_IO_5_SEL>,
+				<0x00000014 PINMUX_DEDICATED_IO_6_SEL>,
+				<0x00000018 PINMUX_DEDICATED_IO_7_SEL>,
+				<0x0000001c PINMUX_DEDICATED_IO_8_SEL>,
+				<0x00000020 PINMUX_DEDICATED_IO_9_SEL>,
+				<0x00000024 PINMUX_DEDICATED_IO_10_SEL>,
+				<0x00000028 PINMUX_DEDICATED_IO_11_SEL>,
+				<0x0000002c PINMUX_DEDICATED_IO_12_SEL>,
+				<0x00000030 PINMUX_DEDICATED_IO_13_SEL>,
+				<0x00000034 PINMUX_DEDICATED_IO_14_SEL>,
+				<0x00000038 PINMUX_DEDICATED_IO_15_SEL>,
+				<0x0000003c PINMUX_DEDICATED_IO_16_SEL>,
+				<0x00000040 PINMUX_DEDICATED_IO_17_SEL>;
+			u-boot,dm-pre-reloc;
+		};
+
+		dedicated_cfg {
+			reg = <0xffd07200 0x00000200>;
+			pinctrl-single,register-width = <32>;
+			pinctrl-single,function-mask = <0x003f3f3f>;
+			pinctrl-single,pins =
+				<0x00000100 CONFIG_IO_BANK_VSEL>,
+				<0x00000104 CONFIG_IO_MACRO (CONFIG_IO_1)>,
+				<0x00000108 CONFIG_IO_MACRO (CONFIG_IO_2)>,
+				<0x0000010c CONFIG_IO_MACRO (CONFIG_IO_3)>,
+				<0x00000110 CONFIG_IO_MACRO (CONFIG_IO_4)>,
+				<0x00000114 CONFIG_IO_MACRO (CONFIG_IO_5)>,
+				<0x00000118 CONFIG_IO_MACRO (CONFIG_IO_6)>,
+				<0x0000011c CONFIG_IO_MACRO (CONFIG_IO_7)>,
+				<0x00000120 CONFIG_IO_MACRO (CONFIG_IO_8)>,
+				<0x00000124 CONFIG_IO_MACRO (CONFIG_IO_9)>,
+				<0x00000128 CONFIG_IO_MACRO (CONFIG_IO_10)>,
+				<0x0000012c CONFIG_IO_MACRO (CONFIG_IO_11)>,
+				<0x00000130 CONFIG_IO_MACRO (CONFIG_IO_12)>,
+				<0x00000134 CONFIG_IO_MACRO (CONFIG_IO_13)>,
+				<0x00000138 CONFIG_IO_MACRO (CONFIG_IO_14)>,
+				<0x0000013c CONFIG_IO_MACRO (CONFIG_IO_15)>,
+				<0x00000140 CONFIG_IO_MACRO (CONFIG_IO_16)>,
+				<0x00000144 CONFIG_IO_MACRO (CONFIG_IO_17)>;
+			u-boot,dm-pre-reloc;
+		};
+
+		fpga {
+			reg = <0xffd07400 0x00000100>;
+			pinctrl-single,register-width = <32>;
+			pinctrl-single,function-mask = <0x00000001>;
+			pinctrl-single,pins =
+				<0x00000000 PINMUX_RGMII0_USEFPGA_SEL>,
+				<0x00000004 PINMUX_RGMII1_USEFPGA_SEL>,
+				<0x00000008 PINMUX_RGMII2_USEFPGA_SEL>,
+				<0x0000000c PINMUX_I2C0_USEFPGA_SEL>,
+				<0x00000010 PINMUX_I2C1_USEFPGA_SEL>,
+				<0x00000014 PINMUX_I2CEMAC0_USEFPGA_SEL>,
+				<0x00000018 PINMUX_I2CEMAC1_USEFPGA_SEL>,
+				<0x0000001c PINMUX_I2CEMAC2_USEFPGA_SEL>,
+				<0x00000020 PINMUX_NAND_USEFPGA_SEL>,
+				<0x00000024 PINMUX_QSPI_USEFPGA_SEL>,
+				<0x00000028 PINMUX_SDMMC_USEFPGA_SEL>,
+				<0x0000002c PINMUX_SPIM0_USEFPGA_SEL>,
+				<0x00000030 PINMUX_SPIM1_USEFPGA_SEL>,
+				<0x00000034 PINMUX_SPIS0_USEFPGA_SEL>,
+				<0x00000038 PINMUX_SPIS1_USEFPGA_SEL>,
+				<0x0000003c PINMUX_UART0_USEFPGA_SEL>,
+				<0x00000040 PINMUX_UART1_USEFPGA_SEL>;
+			u-boot,dm-pre-reloc;
+		};
+	};
+
+	i_noc: noc@0xffd10000 {
+		compatible = "altr,socfpga-a10-noc";
+		reg = <0xffd10000 0x00008000>;
+		reg-names = "mpu_m0";
+		u-boot,dm-pre-reloc;
+
+		firewall {
+			mpu0 = <0x00000000 0x0000ffff>;
+			l3-0 = <0x00000000 0x0000ffff>;
+			fpga2sdram0-0 = <0x00000000 0x0000ffff>;
+			fpga2sdram1-0 = <0x00000000 0x0000ffff>;
+			fpga2sdram2-0 = <0x00000000 0x0000ffff>;
+			u-boot,dm-pre-reloc;
+		};
+	};
+
+	hps_fpgabridge0: fpgabridge@0 {
+		compatible = "altr,socfpga-hps2fpga-bridge";
+		init-val = <H2F_AXI_MASTER>;
+		u-boot,dm-pre-reloc;
+	};
+
+	hps_fpgabridge1: fpgabridge@1 {
+		compatible = "altr,socfpga-lwhps2fpga-bridge";
+		init-val = <LWH2F_AXI_MASTER>;
+		u-boot,dm-pre-reloc;
+	};
+
+	hps_fpgabridge2: fpgabridge@2 {
+		compatible = "altr,socfpga-fpga2hps-bridge";
+		init-val = <F2H_AXI_SLAVE>;
+		u-boot,dm-pre-reloc;
+	};
+
+	hps_fpgabridge3: fpgabridge@3 {
+		compatible = "altr,socfpga-fpga2sdram0-bridge";
+		init-val = <F2SDRAM0_AXI_SLAVE>;
+		u-boot,dm-pre-reloc;
+	};
+
+	hps_fpgabridge4: fpgabridge@4 {
+		compatible = "altr,socfpga-fpga2sdram1-bridge";
+		init-val = <F2SDRAM1_AXI_SLAVE>;
+		u-boot,dm-pre-reloc;
+	};
+
+	hps_fpgabridge5: fpgabridge@5 {
+		compatible = "altr,socfpga-fpga2sdram2-bridge";
+		init-val = <F2SDRAM2_AXI_SLAVE>;
+		u-boot,dm-pre-reloc;
+	};
+};
diff --git a/arch/arm/dts/socfpga_arria10_socdk-u-boot.dtsi b/arch/arm/dts/socfpga_arria10_socdk-u-boot.dtsi
index 58cd497..22e614d 100644
--- a/arch/arm/dts/socfpga_arria10_socdk-u-boot.dtsi
+++ b/arch/arm/dts/socfpga_arria10_socdk-u-boot.dtsi
@@ -15,3 +15,7 @@
 &uart1 {
 	u-boot,dm-pre-reloc;
 };
+
+&watchdog1 {
+	u-boot,dm-pre-reloc;
+};
diff --git a/arch/arm/dts/socfpga_arria10_socdk_sdmmc-u-boot.dtsi b/arch/arm/dts/socfpga_arria10_socdk_sdmmc-u-boot.dtsi
index c229e82..298c337 100644
--- a/arch/arm/dts/socfpga_arria10_socdk_sdmmc-u-boot.dtsi
+++ b/arch/arm/dts/socfpga_arria10_socdk_sdmmc-u-boot.dtsi
@@ -3,7 +3,8 @@
  * Copyright (C) 2014-2015, 2020 Intel. All rights reserved.
  */
 
-#include "socfpga_arria10_socdk_sdmmc_handoff.dtsi"
+#include "socfpga_arria10_socdk_sdmmc_handoff.h"
+#include "socfpga_arria10-handoff.dtsi"
 #include "socfpga_arria10_handoff_u-boot.dtsi"
 #include "socfpga_arria10_socdk-u-boot.dtsi"
 
diff --git a/arch/arm/dts/socfpga_arria10_socdk_sdmmc_handoff.dtsi b/arch/arm/dts/socfpga_arria10_socdk_sdmmc_handoff.dtsi
deleted file mode 100644
index 60c4192..0000000
--- a/arch/arm/dts/socfpga_arria10_socdk_sdmmc_handoff.dtsi
+++ /dev/null
@@ -1,329 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+ OR X11
-/*
- * Copyright (C) 2016-2017 Intel Corporation
- *
- *<auto-generated>
- *	This code was generated by a tool based on
- *	handoffs from both Qsys and Quartus.
- *
- *	Changes to this file may be lost if
- *	the code is regenerated.
- *</auto-generated>
- */
-
-/ {
-	#address-cells = <1>;
-	#size-cells = <1>;
-	model = "SOCFPGA Arria10 Dev Kit";	/* Bootloader setting: uboot.model */
-
-	/* Clock sources */
-	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-
-		/* Clock source: altera_arria10_hps_eosc1 */
-		altera_arria10_hps_eosc1: altera_arria10_hps_eosc1 {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <25000000>;
-			clock-output-names = "altera_arria10_hps_eosc1-clk";
-		};
-
-		/* Clock source: altera_arria10_hps_cb_intosc_ls */
-		altera_arria10_hps_cb_intosc_ls: altera_arria10_hps_cb_intosc_ls {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <60000000>;
-			clock-output-names = "altera_arria10_hps_cb_intosc_ls-clk";
-		};
-
-		/* Clock source: altera_arria10_hps_f2h_free */
-		altera_arria10_hps_f2h_free: altera_arria10_hps_f2h_free {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <200000000>;
-			clock-output-names = "altera_arria10_hps_f2h_free-clk";
-		};
-	};
-
-	/*
-	 * Driver: altera_arria10_soc_clock_manager_arria10_uboot_driver
-	 * Version: 1.0
-	 * Binding: device
-	 */
-	i_clk_mgr: clock_manager@0xffd04000 {
-		compatible = "altr,socfpga-a10-clk-init";
-		reg = <0xffd04000 0x00000200>;
-		reg-names = "soc_clock_manager_OCP_SLV";
-
-		/* Address Block: soc_clock_manager_OCP_SLV.i_clk_mgr_mainpllgrp */
-		mainpll {
-			vco0-psrc = <0>;	/* Field: vco0.psrc */
-			vco1-denom = <1>;	/* Field: vco1.denom */
-			vco1-numer = <191>;	/* Field: vco1.numer */
-			mpuclk-cnt = <0>;	/* Field: mpuclk.cnt */
-			mpuclk-src = <0>;	/* Field: mpuclk.src */
-			nocclk-cnt = <0>;	/* Field: nocclk.cnt */
-			nocclk-src = <0>;	/* Field: nocclk.src */
-			cntr2clk-cnt = <900>;	/* Field: cntr2clk.cnt */
-			cntr3clk-cnt = <900>;	/* Field: cntr3clk.cnt */
-			cntr4clk-cnt = <900>;	/* Field: cntr4clk.cnt */
-			cntr5clk-cnt = <900>;	/* Field: cntr5clk.cnt */
-			cntr6clk-cnt = <900>;	/* Field: cntr6clk.cnt */
-			cntr7clk-cnt = <900>;	/* Field: cntr7clk.cnt */
-			cntr7clk-src = <0>;	/* Field: cntr7clk.src */
-			cntr8clk-cnt = <900>;	/* Field: cntr8clk.cnt */
-			cntr9clk-cnt = <900>;	/* Field: cntr9clk.cnt */
-			cntr9clk-src = <0>;	/* Field: cntr9clk.src */
-			cntr15clk-cnt = <900>;	/* Field: cntr15clk.cnt */
-			nocdiv-l4mainclk = <0>;	/* Field: nocdiv.l4mainclk */
-			nocdiv-l4mpclk = <0>;	/* Field: nocdiv.l4mpclk */
-			nocdiv-l4spclk = <2>;	/* Field: nocdiv.l4spclk */
-			nocdiv-csatclk = <0>;	/* Field: nocdiv.csatclk */
-			nocdiv-cstraceclk = <1>;	/* Field: nocdiv.cstraceclk */
-			nocdiv-cspdbgclk = <1>;	/* Field: nocdiv.cspdbgclk */
-		};
-
-		/* Address Block: soc_clock_manager_OCP_SLV.i_clk_mgr_perpllgrp */
-		perpll {
-			vco0-psrc = <0>;	/* Field: vco0.psrc */
-			vco1-denom = <1>;	/* Field: vco1.denom */
-			vco1-numer = <159>;	/* Field: vco1.numer */
-			cntr2clk-cnt = <7>;	/* Field: cntr2clk.cnt */
-			cntr2clk-src = <1>;	/* Field: cntr2clk.src */
-			cntr3clk-cnt = <900>;	/* Field: cntr3clk.cnt */
-			cntr3clk-src = <1>;	/* Field: cntr3clk.src */
-			cntr4clk-cnt = <19>;	/* Field: cntr4clk.cnt */
-			cntr4clk-src = <1>;	/* Field: cntr4clk.src */
-			cntr5clk-cnt = <499>;	/* Field: cntr5clk.cnt */
-			cntr5clk-src = <1>;	/* Field: cntr5clk.src */
-			cntr6clk-cnt = <9>;	/* Field: cntr6clk.cnt */
-			cntr6clk-src = <1>;	/* Field: cntr6clk.src */
-			cntr7clk-cnt = <900>;	/* Field: cntr7clk.cnt */
-			cntr8clk-cnt = <900>;	/* Field: cntr8clk.cnt */
-			cntr8clk-src = <0>;	/* Field: cntr8clk.src */
-			cntr9clk-cnt = <900>;	/* Field: cntr9clk.cnt */
-			emacctl-emac0sel = <0>;	/* Field: emacctl.emac0sel */
-			emacctl-emac1sel = <0>;	/* Field: emacctl.emac1sel */
-			emacctl-emac2sel = <0>;	/* Field: emacctl.emac2sel */
-			gpiodiv-gpiodbclk = <32000>;	/* Field: gpiodiv.gpiodbclk */
-		};
-
-		/* Address Block: soc_clock_manager_OCP_SLV.i_clk_mgr_alteragrp */
-		alteragrp {
-			nocclk = <0x0384000b>;	/* Register: nocclk */
-			mpuclk = <0x03840001>;	/* Register: mpuclk */
-		};
-	};
-
-	/*
-	 * Driver: altera_arria10_soc_3v_io48_pin_mux_arria10_uboot_driver
-	 * Version: 1.0
-	 * Binding: pinmux
-	 */
-	i_io48_pin_mux: pinmux@0xffd07000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "pinctrl-single";
-		reg = <0xffd07000 0x00000800>;
-		reg-names = "soc_3v_io48_pin_mux_OCP_SLV";
-
-		/* Address Block: soc_3v_io48_pin_mux_OCP_SLV.i_io48_pin_mux_shared_3v_io_grp */
-		shared {
-			reg = <0xffd07000 0x00000200>;
-			pinctrl-single,register-width = <32>;
-			pinctrl-single,function-mask = <0x0000000f>;
-			pinctrl-single,pins =
-				<0x00000000 0x00000008>,	/* Register: pinmux_shared_io_q1_1 */
-				<0x00000004 0x00000008>,	/* Register: pinmux_shared_io_q1_2 */
-				<0x00000008 0x00000008>,	/* Register: pinmux_shared_io_q1_3 */
-				<0x0000000c 0x00000008>,	/* Register: pinmux_shared_io_q1_4 */
-				<0x00000010 0x00000008>,	/* Register: pinmux_shared_io_q1_5 */
-				<0x00000014 0x00000008>,	/* Register: pinmux_shared_io_q1_6 */
-				<0x00000018 0x00000008>,	/* Register: pinmux_shared_io_q1_7 */
-				<0x0000001c 0x00000008>,	/* Register: pinmux_shared_io_q1_8 */
-				<0x00000020 0x00000008>,	/* Register: pinmux_shared_io_q1_9 */
-				<0x00000024 0x00000008>,	/* Register: pinmux_shared_io_q1_10 */
-				<0x00000028 0x00000008>,	/* Register: pinmux_shared_io_q1_11 */
-				<0x0000002c 0x00000008>,	/* Register: pinmux_shared_io_q1_12 */
-				<0x00000030 0x00000004>,	/* Register: pinmux_shared_io_q2_1 */
-				<0x00000034 0x00000004>,	/* Register: pinmux_shared_io_q2_2 */
-				<0x00000038 0x00000004>,	/* Register: pinmux_shared_io_q2_3 */
-				<0x0000003c 0x00000004>,	/* Register: pinmux_shared_io_q2_4 */
-				<0x00000040 0x00000004>,	/* Register: pinmux_shared_io_q2_5 */
-				<0x00000044 0x00000004>,	/* Register: pinmux_shared_io_q2_6 */
-				<0x00000048 0x00000004>,	/* Register: pinmux_shared_io_q2_7 */
-				<0x0000004c 0x00000004>,	/* Register: pinmux_shared_io_q2_8 */
-				<0x00000050 0x00000004>,	/* Register: pinmux_shared_io_q2_9 */
-				<0x00000054 0x00000004>,	/* Register: pinmux_shared_io_q2_10 */
-				<0x00000058 0x00000004>,	/* Register: pinmux_shared_io_q2_11 */
-				<0x0000005c 0x00000004>,	/* Register: pinmux_shared_io_q2_12 */
-				<0x00000060 0x00000003>,	/* Register: pinmux_shared_io_q3_1 */
-				<0x00000064 0x00000003>,	/* Register: pinmux_shared_io_q3_2 */
-				<0x00000068 0x00000003>,	/* Register: pinmux_shared_io_q3_3 */
-				<0x0000006c 0x00000003>,	/* Register: pinmux_shared_io_q3_4 */
-				<0x00000070 0x00000003>,	/* Register: pinmux_shared_io_q3_5 */
-				<0x00000074 0x0000000f>,	/* Register: pinmux_shared_io_q3_6 */
-				<0x00000078 0x0000000a>,	/* Register: pinmux_shared_io_q3_7 */
-				<0x0000007c 0x0000000a>,	/* Register: pinmux_shared_io_q3_8 */
-				<0x00000080 0x0000000a>,	/* Register: pinmux_shared_io_q3_9 */
-				<0x00000084 0x0000000a>,	/* Register: pinmux_shared_io_q3_10 */
-				<0x00000088 0x00000001>,	/* Register: pinmux_shared_io_q3_11 */
-				<0x0000008c 0x00000001>,	/* Register: pinmux_shared_io_q3_12 */
-				<0x00000090 0x00000000>,	/* Register: pinmux_shared_io_q4_1 */
-				<0x00000094 0x00000000>,	/* Register: pinmux_shared_io_q4_2 */
-				<0x00000098 0x0000000f>,	/* Register: pinmux_shared_io_q4_3 */
-				<0x0000009c 0x0000000c>,	/* Register: pinmux_shared_io_q4_4 */
-				<0x000000a0 0x0000000f>,	/* Register: pinmux_shared_io_q4_5 */
-				<0x000000a4 0x0000000f>,	/* Register: pinmux_shared_io_q4_6 */
-				<0x000000a8 0x0000000a>,	/* Register: pinmux_shared_io_q4_7 */
-				<0x000000ac 0x0000000a>,	/* Register: pinmux_shared_io_q4_8 */
-				<0x000000b0 0x0000000c>,	/* Register: pinmux_shared_io_q4_9 */
-				<0x000000b4 0x0000000c>,	/* Register: pinmux_shared_io_q4_10 */
-				<0x000000b8 0x0000000c>,	/* Register: pinmux_shared_io_q4_11 */
-				<0x000000bc 0x0000000c>;	/* Register: pinmux_shared_io_q4_12 */
-		};
-
-		/* Address Block: soc_3v_io48_pin_mux_OCP_SLV.i_io48_pin_mux_dedicated_io_grp */
-		dedicated {
-			reg = <0xffd07200 0x00000200>;
-			pinctrl-single,register-width = <32>;
-			pinctrl-single,function-mask = <0x0000000f>;
-			pinctrl-single,pins =
-				<0x0000000c 0x00000008>,	/* Register: pinmux_dedicated_io_4 */
-				<0x00000010 0x00000008>,	/* Register: pinmux_dedicated_io_5 */
-				<0x00000014 0x00000008>,	/* Register: pinmux_dedicated_io_6 */
-				<0x00000018 0x00000008>,	/* Register: pinmux_dedicated_io_7 */
-				<0x0000001c 0x00000008>,	/* Register: pinmux_dedicated_io_8 */
-				<0x00000020 0x00000008>,	/* Register: pinmux_dedicated_io_9 */
-				<0x00000024 0x0000000a>,	/* Register: pinmux_dedicated_io_10 */
-				<0x00000028 0x0000000a>,	/* Register: pinmux_dedicated_io_11 */
-				<0x0000002c 0x00000008>,	/* Register: pinmux_dedicated_io_12 */
-				<0x00000030 0x00000008>,	/* Register: pinmux_dedicated_io_13 */
-				<0x00000034 0x00000008>,	/* Register: pinmux_dedicated_io_14 */
-				<0x00000038 0x00000008>,	/* Register: pinmux_dedicated_io_15 */
-				<0x0000003c 0x0000000d>,	/* Register: pinmux_dedicated_io_16 */
-				<0x00000040 0x0000000d>;	/* Register: pinmux_dedicated_io_17 */
-		};
-
-		/* Address Block: soc_3v_io48_pin_mux_OCP_SLV.i_io48_pin_mux_dedicated_io_grp */
-		dedicated_cfg {
-			reg = <0xffd07200 0x00000200>;
-			pinctrl-single,register-width = <32>;
-			pinctrl-single,function-mask = <0x003f3f3f>;
-			pinctrl-single,pins =
-				<0x00000100 0x00000101>,	/* Register: configuration_dedicated_io_bank */
-				<0x00000104 0x000b080a>,	/* Register: configuration_dedicated_io_1 */
-				<0x00000108 0x000b080a>,	/* Register: configuration_dedicated_io_2 */
-				<0x0000010c 0x000b080a>,	/* Register: configuration_dedicated_io_3 */
-				<0x00000110 0x000a282a>,	/* Register: configuration_dedicated_io_4 */
-				<0x00000114 0x000a282a>,	/* Register: configuration_dedicated_io_5 */
-				<0x00000118 0x0008282a>,	/* Register: configuration_dedicated_io_6 */
-				<0x0000011c 0x000a282a>,	/* Register: configuration_dedicated_io_7 */
-				<0x00000120 0x000a282a>,	/* Register: configuration_dedicated_io_8 */
-				<0x00000124 0x000a282a>,	/* Register: configuration_dedicated_io_9 */
-				<0x00000128 0x00090000>,	/* Register: configuration_dedicated_io_10 */
-				<0x0000012c 0x00090000>,	/* Register: configuration_dedicated_io_11 */
-				<0x00000130 0x000b282a>,	/* Register: configuration_dedicated_io_12 */
-				<0x00000134 0x000b282a>,	/* Register: configuration_dedicated_io_13 */
-				<0x00000138 0x000b282a>,	/* Register: configuration_dedicated_io_14 */
-				<0x0000013c 0x000b282a>,	/* Register: configuration_dedicated_io_15 */
-				<0x00000140 0x0008282a>,	/* Register: configuration_dedicated_io_16 */
-				<0x00000144 0x000a282a>;	/* Register: configuration_dedicated_io_17 */
-		};
-
-		/* Address Block: soc_3v_io48_pin_mux_OCP_SLV.i_io48_pin_mux_fpga_interface_grp */
-		fpga {
-			reg = <0xffd07400 0x00000100>;
-			pinctrl-single,register-width = <32>;
-			pinctrl-single,function-mask = <0x00000001>;
-			pinctrl-single,pins =
-				<0x00000000 0x00000000>,	/* Register: pinmux_emac0_usefpga */
-				<0x00000004 0x00000000>,	/* Register: pinmux_emac1_usefpga */
-				<0x00000008 0x00000000>,	/* Register: pinmux_emac2_usefpga */
-				<0x0000000c 0x00000000>,	/* Register: pinmux_i2c0_usefpga */
-				<0x00000010 0x00000000>,	/* Register: pinmux_i2c1_usefpga */
-				<0x00000014 0x00000000>,	/* Register: pinmux_i2c_emac0_usefpga */
-				<0x00000018 0x00000000>,	/* Register: pinmux_i2c_emac1_usefpga */
-				<0x0000001c 0x00000000>,	/* Register: pinmux_i2c_emac2_usefpga */
-				<0x00000020 0x00000000>,	/* Register: pinmux_nand_usefpga */
-				<0x00000024 0x00000000>,	/* Register: pinmux_qspi_usefpga */
-				<0x00000028 0x00000000>,	/* Register: pinmux_sdmmc_usefpga */
-				<0x0000002c 0x00000000>,	/* Register: pinmux_spim0_usefpga */
-				<0x00000030 0x00000000>,	/* Register: pinmux_spim1_usefpga */
-				<0x00000034 0x00000000>,	/* Register: pinmux_spis0_usefpga */
-				<0x00000038 0x00000000>,	/* Register: pinmux_spis1_usefpga */
-				<0x0000003c 0x00000000>,	/* Register: pinmux_uart0_usefpga */
-				<0x00000040 0x00000000>;	/* Register: pinmux_uart1_usefpga */
-		};
-	};
-
-	/*
-	 * Driver: altera_arria10_soc_noc_arria10_uboot_driver
-	 * Version: 1.0
-	 * Binding: device
-	 */
-	i_noc: noc@0xffd10000 {
-		compatible = "altr,socfpga-a10-noc";
-		reg = <0xffd10000 0x00008000>;
-		reg-names = "mpu_m0";
-
-		firewall {
-			/*
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_mpu_fpga2sdram_ddr_scr.mpuregion0addr.base
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_mpu_fpga2sdram_ddr_scr.mpuregion0addr.limit
-			 */
-			mpu0 = <0x00000000 0x0000ffff>;
-			/*
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_l3_ddr_scr.hpsregion0addr.base
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_l3_ddr_scr.hpsregion0addr.limit
-			 */
-			l3-0 = <0x00000000 0x0000ffff>;
-			/*
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_mpu_fpga2sdram_ddr_scr.fpga2sdram0region0addr.base
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_mpu_fpga2sdram_ddr_scr.fpga2sdram0region0addr.limit
-			 */
-			fpga2sdram0-0 = <0x00000000 0x0000ffff>;
-			/*
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_mpu_fpga2sdram_ddr_scr.fpga2sdram1region0addr.base
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_mpu_fpga2sdram_ddr_scr.fpga2sdram1region0addr.limit
-			 */
-			fpga2sdram1-0 = <0x00000000 0x0000ffff>;
-			/*
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_mpu_fpga2sdram_ddr_scr.fpga2sdram2region0addr.base
-			 * Driver setting: altera_arria10_soc_noc_arria10_uboot_driver.I_NOC.mpu_m0.noc_fw_ddr_mpu_fpga2sdram_ddr_scr.fpga2sdram2region0addr.limit
-			 */
-			fpga2sdram2-0 = <0x00000000 0x0000ffff>;
-		};
-	};
-
-	hps_fpgabridge0: fpgabridge@0 {
-		compatible = "altr,socfpga-hps2fpga-bridge";
-		init-val = <1>;
-	};
-
-	hps_fpgabridge1: fpgabridge@1 {
-		compatible = "altr,socfpga-lwhps2fpga-bridge";
-		init-val = <1>;
-	};
-
-	hps_fpgabridge2: fpgabridge@2 {
-		compatible = "altr,socfpga-fpga2hps-bridge";
-		init-val = <1>;
-	};
-
-	hps_fpgabridge3: fpgabridge@3 {
-		compatible = "altr,socfpga-fpga2sdram0-bridge";
-		init-val = <1>;
-	};
-
-	hps_fpgabridge4: fpgabridge@4 {
-		compatible = "altr,socfpga-fpga2sdram1-bridge";
-		init-val = <0>;
-	};
-
-	hps_fpgabridge5: fpgabridge@5 {
-		compatible = "altr,socfpga-fpga2sdram2-bridge";
-		init-val = <1>;
-	};
-};
diff --git a/arch/arm/dts/socfpga_arria10_socdk_sdmmc_handoff.h b/arch/arm/dts/socfpga_arria10_socdk_sdmmc_handoff.h
new file mode 100644
index 0000000..4e3fe30
--- /dev/null
+++ b/arch/arm/dts/socfpga_arria10_socdk_sdmmc_handoff.h
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Intel Arria 10 SoCFPGA configuration
+ */
+
+#ifndef __SOCFPGA_ARRIA10_CONFIG_H__
+#define __SOCFPGA_ARRIA10_CONFIG_H__
+
+/* Clocks */
+#define CB_INTOSC_LS_CLK_HZ 60000000
+#define EMAC0_CLK_HZ 250000000
+#define EMAC1_CLK_HZ 250000000
+#define EMAC2_CLK_HZ 250000000
+#define EOSC1_CLK_HZ 25000000
+#define F2H_FREE_CLK_HZ 200000000
+#define H2F_USER0_CLK_HZ 400000000
+#define H2F_USER1_CLK_HZ 400000000
+#define L3_MAIN_FREE_CLK_HZ 200000000
+#define SDMMC_CLK_HZ 200000000
+#define TPIU_CLK_HZ 100000000
+#define MAINPLLGRP_CNTR15CLK_CNT 900
+#define MAINPLLGRP_CNTR2CLK_CNT 900
+#define MAINPLLGRP_CNTR3CLK_CNT 900
+#define MAINPLLGRP_CNTR4CLK_CNT 900
+#define MAINPLLGRP_CNTR5CLK_CNT 900
+#define MAINPLLGRP_CNTR6CLK_CNT 900
+#define MAINPLLGRP_CNTR7CLK_CNT 900
+#define MAINPLLGRP_CNTR7CLK_SRC 0
+#define MAINPLLGRP_CNTR8CLK_CNT 900
+#define MAINPLLGRP_CNTR9CLK_CNT 900
+#define MAINPLLGRP_CNTR9CLK_SRC 0
+#define MAINPLLGRP_MPUCLK_CNT 0
+#define MAINPLLGRP_MPUCLK_SRC 0
+#define MAINPLLGRP_NOCCLK_CNT 0
+#define MAINPLLGRP_NOCCLK_SRC 0
+#define MAINPLLGRP_NOCDIV_CSATCLK 0
+#define MAINPLLGRP_NOCDIV_CSPDBGCLK 1
+#define MAINPLLGRP_NOCDIV_CSTRACECLK 1
+#define MAINPLLGRP_NOCDIV_L4MAINCLK 0
+#define MAINPLLGRP_NOCDIV_L4MPCLK 0
+#define MAINPLLGRP_NOCDIV_L4SPCLK 2
+#define MAINPLLGRP_VCO0_PSRC 0
+#define MAINPLLGRP_VCO1_DENOM 1
+#define MAINPLLGRP_VCO1_NUMER 191
+#define PERPLLGRP_CNTR2CLK_CNT 7
+#define PERPLLGRP_CNTR2CLK_SRC 1
+#define PERPLLGRP_CNTR3CLK_CNT 900
+#define PERPLLGRP_CNTR3CLK_SRC 1
+#define PERPLLGRP_CNTR4CLK_CNT 19
+#define PERPLLGRP_CNTR4CLK_SRC 1
+#define PERPLLGRP_CNTR5CLK_CNT 499
+#define PERPLLGRP_CNTR5CLK_SRC 1
+#define PERPLLGRP_CNTR6CLK_CNT 9
+#define PERPLLGRP_CNTR6CLK_SRC 1
+#define PERPLLGRP_CNTR7CLK_CNT 900
+#define PERPLLGRP_CNTR8CLK_CNT 900
+#define PERPLLGRP_CNTR8CLK_SRC 0
+#define PERPLLGRP_CNTR9CLK_CNT 900
+#define PERPLLGRP_EMACCTL_EMAC0SEL 0
+#define PERPLLGRP_EMACCTL_EMAC1SEL 0
+#define PERPLLGRP_EMACCTL_EMAC2SEL 0
+#define PERPLLGRP_GPIODIV_GPIODBCLK 32000
+#define PERPLLGRP_VCO0_PSRC 0
+#define PERPLLGRP_VCO1_DENOM 1
+#define PERPLLGRP_VCO1_NUMER 159
+#define CLKMGR_TESTIOCTRL_DEBUGCLKSEL 16
+#define CLKMGR_TESTIOCTRL_MAINCLKSEL 8
+#define CLKMGR_TESTIOCTRL_PERICLKSEL 8
+#define ALTERAGRP_MPUCLK_MAINCNT 1
+#define ALTERAGRP_MPUCLK_PERICNT 900
+#define ALTERAGRP_NOCCLK_MAINCNT 11
+#define ALTERAGRP_NOCCLK_PERICNT 900
+#define ALTERAGRP_MPUCLK ((ALTERAGRP_MPUCLK_PERICNT << 16) | \
+	(ALTERAGRP_MPUCLK_MAINCNT))
+#define ALTERAGRP_NOCCLK ((ALTERAGRP_NOCCLK_PERICNT << 16) | \
+	(ALTERAGRP_NOCCLK_MAINCNT))
+
+/* Pin Mux Configuration */
+#define CONFIG_IO_10_INPUT_BUF_EN 0
+#define CONFIG_IO_10_PD_DRV_STRG 0
+#define CONFIG_IO_10_PD_SLW_RT 0
+#define CONFIG_IO_10_PU_DRV_STRG 0
+#define CONFIG_IO_10_PU_SLW_RT 0
+#define CONFIG_IO_10_RTRIM 1
+#define CONFIG_IO_10_WK_PU_EN 1
+#define CONFIG_IO_11_INPUT_BUF_EN 0
+#define CONFIG_IO_11_PD_DRV_STRG 0
+#define CONFIG_IO_11_PD_SLW_RT 0
+#define CONFIG_IO_11_PU_DRV_STRG 0
+#define CONFIG_IO_11_PU_SLW_RT 0
+#define CONFIG_IO_11_RTRIM 1
+#define CONFIG_IO_11_WK_PU_EN 1
+#define CONFIG_IO_12_INPUT_BUF_EN 1
+#define CONFIG_IO_12_PD_DRV_STRG 10
+#define CONFIG_IO_12_PD_SLW_RT 1
+#define CONFIG_IO_12_PU_DRV_STRG 8
+#define CONFIG_IO_12_PU_SLW_RT 1
+#define CONFIG_IO_12_RTRIM 1
+#define CONFIG_IO_12_WK_PU_EN 1
+#define CONFIG_IO_13_INPUT_BUF_EN 1
+#define CONFIG_IO_13_PD_DRV_STRG 10
+#define CONFIG_IO_13_PD_SLW_RT 1
+#define CONFIG_IO_13_PU_DRV_STRG 8
+#define CONFIG_IO_13_PU_SLW_RT 1
+#define CONFIG_IO_13_RTRIM 1
+#define CONFIG_IO_13_WK_PU_EN 1
+#define CONFIG_IO_14_INPUT_BUF_EN 1
+#define CONFIG_IO_14_PD_DRV_STRG 10
+#define CONFIG_IO_14_PD_SLW_RT 1
+#define CONFIG_IO_14_PU_DRV_STRG 8
+#define CONFIG_IO_14_PU_SLW_RT 1
+#define CONFIG_IO_14_RTRIM 1
+#define CONFIG_IO_14_WK_PU_EN 1
+#define CONFIG_IO_15_INPUT_BUF_EN 1
+#define CONFIG_IO_15_PD_DRV_STRG 10
+#define CONFIG_IO_15_PD_SLW_RT 1
+#define CONFIG_IO_15_PU_DRV_STRG 8
+#define CONFIG_IO_15_PU_SLW_RT 1
+#define CONFIG_IO_15_RTRIM 1
+#define CONFIG_IO_15_WK_PU_EN 1
+#define CONFIG_IO_16_INPUT_BUF_EN 0
+#define CONFIG_IO_16_PD_DRV_STRG 10
+#define CONFIG_IO_16_PD_SLW_RT 1
+#define CONFIG_IO_16_PU_DRV_STRG 8
+#define CONFIG_IO_16_PU_SLW_RT 1
+#define CONFIG_IO_16_RTRIM 1
+#define CONFIG_IO_16_WK_PU_EN 0
+#define CONFIG_IO_17_INPUT_BUF_EN 1
+#define CONFIG_IO_17_PD_DRV_STRG 10
+#define CONFIG_IO_17_PD_SLW_RT 1
+#define CONFIG_IO_17_PU_DRV_STRG 8
+#define CONFIG_IO_17_PU_SLW_RT 1
+#define CONFIG_IO_17_RTRIM 1
+#define CONFIG_IO_17_WK_PU_EN 0
+#define CONFIG_IO_1_INPUT_BUF_EN 1
+#define CONFIG_IO_1_PD_DRV_STRG 10
+#define CONFIG_IO_1_PD_SLW_RT 0
+#define CONFIG_IO_1_PU_DRV_STRG 8
+#define CONFIG_IO_1_PU_SLW_RT 0
+#define CONFIG_IO_1_RTRIM 1
+#define CONFIG_IO_1_WK_PU_EN 1
+#define CONFIG_IO_2_INPUT_BUF_EN 1
+#define CONFIG_IO_2_PD_DRV_STRG 10
+#define CONFIG_IO_2_PD_SLW_RT 0
+#define CONFIG_IO_2_PU_DRV_STRG 8
+#define CONFIG_IO_2_PU_SLW_RT 0
+#define CONFIG_IO_2_RTRIM 1
+#define CONFIG_IO_2_WK_PU_EN 1
+#define CONFIG_IO_3_INPUT_BUF_EN 1
+#define CONFIG_IO_3_PD_DRV_STRG 10
+#define CONFIG_IO_3_PD_SLW_RT 0
+#define CONFIG_IO_3_PU_DRV_STRG 8
+#define CONFIG_IO_3_PU_SLW_RT 0
+#define CONFIG_IO_3_RTRIM 1
+#define CONFIG_IO_3_WK_PU_EN 1
+#define CONFIG_IO_4_INPUT_BUF_EN 1
+#define CONFIG_IO_4_PD_DRV_STRG 10
+#define CONFIG_IO_4_PD_SLW_RT 1
+#define CONFIG_IO_4_PU_DRV_STRG 8
+#define CONFIG_IO_4_PU_SLW_RT 1
+#define CONFIG_IO_4_RTRIM 1
+#define CONFIG_IO_4_WK_PU_EN 0
+#define CONFIG_IO_5_INPUT_BUF_EN 1
+#define CONFIG_IO_5_PD_DRV_STRG 10
+#define CONFIG_IO_5_PD_SLW_RT 1
+#define CONFIG_IO_5_PU_DRV_STRG 8
+#define CONFIG_IO_5_PU_SLW_RT 1
+#define CONFIG_IO_5_RTRIM 1
+#define CONFIG_IO_5_WK_PU_EN 0
+#define CONFIG_IO_6_INPUT_BUF_EN 0
+#define CONFIG_IO_6_PD_DRV_STRG 10
+#define CONFIG_IO_6_PD_SLW_RT 1
+#define CONFIG_IO_6_PU_DRV_STRG 8
+#define CONFIG_IO_6_PU_SLW_RT 1
+#define CONFIG_IO_6_RTRIM 1
+#define CONFIG_IO_6_WK_PU_EN 0
+#define CONFIG_IO_7_INPUT_BUF_EN 1
+#define CONFIG_IO_7_PD_DRV_STRG 10
+#define CONFIG_IO_7_PD_SLW_RT 1
+#define CONFIG_IO_7_PU_DRV_STRG 8
+#define CONFIG_IO_7_PU_SLW_RT 1
+#define CONFIG_IO_7_RTRIM 1
+#define CONFIG_IO_7_WK_PU_EN 0
+#define CONFIG_IO_8_INPUT_BUF_EN 1
+#define CONFIG_IO_8_PD_DRV_STRG 10
+#define CONFIG_IO_8_PD_SLW_RT 1
+#define CONFIG_IO_8_PU_DRV_STRG 8
+#define CONFIG_IO_8_PU_SLW_RT 1
+#define CONFIG_IO_8_RTRIM 1
+#define CONFIG_IO_8_WK_PU_EN 0
+#define CONFIG_IO_9_INPUT_BUF_EN 1
+#define CONFIG_IO_9_PD_DRV_STRG 10
+#define CONFIG_IO_9_PD_SLW_RT 1
+#define CONFIG_IO_9_PU_DRV_STRG 8
+#define CONFIG_IO_9_PU_SLW_RT 1
+#define CONFIG_IO_9_RTRIM 1
+#define CONFIG_IO_9_WK_PU_EN 0
+#define CONFIG_IO_BANK_VOLTAGE_SEL_CLKRST_IO 1
+#define CONFIG_IO_BANK_VOLTAGE_SEL_PERI_IO 1
+#define PINMUX_DEDICATED_IO_10_SEL 10
+#define PINMUX_DEDICATED_IO_11_SEL 10
+#define PINMUX_DEDICATED_IO_12_SEL 8
+#define PINMUX_DEDICATED_IO_13_SEL 8
+#define PINMUX_DEDICATED_IO_14_SEL 8
+#define PINMUX_DEDICATED_IO_15_SEL 8
+#define PINMUX_DEDICATED_IO_16_SEL 13
+#define PINMUX_DEDICATED_IO_17_SEL 13
+#define PINMUX_DEDICATED_IO_4_SEL 8
+#define PINMUX_DEDICATED_IO_5_SEL 8
+#define PINMUX_DEDICATED_IO_6_SEL 8
+#define PINMUX_DEDICATED_IO_7_SEL 8
+#define PINMUX_DEDICATED_IO_8_SEL 8
+#define PINMUX_DEDICATED_IO_9_SEL 8
+#define PINMUX_I2C0_USEFPGA_SEL 0
+#define PINMUX_I2C1_USEFPGA_SEL 0
+#define PINMUX_I2CEMAC0_USEFPGA_SEL 0
+#define PINMUX_I2CEMAC1_USEFPGA_SEL 0
+#define PINMUX_I2CEMAC2_USEFPGA_SEL 0
+#define PINMUX_NAND_USEFPGA_SEL 0
+#define PINMUX_PLL_CLOCK_OUT_USEFPGA_SEL 0
+#define PINMUX_QSPI_USEFPGA_SEL 0
+#define PINMUX_RGMII0_USEFPGA_SEL 0
+#define PINMUX_RGMII1_USEFPGA_SEL 0
+#define PINMUX_RGMII2_USEFPGA_SEL 0
+#define PINMUX_SDMMC_USEFPGA_SEL 0
+#define PINMUX_SHARED_IO_Q1_10_SEL 8
+#define PINMUX_SHARED_IO_Q1_11_SEL 8
+#define PINMUX_SHARED_IO_Q1_12_SEL 8
+#define PINMUX_SHARED_IO_Q1_1_SEL 8
+#define PINMUX_SHARED_IO_Q1_2_SEL 8
+#define PINMUX_SHARED_IO_Q1_3_SEL 8
+#define PINMUX_SHARED_IO_Q1_4_SEL 8
+#define PINMUX_SHARED_IO_Q1_5_SEL 8
+#define PINMUX_SHARED_IO_Q1_6_SEL 8
+#define PINMUX_SHARED_IO_Q1_7_SEL 8
+#define PINMUX_SHARED_IO_Q1_8_SEL 8
+#define PINMUX_SHARED_IO_Q1_9_SEL 8
+#define PINMUX_SHARED_IO_Q2_10_SEL 4
+#define PINMUX_SHARED_IO_Q2_11_SEL 4
+#define PINMUX_SHARED_IO_Q2_12_SEL 4
+#define PINMUX_SHARED_IO_Q2_1_SEL 4
+#define PINMUX_SHARED_IO_Q2_2_SEL 4
+#define PINMUX_SHARED_IO_Q2_3_SEL 4
+#define PINMUX_SHARED_IO_Q2_4_SEL 4
+#define PINMUX_SHARED_IO_Q2_5_SEL 4
+#define PINMUX_SHARED_IO_Q2_6_SEL 4
+#define PINMUX_SHARED_IO_Q2_7_SEL 4
+#define PINMUX_SHARED_IO_Q2_8_SEL 4
+#define PINMUX_SHARED_IO_Q2_9_SEL 4
+#define PINMUX_SHARED_IO_Q3_10_SEL 10
+#define PINMUX_SHARED_IO_Q3_11_SEL 1
+#define PINMUX_SHARED_IO_Q3_12_SEL 1
+#define PINMUX_SHARED_IO_Q3_1_SEL 3
+#define PINMUX_SHARED_IO_Q3_2_SEL 3
+#define PINMUX_SHARED_IO_Q3_3_SEL 3
+#define PINMUX_SHARED_IO_Q3_4_SEL 3
+#define PINMUX_SHARED_IO_Q3_5_SEL 3
+#define PINMUX_SHARED_IO_Q3_6_SEL 15
+#define PINMUX_SHARED_IO_Q3_7_SEL 10
+#define PINMUX_SHARED_IO_Q3_8_SEL 10
+#define PINMUX_SHARED_IO_Q3_9_SEL 10
+#define PINMUX_SHARED_IO_Q4_10_SEL 12
+#define PINMUX_SHARED_IO_Q4_11_SEL 12
+#define PINMUX_SHARED_IO_Q4_12_SEL 12
+#define PINMUX_SHARED_IO_Q4_1_SEL 0
+#define PINMUX_SHARED_IO_Q4_2_SEL 0
+#define PINMUX_SHARED_IO_Q4_3_SEL 15
+#define PINMUX_SHARED_IO_Q4_4_SEL 12
+#define PINMUX_SHARED_IO_Q4_5_SEL 15
+#define PINMUX_SHARED_IO_Q4_6_SEL 15
+#define PINMUX_SHARED_IO_Q4_7_SEL 10
+#define PINMUX_SHARED_IO_Q4_8_SEL 15
+#define PINMUX_SHARED_IO_Q4_9_SEL 12
+#define PINMUX_SPIM0_USEFPGA_SEL 0
+#define PINMUX_SPIM1_USEFPGA_SEL 0
+#define PINMUX_SPIS0_USEFPGA_SEL 0
+#define PINMUX_SPIS1_USEFPGA_SEL 0
+#define PINMUX_UART0_USEFPGA_SEL 0
+#define PINMUX_UART1_USEFPGA_SEL 0
+#define PINMUX_USB0_USEFPGA_SEL 0
+#define PINMUX_USB1_USEFPGA_SEL 0
+
+/* Bridge Configuration */
+#define F2H_AXI_SLAVE 1
+#define F2SDRAM0_AXI_SLAVE 1
+#define F2SDRAM1_AXI_SLAVE 0
+#define F2SDRAM2_AXI_SLAVE 1
+#define H2F_AXI_MASTER 1
+#define LWH2F_AXI_MASTER 1
+
+/* Voltage Select for Config IO */
+#define CONFIG_IO_BANK_VSEL \
+	(((CONFIG_IO_BANK_VOLTAGE_SEL_CLKRST_IO & 0x3) << 8) | \
+	(CONFIG_IO_BANK_VOLTAGE_SEL_PERI_IO & 0x3))
+
+/* Macro for Config IO bit mapping */
+#define CONFIG_IO_MACRO(NAME) (((NAME ## _RTRIM & 0xff) << 19) | \
+	((NAME ## _INPUT_BUF_EN & 0x3) << 17) | \
+	((NAME ## _WK_PU_EN & 0x1) << 16) | \
+	((NAME ## _PU_SLW_RT & 0x1) << 13) | \
+	((NAME ## _PU_DRV_STRG & 0xf) << 8) | \
+	((NAME ## _PD_SLW_RT & 0x1) << 5) | \
+	(NAME ## _PD_DRV_STRG & 0x1f))
+
+#endif /* __SOCFPGA_ARRIA10_CONFIG_H__ */
diff --git a/arch/arm/dts/socfpga_stratix10.dtsi b/arch/arm/dts/socfpga_stratix10.dtsi
index a8e61cf..cb799bc 100755
--- a/arch/arm/dts/socfpga_stratix10.dtsi
+++ b/arch/arm/dts/socfpga_stratix10.dtsi
@@ -386,7 +386,6 @@
 			reg = <0xffd00200 0x100>;
 			interrupts = <0 117 4>;
 			resets = <&rst WATCHDOG0_RESET>;
-			u-boot,dm-pre-reloc;
 			status = "disabled";
 		};
 
diff --git a/arch/arm/dts/socfpga_stratix10_socdk-u-boot.dtsi b/arch/arm/dts/socfpga_stratix10_socdk-u-boot.dtsi
index a903040..2669abb 100755
--- a/arch/arm/dts/socfpga_stratix10_socdk-u-boot.dtsi
+++ b/arch/arm/dts/socfpga_stratix10_socdk-u-boot.dtsi
@@ -33,5 +33,6 @@
 };
 
 &watchdog0 {
+	status = "okay";
 	u-boot,dm-pre-reloc;
 };
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index a3699e8..26f2cf8 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -35,6 +35,7 @@
 	select ARMV8_SET_SMPEN
 	select ARMV8_SPIN_TABLE
 	select CLK
+	select FPGA_INTEL_SDM_MAILBOX
 	select NCORE_CACHE
 	select SPL_CLK if SPL
 
@@ -79,7 +80,7 @@
 	select ARMV8_MULTIENTRY
 	select ARMV8_SET_SMPEN
 	select ARMV8_SPIN_TABLE
-	select FPGA_STRATIX10
+	select FPGA_INTEL_SDM_MAILBOX
 
 choice
 	prompt "Altera SOCFPGA board select"
diff --git a/arch/arm/mach-socfpga/include/mach/mailbox_s10.h b/arch/arm/mach-socfpga/include/mach/mailbox_s10.h
index 55707ab..4d78311 100644
--- a/arch/arm/mach-socfpga/include/mach/mailbox_s10.h
+++ b/arch/arm/mach-socfpga/include/mach/mailbox_s10.h
@@ -67,8 +67,42 @@
 	MBOX_RESP_UNKNOWN_BR = 2,
 	/* CMD code not recognized by firmware */
 	MBOX_RESP_UNKNOWN = 3,
-	/* Indicates that the device is not configured */
-	MBOX_RESP_NOT_CONFIGURED = 256,
+	/* Length setting is not a valid length for this CMD type */
+	MBOX_RESP_INVALID_LEN = 4,
+	/* Indirect setting is not valid for this CMD type */
+	MBOX_RESP_INVALID_INDIRECT_SETTING = 5,
+	/* HW source which is not allowed to send CMD type */
+	MBOX_RESP_CMD_INVALID_ON_SRC = 6,
+	/* Client whose ID is not associated with any running PR CMD tried to
+	 * run RECONFIG_DATA / RECONFIG_STATUS, or accessed QSPI / SDMMC using
+	 * an ID without exclusive access
+	 */
+	MBOX_RESP_CLIENT_ID_NO_MATCH = 8,
+	/* Address provided to the system is invalid (alignment, range
+	 * permission)
+	 */
+	MBOX_RESP_INVALID_ADDR = 0x9,
+	/* Signature authentication failed */
+	MBOX_RESP_AUTH_FAIL = 0xA,
+	/* CMD timed out */
+	MBOX_RESP_TIMEOUT = 0xB,
+	/* HW (i.e. QSPI) is not ready (initialized or configured) */
+	MBOX_RESP_HW_NOT_RDY = 0xC,
+	/* Invalid license for IID registration */
+	MBOX_RESP_PUF_ACCCES_FAILED = 0x80,
+	MBOX_PUF_ENROLL_DISABLE = 0x81,
+	MBOX_RESP_PUF_ENROLL_FAIL = 0x82,
+	MBOX_RESP_PUF_RAM_TEST_FAIL = 0x83,
+	MBOX_RESP_ATTEST_CERT_GEN_FAIL = 0x84,
+	/* Operation not allowed under current security settings */
+	MBOX_RESP_NOT_ALLOWED_UNDER_SECURITY_SETTINGS = 0x85,
+	MBOX_RESP_PUF_TRNG_FAIL = 0x86,
+	MBOX_RESP_FUSE_ALREADY_BLOWN = 0x87,
+	MBOX_RESP_INVALID_SIGNATURE = 0x88,
+	MBOX_RESP_INVALID_HASH = 0x8b,
+	MBOX_RESP_INVALID_CERTIFICATE = 0x91,
+	/* Indicates that the device (FPGA or HPS) is not configured */
+	MBOX_RESP_NOT_CONFIGURED = 0x100,
 	/* Indicates that the device is busy */
 	MBOX_RESP_DEVICE_BUSY = 0x1FF,
 	/* Indicates that there is no valid response available */
diff --git a/arch/arm/mach-socfpga/include/mach/reset_manager_soc64.h b/arch/arm/mach-socfpga/include/mach/reset_manager_soc64.h
index 3f952bc..c8bb727 100644
--- a/arch/arm/mach-socfpga/include/mach/reset_manager_soc64.h
+++ b/arch/arm/mach-socfpga/include/mach/reset_manager_soc64.h
@@ -8,6 +8,7 @@
 
 void reset_deassert_peripherals_handoff(void);
 int cpu_has_been_warmreset(void);
+void print_reset_info(void);
 void socfpga_bridges_reset(int enable);
 
 #define RSTMGR_SOC64_STATUS	0x00
@@ -21,8 +22,15 @@
 #define RSTMGR_BRGMODRST_DDRSCH_MASK	0X00000040
 #define RSTMGR_BRGMODRST_FPGA2SOC_MASK	0x00000004
 
-/* Watchdogs and MPU warm reset mask */
-#define RSTMGR_L4WD_MPU_WARMRESET_MASK	0x000F0F00
+/* SDM, Watchdogs and MPU warm reset mask */
+#define RSTMGR_STAT_SDMWARMRST		BIT(1)
+#define RSTMGR_STAT_MPU0RST_BITPOS	8
+#define RSTMGR_STAT_L4WD0RST_BITPOS	16
+#define RSTMGR_L4WD_MPU_WARMRESET_MASK	(RSTMGR_STAT_SDMWARMRST | \
+		GENMASK(RSTMGR_STAT_MPU0RST_BITPOS + 3, \
+			RSTMGR_STAT_MPU0RST_BITPOS) | \
+		GENMASK(RSTMGR_STAT_L4WD0RST_BITPOS + 3, \
+			RSTMGR_STAT_L4WD0RST_BITPOS))
 
 /*
  * SocFPGA Stratix10 reset IDs, bank mapping is as follows:
diff --git a/arch/arm/mach-socfpga/include/mach/system_manager_soc64.h b/arch/arm/mach-socfpga/include/mach/system_manager_soc64.h
index 5e3f54a..4949cae 100644
--- a/arch/arm/mach-socfpga/include/mach/system_manager_soc64.h
+++ b/arch/arm/mach-socfpga/include/mach/system_manager_soc64.h
@@ -46,13 +46,21 @@
 #define SYSMGR_SOC64_GPO			0xe4
 #define SYSMGR_SOC64_GPI			0xe8
 #define SYSMGR_SOC64_MPU			0xf0
+/* store qspi ref clock */
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD0		0x200
+/* store osc1 clock freq */
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD1		0x204
+/* store fpga clock freq */
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD2		0x208
+/* reserved for customer use */
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD3		0x20c
+/* store PSCI_CPU_ON value */
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD4		0x210
+/* store PSCI_CPU_ON value */
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD5		0x214
+/* store VBAR_EL3 value */
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD6		0x218
+/* store VBAR_EL3 value */
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD7		0x21c
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD8		0x220
 #define SYSMGR_SOC64_BOOT_SCRATCH_COLD9		0x224
diff --git a/arch/arm/mach-socfpga/mailbox_s10.c b/arch/arm/mach-socfpga/mailbox_s10.c
index f30e7f8..18d4492 100644
--- a/arch/arm/mach-socfpga/mailbox_s10.c
+++ b/arch/arm/mach-socfpga/mailbox_s10.c
@@ -29,54 +29,107 @@
 static __always_inline int mbox_polling_resp(u32 rout)
 {
 	u32 rin;
-	unsigned long i = ~0;
+	unsigned long i = 2000;
 
 	while (i) {
 		rin = MBOX_READL(MBOX_RIN);
 		if (rout != rin)
 			return 0;
 
+		udelay(1000);
 		i--;
 	}
 
 	return -ETIMEDOUT;
 }
 
+static __always_inline int mbox_is_cmdbuf_full(u32 cin)
+{
+	return (((cin + 1) % MBOX_CMD_BUFFER_SIZE) == MBOX_READL(MBOX_COUT));
+}
+
+static __always_inline int mbox_is_cmdbuf_empty(u32 cin)
+{
+	return (((MBOX_READL(MBOX_COUT) + 1) % MBOX_CMD_BUFFER_SIZE) == cin);
+}
+
+static __always_inline int mbox_wait_for_cmdbuf_empty(u32 cin)
+{
+	int timeout = 2000;
+
+	while (timeout) {
+		if (mbox_is_cmdbuf_empty(cin))
+			return 0;
+		udelay(1000);
+		timeout--;
+	}
+
+	return -ETIMEDOUT;
+}
+
+static __always_inline int mbox_write_cmd_buffer(u32 *cin, u32 data,
+						 int *is_cmdbuf_overflow)
+{
+	int timeout = 1000;
+
+	while (timeout) {
+		if (mbox_is_cmdbuf_full(*cin)) {
+			if (is_cmdbuf_overflow &&
+			    *is_cmdbuf_overflow == 0) {
+				/* Trigger SDM doorbell */
+				MBOX_WRITEL(1, MBOX_DOORBELL_TO_SDM);
+				*is_cmdbuf_overflow = 1;
+			}
+			udelay(1000);
+		} else {
+			/* write header to circular buffer */
+			MBOX_WRITE_CMD_BUF(data, (*cin)++);
+			*cin %= MBOX_CMD_BUFFER_SIZE;
+			MBOX_WRITEL(*cin, MBOX_CIN);
+			break;
+		}
+		timeout--;
+	}
+
+	if (!timeout)
+		return -ETIMEDOUT;
+
+	/* Wait for the SDM to drain the FIFO command buffer */
+	if (is_cmdbuf_overflow && *is_cmdbuf_overflow)
+		return mbox_wait_for_cmdbuf_empty(*cin);
+
+	return 0;
+}
+
 /* Check for available slot and write to circular buffer.
  * It also update command valid offset (cin) register.
  */
 static __always_inline int mbox_fill_cmd_circular_buff(u32 header, u32 len,
 						       u32 *arg)
 {
-	u32 cin;
-	u32 cout;
-	u32 i;
+	int i, ret;
+	int is_cmdbuf_overflow = 0;
+	u32 cin = MBOX_READL(MBOX_CIN) % MBOX_CMD_BUFFER_SIZE;
 
-	cin = MBOX_READL(MBOX_CIN) % MBOX_CMD_BUFFER_SIZE;
-	cout = MBOX_READL(MBOX_COUT) % MBOX_CMD_BUFFER_SIZE;
-
-	/* if command buffer is full or not enough free space
-	 * to fit the data. Note, len is in u32 unit.
-	 */
-	if (((cin + 1) % MBOX_CMD_BUFFER_SIZE) == cout ||
-	    ((MBOX_CMD_BUFFER_SIZE - cin + cout - 1) %
-	     MBOX_CMD_BUFFER_SIZE) < (len + 1))
-		return -ENOMEM;
-
-	/* write header to circular buffer */
-	MBOX_WRITE_CMD_BUF(header, cin++);
-	/* wrapping around when it reach the buffer size */
-	cin %= MBOX_CMD_BUFFER_SIZE;
+	ret = mbox_write_cmd_buffer(&cin, header, &is_cmdbuf_overflow);
+	if (ret)
+		return ret;
 
 	/* write arguments */
 	for (i = 0; i < len; i++) {
-		MBOX_WRITE_CMD_BUF(arg[i], cin++);
-		/* wrapping around when it reach the buffer size */
-		cin %= MBOX_CMD_BUFFER_SIZE;
+		is_cmdbuf_overflow = 0;
+		ret = mbox_write_cmd_buffer(&cin, arg[i], &is_cmdbuf_overflow);
+		if (ret)
+			return ret;
 	}
 
-	/* write command valid offset */
-	MBOX_WRITEL(cin, MBOX_CIN);
+	/* If SDM doorbell is not triggered after the last data is
+	 * written into mailbox FIFO command buffer, trigger the
+	 * SDM doorbell again to ensure the SDM is able to read the
+	 * remaining data.
+	 */
+	if (!is_cmdbuf_overflow)
+		MBOX_WRITEL(1, MBOX_DOORBELL_TO_SDM);
 
 	return 0;
 }
@@ -89,10 +142,6 @@
 	u32 header;
 	int ret;
 
-	/* Total length is command + argument length */
-	if ((len + 1) > MBOX_CMD_BUFFER_SIZE)
-		return -EINVAL;
-
 	if (cmd > MBOX_MAX_CMD_INDEX)
 		return -EINVAL;
 
@@ -109,11 +158,7 @@
 						     u8 is_indirect, u32 len,
 						     u32 *arg)
 {
-	int ret = mbox_prepare_cmd_only(id, cmd, is_indirect, len, arg);
-	/* write doorbell */
-	MBOX_WRITEL(1, MBOX_DOORBELL_TO_SDM);
-
-	return ret;
+	return mbox_prepare_cmd_only(id, cmd, is_indirect, len, arg);
 }
 
 /* Return number of responses received in buffer */
@@ -166,21 +211,24 @@
 		status = MBOX_READL(MBOX_STATUS) & MBOX_STATUS_UA_MSK;
 		/* Write urgent command to urgent register */
 		MBOX_WRITEL(cmd, MBOX_URG);
+		/* write doorbell */
+		MBOX_WRITEL(1, MBOX_DOORBELL_TO_SDM);
 	} else {
 		ret = mbox_prepare_cmd_only(id, cmd, is_indirect, len, arg);
 		if (ret)
 			return ret;
 	}
 
-	/* write doorbell */
-	MBOX_WRITEL(1, MBOX_DOORBELL_TO_SDM);
-
 	while (1) {
-		ret = ~0;
+		ret = 1000;
 
 		/* Wait for doorbell from SDM */
-		while (!MBOX_READL(MBOX_DOORBELL_FROM_SDM) && ret--)
-			;
+		do {
+			if (MBOX_READL(MBOX_DOORBELL_FROM_SDM))
+				break;
+			udelay(1000);
+		} while (--ret);
+
 		if (!ret)
 			return -ETIMEDOUT;
 
@@ -216,9 +264,7 @@
 			if ((MBOX_RESP_CLIENT_GET(resp) ==
 			     MBOX_CLIENT_ID_UBOOT) &&
 			    (MBOX_RESP_ID_GET(resp) == id)) {
-				ret = MBOX_RESP_ERR_GET(resp);
-				if (ret)
-					return ret;
+				int resp_err = MBOX_RESP_ERR_GET(resp);
 
 				if (resp_buf_len) {
 					buf_len = *resp_buf_len;
@@ -247,14 +293,36 @@
 						buf_len--;
 					}
 				}
-				return ret;
+				return resp_err;
 			}
 		}
-	};
+	}
 
 	return -EIO;
 }
 
+static __always_inline int mbox_send_cmd_common_retry(u8 id, u32 cmd,
+						      u8 is_indirect,
+						      u32 len, u32 *arg,
+						      u8 urgent,
+						      u32 *resp_buf_len,
+						      u32 *resp_buf)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		ret = mbox_send_cmd_common(id, cmd, is_indirect, len, arg,
+					   urgent, resp_buf_len, resp_buf);
+		if (ret == MBOX_RESP_TIMEOUT || ret == MBOX_RESP_DEVICE_BUSY)
+			udelay(2000); /* wait for 2ms before resend */
+		else
+			break;
+	}
+
+	return ret;
+}
+
 int mbox_init(void)
 {
 	int ret;
@@ -349,10 +417,10 @@
 	int ret;
 
 	reconfig_status_resp_len = RECONFIG_STATUS_RESPONSE_LEN;
-	ret = mbox_send_cmd_common(MBOX_ID_UBOOT, cmd,
-				   MBOX_CMD_DIRECT, 0, NULL, 0,
-				   &reconfig_status_resp_len,
-				   reconfig_status_resp);
+	ret = mbox_send_cmd_common_retry(MBOX_ID_UBOOT, cmd,
+					 MBOX_CMD_DIRECT, 0, NULL, 0,
+					 &reconfig_status_resp_len,
+					 reconfig_status_resp);
 
 	if (ret)
 		return ret;
@@ -392,16 +460,16 @@
 int mbox_send_cmd(u8 id, u32 cmd, u8 is_indirect, u32 len, u32 *arg,
 		  u8 urgent, u32 *resp_buf_len, u32 *resp_buf)
 {
-	return mbox_send_cmd_common(id, cmd, is_indirect, len, arg, urgent,
-			       resp_buf_len, resp_buf);
+	return mbox_send_cmd_common_retry(id, cmd, is_indirect, len, arg,
+					  urgent, resp_buf_len, resp_buf);
 }
 
 int __secure mbox_send_cmd_psci(u8 id, u32 cmd, u8 is_indirect, u32 len,
 				u32 *arg, u8 urgent, u32 *resp_buf_len,
 				u32 *resp_buf)
 {
-	return mbox_send_cmd_common(id, cmd, is_indirect, len, arg, urgent,
-			       resp_buf_len, resp_buf);
+	return mbox_send_cmd_common_retry(id, cmd, is_indirect, len, arg,
+					  urgent, resp_buf_len, resp_buf);
 }
 
 int mbox_send_cmd_only(u8 id, u32 cmd, u8 is_indirect, u32 len, u32 *arg)
diff --git a/arch/arm/mach-socfpga/misc_s10.c b/arch/arm/mach-socfpga/misc_s10.c
index 52868fb..0d67b0f 100644
--- a/arch/arm/mach-socfpga/misc_s10.c
+++ b/arch/arm/mach-socfpga/misc_s10.c
@@ -8,20 +8,13 @@
 #include <common.h>
 #include <env.h>
 #include <errno.h>
-#include <fdtdec.h>
 #include <init.h>
 #include <log.h>
-#include <miiphy.h>
-#include <netdev.h>
 #include <asm/io.h>
+#include <asm/arch/mailbox_s10.h>
+#include <asm/arch/misc.h>
 #include <asm/arch/reset_manager.h>
 #include <asm/arch/system_manager.h>
-#include <asm/arch/misc.h>
-#include <asm/pl310.h>
-#include <linux/libfdt.h>
-#include <asm/arch/mailbox_s10.h>
-
-#include <dt-bindings/reset/altr,rst-mgr-s10.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -31,7 +24,7 @@
 static Altera_desc altera_fpga[] = {
 	{
 		/* Family */
-		Intel_FPGA_Stratix10,
+		Intel_FPGA_SDM_Mailbox,
 		/* Interface type */
 		secure_device_manager_mailbox,
 		/* No limitation as additional data will be ignored */
@@ -45,79 +38,6 @@
 	},
 };
 
-/*
- * DesignWare Ethernet initialization
- */
-#ifdef CONFIG_ETH_DESIGNWARE
-
-static u32 socfpga_phymode_setup(u32 gmac_index, const char *phymode)
-{
-	u32 modereg;
-
-	if (!phymode)
-		return -EINVAL;
-
-	if (!strcmp(phymode, "mii") || !strcmp(phymode, "gmii") ||
-	    !strcmp(phymode, "sgmii"))
-		modereg = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
-	else if (!strcmp(phymode, "rgmii"))
-		modereg = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
-	else if (!strcmp(phymode, "rmii"))
-		modereg = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII;
-	else
-		return -EINVAL;
-
-	clrsetbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_EMAC0 +
-			(gmac_index * sizeof(u32)),
-			SYSMGR_EMACGRP_CTRL_PHYSEL_MASK, modereg);
-
-	return 0;
-}
-
-static int socfpga_set_phymode(void)
-{
-	const void *fdt = gd->fdt_blob;
-	struct fdtdec_phandle_args args;
-	const char *phy_mode;
-	u32 gmac_index;
-	int nodes[3];	/* Max. 3 GMACs */
-	int ret, count;
-	int i, node;
-
-	count = fdtdec_find_aliases_for_id(fdt, "ethernet",
-					   COMPAT_ALTERA_SOCFPGA_DWMAC,
-					   nodes, ARRAY_SIZE(nodes));
-	for (i = 0; i < count; i++) {
-		node = nodes[i];
-		if (node <= 0)
-			continue;
-
-		ret = fdtdec_parse_phandle_with_args(fdt, node, "resets",
-						     "#reset-cells", 1, 0,
-						     &args);
-		if (ret || args.args_count != 1) {
-			debug("GMAC%i: Failed to parse DT 'resets'!\n", i);
-			continue;
-		}
-
-		gmac_index = args.args[0] - EMAC0_RESET;
-
-		phy_mode = fdt_getprop(fdt, node, "phy-mode", NULL);
-		ret = socfpga_phymode_setup(gmac_index, phy_mode);
-		if (ret) {
-			debug("GMAC%i: Failed to parse DT 'phy-mode'!\n", i);
-			continue;
-		}
-	}
-
-	return 0;
-}
-#else
-static int socfpga_set_phymode(void)
-{
-	return 0;
-};
-#endif
 
 /*
  * Print CPU information
@@ -139,7 +59,6 @@
 	sprintf(qspi_string, "<0x%08x>", cm_get_qspi_controller_clk_hz());
 	env_set("qspi_clock", qspi_string);
 
-	socfpga_set_phymode();
 	return 0;
 }
 #endif
diff --git a/arch/arm/mach-socfpga/qts-filter-a10.sh b/arch/arm/mach-socfpga/qts-filter-a10.sh
new file mode 100755
index 0000000..57d77e8
--- /dev/null
+++ b/arch/arm/mach-socfpga/qts-filter-a10.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+
+#
+# helper function to convert from DOS to Unix, if necessary, and handle
+# lines ending in '\'.
+#
+fix_newlines_in_macros() {
+	sed -n ':next;s/\r$//;/[^\\]\\$/ {N;s/\\\n//;b next};p' $1
+}
+
+#filter out only what we need from a10 hps.xml
+grep_a10_hps_config() {
+	egrep "clk_hz|i_clk_mgr|i_io48_pin_mux|AXI_SLAVE|AXI_MASTER"
+}
+
+#
+# Process hps.xml
+# $1:	hps.xml
+# $2:	Output File
+#
+process_a10_hps_config() {
+	hps_xml="$1"
+	outfile="$2"
+
+	(cat << EOF
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Intel Arria 10 SoCFPGA configuration
+ */
+
+#ifndef __SOCFPGA_ARRIA10_CONFIG_H__
+#define __SOCFPGA_ARRIA10_CONFIG_H__
+
+EOF
+
+	echo "/* Clocks */"
+	fix_newlines_in_macros \
+		${hps_xml} | egrep "clk_hz" |
+			awk -F"'" '{ gsub("\\.","_",$2) ; \
+				print "#define" " " toupper($2) " " $4}' |
+			sed 's/\.[0-9]//' |
+			sed 's/I_CLK_MGR_//' |
+			sort
+	fix_newlines_in_macros \
+		${hps_xml} | egrep "i_clk_mgr_mainpll" |
+			awk -F"'" '{ gsub("\\.","_",$2) ; \
+				print "#define" " " toupper($2) " " $4}' |
+			sed 's/\.[0-9]//' |
+			sed 's/I_CLK_MGR_//' |
+			sort
+	fix_newlines_in_macros \
+		${hps_xml} | egrep "i_clk_mgr_perpll" |
+			awk -F"'" '{ gsub("\\.","_",$2) ; \
+				print "#define" " " toupper($2) " " $4}' |
+			sed 's/\.[0-9]//' |
+			sed 's/I_CLK_MGR_//' |
+			sort
+	fix_newlines_in_macros \
+		${hps_xml} | egrep "i_clk_mgr_clkmgr" |
+			awk -F"'" '{ gsub("\\.","_",$2) ; \
+				print "#define" " " toupper($2) " " $4}' |
+			sed 's/\.[0-9]//' |
+			sed 's/I_CLK_MGR_//' |
+			sort
+	fix_newlines_in_macros \
+		${hps_xml} | egrep "i_clk_mgr_alteragrp" |
+			awk -F"'" '{ gsub("\\.","_",$2) ; \
+				print "#define" " " toupper($2) " " $4}' |
+			sed 's/\.[0-9]//' |
+			sed 's/I_CLK_MGR_//' |
+			sort
+	echo "#define ALTERAGRP_MPUCLK ((ALTERAGRP_MPUCLK_PERICNT << 16) | \\"
+	echo "	(ALTERAGRP_MPUCLK_MAINCNT))"
+	echo "#define ALTERAGRP_NOCCLK ((ALTERAGRP_NOCCLK_PERICNT << 16) | \\"
+	echo "	(ALTERAGRP_NOCCLK_MAINCNT))"
+
+	echo
+	echo "/* Pin Mux Configuration */"
+	fix_newlines_in_macros \
+		${hps_xml} | egrep "i_io48_pin_mux" |
+			awk -F"'" '{ gsub("\\.","_",$2) ; \
+				print "#define" " " toupper($2) " " $4}' |
+			sed 's/I_IO48_PIN_MUX_//' |
+			sed 's/SHARED_3V_IO_GRP_//' |
+			sed 's/FPGA_INTERFACE_GRP_//' |
+			sed 's/DEDICATED_IO_GRP_//' |
+			sed 's/CONFIGURATION_DEDICATED/CONFIG/' |
+			sort
+
+	echo
+	echo "/* Bridge Configuration */"
+	fix_newlines_in_macros \
+		${hps_xml} | egrep "AXI_SLAVE|AXI_MASTER" |
+			awk -F"'" '{ gsub("\\.","_",$2) ; \
+				print "#define" " " toupper($2) " " $4}' |
+			sed 's/true/1/' |
+			sed 's/false/0/' |
+			sort
+
+	echo
+	echo "/* Voltage Select for Config IO */"
+	echo "#define CONFIG_IO_BANK_VSEL \\"
+	echo "	(((CONFIG_IO_BANK_VOLTAGE_SEL_CLKRST_IO & 0x3) << 8) | \\"
+	echo "	(CONFIG_IO_BANK_VOLTAGE_SEL_PERI_IO & 0x3))"
+
+	echo
+	echo "/* Macro for Config IO bit mapping */"
+	echo -n "#define CONFIG_IO_MACRO(NAME) "
+	echo "(((NAME ## _RTRIM & 0xff) << 19) | \\"
+	echo "	((NAME ## _INPUT_BUF_EN & 0x3) << 17) | \\"
+	echo "	((NAME ## _WK_PU_EN & 0x1) << 16) | \\"
+	echo "	((NAME ## _PU_SLW_RT & 0x1) << 13) | \\"
+	echo "	((NAME ## _PU_DRV_STRG & 0xf) << 8) | \\"
+	echo "	((NAME ## _PD_SLW_RT & 0x1) << 5) | \\"
+	echo "	(NAME ## _PD_DRV_STRG & 0x1f))"
+
+	cat << EOF
+
+#endif /* __SOCFPGA_ARRIA10_CONFIG_H__ */
+EOF
+	) > "${outfile}"
+}
+
+usage() {
+	echo "$0 [hps_xml] [output_file]"
+	echo "Process QTS-generated hps.xml into devicetree header."
+	echo ""
+	echo "  hps_xml      - hps.xml file from hps_isw_handoff"
+	echo "  output_file  - Output header file for dtsi include"
+	echo ""
+}
+
+hps_xml="$1"
+outfile="$2"
+
+if [ "$#" -ne 2 ] ; then
+	usage
+	exit 1
+fi
+
+process_a10_hps_config "${hps_xml}" "${outfile}"
diff --git a/arch/arm/mach-socfpga/reset_manager_s10.c b/arch/arm/mach-socfpga/reset_manager_s10.c
index c743077..3746e6a 100644
--- a/arch/arm/mach-socfpga/reset_manager_s10.c
+++ b/arch/arm/mach-socfpga/reset_manager_s10.c
@@ -9,6 +9,7 @@
 #include <asm/arch/reset_manager.h>
 #include <asm/arch/system_manager.h>
 #include <dt-bindings/reset/altr,rst-mgr-s10.h>
+#include <linux/iopoll.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -54,6 +55,8 @@
 
 void socfpga_bridges_reset(int enable)
 {
+	u32 reg;
+
 	if (enable) {
 		/* clear idle request to all bridges */
 		setbits_le32(socfpga_get_sysmgr_addr() +
@@ -64,9 +67,9 @@
 			     ~0);
 
 		/* Poll until all idleack to 0 */
-		while (readl(socfpga_get_sysmgr_addr() +
-			     SYSMGR_SOC64_NOC_IDLEACK))
-			;
+		read_poll_timeout(readl, socfpga_get_sysmgr_addr() +
+				  SYSMGR_SOC64_NOC_IDLEACK, reg, !reg, 1000,
+				  300000);
 	} else {
 		/* set idle request to all bridges */
 		writel(~0,
@@ -77,14 +80,18 @@
 		writel(1, socfpga_get_sysmgr_addr() + SYSMGR_SOC64_NOC_TIMEOUT);
 
 		/* Poll until all idleack to 1 */
-		while ((readl(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_NOC_IDLEACK) ^
-			(SYSMGR_NOC_H2F_MSK | SYSMGR_NOC_LWH2F_MSK)))
-			;
+		read_poll_timeout(readl, socfpga_get_sysmgr_addr() +
+				  SYSMGR_SOC64_NOC_IDLEACK, reg,
+				  reg == (SYSMGR_NOC_H2F_MSK |
+					  SYSMGR_NOC_LWH2F_MSK),
+				  1000, 300000);
 
 		/* Poll until all idlestatus to 1 */
-		while ((readl(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_NOC_IDLESTATUS) ^
-			(SYSMGR_NOC_H2F_MSK | SYSMGR_NOC_LWH2F_MSK)))
-			;
+		read_poll_timeout(readl, socfpga_get_sysmgr_addr() +
+				  SYSMGR_SOC64_NOC_IDLESTATUS, reg,
+				  reg == (SYSMGR_NOC_H2F_MSK |
+					  SYSMGR_NOC_LWH2F_MSK),
+				  1000, 300000);
 
 		/* Reset all bridges (except NOR DDR scheduler & F2S) */
 		setbits_le32(socfpga_get_rstmgr_addr() + RSTMGR_SOC64_BRGMODRST,
@@ -104,3 +111,25 @@
 	return readl(socfpga_get_rstmgr_addr() + RSTMGR_SOC64_STATUS) &
 			RSTMGR_L4WD_MPU_WARMRESET_MASK;
 }
+
+void print_reset_info(void)
+{
+	bool iswd;
+	int n;
+	u32 stat = cpu_has_been_warmreset();
+
+	printf("Reset state: %s%s", stat ? "Warm " : "Cold",
+	       (stat & RSTMGR_STAT_SDMWARMRST) ? "[from SDM] " : "");
+
+	stat &= ~RSTMGR_STAT_SDMWARMRST;
+	if (!stat) {
+		puts("\n");
+		return;
+	}
+
+	n = generic_ffs(stat) - 1;
+	iswd = (n >= RSTMGR_STAT_L4WD0RST_BITPOS);
+	printf("(Triggered by %s %d)\n", iswd ? "Watchdog" : "MPU",
+	       iswd ? (n - RSTMGR_STAT_L4WD0RST_BITPOS) :
+	       (n - RSTMGR_STAT_MPU0RST_BITPOS));
+}
diff --git a/arch/arm/mach-socfpga/spl_agilex.c b/arch/arm/mach-socfpga/spl_agilex.c
index bd971ec..78b5d7c 100644
--- a/arch/arm/mach-socfpga/spl_agilex.c
+++ b/arch/arm/mach-socfpga/spl_agilex.c
@@ -51,11 +51,11 @@
 
 	socfpga_get_managers_addr();
 
-#ifdef CONFIG_HW_WATCHDOG
 	/* Ensure watchdog is paused when debugging is happening */
 	writel(SYSMGR_WDDBG_PAUSE_ALL_CPU,
 	       socfpga_get_sysmgr_addr() + SYSMGR_SOC64_WDDBG);
 
+#ifdef CONFIG_HW_WATCHDOG
 	/* Enable watchdog before initializing the HW */
 	socfpga_per_reset(SOCFPGA_RESET(L4WD0), 1);
 	socfpga_per_reset(SOCFPGA_RESET(L4WD0), 0);
@@ -76,6 +76,7 @@
 	}
 
 	preloader_console_init();
+	print_reset_info();
 	cm_print_clock_quick_summary();
 
 	firewall_setup();
diff --git a/arch/arm/mach-socfpga/spl_s10.c b/arch/arm/mach-socfpga/spl_s10.c
index b3c6f6a..daed056 100644
--- a/arch/arm/mach-socfpga/spl_s10.c
+++ b/arch/arm/mach-socfpga/spl_s10.c
@@ -53,11 +53,11 @@
 
 	socfpga_get_managers_addr();
 
-#ifdef CONFIG_HW_WATCHDOG
 	/* Ensure watchdog is paused when debugging is happening */
 	writel(SYSMGR_WDDBG_PAUSE_ALL_CPU,
 	       socfpga_get_sysmgr_addr() + SYSMGR_SOC64_WDDBG);
 
+#ifdef CONFIG_HW_WATCHDOG
 	/* Enable watchdog before initializing the HW */
 	socfpga_per_reset(SOCFPGA_RESET(L4WD0), 1);
 	socfpga_per_reset(SOCFPGA_RESET(L4WD0), 0);
@@ -81,6 +81,7 @@
 #endif
 
 	preloader_console_init();
+	print_reset_info();
 	cm_print_clock_quick_summary();
 
 	firewall_setup();
diff --git a/arch/arm/mach-socfpga/timer_s10.c b/arch/arm/mach-socfpga/timer_s10.c
index 3ad98bd..7d5598e 100644
--- a/arch/arm/mach-socfpga/timer_s10.c
+++ b/arch/arm/mach-socfpga/timer_s10.c
@@ -14,6 +14,7 @@
  */
 int timer_init(void)
 {
+#ifdef CONFIG_SPL_BUILD
 	int enable = 0x3;	/* timer enable + output signal masked */
 	int loadval = ~0;
 
@@ -22,6 +23,6 @@
 	/* enable processor pysical counter */
 	asm volatile("msr cntp_ctl_el0, %0" : : "r" (enable));
 	asm volatile("msr cntp_tval_el0, %0" : : "r" (loadval));
-
+#endif
 	return 0;
 }
diff --git a/arch/sandbox/dts/sandbox.dtsi b/arch/sandbox/dts/sandbox.dtsi
index c76ecc0..0faad3f 100644
--- a/arch/sandbox/dts/sandbox.dtsi
+++ b/arch/sandbox/dts/sandbox.dtsi
@@ -18,14 +18,14 @@
 	buttons {
 		compatible = "gpio-keys";
 
-		summer {
+		btn1 {
 			gpios = <&gpio_a 3 0>;
-			label = "summer";
+			label = "button1";
 		};
 
-		christmas {
+		btn2 {
 			gpios = <&gpio_a 4 0>;
-			label = "christmas";
+			label = "button2";
 		};
 	};
 
diff --git a/arch/sandbox/dts/test.dts b/arch/sandbox/dts/test.dts
index c7d1376..fa84b2c 100644
--- a/arch/sandbox/dts/test.dts
+++ b/arch/sandbox/dts/test.dts
@@ -55,14 +55,14 @@
 	buttons {
 		compatible = "gpio-keys";
 
-		summer {
+		btn1 {
 			gpios = <&gpio_a 3 0>;
-			label = "summer";
+			label = "button1";
 		};
 
-		christmas {
+		btn2 {
 			gpios = <&gpio_a 4 0>;
-			label = "christmas";
+			label = "button2";
 		};
 	};
 
diff --git a/board/Marvell/mvebu_armada-37xx/board.c b/board/Marvell/mvebu_armada-37xx/board.c
index 2bfc717..73d69e0 100644
--- a/board/Marvell/mvebu_armada-37xx/board.c
+++ b/board/Marvell/mvebu_armada-37xx/board.c
@@ -88,14 +88,14 @@
 	if (env_get("fdtfile"))
 		return 0;
 
-	if (!of_machine_is_compatible("marvell,armada-3720-espressobin"))
+	if (!of_machine_is_compatible("globalscale,espressobin"))
 		return 0;
 
 	/* If the memory controller has been configured for DDR4, we're running on v7 */
 	ddr4 = ((readl(A3700_CH0_MC_CTRL2_REG) >> A3700_MC_CTRL2_SDRAM_TYPE_OFFS)
 		& A3700_MC_CTRL2_SDRAM_TYPE_MASK) == A3700_MC_CTRL2_SDRAM_TYPE_DDR4;
 
-	emmc = of_machine_is_compatible("marvell,armada-3720-espressobin-emmc");
+	emmc = of_machine_is_compatible("globalscale,espressobin-emmc");
 
 	if (ddr4 && emmc)
 		env_set("fdtfile", "marvell/armada-3720-espressobin-v7-emmc.dtb");
@@ -248,7 +248,7 @@
 /* Bring-up board-specific network stuff */
 int board_network_enable(struct mii_dev *bus)
 {
-	if (!of_machine_is_compatible("marvell,armada-3720-espressobin"))
+	if (!of_machine_is_compatible("globalscale,espressobin"))
 		return 0;
 
 	/*
@@ -300,7 +300,7 @@
 	int part_off;
 
 	/* Fill SPI MTD partitions for Linux kernel on Espressobin */
-	if (!of_machine_is_compatible("marvell,armada-3720-espressobin"))
+	if (!of_machine_is_compatible("globalscale,espressobin"))
 		return 0;
 
 	spi_off = fdt_node_offset_by_compatible(blob, -1, "jedec,spi-nor");
diff --git a/board/Marvell/mvebu_armada-8k/MAINTAINERS b/board/Marvell/mvebu_armada-8k/MAINTAINERS
index 2551ed0..15660cd 100644
--- a/board/Marvell/mvebu_armada-8k/MAINTAINERS
+++ b/board/Marvell/mvebu_armada-8k/MAINTAINERS
@@ -10,3 +10,9 @@
 M:	Konstantin Porotchkin <kostap@marvell.com>
 S:	Maintained
 F:	configs/mvebu_mcbin-88f8040_defconfig
+
+Puzzle-M801 BOARD
+M:	Luka Kovacic <luka.kovacic@sartura.hr>
+S:	Maintained
+F:	configs/mvebu_puzzle-m801-88f8040_defconfig
+F:	arch/arm/dts/armada-8040-puzzle-m801.dts
diff --git a/board/Marvell/mvebu_armada-8k/board.c b/board/Marvell/mvebu_armada-8k/board.c
index 60b0024..bf8a929 100644
--- a/board/Marvell/mvebu_armada-8k/board.c
+++ b/board/Marvell/mvebu_armada-8k/board.c
@@ -34,6 +34,17 @@
 #define I2C_IO_REG_CL		((1 << I2C_IO_REG_0_USB_H0_CL) | \
 				 (1 << I2C_IO_REG_0_USB_H1_CL))
 
+/*
+ * Information specific to the iEi Puzzle-M801 board.
+ */
+
+/* Internal configuration registers */
+#define CP1_CONF_REG_BASE 0xf4440000
+#define CONF_REG_MPP0 0x0
+#define CONF_REG_MPP1 0x4
+#define CONF_REG_MPP2 0x8
+#define CONF_REG_MPP3 0xC
+
 static int usb_enabled = 0;
 
 /* Board specific xHCI dis-/enable code */
@@ -141,7 +152,14 @@
 
 int board_early_init_f(void)
 {
-	/* Nothing to do (yet), perhaps later some pin-muxing etc */
+	/* Initialize some platform specific memory locations */
+	if (of_machine_is_compatible("marvell,armada8040-puzzle-m801")) {
+		/* MPP setup */
+		writel(0x00444444, CP1_CONF_REG_BASE + CONF_REG_MPP0);
+		writel(0x00000000, CP1_CONF_REG_BASE + CONF_REG_MPP1);
+		writel(0x00000000, CP1_CONF_REG_BASE + CONF_REG_MPP2);
+		writel(0x08888000, CP1_CONF_REG_BASE + CONF_REG_MPP3);
+	}
 
 	return 0;
 }
diff --git a/board/bosch/guardian/MAINTAINERS b/board/bosch/guardian/MAINTAINERS
index 2f674d7..fd216a9 100644
--- a/board/bosch/guardian/MAINTAINERS
+++ b/board/bosch/guardian/MAINTAINERS
@@ -1,7 +1,7 @@
 Guardian BOARD
 M:	Sjoerd Simons <sjoerd.simons@collabora.co.uk>
 M:	Govindaraji Sivanantham <Govindaraji.Sivanantham@in.bosch.com>
-M:	Moses Christopher Bollavarapu <BollavarapuMoses.Christopher@in.bosch.com>
+M:	Hiremath Gireesh <Gireesh.Hiremath@in.bosch.com>
 S:	Maintained
 F:	board/bosch/guardian/
 F:	include/configs/am335x_guardian.h
diff --git a/board/ti/j721e/evm.c b/board/ti/j721e/evm.c
index 7925b76..d8711eb 100644
--- a/board/ti/j721e/evm.c
+++ b/board/ti/j721e/evm.c
@@ -110,7 +110,7 @@
 		do_fixup_by_compat(blob, "ti,am654-hbmc", "status",
 				   "okay", sizeof("okay"), 0);
 		offset = fdt_node_offset_by_compatible(blob, -1,
-						       "ti,j721e-ospi");
+						       "ti,am654-ospi");
 		fdt_setprop(blob, offset, "status", "disabled",
 			    sizeof("disabled"));
 	}
diff --git a/cmd/Kconfig b/cmd/Kconfig
index 4cb1717..a3166e4 100644
--- a/cmd/Kconfig
+++ b/cmd/Kconfig
@@ -1751,6 +1751,77 @@
 	  feature is to allow easy loading of files passed to qemu-system
 	  via -kernel / -initrd
 
+config CMD_PSTORE
+	bool "pstore"
+	help
+	  This provides access to Linux PStore with Rammoops backend. The main
+	  feature is to allow displaying or saving PStore records.
+
+	  See doc/pstore.rst for more information.
+
+if CMD_PSTORE
+
+config CMD_PSTORE_MEM_ADDR
+	hex "Memory Address"
+	depends on CMD_PSTORE
+	help
+	  Base addr used for PStore ramoops memory, should be identical to
+	  ramoops.mem_address parameter used by kernel
+
+config CMD_PSTORE_MEM_SIZE
+	hex "Memory size"
+	depends on CMD_PSTORE
+	default "0x10000"
+	help
+	  Size of PStore ramoops memory, should be identical to ramoops.mem_size
+	  parameter used by kernel, a power of 2 and larger than the sum of the
+	  record sizes
+
+config CMD_PSTORE_RECORD_SIZE
+	hex "Dump record size"
+	depends on CMD_PSTORE
+	default "0x1000"
+	help
+	  Size of each dump done on oops/panic, should be identical to
+	  ramoops.record_size parameter used by kernel and a power of 2
+	  Must be non-zero
+
+config CMD_PSTORE_CONSOLE_SIZE
+	hex "Kernel console log size"
+	depends on CMD_PSTORE
+	default "0x1000"
+	help
+	  Size of kernel console log, should be identical to
+	  ramoops.console_size parameter used by kernel and a power of 2
+	  Must be non-zero
+
+config CMD_PSTORE_FTRACE_SIZE
+	hex "FTrace log size"
+	depends on CMD_PSTORE
+	default "0x1000"
+	help
+	  Size of ftrace log, should be identical to ramoops.ftrace_size
+	  parameter used by kernel and a power of 2
+
+config CMD_PSTORE_PMSG_SIZE
+	hex "User space message log size"
+	depends on CMD_PSTORE
+	default "0x1000"
+	help
+	  Size of user space message log, should be identical to
+	  ramoops.pmsg_size parameter used by kernel and a power of 2
+
+config CMD_PSTORE_ECC_SIZE
+	int "ECC size"
+	depends on CMD_PSTORE
+	default "0"
+	help
+	  If non-zero, the option enables ECC support and specifies the ECC
+	  buffer size in bytes (1 is a special value meaning 16 bytes of ECC);
+	  should be identical to the ramoops.ramoops_ecc parameter used by kernel
+
+endif
+
 source "cmd/mvebu/Kconfig"
 
 config CMD_TERMINAL
diff --git a/cmd/Makefile b/cmd/Makefile
index 015b837..19a8916 100644
--- a/cmd/Makefile
+++ b/cmd/Makefile
@@ -116,6 +116,7 @@
 endif
 obj-$(CONFIG_CMD_PINMUX) += pinmux.o
 obj-$(CONFIG_CMD_PMC) += pmc.o
+obj-$(CONFIG_CMD_PSTORE) += pstore.o
 obj-$(CONFIG_CMD_PXE) += pxe.o pxe_utils.o
 obj-$(CONFIG_CMD_WOL) += wol.o
 obj-$(CONFIG_CMD_QFW) += qfw.o
diff --git a/cmd/button.c b/cmd/button.c
index 84ad165..64c5a8f 100644
--- a/cmd/button.c
+++ b/cmd/button.c
@@ -75,11 +75,11 @@
 
 	ret = show_button_state(dev);
 
-	return 0;
+	return !ret;
 }
 
 U_BOOT_CMD(
-	button, 4, 1, do_button,
+	button, 2, 1, do_button,
 	"manage buttons",
 	"<button_label> \tGet button state\n"
 	"button list\t\tShow a list of buttons"
diff --git a/cmd/pstore.c b/cmd/pstore.c
new file mode 100644
index 0000000..9a8b38c
--- /dev/null
+++ b/cmd/pstore.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright © 2019 Collabora Ltd
+ */
+
+#include <config.h>
+#include <command.h>
+#include <fs.h>
+#include <log.h>
+#include <mapmem.h>
+#include <memalign.h>
+#include <part.h>
+
+struct persistent_ram_buffer {
+	u32    sig;
+	u32    start;
+	u32    size;
+	u8     data[0];
+};
+
+#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
+#define RAMOOPS_KERNMSG_HDR "===="
+
+#define PSTORE_TYPE_DMESG 0
+#define PSTORE_TYPE_CONSOLE 2
+#define PSTORE_TYPE_FTRACE 3
+#define PSTORE_TYPE_PMSG 7
+#define PSTORE_TYPE_ALL 255
+
+static phys_addr_t pstore_addr = CONFIG_CMD_PSTORE_MEM_ADDR;
+static phys_size_t pstore_length = CONFIG_CMD_PSTORE_MEM_SIZE;
+static unsigned int pstore_record_size = CONFIG_CMD_PSTORE_RECORD_SIZE;
+static unsigned int pstore_console_size = CONFIG_CMD_PSTORE_CONSOLE_SIZE;
+static unsigned int pstore_ftrace_size = CONFIG_CMD_PSTORE_FTRACE_SIZE;
+static unsigned int pstore_pmsg_size = CONFIG_CMD_PSTORE_PMSG_SIZE;
+static unsigned int pstore_ecc_size = CONFIG_CMD_PSTORE_ECC_SIZE;
+static unsigned int buffer_size;
+
+ /**
+  * pstore_read_kmsg_hdr() - Check kernel header and get compression flag if
+  *                          available.
+  * @buffer: Kernel messages buffer.
+  * @compressed: Returns TRUE if kernel buffer is compressed, else FALSE.
+  *
+  * Check if buffer starts with a kernel header of the form:
+  *   ====<secs>.<nsecs>[-<compression>]\n
+  * If <compression> is equal to 'C' then the buffer is compressed, else it
+  * should be 'D'.
+  *
+  * Return: Length of kernel header.
+  */
+static int pstore_read_kmsg_hdr(char *buffer, bool *compressed)
+{
+	char *ptr = buffer;
+	*compressed = false;
+
+	if (strncmp(RAMOOPS_KERNMSG_HDR, ptr, strlen(RAMOOPS_KERNMSG_HDR)) != 0)
+		return 0;
+
+	ptr += strlen(RAMOOPS_KERNMSG_HDR);
+
+	ptr = strchr(ptr, '\n');
+	if (!ptr)
+		return 0;
+
+	if (ptr[-2] == '-' && ptr[-1] == 'C')
+		*compressed = true;
+
+	return ptr - buffer + 1;
+}
+
+/**
+ * pstore_get_buffer() - Get unwrapped record buffer
+ * @sig: Signature to check
+ * @buffer: Buffer containing wrapped record
+ * @size: wrapped record size
+ * @dest: Buffer used to store unwrapped record
+ *
+ * The record starts with <signature><start><size> header.
+ * The signature is 'DBGC' for all records except for Ftrace's record(s) which
+ * use LINUX_VERSION_CODE ^ 'DBGC'.
+ * Use 0 for @sig to prevent checking signature.
+ * Start and size are 4 bytes long.
+ *
+ * Return: record's length
+ */
+static u32 pstore_get_buffer(u32 sig, phys_addr_t buffer, u32 size, char *dest)
+{
+	struct persistent_ram_buffer *prb =
+		(struct persistent_ram_buffer *)map_sysmem(buffer, size);
+	u32 dest_size;
+
+	if (sig == 0 || prb->sig == sig) {
+		if (prb->size == 0) {
+			log_debug("found existing empty buffer\n");
+			return 0;
+		}
+
+		if (prb->size > size) {
+			log_debug("found existing invalid buffer, size %u, start %u\n",
+			          prb->size, prb->start);
+			return 0;
+		}
+	} else {
+		log_debug("no valid data in buffer (sig = 0x%08x)\n", prb->sig);
+		return 0;
+	}
+
+	log_debug("found existing buffer, size %u, start %u\n",
+	          prb->size, prb->start);
+
+	memcpy(dest, &prb->data[prb->start], prb->size - prb->start);
+	memcpy(dest + prb->size - prb->start, &prb->data[0], prb->start);
+
+	dest_size = prb->size;
+	unmap_sysmem(prb);
+
+	return dest_size;
+}
+
+/**
+ * pstore_init_buffer_size() - Init buffer size to largest record size
+ *
+ * Records, console, FTrace and user logs can use different buffer sizes.
+ * This function allows to retrieve the biggest one.
+ */
+static void pstore_init_buffer_size(void)
+{
+	if (pstore_record_size > buffer_size)
+		buffer_size = pstore_record_size;
+
+	if (pstore_console_size > buffer_size)
+		buffer_size = pstore_console_size;
+
+	if (pstore_ftrace_size > buffer_size)
+		buffer_size = pstore_ftrace_size;
+
+	if (pstore_pmsg_size > buffer_size)
+		buffer_size = pstore_pmsg_size;
+}
+
+/**
+ * pstore_set() - Initialize PStore settings from command line arguments
+ * @cmdtp: Command data struct pointer
+ * @flag: Command flag
+ * @argc: Command-line argument count
+ * @argv: Array of command-line arguments
+ *
+ * Set pstore reserved memory info, starting at 'addr' for 'len' bytes.
+ * Default length for records is 4K.
+ * Mandatory arguments:
+ * - addr: ramoops starting address
+ * - len: ramoops total length
+ * Optional arguments:
+ * - record-size: size of one panic or oops record ('dump' type)
+ * - console-size: size of the kernel logs record
+ * - ftrace-size: size of the ftrace record(s), this can be a single record or
+ *                divided in parts based on number of CPUs
+ * - pmsg-size: size of the user space logs record
+ * - ecc-size: enables/disables ECC support and specifies ECC buffer size in
+ *             bytes (0 disables it, 1 is a special value, means 16 bytes ECC)
+ *
+ * Return: zero on success, CMD_RET_USAGE in case of misuse and negative
+ * on error.
+ */
+static int pstore_set(struct cmd_tbl *cmdtp, int flag,  int argc,
+		      char * const argv[])
+{
+	if (argc < 3)
+		return CMD_RET_USAGE;
+
+	/* Address is specified since argc > 2
+	 */
+	pstore_addr = simple_strtoul(argv[1], NULL, 16);
+
+	/* Length is specified since argc > 2
+	 */
+	pstore_length = simple_strtoul(argv[2], NULL, 16);
+
+	if (argc > 3)
+		pstore_record_size = simple_strtoul(argv[3], NULL, 16);
+
+	if (argc > 4)
+		pstore_console_size = simple_strtoul(argv[4], NULL, 16);
+
+	if (argc > 5)
+		pstore_ftrace_size = simple_strtoul(argv[5], NULL, 16);
+
+	if (argc > 6)
+		pstore_pmsg_size = simple_strtoul(argv[6], NULL, 16);
+
+	if (argc > 7)
+		pstore_ecc_size = simple_strtoul(argv[7], NULL, 16);
+
+	if (pstore_length < (pstore_record_size + pstore_console_size
+			     + pstore_ftrace_size + pstore_pmsg_size)) {
+		printf("pstore <len> should be larger than the sum of all records sizes\n");
+		pstore_length = 0;
+	}
+
+	log_debug("pstore set done: start 0x%08llx - length 0x%llx\n",
+	          (unsigned long long)pstore_addr,
+	          (unsigned long long)pstore_length);
+
+	return 0;
+}
+
+/**
+ * pstore_print_buffer() - Print buffer
+ * @type: buffer type
+ * @buffer: buffer to print
+ * @size: buffer size
+ *
+ * Print buffer type and content
+ */
+static void pstore_print_buffer(char *type, char *buffer, u32 size)
+{
+	u32 i = 0;
+
+	printf("**** %s\n", type);
+	while (i < size && buffer[i] != 0) {
+		putc(buffer[i]);
+		i++;
+	}
+}
+
+/**
+ * pstore_display() - Display existing records in pstore reserved memory
+ * @cmdtp: Command data struct pointer
+ * @flag: Command flag
+ * @argc: Command-line argument count
+ * @argv: Array of command-line arguments
+ *
+ * A 'record-type' can be given to only display records of this kind.
+ * If no 'record-type' is given, all valid records are displayed.
+ * 'record-type' can be one of 'dump', 'console', 'ftrace' or 'user'. For 'dump'
+ * and 'ftrace' types, a 'nb' can be given to only display one record.
+ *
+ * Return: zero on success, CMD_RET_USAGE in case of misuse and negative
+ * on error.
+ */
+static int pstore_display(struct cmd_tbl *cmdtp, int flag,  int argc,
+			  char * const argv[])
+{
+	int type = PSTORE_TYPE_ALL;
+	phys_addr_t ptr;
+	char *buffer;
+	u32 size;
+	int header_len = 0;
+	bool compressed;
+
+	if (argc > 1) {
+		if (!strcmp(argv[1], "dump"))
+			type = PSTORE_TYPE_DMESG;
+		else if (!strcmp(argv[1], "console"))
+			type = PSTORE_TYPE_CONSOLE;
+		else if (!strcmp(argv[1], "ftrace"))
+			type = PSTORE_TYPE_FTRACE;
+		else if (!strcmp(argv[1], "user"))
+			type = PSTORE_TYPE_PMSG;
+		else
+			return CMD_RET_USAGE;
+	}
+
+	if (pstore_length == 0) {
+		printf("Please set PStore configuration\n");
+		return CMD_RET_USAGE;
+	}
+
+	if (buffer_size == 0)
+		pstore_init_buffer_size();
+
+	buffer = malloc_cache_aligned(buffer_size);
+
+	if (type == PSTORE_TYPE_DMESG || type == PSTORE_TYPE_ALL) {
+		ptr = pstore_addr;
+		phys_addr_t ptr_end = ptr + pstore_length - pstore_pmsg_size
+				- pstore_ftrace_size - pstore_console_size;
+
+		if (argc > 2) {
+			ptr += simple_strtoul(argv[2], NULL, 10)
+				* pstore_record_size;
+			ptr_end = ptr + pstore_record_size;
+		}
+
+		while (ptr < ptr_end) {
+			size = pstore_get_buffer(PERSISTENT_RAM_SIG, ptr,
+						 pstore_record_size, buffer);
+			ptr += pstore_record_size;
+
+			if (size == 0)
+				continue;
+
+			header_len = pstore_read_kmsg_hdr(buffer, &compressed);
+			if (header_len == 0) {
+				log_debug("no valid kernel header\n");
+				continue;
+			}
+
+			if (compressed) {
+				printf("Compressed buffer, display not available\n");
+				continue;
+			}
+
+			pstore_print_buffer("Dump", buffer + header_len,
+					    size - header_len);
+		}
+	}
+
+	if (type == PSTORE_TYPE_CONSOLE || type == PSTORE_TYPE_ALL) {
+		ptr = pstore_addr + pstore_length - pstore_pmsg_size
+			- pstore_ftrace_size - pstore_console_size;
+		size = pstore_get_buffer(PERSISTENT_RAM_SIG, ptr,
+					 pstore_console_size, buffer);
+		if (size != 0)
+			pstore_print_buffer("Console", buffer, size);
+	}
+
+	if (type == PSTORE_TYPE_FTRACE || type == PSTORE_TYPE_ALL) {
+		ptr = pstore_addr + pstore_length - pstore_pmsg_size
+		- pstore_ftrace_size;
+		/* The FTrace record(s) uses LINUX_VERSION_CODE ^ 'DBGC'
+		 * signature, pass 0 to pstore_get_buffer to prevent
+		 * checking it
+		 */
+		size = pstore_get_buffer(0, ptr, pstore_ftrace_size, buffer);
+		if (size != 0)
+			pstore_print_buffer("FTrace", buffer, size);
+	}
+
+	if (type == PSTORE_TYPE_PMSG || type == PSTORE_TYPE_ALL) {
+		ptr = pstore_addr + pstore_length - pstore_pmsg_size;
+		size = pstore_get_buffer(PERSISTENT_RAM_SIG, ptr,
+					 pstore_pmsg_size, buffer);
+		if (size != 0)
+			pstore_print_buffer("User", buffer, size);
+	}
+
+	free(buffer);
+
+	return 0;
+}
+
+/**
+ * pstore_save() - Save existing records from pstore reserved memory
+ * @cmdtp: Command data struct pointer
+ * @flag: Command flag
+ * @argc: Command-line argument count
+ * @argv: Array of command-line arguments
+ *
+ * the records are saved under 'directory path', which should already exist,
+ * to partition 'part' on device type 'interface' instance 'dev'
+ * Filenames are automatically generated, depending on record type, like in
+ * /sys/fs/pstore under Linux
+ *
+ * Return: zero on success, CMD_RET_USAGE in case of misuse and negative
+ * on error.
+ */
+static int pstore_save(struct cmd_tbl *cmdtp, int flag,  int argc,
+		       char * const argv[])
+{
+	phys_addr_t ptr, ptr_end;
+	char *buffer;
+	char *save_argv[6];
+	char addr[19], length[19];
+	char path[256];
+	u32 size;
+	unsigned int index;
+	int header_len = 0;
+	bool compressed;
+
+	if (argc < 4)
+		return CMD_RET_USAGE;
+
+	if (pstore_length == 0) {
+		printf("Please set PStore configuration\n");
+		return CMD_RET_USAGE;
+	}
+
+	if (buffer_size == 0)
+		pstore_init_buffer_size();
+
+	buffer = malloc_cache_aligned(buffer_size);
+	sprintf(addr, "0x%p", buffer);
+
+	save_argv[0] = argv[0];
+	save_argv[1] = argv[1];
+	save_argv[2] = argv[2];
+	save_argv[3] = addr;
+	save_argv[4] = path;
+	save_argv[5] = length;
+
+	/* Save all Dump records */
+	ptr = pstore_addr;
+	ptr_end = ptr + pstore_length - pstore_pmsg_size - pstore_ftrace_size
+				- pstore_console_size;
+	index = 0;
+	while (ptr < ptr_end) {
+		size = pstore_get_buffer(PERSISTENT_RAM_SIG, ptr,
+					 pstore_record_size, buffer);
+		ptr += pstore_record_size;
+
+		if (size == 0)
+			continue;
+
+		header_len = pstore_read_kmsg_hdr(buffer, &compressed);
+		if (header_len == 0) {
+			log_debug("no valid kernel header\n");
+			continue;
+		}
+
+		sprintf(addr, "0x%08lx", (ulong)map_to_sysmem(buffer + header_len));
+		sprintf(length, "0x%X", size - header_len);
+		sprintf(path, "%s/dmesg-ramoops-%u%s", argv[3], index,
+			compressed ? ".enc.z" : "");
+		do_save(cmdtp, flag, 6, save_argv, FS_TYPE_ANY);
+		index++;
+	}
+
+	sprintf(addr, "0x%08lx", (ulong)map_to_sysmem(buffer));
+
+	/* Save Console record */
+	size = pstore_get_buffer(PERSISTENT_RAM_SIG, ptr, pstore_console_size,
+				 buffer);
+	if (size != 0) {
+		sprintf(length, "0x%X", size);
+		sprintf(path, "%s/console-ramoops-0", argv[3]);
+		do_save(cmdtp, flag, 6, save_argv, FS_TYPE_ANY);
+	}
+	ptr += pstore_console_size;
+
+	/* Save FTrace record(s)
+	 * The FTrace record(s) uses LINUX_VERSION_CODE ^ 'DBGC' signature,
+	 * pass 0 to pstore_get_buffer to prevent checking it
+	 */
+	size = pstore_get_buffer(0, ptr, pstore_ftrace_size, buffer);
+	if (size != 0) {
+		sprintf(length, "0x%X", size);
+		sprintf(path, "%s/ftrace-ramoops-0", argv[3]);
+		do_save(cmdtp, flag, 6, save_argv, FS_TYPE_ANY);
+	}
+	ptr += pstore_ftrace_size;
+
+	/* Save PMSG record */
+	size = pstore_get_buffer(PERSISTENT_RAM_SIG, ptr, pstore_pmsg_size,
+				 buffer);
+	if (size != 0) {
+		sprintf(length, "0x%X", size);
+		sprintf(path, "%s/pmsg-ramoops-0", argv[3]);
+		do_save(cmdtp, flag, 6, save_argv, FS_TYPE_ANY);
+	}
+
+	free(buffer);
+
+	return 0;
+}
+
+static struct cmd_tbl cmd_pstore_sub[] = {
+	U_BOOT_CMD_MKENT(set, 8, 0, pstore_set, "", ""),
+	U_BOOT_CMD_MKENT(display, 3, 0, pstore_display, "", ""),
+	U_BOOT_CMD_MKENT(save, 4, 0, pstore_save, "", ""),
+};
+
+static int do_pstore(struct cmd_tbl *cmdtp, int flag, int argc, char * const argv[])
+{
+	struct cmd_tbl *c;
+
+	if (argc < 2)
+		return CMD_RET_USAGE;
+
+	/* Strip off leading argument */
+	argc--;
+	argv++;
+
+	c = find_cmd_tbl(argv[0], cmd_pstore_sub, ARRAY_SIZE(cmd_pstore_sub));
+
+	if (!c)
+		return CMD_RET_USAGE;
+
+	return c->cmd(cmdtp, flag, argc, argv);
+}
+
+void fdt_fixup_pstore(void *blob)
+{
+	char node[32];
+	int  nodeoffset;	/* node offset from libfdt */
+
+	nodeoffset = fdt_path_offset(blob, "/");
+	if (nodeoffset < 0) {
+		/* Not found or something else bad happened. */
+		log_err("fdt_path_offset() returned %s\n", fdt_strerror(nodeoffset));
+		return;
+	}
+
+	nodeoffset = fdt_add_subnode(blob, nodeoffset, "reserved-memory");
+	if (nodeoffset < 0) {
+		log_err("Add 'reserved-memory' node failed: %s\n",
+				fdt_strerror(nodeoffset));
+		return;
+	}
+	fdt_setprop_u32(blob, nodeoffset, "#address-cells", 2);
+	fdt_setprop_u32(blob, nodeoffset, "#size-cells", 2);
+	fdt_setprop_empty(blob, nodeoffset, "ranges");
+
+	sprintf(node, "ramoops@%llx", (unsigned long long)pstore_addr);
+	nodeoffset = fdt_add_subnode(blob, nodeoffset, node);
+	if (nodeoffset < 0) {
+		log_err("Add '%s' node failed: %s\n", node, fdt_strerror(nodeoffset));
+		return;
+	}
+	fdt_setprop_string(blob, nodeoffset, "compatible", "ramoops");
+	fdt_setprop_u64(blob, nodeoffset, "reg", pstore_addr);
+	fdt_appendprop_u64(blob, nodeoffset, "reg", pstore_length);
+	fdt_setprop_u32(blob, nodeoffset, "record-size", pstore_record_size);
+	fdt_setprop_u32(blob, nodeoffset, "console-size", pstore_console_size);
+	fdt_setprop_u32(blob, nodeoffset, "ftrace-size", pstore_ftrace_size);
+	fdt_setprop_u32(blob, nodeoffset, "pmsg-size", pstore_pmsg_size);
+	fdt_setprop_u32(blob, nodeoffset, "ecc-size", pstore_ecc_size);
+}
+
+U_BOOT_CMD(pstore, 10, 0, do_pstore,
+	   "Manage Linux Persistent Storage",
	   "set <addr> <len> [record-size] [console-size] [ftrace-size] [pmsg-size] [ecc-size]\n"
+	   "- Set pstore reserved memory info, starting at 'addr' for 'len' bytes.\n"
+	   "  Default length for records is 4K.\n"
+	   "  'record-size' is the size of one panic or oops record ('dump' type).\n"
+	   "  'console-size' is the size of the kernel logs record.\n"
+	   "  'ftrace-size' is the size of the ftrace record(s), this can be a single\n"
+	   "  record or divided in parts based on number of CPUs.\n"
+	   "  'pmsg-size' is the size of the user space logs record.\n"
+	   "  'ecc-size' enables/disables ECC support and specifies ECC buffer size in\n"
+	   "  bytes (0 disables it, 1 is a special value, means 16 bytes ECC).\n"
+	   "pstore display [record-type] [nb]\n"
+	   "- Display existing records in pstore reserved memory. A 'record-type' can\n"
+	   "  be given to only display records of this kind. 'record-type' can be one\n"
+	   "  of 'dump', 'console', 'ftrace' or 'user'. For 'dump' and 'ftrace' types,\n"
+	   "  a 'nb' can be given to only display one record.\n"
+	   "pstore save <interface> <dev[:part]> <directory-path>\n"
+	   "- Save existing records in pstore reserved memory under 'directory path'\n"
+	   "  to partition 'part' on device type 'interface' instance 'dev'.\n"
+	   "  Filenames are automatically generated, depending on record type, like\n"
+	   "  in /sys/fs/pstore under Linux.\n"
+	   "  The 'directory-path' should already exist.\n"
+);
diff --git a/common/image-cipher.c b/common/image-cipher.c
index 09869f7..4ca9eec 100644
--- a/common/image-cipher.c
+++ b/common/image-cipher.c
@@ -94,9 +94,11 @@
 		return -1;
 	}
 
+	info->iv = fdt_getprop(fit, cipher_noffset, "iv", NULL);
 	info->ivname = fdt_getprop(fit, cipher_noffset, "iv-name-hint", NULL);
-	if (!info->ivname) {
-		printf("Can't get IV name\n");
+
+	if (!info->iv && !info->ivname) {
+		printf("Can't get IV or IV name\n");
 		return -1;
 	}
 
@@ -120,8 +122,12 @@
 	 * Search the cipher node in the u-boot fdt
 	 * the path should be: /cipher/key-<algo>-<key>-<iv>
 	 */
-	snprintf(node_path, sizeof(node_path), "/%s/key-%s-%s-%s",
-		 FIT_CIPHER_NODENAME, algo_name, info->keyname, info->ivname);
+	if (info->ivname)
+		snprintf(node_path, sizeof(node_path), "/%s/key-%s-%s-%s",
+			 FIT_CIPHER_NODENAME, algo_name, info->keyname, info->ivname);
+	else
+		snprintf(node_path, sizeof(node_path), "/%s/key-%s-%s",
+			 FIT_CIPHER_NODENAME, algo_name, info->keyname);
 
 	noffset = fdt_path_offset(fdt, node_path);
 	if (noffset < 0) {
@@ -137,10 +143,12 @@
 	}
 
 	/* read iv */
-	info->iv = fdt_getprop(fdt, noffset, "iv", NULL);
 	if (!info->iv) {
-		printf("Can't get IV in cipher node '%s'\n", node_path);
-		return -1;
+		info->iv = fdt_getprop(fdt, noffset, "iv", NULL);
+		if (!info->iv) {
+			printf("Can't get IV in cipher node '%s'\n", node_path);
+			return -1;
+		}
 	}
 
 	return 0;
diff --git a/common/image-fdt.c b/common/image-fdt.c
index 3d6935a..327a8c4 100644
--- a/common/image-fdt.c
+++ b/common/image-fdt.c
@@ -567,6 +567,10 @@
 
 	/* Update ethernet nodes */
 	fdt_fixup_ethernet(blob);
+#if CONFIG_IS_ENABLED(CMD_PSTORE)
+	/* Append PStore configuration */
+	fdt_fixup_pstore(blob);
+#endif
 	if (IMAGE_OF_BOARD_SETUP) {
 		fdt_ret = ft_board_setup(blob, gd->bd);
 		if (fdt_ret) {
diff --git a/common/image-fit-sig.c b/common/image-fit-sig.c
index cc19671..5401d94 100644
--- a/common/image-fit-sig.c
+++ b/common/image-fit-sig.c
@@ -416,6 +416,10 @@
 {
 	int noffset;
 	int sig_node;
+	int verified = 0;
+	int reqd_sigs = 0;
+	bool reqd_policy_all = true;
+	const char *reqd_mode;
 
 	/* Work out what we need to verify */
 	sig_node = fdt_subnode_offset(sig_blob, 0, FIT_SIG_NODENAME);
@@ -425,6 +429,14 @@
 		return 0;
 	}
 
+	/* Get required-mode policy property from DTB */
+	reqd_mode = fdt_getprop(sig_blob, sig_node, "required-mode", NULL);
+	if (reqd_mode && !strcmp(reqd_mode, "any"))
+		reqd_policy_all = false;
+
+	debug("%s: required-mode policy set to '%s'\n", __func__,
+	      reqd_policy_all ? "all" : "any");
+
 	fdt_for_each_subnode(noffset, sig_blob, sig_node) {
 		const char *required;
 		int ret;
@@ -433,15 +445,29 @@
 				       NULL);
 		if (!required || strcmp(required, "conf"))
 			continue;
+
+		reqd_sigs++;
+
 		ret = fit_config_verify_sig(fit, conf_noffset, sig_blob,
 					    noffset);
 		if (ret) {
-			printf("Failed to verify required signature '%s'\n",
-			       fit_get_name(sig_blob, noffset, NULL));
-			return ret;
+			if (reqd_policy_all) {
+				printf("Failed to verify required signature '%s'\n",
+				       fit_get_name(sig_blob, noffset, NULL));
+				return ret;
+			}
+		} else {
+			verified++;
+			if (!reqd_policy_all)
+				break;
 		}
 	}
 
+	if (reqd_sigs && !verified) {
+		printf("Failed to verify 'any' of the required signature(s)\n");
+		return -EPERM;
+	}
+
 	return 0;
 }
 
diff --git a/common/spl/Kconfig b/common/spl/Kconfig
index 807b1dc..620ea1e 100644
--- a/common/spl/Kconfig
+++ b/common/spl/Kconfig
@@ -1237,6 +1237,7 @@
 
 config SPL_USB_SDP_SUPPORT
 	bool "Support SDP (Serial Download Protocol)"
+	depends on SPL_SERIAL_SUPPORT
 	help
 	  Enable Serial Download Protocol (SDP) device support in SPL. This
 	  allows to download images into memory and execute (jump to) them
diff --git a/common/spl/spl.c b/common/spl/spl.c
index 4840d1d..63c48fb 100644
--- a/common/spl/spl.c
+++ b/common/spl/spl.c
@@ -552,7 +552,9 @@
 		struct spl_image_loader *loader;
 
 		loader = spl_ll_find_loader(spl_boot_list[i]);
-#if defined(CONFIG_SPL_SERIAL_SUPPORT) && defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
+#if defined(CONFIG_SPL_SERIAL_SUPPORT) \
+    && defined(CONFIG_SPL_LIBCOMMON_SUPPORT)    \
+    && !defined(CONFIG_SILENT_CONSOLE)
 		if (loader)
 			printf("Trying to boot from %s\n", loader->name);
 		else
diff --git a/configs/am335x_evm_defconfig b/configs/am335x_evm_defconfig
index b76230f..eb5a299 100644
--- a/configs/am335x_evm_defconfig
+++ b/configs/am335x_evm_defconfig
@@ -92,4 +92,3 @@
 CONFIG_DYNAMIC_CRC_TABLE=y
 CONFIG_RSA=y
 CONFIG_LZO=y
-# CONFIG_OF_LIBFDT_OVERLAY is not set
diff --git a/configs/mvebu_puzzle-m801-88f8040_defconfig b/configs/mvebu_puzzle-m801-88f8040_defconfig
new file mode 100644
index 0000000..d6fa829
--- /dev/null
+++ b/configs/mvebu_puzzle-m801-88f8040_defconfig
@@ -0,0 +1,91 @@
+CONFIG_ARM=y
+CONFIG_ARCH_CPU_INIT=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_SYS_TEXT_BASE=0x00000000
+CONFIG_SYS_MALLOC_F_LEN=0x2000
+CONFIG_TARGET_MVEBU_ARMADA_8K=y
+CONFIG_ENV_SIZE=0x10000
+CONFIG_ENV_SECT_SIZE=0x10000
+CONFIG_ENV_OFFSET=0x1F0000
+CONFIG_NR_DRAM_BANKS=2
+CONFIG_DEBUG_UART_BASE=0xf0512000
+CONFIG_DEBUG_UART_CLOCK=200000000
+CONFIG_DEBUG_UART=y
+CONFIG_AHCI=y
+CONFIG_DISTRO_DEFAULTS=y
+# CONFIG_SYS_MALLOC_CLEAR_ON_INIT is not set
+CONFIG_USE_PREBOOT=y
+CONFIG_SYS_CONSOLE_INFO_QUIET=y
+# CONFIG_DISPLAY_CPUINFO is not set
+# CONFIG_DISPLAY_BOARDINFO is not set
+CONFIG_DISPLAY_BOARDINFO_LATE=y
+CONFIG_AUTOBOOT_KEYED=y
+CONFIG_AUTOBOOT_PROMPT="Autoboot in %d seconds, to stop use 's' key\n"
+CONFIG_AUTOBOOT_STOP_STR="s"
+CONFIG_AUTOBOOT_KEYED_CTRLC=y
+CONFIG_ARCH_EARLY_INIT_R=y
+CONFIG_BOARD_EARLY_INIT_F=y
+# CONFIG_EFI_LOADER is not set
+# CONFIG_CMD_FLASH is not set
+CONFIG_CMD_GPIO=y
+CONFIG_CMD_I2C=y
+CONFIG_CMD_MMC=y
+CONFIG_CMD_PCI=y
+CONFIG_CMD_SPI=y
+CONFIG_CMD_USB=y
+# CONFIG_CMD_SETEXPR is not set
+CONFIG_CMD_TFTPPUT=y
+CONFIG_CMD_CACHE=y
+CONFIG_CMD_TIME=y
+CONFIG_CMD_MVEBU_BUBT=y
+CONFIG_CMD_REGULATOR=y
+CONFIG_CMD_EXT4_WRITE=y
+CONFIG_MAC_PARTITION=y
+CONFIG_DEFAULT_DEVICE_TREE="armada-8040-puzzle-m801"
+CONFIG_ENV_IS_IN_SPI_FLASH=y
+CONFIG_SYS_RELOC_GD_ENV_ADDR=y
+CONFIG_NET_RANDOM_ETHADDR=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_DM_GPIO=y
+CONFIG_DM_PCA953X=y
+CONFIG_DM_I2C=y
+CONFIG_SYS_I2C_MVTWSI=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_DM_RTC=y
+CONFIG_RTC_RX8010SJ=y
+CONFIG_MISC=y
+CONFIG_DM_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_SF_DEFAULT_MODE=0
+CONFIG_SPI_FLASH_SFDP_SUPPORT=y
+CONFIG_SPI_FLASH_BAR=y
+CONFIG_SPI_FLASH_MACRONIX=y
+# CONFIG_SPI_FLASH_SPANSION is not set
+# CONFIG_SPI_FLASH_STMICRO is not set
+# CONFIG_SPI_FLASH_WINBOND is not set
+CONFIG_PHY_MARVELL=y
+CONFIG_PHY_GIGE=y
+CONFIG_MVPP2=y
+CONFIG_NVME=y
+CONFIG_PCI=y
+CONFIG_DM_PCI=y
+CONFIG_PCIE_DW_MVEBU=y
+CONFIG_MVEBU_COMPHY_SUPPORT=y
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_ARMADA_8K=y
+CONFIG_DM_REGULATOR_FIXED=y
+CONFIG_DEBUG_UART_SHIFT=2
+CONFIG_DEBUG_UART_ANNOUNCE=y
+CONFIG_SYS_NS16550=y
+CONFIG_KIRKWOOD_SPI=y
+CONFIG_USB=y
+CONFIG_DM_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_HOST_ETHER is not set
+# CONFIG_USB_ETHER_ASIX is not set
+# CONFIG_USB_ETHER_MCS7830 is not set
+# CONFIG_USB_ETHER_RTL8152 is not set
+# CONFIG_USB_ETHER_SMSC95XX is not set
diff --git a/configs/sandbox64_defconfig b/configs/sandbox64_defconfig
index c3ca796..7c713cb 100644
--- a/configs/sandbox64_defconfig
+++ b/configs/sandbox64_defconfig
@@ -80,6 +80,8 @@
 CONFIG_CMD_CRAMFS=y
 CONFIG_CMD_EXT4_WRITE=y
 CONFIG_CMD_MTDPARTS=y
+CONFIG_CMD_PSTORE=y
+CONFIG_CMD_PSTORE_MEM_ADDR=0x3000000
 CONFIG_MAC_PARTITION=y
 CONFIG_AMIGA_PARTITION=y
 CONFIG_OF_CONTROL=y
diff --git a/configs/sandbox_defconfig b/configs/sandbox_defconfig
index 18f787c..6ac2919 100644
--- a/configs/sandbox_defconfig
+++ b/configs/sandbox_defconfig
@@ -93,6 +93,8 @@
 CONFIG_CMD_EXT4_WRITE=y
 CONFIG_CMD_SQUASHFS=y
 CONFIG_CMD_MTDPARTS=y
+CONFIG_CMD_PSTORE=y
+CONFIG_CMD_PSTORE_MEM_ADDR=0x3000000
 CONFIG_MAC_PARTITION=y
 CONFIG_AMIGA_PARTITION=y
 CONFIG_OF_CONTROL=y
diff --git a/configs/socfpga_agilex_defconfig b/configs/socfpga_agilex_defconfig
index 6684af7..3a9b3b5 100644
--- a/configs/socfpga_agilex_defconfig
+++ b/configs/socfpga_agilex_defconfig
@@ -12,6 +12,7 @@
 CONFIG_IDENT_STRING="socfpga_agilex"
 CONFIG_SPL_FS_FAT=y
 CONFIG_DEFAULT_DEVICE_TREE="socfpga_agilex_socdk"
+# CONFIG_PSCI_RESET is not set
 CONFIG_BOOTDELAY=5
 CONFIG_USE_BOOTARGS=y
 CONFIG_BOOTARGS="earlycon"
@@ -34,7 +35,6 @@
 CONFIG_CMD_EXT4=y
 CONFIG_CMD_FAT=y
 CONFIG_CMD_FS_GENERIC=y
-CONFIG_OF_EMBED=y
 CONFIG_ENV_IS_IN_MMC=y
 CONFIG_NET_RANDOM_ETHADDR=y
 CONFIG_SPL_DM_SEQ_ALIAS=y
@@ -60,4 +60,7 @@
 CONFIG_DM_USB=y
 CONFIG_USB_DWC2=y
 CONFIG_USB_STORAGE=y
+CONFIG_DESIGNWARE_WATCHDOG=y
+CONFIG_WDT=y
 # CONFIG_SPL_USE_TINY_PRINTF is not set
+CONFIG_PANIC_HANG=y
diff --git a/configs/socfpga_stratix10_defconfig b/configs/socfpga_stratix10_defconfig
index 2d00eb8..a48388f 100644
--- a/configs/socfpga_stratix10_defconfig
+++ b/configs/socfpga_stratix10_defconfig
@@ -14,6 +14,7 @@
 CONFIG_DEFAULT_DEVICE_TREE="socfpga_stratix10_socdk"
 CONFIG_OPTIMIZE_INLINING=y
 CONFIG_SPL_OPTIMIZE_INLINING=y
+# CONFIG_PSCI_RESET is not set
 CONFIG_BOOTDELAY=5
 CONFIG_USE_BOOTARGS=y
 CONFIG_BOOTARGS="earlycon"
@@ -36,7 +37,6 @@
 CONFIG_CMD_EXT4=y
 CONFIG_CMD_FAT=y
 CONFIG_CMD_FS_GENERIC=y
-CONFIG_OF_EMBED=y
 CONFIG_ENV_IS_IN_MMC=y
 CONFIG_SYS_RELOC_GD_ENV_ADDR=y
 CONFIG_NET_RANDOM_ETHADDR=y
@@ -67,3 +67,4 @@
 CONFIG_DESIGNWARE_WATCHDOG=y
 CONFIG_WDT=y
 # CONFIG_SPL_USE_TINY_PRINTF is not set
+CONFIG_PANIC_HANG=y
diff --git a/doc/README.marvell b/doc/README.marvell
index 6fc5ac8..6f05ad4 100644
--- a/doc/README.marvell
+++ b/doc/README.marvell
@@ -43,8 +43,11 @@
         In order to prevent this, the required device-tree MUST be set during compilation.
         All device-tree files are located in ./arch/arm/dts/ folder.
 
-	For other DB boards (MacchiatoBin, EspressoBin and 3700 DB board) compile u-boot with
-	just default device-tree from defconfig using:
+	For the EspressoBin board with populated eMMC device use
+		# make DEVICE_TREE=armada-3720-espressobin-emmc
+
+	For other DB boards (MacchiatoBin, EspressoBin without soldered eMMC and 3700 DB board)
+	compile u-boot with just default device-tree from defconfig using:
 
 		# make
 
diff --git a/doc/README.socfpga b/doc/README.socfpga
index cae0ef1..4d73398 100644
--- a/doc/README.socfpga
+++ b/doc/README.socfpga
@@ -16,9 +16,9 @@
 #define CONFIG_SYS_MMC_MAX_BLK_COUNT	256
 -> Using smaller max blk cnt to avoid flooding the limited stack in OCRAM
 
---------------------------------------------------
-Generating the handoff header files for U-Boot SPL
---------------------------------------------------
+---------------------------------------------------------------------
+Cyclone 5 / Arria 5 generating the handoff header files for U-Boot SPL
+---------------------------------------------------------------------
 
 This text is assuming quartus 16.1, but newer versions will probably work just fine too;
 verified with DE1_SOC_Linux_FB demo project (https://github.com/VCTLabs/DE1_SOC_Linux_FB).
@@ -32,7 +32,7 @@
 
 Choose one of the follwing methods, either command line or GUI.
 
-Using the comaand line
+Using the command line
 ~~~~~~~~~~~~~~~~~~~~~~
 
 First run the embedded command shell, using your path to the Quartus install:
@@ -147,3 +147,32 @@
 
 Now your board is ready for full mainline support including U-Boot SPL.
 The Preloader will not be needed any more.
+
+----------------------------------------------------------
+Arria 10 generating the handoff header files for U-Boot SPL
+----------------------------------------------------------
+
+A header file for inclusion in a devicetree for Arria10 can be generated
+by the qts-filter-a10.sh script directly from the hps_isw_handoff/hps.xml
+file generated during the FPGA project compilation.  The header contains
+all PLL, clock, pinmux, and bridge configurations required.
+
+Please look at the socfpga_arria10_socdk_sdmmc-u-boot.dtsi for an example
+that includes use of the generated handoff header.
+
+Devicetree header generation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The qts-filter-a10.sh script can process the compile time generated hps.xml
+to create the appropriate devicetree header.
+
+
+  $ ./arch/arm/mach-socfpga/qts-filter-a10.sh \
+        <hps_xml> \
+        <output_file>
+
+    hps_xml      - hps_isw_handoff/hps.xml from Quartus project
+    output_file  - Output filename and location for header file
+
+The script generates a single header file named <output_file> that should
+be placed in arch/arm/dts.
diff --git a/doc/develop/global_data.rst b/doc/develop/global_data.rst
new file mode 100644
index 0000000..9e7c8a2
--- /dev/null
+++ b/doc/develop/global_data.rst
@@ -0,0 +1,53 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Global data
+===========
+
+Globally required fields are held in the global data structure. A pointer to the
+structure is available as symbol gd. The symbol is made available by the macro
+%DECLARE_GLOBAL_DATA_PTR.
+
+Register pointing to global data
+--------------------------------
+
+On most architectures the global data pointer is stored in a register.
+
++------------+----------+
+| ARC        | r25      |
++------------+----------+
+| ARM 32bit  | r9       |
++------------+----------+
+| ARM 64bit  | x18      |
++------------+----------+
+| M68000     | d7       |
++------------+----------+
+| MicroBlaze | r31      |
++------------+----------+
+| NDS32      | r10      |
++------------+----------+
+| Nios II    | gp       |
++------------+----------+
+| PowerPC    | r2       |
++------------+----------+
+| RISC-V     | gp (x3)  |
++------------+----------+
+| SuperH     | r13      |
++------------+----------+
+
+The sandbox, x86, and Xtensa are notable exceptions.
+
+Clang for ARM does not support assigning a global register. When using Clang
+gd is defined as an inline function using assembly code. This adds a few bytes
+to the code size.
+
+Binaries called by U-Boot are not aware of the register usage and will not
+conserve gd. UEFI binaries call the API provided by U-Boot and may return to
+U-Boot. The value of gd has to be saved every time U-Boot is left and restored
+whenever U-Boot is reentered. This is also relevant for the implementation of
+function tracing. For setting the value of gd function set_gd() can be used.
+
+Global data structure
+---------------------
+
+.. kernel-doc:: include/asm-generic/global_data.h
+   :internal:
diff --git a/doc/develop/index.rst b/doc/develop/index.rst
index 98a95ad..89e80ea 100644
--- a/doc/develop/index.rst
+++ b/doc/develop/index.rst
@@ -9,4 +9,5 @@
 
    coccinelle
    crash_dumps
+   global_data
    logging
diff --git a/doc/index.rst b/doc/index.rst
index fd9f10f..68a083b 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -109,6 +109,13 @@
 
    android/index
 
+Command line
+------------
+.. toctree::
+   :maxdepth: 2
+
+   pstore.rst
+
 Indices and tables
 ==================
 
diff --git a/doc/pstore.rst b/doc/pstore.rst
new file mode 100644
index 0000000..8427d8f
--- /dev/null
+++ b/doc/pstore.rst
@@ -0,0 +1,82 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+PStore command
+==============
+
+Design
+------
+
+Linux PStore and Ramoops modules (Linux config options PSTORE and PSTORE_RAM)
+allow using memory to pass data from the dying breath of a crashing kernel to
+its successor. This command allows reading those records from the U-Boot
+command line.
+
+Ramoops is an oops/panic logger that writes its logs to RAM before the system
+crashes. It works by logging oopses and panics in a circular buffer. Ramoops
+needs a system with persistent RAM so that the content of that area can survive
+after a restart.
+
+Ramoops uses a predefined memory area to store the dump.
+
+Ramoops parameters can be passed as kernel parameters or through Device Tree,
+i.e.::
+
+    ramoops.mem_address=0x30000000 ramoops.mem_size=0x100000 ramoops.record_size=0x2000 ramoops.console_size=0x2000 memmap=0x100000$0x30000000
+
+The same values should be set in U-Boot to be able to retrieve the records.
+These values can be set at build time in the U-Boot configuration, or at runtime.
+U-Boot automatically patches the Device Tree to pass the Ramoops parameters to
+the kernel.
+
+The PStore configuration parameters are:
+
+======================= ==========
+ Name                   Default
+======================= ==========
+CMD_PSTORE_MEM_ADDR
+CMD_PSTORE_MEM_SIZE     0x10000
+CMD_PSTORE_RECORD_SIZE  0x1000
+CMD_PSTORE_CONSOLE_SIZE 0x1000
+CMD_PSTORE_FTRACE_SIZE  0x1000
+CMD_PSTORE_PMSG_SIZE    0x1000
+CMD_PSTORE_ECC_SIZE     0
+======================= ==========
+
+Record sizes should be a power of 2.
+The memory size and the record/console size must be non-zero.
+
+Multiple 'dump' records can be stored in the memory reserved for PStore.
+The memory size has to be larger than the sum of the record sizes, i.e.::
+
+    MEM_SIZE >= RECORD_SIZE * n + CONSOLE_SIZE + FTRACE_SIZE + PMSG_SIZE
+
+Usage
+-----
+
+Generate kernel crash
+~~~~~~~~~~~~~~~~~~~~~
+
+For test purposes, you can generate a kernel crash by setting the reboot
+timeout to 10 seconds and triggering a panic::
+
+    $ sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"
+    $ sudo sh -c "echo 10 > /proc/sys/kernel/panic"
+    $ sudo sh -c "echo c > /proc/sysrq-trigger"
+
+Retrieve logs in U-Boot
+~~~~~~~~~~~~~~~~~~~~~~~
+
+First of all, unless the PStore parameters have been set during U-Boot
+configuration and match the kernel ramoops parameters, set them using 'pstore set', e.g.::
+
+    => pstore set 0x30000000 0x100000 0x2000 0x2000
+
+Then all available dumps can be displayed
+using::
+
+    => pstore display
+
+Or saved to an existing directory in an Ext2 or Ext4 partition, e.g. on root
+directory of 1st partition of the 2nd MMC::
+
+    => pstore save mmc 1:1 /
diff --git a/doc/sphinx/cdomain.py b/doc/sphinx/cdomain.py
index cf13ff3..cbac8e6 100644
--- a/doc/sphinx/cdomain.py
+++ b/doc/sphinx/cdomain.py
@@ -48,7 +48,10 @@
 
 def setup(app):
 
-    app.override_domain(CDomain)
+    if (major == 1 and minor < 8):
+        app.override_domain(CDomain)
+    else:
+        app.add_domain(CDomain, override=True)
 
     return dict(
         version = __version__,
diff --git a/doc/uImage.FIT/signature.txt b/doc/uImage.FIT/signature.txt
index d4afd75..a345588 100644
--- a/doc/uImage.FIT/signature.txt
+++ b/doc/uImage.FIT/signature.txt
@@ -386,6 +386,20 @@
 
 This happens automatically as part of a bootm command when FITs are used.
 
+For Signed Configurations, the default verification behavior can be changed by
+the following optional property in /signature node in U-Boot's control FDT.
+
+- required-mode: Valid values are "any" to allow verified boot to succeed if
+the selected configuration is signed by any of the 'required' keys, and "all"
+to allow verified boot to succeed if the selected configuration is signed by
+all of the 'required' keys.
+
+This property can be added to a binary device tree using fdtput as shown in
+the examples below::
+
+	fdtput -t s control.dtb /signature required-mode any
+	fdtput -t s control.dtb /signature required-mode all
+
 
 Enabling FIT Verification
 -------------------------
diff --git a/drivers/clk/altera/clk-agilex.c b/drivers/clk/altera/clk-agilex.c
index 9927ada..a539889 100644
--- a/drivers/clk/altera/clk-agilex.c
+++ b/drivers/clk/altera/clk-agilex.c
@@ -47,8 +47,66 @@
 #define MEMBUS_MAINPLL				0
 #define MEMBUS_PERPLL				1
 #define MEMBUS_TIMEOUT				1000
-#define MEMBUS_ADDR_CLKSLICE			0x27
-#define MEMBUS_CLKSLICE_SYNC_MODE_EN		0x80
+
+#define MEMBUS_CLKSLICE_REG				0x27
+#define MEMBUS_SYNTHCALFOSC_INIT_CENTERFREQ_REG		0xb3
+#define MEMBUS_SYNTHPPM_WATCHDOGTMR_VF01_REG		0xe6
+#define MEMBUS_CALCLKSLICE0_DUTY_LOCOVR_REG		0x03
+#define MEMBUS_CALCLKSLICE1_DUTY_LOCOVR_REG		0x07
+
+static const struct {
+	u32 reg;
+	u32 val;
+	u32 mask;
+} membus_pll[] = {
+	{
+		MEMBUS_CLKSLICE_REG,
+		/*
+		 * BIT[7:7]
+		 * Enable source synchronous mode
+		 */
+		BIT(7),
+		BIT(7)
+	},
+	{
+		MEMBUS_SYNTHCALFOSC_INIT_CENTERFREQ_REG,
+		/*
+		 * BIT[0:0]
+		 * Sets synthcalfosc_init_centerfreq=1 to limit overshoot
+		 * frequency during lock
+		 */
+		BIT(0),
+		BIT(0)
+	},
+	{
+		MEMBUS_SYNTHPPM_WATCHDOGTMR_VF01_REG,
+		/*
+		 * BIT[0:0]
+		 * Sets synthppm_watchdogtmr_vf0=1 to give the pll more time
+		 * to settle before lock is asserted.
+		 */
+		BIT(0),
+		BIT(0)
+	},
+	{
+		MEMBUS_CALCLKSLICE0_DUTY_LOCOVR_REG,
+		/*
+		 * BIT[6:0]
+		 * Centering duty cycle for clkslice0 output
+		 */
+		0x4a,
+		GENMASK(6, 0)
+	},
+	{
+		MEMBUS_CALCLKSLICE1_DUTY_LOCOVR_REG,
+		/*
+		 * BIT[6:0]
+		 * Centering duty cycle for clkslice1 output
+		 */
+		0x4a,
+		GENMASK(6, 0)
+	},
+};
 
 static int membus_wait_for_req(struct socfpga_clk_platdata *plat, u32 pll,
 			       int timeout)
@@ -126,6 +184,20 @@
 	return 0;
 }
 
+static void membus_pll_configs(struct socfpga_clk_platdata *plat, u32 pll)
+{
+	int i;
+	u32 rdata;
+
+	for (i = 0; i < ARRAY_SIZE(membus_pll); i++) {
+		membus_read_pll(plat, pll, membus_pll[i].reg,
+				&rdata, MEMBUS_TIMEOUT);
+		membus_write_pll(plat, pll, membus_pll[i].reg,
+			 ((rdata & ~membus_pll[i].mask) | membus_pll[i].val),
+			 MEMBUS_TIMEOUT);
+	}
+}
+
 static u32 calc_vocalib_pll(u32 pllm, u32 pllglob)
 {
 	u32 mdiv, refclkdiv, arefclkdiv, drefclkdiv, mscnt, hscnt, vcocalib;
@@ -166,11 +238,20 @@
 {
 	struct socfpga_clk_platdata *plat = dev_get_platdata(dev);
 	u32 vcocalib;
-	u32 rdata;
 
 	if (!cfg)
 		return;
 
+#ifdef CONFIG_SPL_BUILD
+	/* Always force clock manager into boot mode before any configuration */
+	clk_write_ctrl(plat,
+		       CM_REG_READL(plat, CLKMGR_CTRL) | CLKMGR_CTRL_BOOTMODE);
+#else
+	/* Skip clock configuration in SSBL if it's not in boot mode */
+	if (!(CM_REG_READL(plat, CLKMGR_CTRL) & CLKMGR_CTRL_BOOTMODE))
+		return;
+#endif
+
 	/* Put both PLLs in bypass */
 	clk_write_bypass_mainpll(plat, CLKMGR_BYPASS_MAINPLL_ALL);
 	clk_write_bypass_perpll(plat, CLKMGR_BYPASS_PERPLL_ALL);
@@ -216,19 +297,10 @@
 	CM_REG_SETBITS(plat, CLKMGR_PERPLL_PLLGLOB,
 		       CLKMGR_PLLGLOB_PD_MASK | CLKMGR_PLLGLOB_RST_MASK);
 
-	/* Membus programming to set mainpll and perripll to
-	 * source synchronous mode
-	 */
-	membus_read_pll(plat, MEMBUS_MAINPLL, MEMBUS_ADDR_CLKSLICE, &rdata,
-			MEMBUS_TIMEOUT);
-	membus_write_pll(plat, MEMBUS_MAINPLL, MEMBUS_ADDR_CLKSLICE,
-			 (rdata | MEMBUS_CLKSLICE_SYNC_MODE_EN),
-			 MEMBUS_TIMEOUT);
-	membus_read_pll(plat, MEMBUS_PERPLL, MEMBUS_ADDR_CLKSLICE, &rdata,
-			MEMBUS_TIMEOUT);
-	membus_write_pll(plat, MEMBUS_PERPLL, MEMBUS_ADDR_CLKSLICE,
-			 (rdata | MEMBUS_CLKSLICE_SYNC_MODE_EN),
-			 MEMBUS_TIMEOUT);
+	/* Membus programming for mainpll */
+	membus_pll_configs(plat, MEMBUS_MAINPLL);
+	/* Membus programming for peripll */
+	membus_pll_configs(plat, MEMBUS_PERPLL);
 
 	cm_wait_for_lock(CLKMGR_STAT_ALLPLL_LOCKED_MASK);
 
@@ -533,12 +605,20 @@
 	case AGILEX_EMAC2_CLK:
 		return clk_get_emac_clk_hz(plat, clk->id);
 	case AGILEX_USB_CLK:
+	case AGILEX_NAND_X_CLK:
 		return clk_get_l4_mp_clk_hz(plat);
+	case AGILEX_NAND_CLK:
+		return clk_get_l4_mp_clk_hz(plat) / 4;
 	default:
 		return -ENXIO;
 	}
 }
 
+static int socfpga_clk_enable(struct clk *clk)
+{
+	return 0;
+}
+
 static int socfpga_clk_probe(struct udevice *dev)
 {
 	const struct cm_config *cm_default_cfg = cm_get_default_config();
@@ -562,6 +642,7 @@
 }
 
 static struct clk_ops socfpga_clk_ops = {
+	.enable		= socfpga_clk_enable,
 	.get_rate	= socfpga_clk_get_rate,
 };
 
diff --git a/drivers/clk/rockchip/clk_rk3399.c b/drivers/clk/rockchip/clk_rk3399.c
index 22c373a..1ea41f3 100644
--- a/drivers/clk/rockchip/clk_rk3399.c
+++ b/drivers/clk/rockchip/clk_rk3399.c
@@ -1592,7 +1592,7 @@
 
 static int rk3399_pmuclk_bind(struct udevice *dev)
 {
-#if CONFIG_IS_ENABLED(CONFIG_RESET_ROCKCHIP)
+#if CONFIG_IS_ENABLED(RESET_ROCKCHIP)
 	int ret;
 
 	ret = offsetof(struct rk3399_pmucru, pmucru_softrst_con[0]);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 57d9fbf..9421604 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1134,7 +1134,9 @@
 
 static void udma_free_chan_resources(struct udma_chan *uc)
 {
-	/* Some configuration to UDMA-P channel: disable, reset, whatever */
+	/* Hard reset UDMA channel */
+	udma_stop_hard(uc);
+	udma_reset_counters(uc);
 
 	/* Release PSI-L pairing */
 	udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index fe398a1..425b52a 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -31,16 +31,16 @@
 	  Enable FPGA driver for loading bitstream in BIT and BIN format
 	  on Altera Cyclone II device.
 
-config FPGA_STRATIX10
-	bool "Enable Altera FPGA driver for Stratix 10"
-	depends on TARGET_SOCFPGA_STRATIX10
+config FPGA_INTEL_SDM_MAILBOX
+	bool "Enable Intel FPGA Full Reconfiguration SDM Mailbox driver"
+	depends on TARGET_SOCFPGA_STRATIX10 || TARGET_SOCFPGA_AGILEX
 	select FPGA_ALTERA
 	help
-	  Say Y here to enable the Altera Stratix 10 FPGA specific driver
+	  Say Y here to enable the Intel FPGA Full Reconfig SDM Mailbox driver
 
-	  This provides common functionality for Altera Stratix 10 devices.
-	  Enable FPGA driver for writing bitstream into Altera Stratix10
-	  device.
+	  This provides common functionality for Intel FPGA devices.
+	  Enable FPGA driver for writing full bitstream into Intel FPGA
+	  devices through SDM (Secure Device Manager) Mailbox.
 
 config FPGA_XILINX
 	bool "Enable Xilinx FPGA drivers"
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 04e6480..83243fb 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -16,9 +16,9 @@
 obj-y += altera.o
 obj-$(CONFIG_FPGA_ACEX1K) += ACEX1K.o
 obj-$(CONFIG_FPGA_CYCLON2) += cyclon2.o
+obj-$(CONFIG_FPGA_INTEL_SDM_MAILBOX) += intel_sdm_mb.o
 obj-$(CONFIG_FPGA_STRATIX_II) += stratixII.o
 obj-$(CONFIG_FPGA_STRATIX_V) += stratixv.o
-obj-$(CONFIG_FPGA_STRATIX10) += stratix10.o
 obj-$(CONFIG_FPGA_SOCFPGA) += socfpga.o
 obj-$(CONFIG_TARGET_SOCFPGA_GEN5) += socfpga_gen5.o
 obj-$(CONFIG_TARGET_SOCFPGA_ARRIA10) += socfpga_arria10.o
diff --git a/drivers/fpga/altera.c b/drivers/fpga/altera.c
index bb27b37..10c0475 100644
--- a/drivers/fpga/altera.c
+++ b/drivers/fpga/altera.c
@@ -40,12 +40,13 @@
 #if defined(CONFIG_FPGA_STRATIX_V)
 	{ Altera_StratixV, "StratixV", stratixv_load, NULL, NULL },
 #endif
-#if defined(CONFIG_FPGA_STRATIX10)
-	{ Intel_FPGA_Stratix10, "Stratix10", stratix10_load, NULL, NULL },
-#endif
 #if defined(CONFIG_FPGA_SOCFPGA)
 	{ Altera_SoCFPGA, "SoC FPGA", socfpga_load, NULL, NULL },
 #endif
+#if defined(CONFIG_FPGA_INTEL_SDM_MAILBOX)
+	{ Intel_FPGA_SDM_Mailbox, "Intel SDM Mailbox", intel_sdm_mb_load, NULL,
+	  NULL },
+#endif
 };
 
 static int altera_validate(Altera_desc *desc, const char *fn)
diff --git a/drivers/fpga/stratix10.c b/drivers/fpga/intel_sdm_mb.c
similarity index 97%
rename from drivers/fpga/stratix10.c
rename to drivers/fpga/intel_sdm_mb.c
index da8fa31..9a1dc2c 100644
--- a/drivers/fpga/stratix10.c
+++ b/drivers/fpga/intel_sdm_mb.c
@@ -6,6 +6,7 @@
 #include <common.h>
 #include <altera.h>
 #include <log.h>
+#include <watchdog.h>
 #include <asm/arch/mailbox_s10.h>
 #include <linux/delay.h>
 
@@ -113,6 +114,7 @@
 
 		puts(".");
 		udelay(RECONFIG_STATUS_INTERVAL_DELAY_US);
+		WATCHDOG_RESET();
 	}
 
 	return -ETIMEDOUT;
@@ -238,6 +240,7 @@
 			if (resp_err && !xfer_count)
 				return resp_err;
 		}
+		WATCHDOG_RESET();
 	}
 
 	return 0;
@@ -247,7 +250,7 @@
  * This is the interface used by FPGA driver.
  * Return 0 for success, non-zero for error.
  */
-int stratix10_load(Altera_desc *desc, const void *rbf_data, size_t rbf_size)
+int intel_sdm_mb_load(Altera_desc *desc, const void *rbf_data, size_t rbf_size)
 {
 	int ret;
 	u32 resp_len = 2;
diff --git a/drivers/gpio/sandbox.c b/drivers/gpio/sandbox.c
index c2f8047..eb2600d 100644
--- a/drivers/gpio/sandbox.c
+++ b/drivers/gpio/sandbox.c
@@ -185,7 +185,15 @@
 
 	dir_flags = get_gpio_dir_flags(dev, offset);
 
-	*dir_flags = flags;
+	/*
+	 * For testing purposes keep the output value when switching to input.
+	 * This allows us to manipulate the input value via the gpio command.
+	 */
+	if (flags & GPIOD_IS_IN)
+		*dir_flags = (flags & ~GPIOD_IS_OUT_ACTIVE) |
+			     (*dir_flags & GPIOD_IS_OUT_ACTIVE);
+	else
+		*dir_flags = flags;
 
 	return 0;
 }
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 7783535..c46fa4e 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -3123,3 +3123,12 @@
 	return 0;
 }
 #endif
+
+__weak int mmc_get_env_dev(void)
+{
+#ifdef CONFIG_SYS_MMC_ENV_DEV
+	return CONFIG_SYS_MMC_ENV_DEV;
+#else
+	return 0;
+#endif
+}
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index df4cbd5..cd7e598 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -291,6 +291,22 @@
 	  This flag prevent U-boot reconfigure NAND flash controller and reuse
 	  the NAND timing from 1st stage bootloader.
 
+config NAND_OCTEONTX
+	bool "Support for OcteonTX NAND controller"
+	select SYS_NAND_SELF_INIT
+	imply CMD_NAND
+	help
+	 This enables the NAND flash controller hardware found on the OcteonTX
+	 processors.
+
+config NAND_OCTEONTX_HW_ECC
+	bool "Support Hardware ECC for OcteonTX NAND controller"
+	depends on NAND_OCTEONTX
+	default y
+	help
+	 This enables the hardware BCH engine found on the OcteonTX processors
+	 to support ECC for the NAND flash controller.
+
 config NAND_STM32_FMC2
 	bool "Support for NAND controller on STM32MP SoCs"
 	depends on ARCH_STM32MP
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 9337f64..24c51b6 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -58,6 +58,8 @@
 obj-$(CONFIG_NAND_MXC) += mxc_nand.o
 obj-$(CONFIG_NAND_MXS) += mxs_nand.o
 obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
+obj-$(CONFIG_NAND_OCTEONTX) += octeontx_nand.o
+obj-$(CONFIG_NAND_OCTEONTX_HW_ECC) += octeontx_bch.o
 obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
 obj-$(CONFIG_NAND_SPEAR) += spr_nand.o
 obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o
diff --git a/drivers/mtd/nand/raw/octeontx_bch.c b/drivers/mtd/nand/raw/octeontx_bch.c
new file mode 100644
index 0000000..6937062
--- /dev/null
+++ b/drivers/mtd/nand/raw/octeontx_bch.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <dm/of_access.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <nand.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <time.h>
+#include <linux/bitfield.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/libfdt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/dma-mapping.h>
+#include <asm/arch/clock.h>
+#include "octeontx_bch.h"
+
+#ifdef DEBUG
+# undef CONFIG_LOGLEVEL
+# define CONFIG_LOGLEVEL 8
+#endif
+
+LIST_HEAD(octeontx_bch_devices);
+static unsigned int num_vfs = BCH_NR_VF;
+static void *bch_pf;
+static void *bch_vf;
+static void *token;
+static bool bch_pf_initialized;
+static bool bch_vf_initialized;
+
+static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
+{
+	int ret;
+
+	ret = pci_sriov_init(dev, nr_virtfn);
+	if (ret)
+		printf("%s(%s): pci_sriov_init returned %d\n", __func__,
+		       dev->name, ret);
+	return ret;
+}
+
+void *octeontx_bch_getv(void)
+{
+	if (!bch_vf)
+		return NULL;
+	if (bch_vf_initialized && bch_pf_initialized)
+		return bch_vf;
+	else
+		return NULL;
+}
+
+void octeontx_bch_putv(void *token)
+{
+	bch_vf_initialized = !!token;
+	bch_vf = token;
+}
+
+void *octeontx_bch_getp(void)
+{
+	return token;
+}
+
+void octeontx_bch_putp(void *token)
+{
+	bch_pf = token;
+	bch_pf_initialized = !!token;
+}
+
+static int do_bch_init(struct bch_device *bch)
+{
+	return 0;
+}
+
+static void bch_reset(struct bch_device *bch)
+{
+	writeq(1, bch->reg_base + BCH_CTL);
+	mdelay(2);
+}
+
+static void bch_disable(struct bch_device *bch)
+{
+	writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
+	writeq(~0ull, bch->reg_base + BCH_ERR_INT);
+	bch_reset(bch);
+}
+
+static u32 bch_check_bist_status(struct bch_device *bch)
+{
+	return readq(bch->reg_base + BCH_BIST_RESULT);
+}
+
+static int bch_device_init(struct bch_device *bch)
+{
+	u64 bist;
+	int rc;
+
+	debug("%s: Resetting...\n", __func__);
+	/* Reset the PF when probed first */
+	bch_reset(bch);
+
+	debug("%s: Checking BIST...\n", __func__);
+	/* Check BIST status */
+	bist = (u64)bch_check_bist_status(bch);
+	if (bist) {
+		dev_err(dev, "BCH BIST failed with code 0x%llx\n", bist);
+		return -ENODEV;
+	}
+
+	/* Get max VQs/VFs supported by the device */
+
+	bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
+	debug("%s: %d vfs\n", __func__, bch->max_vfs);
+	if (num_vfs > bch->max_vfs) {
+		dev_warn(dev, "Num of VFs to enable %d is greater than max available.  Enabling %d VFs.\n",
+			 num_vfs, bch->max_vfs);
+		num_vfs = bch->max_vfs;
+	}
+	bch->vfs_enabled = bch->max_vfs;
+	/* Get number of VQs/VFs to be enabled */
+	/* TODO: Get CLK frequency */
+	/* Reset device parameters */
+
+	debug("%s: Doing initialization\n", __func__);
+	rc = do_bch_init(bch);
+
+	return rc;
+}
+
+static int bch_sriov_configure(struct udevice *dev, int numvfs)
+{
+	struct bch_device *bch = dev_get_priv(dev);
+	int ret = -EBUSY;
+
+	debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__,
+	      dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled);
+	if (bch->vfs_in_use)
+		goto exit;
+
+	ret = 0;
+
+	if (numvfs > 0) {
+		debug("%s: Enabling sriov\n", __func__);
+		ret = pci_enable_sriov(dev, numvfs);
+		if (ret == 0) {
+			bch->flags |= BCH_FLAG_SRIOV_ENABLED;
+			ret = numvfs;
+			bch->vfs_enabled = numvfs;
+		}
+	}
+
+	debug("VFs enabled: %d\n", ret);
+exit:
+	debug("%s: Returning %d\n", __func__, ret);
+	return ret;
+}
+
+static int octeontx_pci_bchpf_probe(struct udevice *dev)
+{
+	struct bch_device *bch;
+	int ret;
+
+	debug("%s(%s)\n", __func__, dev->name);
+	bch = dev_get_priv(dev);
+	if (!bch)
+		return -ENOMEM;
+
+	bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+	bch->dev = dev;
+
+	debug("%s: base address: %p\n", __func__, bch->reg_base);
+	ret = bch_device_init(bch);
+	if (ret) {
+		printf("%s(%s): init returned %d\n", __func__, dev->name, ret);
+		return ret;
+	}
+	INIT_LIST_HEAD(&bch->list);
+	list_add(&bch->list, &octeontx_bch_devices);
+	token = (void *)dev;
+
+	debug("%s: Configuring SRIOV\n", __func__);
+	bch_sriov_configure(dev, num_vfs);
+	debug("%s: Done.\n", __func__);
+	octeontx_bch_putp(bch);
+
+	return 0;
+}
+
+static const struct pci_device_id octeontx_bchpf_pci_id_table[] = {
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) },
+	{},
+};
+
+static const struct pci_device_id octeontx_bchvf_pci_id_table[] = {
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF)},
+	{},
+};
+
+/**
+ * Given a data block calculate the ecc data and fill in the response
+ *
+ * @param[in] block	8-byte aligned pointer to data block to calculate ECC
+ * @param block_size	Size of block in bytes, must be a multiple of two.
+ * @param bch_level	Number of errors that must be corrected.  The number of
+ *			parity bytes is equal to ((15 * bch_level) + 7) / 8.
+ *			Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
+ * @param[out] ecc	8-byte aligned pointer to where ecc data should go
+ * @param[in] resp	pointer to where responses will be written.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
+			u8 bch_level, dma_addr_t ecc, dma_addr_t resp)
+{
+	union bch_cmd cmd;
+	int rc;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.s.cword.ecc_gen = eg_gen;
+	cmd.s.cword.ecc_level = bch_level;
+	cmd.s.cword.size = block_size;
+
+	cmd.s.oword.ptr = ecc;
+	cmd.s.iword.ptr = block;
+	cmd.s.rword.ptr = resp;
+	rc = octeontx_cmd_queue_write(QID_BCH, 1,
+				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
+	if (rc)
+		return -1;
+
+	octeontx_bch_write_doorbell(1, vf);
+
+	return 0;
+}
+
+/**
+ * Given a data block and ecc data correct the data block
+ *
+ * @param[in] block_ecc_in	8-byte aligned pointer to data block with ECC
+ *				data concatenated to the end to correct
+ * @param block_size		Size of block in bytes, must be a multiple of
+ *				two.
+ * @param bch_level		Number of errors that must be corrected.  The
+ *				number of parity bytes is equal to
+ *				((15 * bch_level) + 7) / 8.
+ *				Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
+ * @param[out] block_out	8-byte aligned pointer to corrected data buffer.
+ *				This should not be the same as block_ecc_in.
+ * @param[in] resp		pointer to where responses will be written.
+ *
+ * @return Zero on success, negative on failure.
+ */
+
+int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
+			u16 block_size, u8 bch_level,
+			dma_addr_t block_out, dma_addr_t resp)
+{
+	union bch_cmd cmd;
+	int rc;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.s.cword.ecc_gen = eg_correct;
+	cmd.s.cword.ecc_level = bch_level;
+	cmd.s.cword.size = block_size;
+
+	cmd.s.oword.ptr = block_out;
+	cmd.s.iword.ptr = block_ecc_in;
+	cmd.s.rword.ptr = resp;
+	rc = octeontx_cmd_queue_write(QID_BCH, 1,
+				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
+	if (rc)
+		return -1;
+
+	octeontx_bch_write_doorbell(1, vf);
+	return 0;
+}
+EXPORT_SYMBOL(octeontx_bch_decode);
+
+int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
+		      dma_addr_t handle)
+{
+	ulong start = get_timer(0);
+
+	__iormb(); /* HW is updating *resp */
+	while (!resp->s.done && get_timer(start) < 10)
+		__iormb(); /* HW is updating *resp */
+
+	if (resp->s.done)
+		return 0;
+
+	return -ETIMEDOUT;
+}
+
+struct bch_q octeontx_bch_q[QID_MAX];
+
+static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id,
+					 int max_depth, int fpa_pool,
+					 int pool_size)
+{
+	/* some params are for later merge with CPT or cn83xx */
+	struct bch_q *q = &octeontx_bch_q[queue_id];
+	unsigned long paddr;
+	u64 *chunk_buffer;
+	int chunk = max_depth + 1;
+	int i, size;
+
+	if ((unsigned int)queue_id >= QID_MAX)
+		return -EINVAL;
+	if (max_depth & chunk) /* must be 2^N - 1 */
+		return -EINVAL;
+
+	size = NQS * chunk * sizeof(u64);
+	chunk_buffer = dma_alloc_coherent(size, &paddr);
+	if (!chunk_buffer)
+		return -ENOMEM;
+
+	q->base_paddr = paddr;
+	q->dev = dev;
+	q->index = 0;
+	q->max_depth = max_depth;
+	q->pool_size_m1 = pool_size;
+	q->base_vaddr = chunk_buffer;
+
+	for (i = 0; i < NQS; i++) {
+		u64 *ixp;
+		int inext = (i + 1) * chunk - 1;
+		int j = (i + 1) % NQS;
+		int jnext = j * chunk;
+		dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64);
+
+		ixp = &chunk_buffer[inext];
+		*ixp = jbase;
+	}
+
+	return 0;
+}
+
+static int octeontx_pci_bchvf_probe(struct udevice *dev)
+{
+	struct bch_vf *vf;
+	union bch_vqx_ctl ctl;
+	union bch_vqx_cmd_buf cbuf;
+	int err;
+
+	debug("%s(%s)\n", __func__, dev->name);
+	vf = dev_get_priv(dev);
+	if (!vf)
+		return -ENOMEM;
+
+	vf->dev = dev;
+
+	/* Map PF's configuration registers */
+	vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+	debug("%s: reg base: %p\n", __func__, vf->reg_base);
+
+	err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
+					    sizeof(union bch_cmd) * QDEPTH);
+	if (err) {
+		dev_err(dev, "octeontx_cmd_queue_initialize() failed\n");
+		goto release;
+	}
+
+	ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0));
+
+	cbuf.u = 0;
+	cbuf.s.ldwb = 1;
+	cbuf.s.dfb = 1;
+	cbuf.s.size = QDEPTH;
+	writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0));
+
+	writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0));
+
+	writeq(octeontx_bch_q[QID_BCH].base_paddr,
+	       vf->reg_base + BCH_VQX_CMD_PTR(0));
+
+	octeontx_bch_putv(vf);
+
+	debug("%s: bch vf initialization complete\n", __func__);
+
+	if (octeontx_bch_getv())
+		return octeontx_pci_nand_deferred_probe();
+
+	return -1;
+
+release:
+	return err;
+}
+
+static int octeontx_pci_bchpf_remove(struct udevice *dev)
+{
+	struct bch_device *bch = dev_get_priv(dev);
+
+	bch_disable(bch);
+	return 0;
+}
+
+U_BOOT_DRIVER(octeontx_pci_bchpf) = {
+	.name	= BCHPF_DRIVER_NAME,
+	.id	= UCLASS_MISC,
+	.probe	= octeontx_pci_bchpf_probe,
+	.remove = octeontx_pci_bchpf_remove,
+	.priv_auto_alloc_size = sizeof(struct bch_device),
+	.flags = DM_FLAG_OS_PREPARE,
+};
+
+U_BOOT_DRIVER(octeontx_pci_bchvf) = {
+	.name	= BCHVF_DRIVER_NAME,
+	.id	= UCLASS_MISC,
+	.probe = octeontx_pci_bchvf_probe,
+	.priv_auto_alloc_size = sizeof(struct bch_vf),
+};
+
+U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table);
+U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table);
diff --git a/drivers/mtd/nand/raw/octeontx_bch.h b/drivers/mtd/nand/raw/octeontx_bch.h
new file mode 100644
index 0000000..3aaa52c
--- /dev/null
+++ b/drivers/mtd/nand/raw/octeontx_bch.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __OCTEONTX_BCH_H__
+#define __OCTEONTX_BCH_H__
+
+#include "octeontx_bch_regs.h"
+
+/* flags to indicate the features supported */
+#define BCH_FLAG_SRIOV_ENABLED		BIT(1)
+
+/*
+ * BCH Registers map for 81xx
+ */
+
+/* PF registers */
+#define BCH_CTL				0x0ull
+#define BCH_ERR_CFG			0x10ull
+#define BCH_BIST_RESULT			0x80ull
+#define BCH_ERR_INT			0x88ull
+#define BCH_ERR_INT_W1S			0x90ull
+#define BCH_ERR_INT_ENA_W1C		0xA0ull
+#define BCH_ERR_INT_ENA_W1S		0xA8ull
+
+/* VF registers */
+#define BCH_VQX_CTL(z)			0x0ull
+#define BCH_VQX_CMD_BUF(z)		0x8ull
+#define BCH_VQX_CMD_PTR(z)		0x20ull
+#define BCH_VQX_DOORBELL(z)		0x800ull
+
+#define BCHPF_DRIVER_NAME	"octeontx-bchpf"
+#define BCHVF_DRIVER_NAME	"octeontx-bchvf"
+
+struct bch_device {
+	struct list_head list;
+	u8 max_vfs;
+	u8 vfs_enabled;
+	u8 vfs_in_use;
+	u32 flags;
+	void __iomem *reg_base;
+	struct udevice *dev;
+};
+
+struct bch_vf {
+	u16 flags;
+	u8 vfid;
+	u8 node;
+	u8 priority;
+	struct udevice *dev;
+	void __iomem *reg_base;
+};
+
+struct buf_ptr {
+	u8 *vptr;
+	dma_addr_t dma_addr;
+	u16 size;
+};
+
+void *octeontx_bch_getv(void);
+void octeontx_bch_putv(void *token);
+void *octeontx_bch_getp(void);
+void octeontx_bch_putp(void *token);
+int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
+		      dma_addr_t handle);
+/**
+ * Given a data block calculate the ecc data and fill in the response
+ *
+ * @param[in] block	8-byte aligned pointer to data block to calculate ECC
+ * @param block_size	Size of block in bytes, must be a multiple of two.
+ * @param bch_level	Number of errors that must be corrected.  The number of
+ *			parity bytes is equal to ((15 * bch_level) + 7) / 8.
+ *			Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
+ * @param[out] ecc	8-byte aligned pointer to where ecc data should go
+ * @param[in] resp	pointer to where responses will be written.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
+			u8 bch_level, dma_addr_t ecc, dma_addr_t resp);
+
+/**
+ * Given a data block and ecc data correct the data block
+ *
+ * @param[in] block_ecc_in	8-byte aligned pointer to data block with ECC
+ *				data concatenated to the end to correct
+ * @param block_size		Size of block in bytes, must be a multiple of
+ *				two.
+ * @param bch_level		Number of errors that must be corrected.  The
+ *				number of parity bytes is equal to
+ *				((15 * bch_level) + 7) / 8.
+ *				Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
+ * @param[out] block_out	8-byte aligned pointer to corrected data buffer.
+ *				This should not be the same as block_ecc_in.
+ * @param[in] resp		pointer to where responses will be written.
+ *
+ * @return Zero on success, negative on failure.
+ */
+
+int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
+			u16 block_size, u8 bch_level,
+			dma_addr_t block_out, dma_addr_t resp);
+
+/**
+ * Ring the BCH doorbell telling it that new commands are
+ * available.
+ *
+ * @param num_commands	Number of new commands
+ * @param vf		virtual function handle
+ */
+static inline void octeontx_bch_write_doorbell(u64 num_commands,
+					       struct bch_vf *vf)
+{
+	u64 num_words = num_commands * sizeof(union bch_cmd) / sizeof(uint64_t);
+
+	writeq(num_words, vf->reg_base + BCH_VQX_DOORBELL(0));
+}
+
+/**
+ * Since it's possible (and even likely) that the NAND device will be probed
+ * before the BCH device has been probed, we may need to defer the probing.
+ *
+ * In this case, the initial probe returns success but the actual probing
+ * is deferred until the BCH VF has been probed.
+ *
+ * @return	0 for success, otherwise error
+ */
+int octeontx_pci_nand_deferred_probe(void);
+
+#endif /* __OCTEONTX_BCH_H__ */
diff --git a/drivers/mtd/nand/raw/octeontx_bch_regs.h b/drivers/mtd/nand/raw/octeontx_bch_regs.h
new file mode 100644
index 0000000..7d34438
--- /dev/null
+++ b/drivers/mtd/nand/raw/octeontx_bch_regs.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __OCTEONTX_BCH_REGS_H__
+#define __OCTEONTX_BCH_REGS_H__
+
+#define BCH_NR_VF	1
+
+union bch_cmd {
+	u64 u[4];
+	struct fields {
+	    struct {
+		u64 size:12;
+		u64 reserved_12_31:20;
+		u64 ecc_level:4;
+		u64 reserved_36_61:26;
+		u64 ecc_gen:2;
+	    } cword;
+	    struct {
+		u64 ptr:49;
+		u64 reserved_49_55:7;
+		u64 nc:1;
+		u64 fw:1;
+		u64 reserved_58_63:6;
+	    } oword;
+	    struct {
+		u64 ptr:49;
+		u64 reserved_49_55:7;
+		u64 nc:1;
+		u64 reserved_57_63:7;
+	    } iword;
+	    struct {
+		u64 ptr:49;
+		u64 reserved_49_63:15;
+	    } rword;
+	} s;
+};
+
+enum ecc_gen {
+	eg_correct,
+	eg_copy,
+	eg_gen,
+	eg_copy3,
+};
+
+/** Response from BCH instruction */
+union bch_resp {
+	u16  u16;
+	struct {
+		u16	num_errors:7;	/** Number of errors in block */
+		u16	zero:6;		/** Always zero, ignore */
+		u16	erased:1;	/** Block is erased */
+		u16	uncorrectable:1;/** too many bits flipped */
+		u16	done:1;		/** Block is done */
+	} s;
+};
+
+union bch_vqx_ctl {
+	u64 u;
+	struct {
+		u64 reserved_0:1;
+		u64 cmd_be:1;
+		u64 max_read:4;
+		u64 reserved_6_15:10;
+		u64 erase_disable:1;
+		u64 one_cmd:1;
+		u64 early_term:4;
+		u64 reserved_22_63:42;
+	} s;
+};
+
+union bch_vqx_cmd_buf {
+	u64 u;
+	struct {
+		u64 reserved_0_32:33;
+		u64 size:13;
+		u64 dfb:1;
+		u64 ldwb:1;
+		u64 reserved_48_63:16;
+	} s;
+};
+
+/* keep queue state indexed, even though just one supported here,
+ * for later generalization to similarly-shaped queues on other Cavium devices
+ */
+enum {
+	QID_BCH,
+	QID_MAX
+};
+
+struct bch_q {
+	struct udevice *dev;
+	int index;
+	u16 max_depth;
+	u16 pool_size_m1;
+	u64 *base_vaddr;
+	dma_addr_t base_paddr;
+};
+
+extern struct bch_q octeontx_bch_q[QID_MAX];
+
+/* with one dma-mapped area, virt<->phys conversions by +/- (vaddr-paddr) */
+static inline dma_addr_t qphys(int qid, void *v)
+{
+	struct bch_q *q = &octeontx_bch_q[qid];
+	int off = (u8 *)v - (u8 *)q->base_vaddr;
+
+	return q->base_paddr + off;
+}
+
+#define octeontx_ptr_to_phys(v) qphys(QID_BCH, (v))
+
+static inline void *qvirt(int qid, dma_addr_t p)
+{
+	struct bch_q *q = &octeontx_bch_q[qid];
+	int off = p - q->base_paddr;
+
+	return q->base_vaddr + off;
+}
+
+#define octeontx_phys_to_ptr(p) qvirt(QID_BCH, (p))
+
+/* plenty for interleaved r/w on two planes with 16k page, ecc_size 1k */
+/* QDEPTH >= 16, as successive chunks must align on 128-byte boundaries */
+#define QDEPTH	256	/* u64s in a command queue chunk, incl next-pointer */
+#define NQS	1	/* linked chunks in the chain */
+
+/**
+ * Write an arbitrary number of command words to a command queue.
+ * This is a generic function; the fixed number of command word
+ * functions yield higher performance.
+ *
+ * Could merge with crypto version for FPA use on cn83xx
+ */
+static inline int octeontx_cmd_queue_write(int queue_id, bool use_locking,
+					   int cmd_count, const u64 *cmds)
+{
+	int ret = 0;
+	u64 *cmd_ptr;
+	struct bch_q *qptr = &octeontx_bch_q[queue_id];
+
+	if (unlikely(cmd_count < 1 || cmd_count > 32))
+		return -EINVAL;
+	if (unlikely(!cmds))
+		return -EINVAL;
+
+	cmd_ptr = qptr->base_vaddr;
+
+	while (cmd_count > 0) {
+		int slot = qptr->index % (QDEPTH * NQS);
+
+		if (slot % QDEPTH != QDEPTH - 1) {
+			cmd_ptr[slot] = *cmds++;
+			cmd_count--;
+		}
+
+		qptr->index++;
+	}
+
+	__iowmb();	/* flush commands before ringing bell */
+
+	return ret;
+}
+
+#endif /* __OCTEONTX_BCH_REGS_H__ */
diff --git a/drivers/mtd/nand/raw/octeontx_nand.c b/drivers/mtd/nand/raw/octeontx_nand.c
new file mode 100644
index 0000000..ad21917
--- /dev/null
+++ b/drivers/mtd/nand/raw/octeontx_nand.c
@@ -0,0 +1,2257 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <dm/device-internal.h>
+#include <dm/devres.h>
+#include <dm/of_access.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <nand.h>
+#include <pci.h>
+#include <time.h>
+#include <linux/bitfield.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/libfdt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/dma-mapping.h>
+#include <asm/arch/clock.h>
+#include "octeontx_bch.h"
+
+#ifdef DEBUG
+# undef CONFIG_LOGLEVEL
+# define CONFIG_LOGLEVEL 8
+#endif
+
+/*
+ * The NDF_CMD queue takes commands between 16 - 128 bit.
+ * All commands must be 16 bit aligned and are little endian.
+ * WAIT_STATUS commands must be 64 bit aligned.
+ * Commands are selected by the 4 bit opcode.
+ *
+ * Available Commands:
+ *
+ * 16 Bit:
+ *   NOP
+ *   WAIT
+ *   BUS_ACQ, BUS_REL
+ *   CHIP_EN, CHIP_DIS
+ *
+ * 32 Bit:
+ *   CLE_CMD
+ *   RD_CMD, RD_EDO_CMD
+ *   WR_CMD
+ *
+ * 64 Bit:
+ *   SET_TM_PAR
+ *
+ * 96 Bit:
+ *   ALE_CMD
+ *
+ * 128 Bit:
+ *   WAIT_STATUS, WAIT_STATUS_ALE
+ */
+
+/* NDF Register offsets */
+#define NDF_CMD			0x0
+#define NDF_MISC		0x8
+#define NDF_ECC_CNT		0x10
+#define NDF_DRBELL		0x30
+#define NDF_ST_REG		0x38	/* status */
+#define NDF_INT			0x40
+#define NDF_INT_W1S		0x48
+#define NDF_DMA_CFG		0x50
+#define NDF_DMA_ADR		0x58
+#define NDF_INT_ENA_W1C		0x60
+#define NDF_INT_ENA_W1S		0x68
+
+/* NDF command opcodes */
+#define NDF_OP_NOP		0x0
+#define NDF_OP_SET_TM_PAR	0x1
+#define NDF_OP_WAIT		0x2
+#define NDF_OP_CHIP_EN_DIS	0x3
+#define NDF_OP_CLE_CMD		0x4
+#define NDF_OP_ALE_CMD		0x5
+#define NDF_OP_WR_CMD		0x8
+#define NDF_OP_RD_CMD		0x9
+#define NDF_OP_RD_EDO_CMD	0xa
+#define NDF_OP_WAIT_STATUS	0xb	/* same opcode for WAIT_STATUS_ALE */
+#define NDF_OP_BUS_ACQ_REL	0xf
+
+#define NDF_BUS_ACQUIRE		1
+#define NDF_BUS_RELEASE		0
+
+#define DBGX_EDSCR(X)		(0x87A008000088 + (X) * 0x80000)
+
+struct ndf_nop_cmd {
+	u16 opcode:	4;
+	u16 nop:	12;
+};
+
+struct ndf_wait_cmd {
+	u16 opcode:4;
+	u16 r_b:1;		/* wait for one cycle or PBUS_WAIT deassert */
+	u16:3;
+	u16 wlen:3;		/* timing parameter select */
+	u16:5;
+};
+
+struct ndf_bus_cmd {
+	u16 opcode:4;
+	u16 direction:4;	/* 1 = acquire, 0 = release */
+	u16:8;
+};
+
+struct ndf_chip_cmd {
+	u16 opcode:4;
+	u16 chip:3;		/* select chip, 0 = disable */
+	u16 enable:1;		/* 1 = enable, 0 = disable */
+	u16 bus_width:2;	/* 10 = 16 bit, 01 = 8 bit */
+	u16:6;
+};
+
+struct ndf_cle_cmd {
+	u32 opcode:4;
+	u32:4;
+	u32 cmd_data:8;		/* command sent to the PBUS AD pins */
+	u32 clen1:3;		/* time between PBUS CLE and WE asserts */
+	u32 clen2:3;		/* time WE remains asserted */
+	u32 clen3:3;		/* time between WE deassert and CLE */
+	u32:7;
+};
+
+/* RD_EDO_CMD uses the same layout as RD_CMD */
+struct ndf_rd_cmd {
+	u32 opcode:4;
+	u32 data:16;		/* data bytes */
+	u32 rlen1:3;
+	u32 rlen2:3;
+	u32 rlen3:3;
+	u32 rlen4:3;
+};
+
+struct ndf_wr_cmd {
+	u32 opcode:4;
+	u32 data:16;		/* data bytes */
+	u32:4;
+	u32 wlen1:3;
+	u32 wlen2:3;
+	u32:3;
+};
+
+struct ndf_set_tm_par_cmd {
+	u64 opcode:4;
+	u64 tim_mult:4;	/* multiplier for the seven parameters */
+	u64 tm_par1:8;	/* --> Following are the 7 timing parameters that */
+	u64 tm_par2:8;	/*     specify the number of coprocessor cycles.  */
+	u64 tm_par3:8;	/*     A value of zero means one cycle.		  */
+	u64 tm_par4:8;	/*     All values are scaled by tim_mult	  */
+	u64 tm_par5:8;	/*     using tim_par * (2 ^ tim_mult).		  */
+	u64 tm_par6:8;
+	u64 tm_par7:8;
+};
+
+struct ndf_ale_cmd {
+	u32 opcode:4;
+	u32:4;
+	u32 adr_byte_num:4;	/* number of address bytes to be sent */
+	u32:4;
+	u32 alen1:3;
+	u32 alen2:3;
+	u32 alen3:3;
+	u32 alen4:3;
+	u32:4;
+	u8 adr_byt1;
+	u8 adr_byt2;
+	u8 adr_byt3;
+	u8 adr_byt4;
+	u8 adr_byt5;
+	u8 adr_byt6;
+	u8 adr_byt7;
+	u8 adr_byt8;
+};
+
+struct ndf_wait_status_cmd {
+	u32 opcode:4;
+	u32:4;
+	u32 data:8;		/** data */
+	u32 clen1:3;
+	u32 clen2:3;
+	u32 clen3:3;
+	u32:8;
+	/** set to 5 to select WAIT_STATUS_ALE command */
+	u32 ale_ind:8;
+	/** ALE only: number of address bytes to be sent */
+	u32 adr_byte_num:4;
+	u32:4;
+	u32 alen1:3;	/* ALE only */
+	u32 alen2:3;	/* ALE only */
+	u32 alen3:3;	/* ALE only */
+	u32 alen4:3;	/* ALE only */
+	u32:4;
+	u8 adr_byt[4];		/* ALE only */
+	u32 nine:4;	/* set to 9 */
+	u32 and_mask:8;
+	u32 comp_byte:8;
+	u32 rlen1:3;
+	u32 rlen2:3;
+	u32 rlen3:3;
+	u32 rlen4:3;
+};
+
+union ndf_cmd {
+	u64 val[2];
+	union {
+		struct ndf_nop_cmd		nop;
+		struct ndf_wait_cmd		wait;
+		struct ndf_bus_cmd		bus_acq_rel;
+		struct ndf_chip_cmd		chip_en_dis;
+		struct ndf_cle_cmd		cle_cmd;
+		struct ndf_rd_cmd		rd_cmd;
+		struct ndf_wr_cmd		wr_cmd;
+		struct ndf_set_tm_par_cmd	set_tm_par;
+		struct ndf_ale_cmd		ale_cmd;
+		struct ndf_wait_status_cmd	wait_status;
+	} u;
+};
+
+/** Disable multi-bit error hangs */
+#define NDF_MISC_MB_DIS		BIT_ULL(27)
+/** High watermark for NBR FIFO or load/store operations */
+#define NDF_MISC_NBR_HWM	GENMASK_ULL(26, 24)
+/** Wait input filter count */
+#define NDF_MISC_WAIT_CNT	GENMASK_ULL(23, 18)
+/** Unfilled NFD_CMD queue bytes */
+#define NDF_MISC_FR_BYTE	GENMASK_ULL(17, 7)
+/** Set by HW when it reads the last 8 bytes of NDF_CMD */
+#define NDF_MISC_RD_DONE	BIT_ULL(6)
+/** Set by HW when it reads. SW read of NDF_CMD clears it */
+#define NDF_MISC_RD_VAL		BIT_ULL(5)
+/** Let HW read NDF_CMD queue. Cleared on SW NDF_CMD write */
+#define NDF_MISC_RD_CMD		BIT_ULL(4)
+/** Boot disable */
+#define NDF_MISC_BT_DIS		BIT_ULL(2)
+/** Stop command execution after completing command queue */
+#define NDF_MISC_EX_DIS		BIT_ULL(1)
+/** Reset fifo */
+#define NDF_MISC_RST_FF		BIT_ULL(0)
+
+/** DMA engine enable */
+#define NDF_DMA_CFG_EN		BIT_ULL(63)
+/** Read or write */
+#define NDF_DMA_CFG_RW		BIT_ULL(62)
+/** Terminates DMA and clears enable bit */
+#define NDF_DMA_CFG_CLR		BIT_ULL(61)
+/** 32-bit swap enable */
+#define NDF_DMA_CFG_SWAP32	BIT_ULL(59)
+/** 16-bit swap enable */
+#define NDF_DMA_CFG_SWAP16	BIT_ULL(58)
+/** 8-bit swap enable */
+#define NDF_DMA_CFG_SWAP8	BIT_ULL(57)
+/** Endian mode */
+#define NDF_DMA_CFG_CMD_BE	BIT_ULL(56)
+/** Number of 64 bit transfers */
+#define NDF_DMA_CFG_SIZE	GENMASK_ULL(55, 36)
+
+/** Command execution status idle */
+#define NDF_ST_REG_EXE_IDLE	BIT_ULL(15)
+/** Command execution SM states */
+#define NDF_ST_REG_EXE_SM	GENMASK_ULL(14, 11)
+/** DMA and load SM states */
+#define NDF_ST_REG_BT_SM	GENMASK_ULL(10, 7)
+/** Queue read-back SM bad state */
+#define NDF_ST_REG_RD_FF_BAD	BIT_ULL(6)
+/** Queue read-back SM states */
+#define NDF_ST_REG_RD_FF	GENMASK_ULL(5, 4)
+/** Main SM is in a bad state */
+#define NDF_ST_REG_MAIN_BAD	BIT_ULL(3)
+/** Main SM states */
+#define NDF_ST_REG_MAIN_SM	GENMASK_ULL(2, 0)
+
+#define MAX_NAND_NAME_LEN	64
+#if (defined(NAND_MAX_PAGESIZE) && (NAND_MAX_PAGESIZE > 4096)) ||	\
+	!defined(NAND_MAX_PAGESIZE)
+# undef NAND_MAX_PAGESIZE
+# define NAND_MAX_PAGESIZE	4096
+#endif
+#if (defined(NAND_MAX_OOBSIZE) && (NAND_MAX_OOBSIZE > 256)) ||		\
+	!defined(NAND_MAX_OOBSIZE)
+# undef NAND_MAX_OOBSIZE
+# define NAND_MAX_OOBSIZE	256
+#endif
+
+#define OCTEONTX_NAND_DRIVER_NAME	"octeontx_nand"
+
+#define NDF_TIMEOUT		1000	/** Timeout in ms */
+#define USEC_PER_SEC		1000000	/** Linux compatibility */
+#ifndef NAND_MAX_CHIPS
+# define NAND_MAX_CHIPS		8	/** Linux compatibility */
+#endif
+
+struct octeontx_nand_chip {
+	struct list_head node;
+	struct nand_chip nand;
+	struct ndf_set_tm_par_cmd timings;
+	int cs;
+	int selected_page;
+	int iface_mode;
+	int row_bytes;
+	int col_bytes;
+	bool oob_only;
+	bool iface_set;
+};
+
+struct octeontx_nand_buf {
+	u8 *dmabuf;
+	dma_addr_t dmaaddr;
+	int dmabuflen;
+	int data_len;
+	int data_index;
+};
+
+/** NAND flash controller (NDF) related information */
+struct octeontx_nfc {
+	struct nand_hw_control controller;
+	struct udevice *dev;
+	void __iomem *base;
+	struct list_head chips;
+	int selected_chip;      /* Currently selected NAND chip number */
+
+	/*
+	 * Status is separate from octeontx_nand_buf because
+	 * it can be used in parallel and during init.
+	 */
+	u8 *stat;
+	dma_addr_t stat_addr;
+	bool use_status;
+
+	struct octeontx_nand_buf buf;
+	union bch_resp *bch_resp;
+	dma_addr_t bch_rhandle;
+
+	/* BCH of all-0xff, so erased pages read as error-free */
+	unsigned char *eccmask;
+};
+
+/* settable timings - 0..7 select timing of alen1..4/clen1..3/etc */
+enum tm_idx {
+	t0, /* fixed at 4<<mult cycles */
+	t1, t2, t3, t4, t5, t6, t7, /* settable per ONFI-timing mode */
+};
+
+struct octeontx_probe_device {
+	struct list_head list;
+	struct udevice *dev;
+};
+
+static struct bch_vf *bch_vf;
+/** Deferred devices due to BCH not being ready */
+LIST_HEAD(octeontx_pci_nand_deferred_devices);
+
+/** default parameters used for probing chips */
+#define MAX_ONFI_MODE	5
+
+static int default_onfi_timing;
+static int slew_ns = 2; /* default timing padding */
+static int def_ecc_size = 512; /* 1024 best for sw_bch, <= 4095 for hw_bch */
+static int default_width = 1; /* 8 bit */
+static int default_page_size = 2048;
+static struct ndf_set_tm_par_cmd default_timing_parms;
+
+/** Port from Linux */
+/*
+ * Poll @addr with readq() until @cond becomes true or @timeout_us
+ * elapses.  Evaluates to 0 on success, -ETIMEDOUT on timeout.
+ * @val receives the last value read; @cond may be evaluated multiple
+ * times; @delay_us inserts a udelay() between polls.  One final
+ * readq() is done after timeout so @val reflects the latest state.
+ */
+#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us)	\
+({									\
+	ulong __start = get_timer(0);					\
+	void *__addr = (addr);						\
+	const ulong __timeout_ms = timeout_us / 1000;			\
+	do {								\
+		(val) = readq(__addr);					\
+		if (cond)						\
+			break;						\
+		if (timeout_us && get_timer(__start) > __timeout_ms) {	\
+			(val) = readq(__addr);				\
+			break;						\
+		}							\
+		if (delay_us)						\
+			udelay(delay_us);				\
+	} while (1);							\
+	(cond) ? 0 : -ETIMEDOUT;					\
+})
+
+/** Ported from Linux 4.9.0 include/linux/of.h for compatibility */
+static inline int of_get_child_count(const ofnode node)
+{
+	return fdtdec_get_child_count(gd->fdt_blob, ofnode_to_offset(node));
+}
+
+/**
+ * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c
+ */
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	/* there is exactly one ECC region, packed at the tail of the OOB */
+	if (section || !ecc->total)
+		return -ERANGE;
+
+	oobregion->length = ecc->total;
+	oobregion->offset = mtd->oobsize - oobregion->length;
+
+	return 0;
+}
+
+/**
+ * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c
+ */
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	/* free bytes sit between the 2-byte bad block marker and the ECC */
+	oobregion->length = mtd->oobsize - ecc->total - 2;
+	oobregion->offset = 2;
+
+	return 0;
+}
+
+/* large-page OOB layout: ECC at the tail, free space after the BBM */
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+	.ecc = nand_ooblayout_ecc_lp,
+	.rfree = nand_ooblayout_free_lp,
+};
+
+/** Upcast from the embedded nand_chip to our per-chip state */
+static inline struct octeontx_nand_chip *to_otx_nand(struct nand_chip *nand)
+{
+	return container_of(nand, struct octeontx_nand_chip, nand);
+}
+
+/** Upcast from the embedded controller to the NDF controller state */
+static inline struct octeontx_nfc *to_otx_nfc(struct nand_hw_control *ctrl)
+{
+	return container_of(ctrl, struct octeontx_nfc, controller);
+}
+
+/**
+ * Build the ECC layout: all ECC bytes packed at the OOB tail, free
+ * bytes (minus the 2-byte bad block marker) in front of them.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, -1 if the OOB
+ * area is too small for the required ECC bytes.
+ */
+static int octeontx_nand_calc_ecc_layout(struct nand_chip *nand)
+{
+	struct nand_ecclayout *layout = nand->ecc.layout;
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	struct mtd_info *mtd = &nand->mtd;
+	int oobsize = mtd->oobsize;
+	int i;
+	bool layout_alloc = false;
+
+	if (!layout) {
+		layout = devm_kzalloc(tn->dev, sizeof(*layout), GFP_KERNEL);
+		if (!layout)
+			return -ENOMEM;
+		nand->ecc.layout = layout;
+		layout_alloc = true;
+	}
+	layout->eccbytes = nand->ecc.steps * nand->ecc.bytes;
+	/* Reserve 2 bytes for bad block marker */
+	if (layout->eccbytes + 2 > oobsize) {
+		pr_err("No suitable oob scheme available for oobsize %d eccbytes %u\n",
+		       oobsize, layout->eccbytes);
+		goto fail;
+	}
+	/* put ecc bytes at oob tail */
+	for (i = 0; i < layout->eccbytes; i++)
+		layout->eccpos[i] = oobsize - layout->eccbytes + i;
+	layout->oobfree[0].offset = 2;
+	layout->oobfree[0].length = oobsize - 2 - layout->eccbytes;
+	nand->ecc.layout = layout;
+	return 0;
+
+fail:
+	if (layout_alloc) {
+		/*
+		 * Don't leave nand->ecc.layout dangling on freed memory,
+		 * and release with devm_kfree() since the buffer came from
+		 * devm_kzalloc(), not kmalloc().
+		 */
+		nand->ecc.layout = NULL;
+		devm_kfree(tn->dev, layout);
+	}
+	return -1;
+}
+
+/*
+ * Read a single byte from the temporary buffer. Used after READID
+ * to get the NAND information and for STATUS.
+ */
+static u8 octeontx_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+
+	/* a STATUS command delivers its single byte via the stat buffer */
+	if (tn->use_status) {
+		tn->use_status = false;
+		return *tn->stat;
+	}
+
+	if (tn->buf.data_index < tn->buf.data_len)
+		return tn->buf.dmabuf[tn->buf.data_index++];
+
+	/* buffer exhausted: complain and return all-ones */
+	dev_err(tn->dev, "No data to read, idx: 0x%x, len: 0x%x\n",
+		tn->buf.data_index, tn->buf.data_len);
+
+	return 0xff;
+}
+
+/*
+ * Read a number of pending bytes from the temporary buffer. Used
+ * to get page and OOB data.
+ */
+static void octeontx_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+
+	/* on underflow nothing is copied; caller's buffer stays untouched */
+	if (len > tn->buf.data_len - tn->buf.data_index) {
+		dev_err(tn->dev, "Not enough data for read of %d bytes\n", len);
+		return;
+	}
+
+	memcpy(buf, tn->buf.dmabuf + tn->buf.data_index, len);
+	tn->buf.data_index += len;
+}
+
+/* Stage write data in the DMA buffer; it is sent at PAGEPROG time */
+static void octeontx_nand_write_buf(struct mtd_info *mtd,
+				    const u8 *buf, int len)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+
+	/* NOTE(review): no bounds check against tn->buf.dmabuflen here -
+	 * confirm callers never exceed writesize + oobsize
+	 */
+	memcpy(tn->buf.dmabuf + tn->buf.data_len, buf, len);
+	tn->buf.data_len += len;
+}
+
+/* Overwrite default function to avoid sync abort on chip = -1. */
+static void octeontx_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+	/* intentionally empty: the chip is selected per-command instead */
+}
+
+/**
+ * Convert a timing given in picoseconds into coprocessor-clock ticks,
+ * minus one (hardware counts tm_parX+1 cycles).
+ */
+static inline int timing_to_cycle(u32 psec, unsigned long clock)
+{
+	unsigned int ns;
+	int ticks;
+
+	ns = DIV_ROUND_UP(psec, 1000);	/* ps -> ns, rounded up */
+	ns += slew_ns;			/* add slew padding */
+
+	/* no rounding needed since clock is multiple of 1MHz */
+	clock /= 1000000;
+	ns *= clock;			/* now in thousandths of a cycle */
+
+	ticks = DIV_ROUND_UP(ns, 1000);
+
+	/* actual delay is (tm_parX+1)<<tim_mult */
+	if (ticks)
+		ticks--;
+
+	return ticks;
+}
+
+/**
+ * Fill a SET_TM_PAR command from ONFI SDR timings, scaled to @sclk.
+ * @chip may be NULL when computing the pre-probe default timings.
+ */
+static void set_timings(struct octeontx_nand_chip *chip,
+			struct ndf_set_tm_par_cmd *tp,
+			const struct nand_sdr_timings *timings,
+			unsigned long sclk)
+{
+	/* scaled coprocessor-cycle values */
+	u32 s_wh, s_cls, s_clh, s_rp, s_wb, s_wc;
+
+	tp->tim_mult = 0;
+	s_wh = timing_to_cycle(timings->tWH_min, sclk);
+	s_cls = timing_to_cycle(timings->tCLS_min, sclk);
+	s_clh = timing_to_cycle(timings->tCLH_min, sclk);
+	s_rp = timing_to_cycle(timings->tRP_min, sclk);
+	s_wb = timing_to_cycle(timings->tWB_max, sclk);
+	s_wc = timing_to_cycle(timings->tWC_min, sclk);
+
+	tp->tm_par1 = s_wh;
+	tp->tm_par2 = s_clh;
+	tp->tm_par3 = s_rp + 1;
+	/* NOTE(review): can wrap if s_cls < s_wh - confirm for all modes */
+	tp->tm_par4 = s_cls - s_wh;
+	tp->tm_par5 = s_wc - s_wh + 1;
+	tp->tm_par6 = s_wb;
+	tp->tm_par7 = 0;
+	tp->tim_mult++; /* overcompensate for bad math */
+
+	/* TODO: comment parameter re-use */
+
+	pr_debug("%s: tim_par: mult: %d  p1: %d  p2: %d  p3: %d\n",
+		 __func__, tp->tim_mult, tp->tm_par1, tp->tm_par2, tp->tm_par3);
+	pr_debug("                 p4: %d  p5: %d  p6: %d  p7: %d\n",
+		 tp->tm_par4, tp->tm_par5, tp->tm_par6, tp->tm_par7);
+}
+
+/** Compute the timing parameters used before any chip has been probed */
+static int set_default_timings(struct octeontx_nfc *tn,
+			       const struct nand_sdr_timings *timings)
+{
+	unsigned long sclk = octeontx_get_io_clock();
+
+	set_timings(NULL, &default_timing_parms, timings, sclk);
+	return 0;
+}
+
+/** Program the per-chip timing parameters from the given SDR timings */
+static int octeontx_nfc_chip_set_timings(struct octeontx_nand_chip *chip,
+					 const struct nand_sdr_timings *timings)
+{
+	unsigned long sclk = octeontx_get_io_clock();
+
+	set_timings(chip, &chip->timings, timings, sclk);
+	return 0;
+}
+
+/* How many bytes are free in the NFD_CMD queue? */
+static int ndf_cmd_queue_free(struct octeontx_nfc *tn)
+{
+	u64 ndf_misc;
+
+	ndf_misc = readq(tn->base + NDF_MISC);
+	/* NDF_MISC[FR_BYTE] reports the unfilled NDF_CMD queue bytes */
+	return FIELD_GET(NDF_MISC_FR_BYTE, ndf_misc);
+}
+
+/* Submit a command to the NAND command queue. */
+static int ndf_submit(struct octeontx_nfc *tn, union ndf_cmd *cmd)
+{
+	/* the opcode lives in the low 4 bits of every command */
+	int opcode = cmd->val[0] & 0xf;
+
+	switch (opcode) {
+	/* All these commands fit in one 64bit word */
+	case NDF_OP_NOP:
+	case NDF_OP_SET_TM_PAR:
+	case NDF_OP_WAIT:
+	case NDF_OP_CHIP_EN_DIS:
+	case NDF_OP_CLE_CMD:
+	case NDF_OP_WR_CMD:
+	case NDF_OP_RD_CMD:
+	case NDF_OP_RD_EDO_CMD:
+	case NDF_OP_BUS_ACQ_REL:
+		/* need room for one 8-byte queue word */
+		if (ndf_cmd_queue_free(tn) < 8)
+			goto full;
+		writeq(cmd->val[0], tn->base + NDF_CMD);
+		break;
+	case NDF_OP_ALE_CMD:
+		/* ALE commands take either one or two 64bit words */
+		if (cmd->u.ale_cmd.adr_byte_num < 5) {
+			if (ndf_cmd_queue_free(tn) < 8)
+				goto full;
+			writeq(cmd->val[0], tn->base + NDF_CMD);
+		} else {
+			if (ndf_cmd_queue_free(tn) < 16)
+				goto full;
+			writeq(cmd->val[0], tn->base + NDF_CMD);
+			writeq(cmd->val[1], tn->base + NDF_CMD);
+		}
+		break;
+	case NDF_OP_WAIT_STATUS: /* Wait status commands take two 64bit words */
+		if (ndf_cmd_queue_free(tn) < 16)
+			goto full;
+		writeq(cmd->val[0], tn->base + NDF_CMD);
+		writeq(cmd->val[1], tn->base + NDF_CMD);
+		break;
+	default:
+		dev_err(tn->dev, "%s: unknown command: %u\n", __func__, opcode);
+		return -EINVAL;
+	}
+	return 0;
+
+full:
+	dev_err(tn->dev, "%s: no space left in command queue\n", __func__);
+	return -ENOMEM;
+}
+
+/**
+ * Wait for the ready/busy signal. First wait for busy to be valid,
+ * then wait for busy to de-assert.
+ */
+static int ndf_build_wait_busy(struct octeontx_nfc *tn)
+{
+	union ndf_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.u.wait.opcode = NDF_OP_WAIT;
+	cmd.u.wait.r_b = 1;	/* wait on R/B# deassert, not just a delay */
+	cmd.u.wait.wlen = t6;
+
+	if (ndf_submit(tn, &cmd))
+		return -ENOMEM;
+	return 0;
+}
+
+/** Has the current DMA transfer finished? */
+static bool ndf_dma_done(struct octeontx_nfc *tn)
+{
+	/* hardware clears NDF_DMA_CFG[EN] once the transfer completes */
+	return !(readq(tn->base + NDF_DMA_CFG) & NDF_DMA_CFG_EN);
+}
+
+/** Busy-poll for DMA completion, up to NDF_TIMEOUT milliseconds */
+static int ndf_wait(struct octeontx_nfc *tn)
+{
+	ulong start = get_timer(0);
+	bool done;
+
+	while (!(done = ndf_dma_done(tn)) && get_timer(start) < NDF_TIMEOUT)
+		;
+
+	if (!done) {
+		dev_err(tn->dev, "%s: timeout error\n", __func__);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/** Wait until the command engine is idle and the DMA enable has cleared */
+static int ndf_wait_idle(struct octeontx_nfc *tn)
+{
+	u64 val;
+	u64 dval = 0;
+	int rc;
+	int pause = 100;		/* poll every 100us */
+	u64 tot_us = USEC_PER_SEC / 10;	/* give up after 100ms per poll */
+
+	rc = readq_poll_timeout(tn->base + NDF_ST_REG,
+				val, val & NDF_ST_REG_EXE_IDLE, pause, tot_us);
+	if (!rc)
+		rc = readq_poll_timeout(tn->base + NDF_DMA_CFG,
+					dval, !(dval & NDF_DMA_CFG_EN),
+					pause, tot_us);
+
+	return rc;
+}
+
+/** Issue set timing parameters */
+static int ndf_queue_cmd_timing(struct octeontx_nfc *tn,
+				struct ndf_set_tm_par_cmd *timings)
+{
+	union ndf_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.u.set_tm_par.opcode = NDF_OP_SET_TM_PAR;
+	cmd.u.set_tm_par.tim_mult = timings->tim_mult;
+	cmd.u.set_tm_par.tm_par1 = timings->tm_par1;
+	cmd.u.set_tm_par.tm_par2 = timings->tm_par2;
+	cmd.u.set_tm_par.tm_par3 = timings->tm_par3;
+	cmd.u.set_tm_par.tm_par4 = timings->tm_par4;
+	cmd.u.set_tm_par.tm_par5 = timings->tm_par5;
+	cmd.u.set_tm_par.tm_par6 = timings->tm_par6;
+	cmd.u.set_tm_par.tm_par7 = timings->tm_par7;
+	return ndf_submit(tn, &cmd);
+}
+
+/** Issue bus acquire or release */
+static int ndf_queue_cmd_bus(struct octeontx_nfc *tn, int direction)
+{
+	union ndf_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.u.bus_acq_rel.opcode = NDF_OP_BUS_ACQ_REL;
+	cmd.u.bus_acq_rel.direction = direction; /* NDF_BUS_ACQUIRE/RELEASE */
+	return ndf_submit(tn, &cmd);
+}
+
+/* Issue chip select or deselect */
+static int ndf_queue_cmd_chip(struct octeontx_nfc *tn, int enable, int chip,
+			      int width)
+{
+	union ndf_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.u.chip_en_dis.opcode = NDF_OP_CHIP_EN_DIS;
+	cmd.u.chip_en_dis.chip = chip;
+	cmd.u.chip_en_dis.enable = enable;
+	cmd.u.chip_en_dis.bus_width = width;	/* 1 = 8-bit, 2 = 16-bit */
+	return ndf_submit(tn, &cmd);
+}
+
+/** Queue a WAIT using the delay selected by timing parameter @t_delay */
+static int ndf_queue_cmd_wait(struct octeontx_nfc *tn, int t_delay)
+{
+	union ndf_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.u.wait.opcode = NDF_OP_WAIT;
+	cmd.u.wait.wlen = t_delay;
+	return ndf_submit(tn, &cmd);
+}
+
+/** Queue a CLE (command latch) cycle placing @command on the AD pins */
+static int ndf_queue_cmd_cle(struct octeontx_nfc *tn, int command)
+{
+	union ndf_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.u.cle_cmd.opcode = NDF_OP_CLE_CMD;
+	cmd.u.cle_cmd.cmd_data = command;
+	cmd.u.cle_cmd.clen1 = t4;
+	cmd.u.cle_cmd.clen2 = t1;
+	cmd.u.cle_cmd.clen3 = t2;
+	return ndf_submit(tn, &cmd);
+}
+
+/**
+ * Queue an ALE (address latch) cycle of up to 8 address bytes.
+ * With 4+ bytes the low two bytes carry the column and the rest the
+ * page (row); with 1-3 bytes only the page address is sent.
+ */
+static int ndf_queue_cmd_ale(struct octeontx_nfc *tn, int addr_bytes,
+			     struct nand_chip *nand, u64 page,
+			     u32 col, int page_size)
+{
+	struct octeontx_nand_chip *octeontx_nand = (nand) ?
+						to_otx_nand(nand) : NULL;
+	union ndf_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.u.ale_cmd.opcode = NDF_OP_ALE_CMD;
+	cmd.u.ale_cmd.adr_byte_num = addr_bytes;
+
+	/* set column bit for OOB area, assume OOB follows page */
+	if (octeontx_nand && octeontx_nand->oob_only)
+		col += page_size;
+
+	/* page is u64 for this generality, even if cmdfunc() passes int */
+	switch (addr_bytes) {
+	/* 4-8 bytes: page, then 2-byte col */
+	case 8:
+		cmd.u.ale_cmd.adr_byt8 = (page >> 40) & 0xff;
+		fallthrough;
+	case 7:
+		cmd.u.ale_cmd.adr_byt7 = (page >> 32) & 0xff;
+		fallthrough;
+	case 6:
+		cmd.u.ale_cmd.adr_byt6 = (page >> 24) & 0xff;
+		fallthrough;
+	case 5:
+		cmd.u.ale_cmd.adr_byt5 = (page >> 16) & 0xff;
+		fallthrough;
+	case 4:
+		cmd.u.ale_cmd.adr_byt4 = (page >> 8) & 0xff;
+		cmd.u.ale_cmd.adr_byt3 = page & 0xff;
+		cmd.u.ale_cmd.adr_byt2 = (col >> 8) & 0xff;
+		cmd.u.ale_cmd.adr_byt1 =  col & 0xff;
+		break;
+	/* 1-3 bytes: just the page address */
+	case 3:
+		cmd.u.ale_cmd.adr_byt3 = (page >> 16) & 0xff;
+		fallthrough;
+	case 2:
+		cmd.u.ale_cmd.adr_byt2 = (page >> 8) & 0xff;
+		fallthrough;
+	case 1:
+		cmd.u.ale_cmd.adr_byt1 = page & 0xff;
+		break;
+	default:
+		break;
+	}
+
+	cmd.u.ale_cmd.alen1 = t3;
+	cmd.u.ale_cmd.alen2 = t1;
+	cmd.u.ale_cmd.alen3 = t5;
+	cmd.u.ale_cmd.alen4 = t2;
+	return ndf_submit(tn, &cmd);
+}
+
+/** Queue a data-out (write) cycle of @len bytes, fed by the DMA engine */
+static int ndf_queue_cmd_write(struct octeontx_nfc *tn, int len)
+{
+	union ndf_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.u.wr_cmd.opcode = NDF_OP_WR_CMD;
+	cmd.u.wr_cmd.data = len;
+	cmd.u.wr_cmd.wlen1 = t3;
+	cmd.u.wr_cmd.wlen2 = t1;
+	return ndf_submit(tn, &cmd);
+}
+
+/**
+ * Queue the common command prologue: timings, bus acquire, chip enable,
+ * first CLE cycle (@cmd1), optional ALE address cycles and an optional
+ * second CLE cycle (@cmd2).  Presumably nothing executes until the
+ * doorbell is rung by ndf_build_post_cmd() - confirm against the HRM.
+ */
+static int ndf_build_pre_cmd(struct octeontx_nfc *tn, int cmd1,
+			     int addr_bytes, u64 page, u32 col, int cmd2)
+{
+	struct nand_chip *nand = tn->controller.active;
+	struct octeontx_nand_chip *octeontx_nand;
+	struct ndf_set_tm_par_cmd *timings;
+	int width, page_size, rc;
+
+	/* Also called before chip probing is finished */
+	if (!nand) {
+		timings = &default_timing_parms;
+		page_size = default_page_size;
+		width = default_width;
+	} else {
+		octeontx_nand = to_otx_nand(nand);
+		timings = &octeontx_nand->timings;
+		page_size = nand->mtd.writesize;
+		if (nand->options & NAND_BUSWIDTH_16)
+			width = 2;
+		else
+			width = 1;
+	}
+	rc = ndf_queue_cmd_timing(tn, timings);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_bus(tn, NDF_BUS_ACQUIRE);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_chip(tn, 1, tn->selected_chip, width);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_wait(tn, t1);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_cle(tn, cmd1);
+	if (rc)
+		return rc;
+
+	if (addr_bytes) {
+		rc = ndf_build_wait_busy(tn);
+		if (rc)
+			return rc;
+
+		rc = ndf_queue_cmd_ale(tn, addr_bytes, nand,
+				       page, col, page_size);
+		if (rc)
+			return rc;
+	}
+
+	/* CLE 2 */
+	if (cmd2) {
+		rc = ndf_build_wait_busy(tn);
+		if (rc)
+			return rc;
+
+		rc = ndf_queue_cmd_cle(tn, cmd2);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/**
+ * Queue the common command epilogue: deselect the chip, release the
+ * bus, hold for @hold_time, then ring the doorbell to execute the
+ * queued sequence.
+ */
+static int ndf_build_post_cmd(struct octeontx_nfc *tn, int hold_time)
+{
+	int rc;
+
+	/* Deselect chip */
+	rc = ndf_queue_cmd_chip(tn, 0, 0, 0);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_wait(tn, t2);
+	if (rc)
+		return rc;
+
+	/* Release bus */
+	rc = ndf_queue_cmd_bus(tn, NDF_BUS_RELEASE);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_wait(tn, hold_time);
+	if (rc)
+		return rc;
+
+	/*
+	 * Last action is ringing the doorbell with number of bus
+	 * acquire-releases cycles (currently 1).
+	 */
+	writeq(1, tn->base + NDF_DRBELL);
+	return 0;
+}
+
+/* Setup the NAND DMA engine for a transfer. */
+static void ndf_setup_dma(struct octeontx_nfc *tn, int is_write,
+			  dma_addr_t bus_addr, int len)
+{
+	u64 dma_cfg;
+
+	/* SIZE is in 64-bit words minus one, so @len must be a multiple of 8 */
+	dma_cfg = FIELD_PREP(NDF_DMA_CFG_RW, is_write) |
+		  FIELD_PREP(NDF_DMA_CFG_SIZE, (len >> 3) - 1);
+	dma_cfg |= NDF_DMA_CFG_EN;
+	writeq(bus_addr, tn->base + NDF_DMA_ADR);
+	writeq(dma_cfg, tn->base + NDF_DMA_CFG);
+}
+
+/** Send NAND_CMD_RESET and wait for R/B# to report the reset finished */
+static int octeontx_nand_reset(struct octeontx_nfc *tn)
+{
+	int rc;
+
+	rc = ndf_build_pre_cmd(tn, NAND_CMD_RESET, 0, 0, 0, 0);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_wait_busy(tn);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/**
+ * Run a command sequence ending in a data-in (read) cycle and DMA the
+ * result into the status buffer (for STATUS) or the data buffer.
+ *
+ * Return: number of bytes the DMA engine transferred, or negative errno.
+ */
+static int ndf_read(struct octeontx_nfc *tn, int cmd1, int addr_bytes,
+		    u64 page, u32 col, int cmd2, int len)
+{
+	dma_addr_t bus_addr = tn->use_status ? tn->stat_addr : tn->buf.dmaaddr;
+	struct nand_chip *nand = tn->controller.active;
+	int timing_mode, bytes, rc;
+	union ndf_cmd cmd;
+	u64 start, end;
+
+	pr_debug("%s(%p, 0x%x, 0x%x, 0x%llx, 0x%x, 0x%x, 0x%x)\n", __func__,
+		 tn, cmd1, addr_bytes, page, col, cmd2, len);
+	if (!nand)
+		timing_mode = default_onfi_timing;
+	else
+		timing_mode = nand->onfi_timing_mode_default;
+
+	/* Build the command and address cycles */
+	rc = ndf_build_pre_cmd(tn, cmd1, addr_bytes, page, col, cmd2);
+	if (rc) {
+		dev_err(tn->dev, "Build pre command failed\n");
+		return rc;
+	}
+
+	/* This waits for some time, then waits for busy to be de-asserted. */
+	rc = ndf_build_wait_busy(tn);
+	if (rc) {
+		dev_err(tn->dev, "Wait timeout\n");
+		return rc;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	/* ONFI timing modes 4+ use the EDO read opcode */
+	if (timing_mode < 4)
+		cmd.u.rd_cmd.opcode = NDF_OP_RD_CMD;
+	else
+		cmd.u.rd_cmd.opcode = NDF_OP_RD_EDO_CMD;
+
+	cmd.u.rd_cmd.data = len;
+	cmd.u.rd_cmd.rlen1 = t7;
+	cmd.u.rd_cmd.rlen2 = t3;
+	cmd.u.rd_cmd.rlen3 = t1;
+	cmd.u.rd_cmd.rlen4 = t7;
+	rc = ndf_submit(tn, &cmd);
+	if (rc) {
+		dev_err(tn->dev, "Error submitting command\n");
+		return rc;
+	}
+
+	start = (u64)bus_addr;
+	ndf_setup_dma(tn, 0, bus_addr, len);
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc) {
+		dev_err(tn->dev, "Build post command failed\n");
+		return rc;
+	}
+
+	/* Wait for the DMA to complete */
+	rc = ndf_wait(tn);
+	if (rc) {
+		dev_err(tn->dev, "DMA timed out\n");
+		return rc;
+	}
+
+	/* NDF_DMA_ADR advances as the engine runs; the delta is the count */
+	end = readq(tn->base + NDF_DMA_ADR);
+	bytes = end - start;
+
+	/* Make sure NDF is really done */
+	rc = ndf_wait_idle(tn);
+	if (rc) {
+		dev_err(tn->dev, "poll idle failed\n");
+		return rc;
+	}
+
+	pr_debug("%s: Read %d bytes\n", __func__, bytes);
+	return bytes;
+}
+
+/** ONFI GET_FEATURES: read the 4 sub-feature parameter bytes */
+static int octeontx_nand_get_features(struct mtd_info *mtd,
+				      struct nand_chip *chip, int feature_addr,
+				      u8 *subfeature_para)
+{
+	struct nand_chip *nand = chip;
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int len = 8;
+	int rc;
+
+	pr_debug("%s: feature addr: 0x%x\n", __func__, feature_addr);
+	memset(tn->buf.dmabuf, 0xff, len);
+	tn->buf.data_index = 0;
+	tn->buf.data_len = 0;
+	/*
+	 * ndf_read() returns the number of bytes transferred on success,
+	 * so only a negative return indicates an error here.
+	 */
+	rc = ndf_read(tn, NAND_CMD_GET_FEATURES, 1, feature_addr, 0, 0, len);
+	if (rc < 0)
+		return rc;
+
+	memcpy(subfeature_para, tn->buf.dmabuf, ONFI_SUBFEATURE_PARAM_LEN);
+
+	return 0;
+}
+
+/** ONFI SET_FEATURES: write the sub-feature bytes, zero-padded to 8 */
+static int octeontx_nand_set_features(struct mtd_info *mtd,
+				      struct nand_chip *chip, int feature_addr,
+				      u8 *subfeature_para)
+{
+	struct nand_chip *nand = chip;
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	const int len = ONFI_SUBFEATURE_PARAM_LEN;
+	int rc;
+
+	rc = ndf_build_pre_cmd(tn, NAND_CMD_SET_FEATURES,
+			       1, feature_addr, 0, 0);
+	if (rc)
+		return rc;
+
+	/* DMA works in 8-byte units, so pad the payload with zeros */
+	memcpy(tn->buf.dmabuf, subfeature_para, len);
+	memset(tn->buf.dmabuf + len, 0, 8 - len);
+
+	ndf_setup_dma(tn, 1, tn->buf.dmaaddr, 8);
+
+	rc = ndf_queue_cmd_write(tn, 8);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_wait_busy(tn);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * Read a page from NAND. If the buffer has room, the out of band
+ * data will be included.
+ */
+/*
+ * Return: bytes transferred by DMA (from ndf_read()), or negative errno.
+ * Declarations are kept ahead of the debug() call to avoid
+ * declaration-after-statement, per U-Boot/kernel coding style.
+ */
+static int ndf_page_read(struct octeontx_nfc *tn, u64 page, int col, int len)
+{
+	struct nand_chip *nand = tn->controller.active;
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	int addr_bytes = chip->row_bytes + chip->col_bytes;
+
+	debug("%s(%p, 0x%llx, 0x%x, 0x%x) active: %p\n", __func__,
+	      tn, page, col, len, tn->controller.active);
+
+	memset(tn->buf.dmabuf, 0xff, len);
+	return ndf_read(tn, NAND_CMD_READ0, addr_bytes,
+			page, col, NAND_CMD_READSTART, len);
+}
+
+/* Erase a NAND block */
+static int ndf_block_erase(struct octeontx_nfc *tn, u64 page_addr)
+{
+	struct nand_chip *nand = tn->controller.active;
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	int addr_bytes = chip->row_bytes;	/* erase sends row address only */
+	int rc;
+
+	rc = ndf_build_pre_cmd(tn, NAND_CMD_ERASE1, addr_bytes,
+			       page_addr, 0, NAND_CMD_ERASE2);
+	if (rc)
+		return rc;
+
+	/* Wait for R_B to signal erase is complete  */
+	rc = ndf_build_wait_busy(tn);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc)
+		return rc;
+
+	/* Wait until the command queue is idle */
+	return ndf_wait_idle(tn);
+}
+
+/*
+ * Write a page (or less) to NAND.
+ */
+static int ndf_page_write(struct octeontx_nfc *tn, int page)
+{
+	int len, rc;
+	struct nand_chip *nand = tn->controller.active;
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	int addr_bytes = chip->row_bytes + chip->col_bytes;
+
+	/* payload is everything staged from data_index to data_len */
+	len = tn->buf.data_len - tn->buf.data_index;
+	/* an index past writesize means only the OOB area is programmed */
+	chip->oob_only = (tn->buf.data_index >= nand->mtd.writesize);
+	WARN_ON_ONCE(len & 0x7);	/* DMA length must be 8-byte aligned */
+
+	ndf_setup_dma(tn, 1, tn->buf.dmaaddr + tn->buf.data_index, len);
+	rc = ndf_build_pre_cmd(tn, NAND_CMD_SEQIN, addr_bytes, page, 0, 0);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_write(tn, len);
+	if (rc)
+		return rc;
+
+	rc = ndf_queue_cmd_cle(tn, NAND_CMD_PAGEPROG);
+	if (rc)
+		return rc;
+
+	/* Wait for R_B to signal program is complete  */
+	rc = ndf_build_wait_busy(tn);
+	if (rc)
+		return rc;
+
+	rc = ndf_build_post_cmd(tn, t2);
+	if (rc)
+		return rc;
+
+	/* Wait for the DMA to complete */
+	rc = ndf_wait(tn);
+	if (rc)
+		return rc;
+
+	/* Data transfer is done but NDF is not, it is waiting for R/B# */
+	return ndf_wait_idle(tn);
+}
+
+/*
+ * Dispatch a legacy-style NAND command to the controller.
+ *
+ * mtd:	      MTD device structure
+ * command:   NAND_CMD_* opcode to execute
+ * column:    column address, or -1 when unused by @command
+ * page_addr: page address, or -1 when unused by @command
+ *
+ * Read-type commands deposit their result in tn->buf so that the
+ * read_byte()/read_buf() callbacks can stream it back to the MTD core.
+ *
+ * Fix vs. original: the READOOB error path printed tn->buf.data_len,
+ * which had just been zeroed, instead of the actual return code.
+ */
+static void octeontx_nand_cmdfunc(struct mtd_info *mtd, unsigned int command,
+				  int column, int page_addr)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nand_chip *octeontx_nand = to_otx_nand(nand);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int rc;
+
+	tn->selected_chip = octeontx_nand->cs;
+	if (tn->selected_chip < 0 || tn->selected_chip >= NAND_MAX_CHIPS) {
+		dev_err(tn->dev, "invalid chip select\n");
+		return;
+	}
+
+	tn->use_status = false;
+
+	pr_debug("%s(%p, 0x%x, 0x%x, 0x%x) cs: %d\n", __func__, mtd, command,
+		 column, page_addr, tn->selected_chip);
+	switch (command) {
+	case NAND_CMD_READID:
+		tn->buf.data_index = 0;
+		octeontx_nand->oob_only = false;
+		rc = ndf_read(tn, command, 1, column, 0, 0, 8);
+		if (rc < 0)
+			dev_err(tn->dev, "READID failed with %d\n", rc);
+		else
+			tn->buf.data_len = rc;
+		break;
+
+	case NAND_CMD_READOOB:
+		octeontx_nand->oob_only = true;
+		tn->buf.data_index = 0;
+		tn->buf.data_len = 0;
+		rc = ndf_page_read(tn, page_addr, column, mtd->oobsize);
+		if (rc < mtd->oobsize)
+			dev_err(tn->dev, "READOOB failed with %d\n", rc);
+		else
+			tn->buf.data_len = rc;
+		break;
+
+	case NAND_CMD_READ0:
+		octeontx_nand->oob_only = false;
+		tn->buf.data_index = 0;
+		tn->buf.data_len = 0;
+		rc = ndf_page_read(tn, page_addr, column,
+				   mtd->writesize + mtd->oobsize);
+
+		if (rc < mtd->writesize + mtd->oobsize)
+			dev_err(tn->dev, "READ0 failed with %d\n", rc);
+		else
+			tn->buf.data_len = rc;
+		break;
+
+	case NAND_CMD_STATUS:
+		/* used in oob/not states */
+		tn->use_status = true;
+		rc = ndf_read(tn, command, 0, 0, 0, 0, 8);
+		if (rc < 0)
+			dev_err(tn->dev, "STATUS failed with %d\n", rc);
+		break;
+
+	case NAND_CMD_RESET:
+		/* used in oob/not states */
+		rc = octeontx_nand_reset(tn);
+		if (rc < 0)
+			dev_err(tn->dev, "RESET failed with %d\n", rc);
+		break;
+
+	case NAND_CMD_PARAM:
+		octeontx_nand->oob_only = false;
+		tn->buf.data_index = 0;
+		/* ONFI parameter page: up to 3 redundant 256-byte copies */
+		rc = ndf_read(tn, command, 1, 0, 0, 0,
+			      min(tn->buf.dmabuflen, 3 * 512));
+		if (rc < 0)
+			dev_err(tn->dev, "PARAM failed with %d\n", rc);
+		else
+			tn->buf.data_len = rc;
+		break;
+
+	case NAND_CMD_RNDOUT:
+		/* Random data output: just reposition the read cursor */
+		tn->buf.data_index = column;
+		break;
+
+	case NAND_CMD_ERASE1:
+		if (ndf_block_erase(tn, page_addr))
+			dev_err(tn->dev, "ERASE1 failed\n");
+		break;
+
+	case NAND_CMD_ERASE2:
+		/* We do all erase processing in the first command, so ignore
+		 * this one.
+		 */
+		break;
+
+	case NAND_CMD_SEQIN:
+		octeontx_nand->oob_only = (column >= mtd->writesize);
+		tn->buf.data_index = column;
+		tn->buf.data_len = column;
+
+		octeontx_nand->selected_page = page_addr;
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		rc = ndf_page_write(tn, octeontx_nand->selected_page);
+		if (rc)
+			dev_err(tn->dev, "PAGEPROG failed with %d\n", rc);
+		break;
+
+	case NAND_CMD_SET_FEATURES:
+		octeontx_nand->oob_only = false;
+		/* assume tn->buf.data_len == 4 of data has been set there */
+		rc = octeontx_nand_set_features(mtd, nand,
+						page_addr, tn->buf.dmabuf);
+		if (rc)
+			dev_err(tn->dev, "SET_FEATURES failed with %d\n", rc);
+		break;
+
+	case NAND_CMD_GET_FEATURES:
+		octeontx_nand->oob_only = false;
+		rc = octeontx_nand_get_features(mtd, nand,
+						page_addr, tn->buf.dmabuf);
+		if (!rc) {
+			tn->buf.data_index = 0;
+			tn->buf.data_len = 4;
+		} else {
+			dev_err(tn->dev, "GET_FEATURES failed with %d\n", rc);
+		}
+		break;
+
+	default:
+		WARN_ON_ONCE(1);
+		dev_err(tn->dev, "unhandled nand cmd: %x\n", command);
+	}
+}
+
+/* Wait for the controller to drain its command queue and go idle. */
+static int octeontx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct octeontx_nfc *tn = to_otx_nfc(chip->controller);
+
+	if (ndf_wait_idle(tn) < 0)
+		return -EIO;
+
+	return 0;
+}
+
+/* check compatibility with ONFI timing mode#N, and optionally apply */
+/* TODO: Implement chipnr support? */
+static int octeontx_nand_setup_dat_intf(struct mtd_info *mtd, int chipnr,
+					const struct nand_data_interface *conf)
+{
+	static const bool check_only;
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	static u64 t_wc_n[MAX_ONFI_MODE + 2]; /* cache a mode signature */
+	int mode; /* deduced mode number, for reporting and restricting */
+	int rc;
+
+	/*
+	 * Cache timing modes for reporting, and reducing needless change.
+	 *
+	 * Challenge: caller does not pass ONFI mode#, but reporting the mode
+	 * and restricting to a maximum, or a list, are useful for diagnosing
+	 * new hardware.  So use tWC_min, distinct and monotonic across modes,
+	 * to discover the requested/accepted mode number
+	 */
+	/*
+	 * NOTE(review): this loop only fills the t_wc_n[] cache; nothing
+	 * below matches conf->timings.sdr.tWC_min against it, so 'mode'
+	 * is left at the loop's exit value (-1 on first call, MAX_ONFI_MODE
+	 * once the cache is populated) rather than a deduced mode.  The
+	 * iface_mode comparison below therefore only detects repeat calls
+	 * in the same state — confirm against upstream intent.
+	 */
+	for (mode = MAX_ONFI_MODE; mode >= 0 && !t_wc_n[0]; mode--) {
+		const struct nand_sdr_timings *t;
+
+		t = onfi_async_timing_mode_to_sdr_timings(mode);
+		if (!t)
+			continue;
+		t_wc_n[mode] = t->tWC_min;
+	}
+
+	if (!conf) {
+		rc = -EINVAL;
+	} else if (check_only) {
+		/* check_only is always false here; kept for API symmetry */
+		rc = 0;
+	} else if (nand->data_interface &&
+			chip->iface_set && chip->iface_mode == mode) {
+		/*
+		 * Cases:
+		 * - called from nand_reset, which clears DDR timing
+		 *   mode back to SDR.  BUT if we're already in SDR,
+		 *   timing mode persists over resets.
+		 *   While mtd/nand layer only supports SDR,
+		 *   this is always safe. And this driver only supports SDR.
+		 *
+		 * - called from post-power-event nand_reset (maybe
+		 *   NFC+flash power down, or system hibernate.
+		 *   Address this when CONFIG_PM support added
+		 */
+		rc = 0;
+	} else {
+		/* Program the controller with the requested SDR timings */
+		rc = octeontx_nfc_chip_set_timings(chip, &conf->timings.sdr);
+		if (!rc) {
+			chip->iface_mode = mode;
+			chip->iface_set = true;
+		}
+	}
+	return rc;
+}
+
+/*
+ * Placeholder for resetting the BCH engine after a timeout.
+ * Intentionally empty: no recovery is implemented, callers merely
+ * log the failure and return an error.
+ */
+static void octeontx_bch_reset(void)
+{
+}
+
+/*
+ * Given a page, calculate the ECC code
+ *
+ * chip:	Pointer to NAND chip data structure
+ * buf:		Buffer to calculate ECC on
+ * code:	Buffer to hold ECC data
+ *
+ * Return number of corrected errors on success or -1 on failure
+ *
+ * Fix vs. original: the lazily-allocated DMA bounce buffer was used
+ * without checking the allocation result, so an OOM would dereference
+ * NULL in memset() below.
+ */
+static int octeontx_nand_bch_calculate_ecc_internal(struct mtd_info *mtd,
+						    dma_addr_t ihandle,
+						    u8 *code)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int rc;
+	int i;
+	static u8 *ecc_buffer;
+	static int ecc_size;
+	static unsigned long ecc_handle;
+	union bch_resp *r = tn->bch_resp;
+
+	/* (Re)allocate the bounce buffer when the ECC step size grows */
+	if (!ecc_buffer || ecc_size < nand->ecc.size) {
+		ecc_size = nand->ecc.size;
+		ecc_buffer = dma_alloc_coherent(ecc_size,
+						(unsigned long *)&ecc_handle);
+		if (!ecc_buffer) {
+			ecc_size = 0; /* force retry on next call */
+			dev_err(tn->dev, "%s: ECC buffer allocation failed\n",
+				__func__);
+			return -1;
+		}
+	}
+
+	memset(ecc_buffer, 0, nand->ecc.bytes);
+
+	r->u16 = 0;
+	__iowmb(); /* flush done=0 before making request */
+
+	rc = octeontx_bch_encode(bch_vf, ihandle, nand->ecc.size,
+				 nand->ecc.strength,
+				 (dma_addr_t)ecc_handle, tn->bch_rhandle);
+
+	if (!rc) {
+		octeontx_bch_wait(bch_vf, r, tn->bch_rhandle);
+	} else {
+		dev_err(tn->dev, "octeontx_bch_encode failed\n");
+		return -1;
+	}
+
+	if (!r->s.done || r->s.uncorrectable) {
+		dev_err(tn->dev,
+			"%s timeout, done:%d uncorr:%d corr:%d erased:%d\n",
+			__func__, r->s.done, r->s.uncorrectable,
+			r->s.num_errors, r->s.erased);
+		octeontx_bch_reset();
+		return -1;
+	}
+
+	memcpy(code, ecc_buffer, nand->ecc.bytes);
+
+	/* XOR with the erased-page mask so blank pages decode cleanly */
+	for (i = 0; i < nand->ecc.bytes; i++)
+		code[i] ^= tn->eccmask[i];
+
+	return tn->bch_resp->s.num_errors;
+}
+
+/*
+ * Given a page, calculate the ECC code
+ *
+ * mtd:        MTD block structure
+ * dat:        raw data (unused)
+ * ecc_code:   buffer for ECC
+ */
+static int octeontx_nand_bch_calculate(struct mtd_info *mtd,
+				       const u8 *dat, u8 *ecc_code)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	dma_addr_t handle;
+
+	/* Make the raw data visible to the BCH engine, then encode it */
+	handle = dma_map_single((u8 *)dat, nand->ecc.size, DMA_TO_DEVICE);
+
+	return octeontx_nand_bch_calculate_ecc_internal(mtd, handle,
+							(void *)ecc_code);
+}
+
+/*
+ * Detect and correct multi-bit ECC for a page
+ *
+ * mtd:        MTD block structure
+ * dat:        raw data read from the chip
+ * read_ecc:   ECC from the chip (unused)
+ * isnull:     unused
+ *
+ * Returns number of bits corrected or -1 if unrecoverable
+ *
+ * Fix vs. original: when reallocation of the static bounce buffer
+ * failed, data_buffer was left dangling (already freed) and
+ * buffer_size kept its stale value, so a later call with a smaller
+ * request would write through the freed pointer.  Reset both on
+ * failure so the next call reallocates cleanly.
+ */
+static int octeontx_nand_bch_correct(struct mtd_info *mtd, u_char *dat,
+				     u_char *read_ecc, u_char *isnull)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int i = nand->ecc.size + nand->ecc.bytes;
+	static u8 *data_buffer;
+	static dma_addr_t ihandle;
+	static int buffer_size;
+	dma_addr_t ohandle;
+	union bch_resp *r = tn->bch_resp;
+	int rc;
+
+	/* Grow the data+ECC bounce buffer on demand */
+	if (i > buffer_size) {
+		if (buffer_size) {
+			free(data_buffer);
+			data_buffer = NULL;
+			buffer_size = 0;
+		}
+		data_buffer = dma_alloc_coherent(i,
+						 (unsigned long *)&ihandle);
+		if (!data_buffer) {
+			dev_err(tn->dev,
+				"%s: Could not allocate %d bytes for buffer\n",
+				__func__, i);
+			goto error;
+		}
+		buffer_size = i;
+	}
+
+	memcpy(data_buffer, dat, nand->ecc.size);
+	memcpy(data_buffer + nand->ecc.size, read_ecc, nand->ecc.bytes);
+
+	/* Undo the erased-page mask applied at encode time */
+	for (i = 0; i < nand->ecc.bytes; i++)
+		data_buffer[nand->ecc.size + i] ^= tn->eccmask[i];
+
+	r->u16 = 0;
+	__iowmb(); /* flush done=0 before making request */
+
+	ohandle = dma_map_single(dat, nand->ecc.size, DMA_FROM_DEVICE);
+	rc = octeontx_bch_decode(bch_vf, ihandle, nand->ecc.size,
+				 nand->ecc.strength, ohandle, tn->bch_rhandle);
+
+	if (!rc)
+		octeontx_bch_wait(bch_vf, r, tn->bch_rhandle);
+
+	if (rc) {
+		dev_err(tn->dev, "octeontx_bch_decode failed\n");
+		goto error;
+	}
+
+	if (!r->s.done) {
+		dev_err(tn->dev, "Error: BCH engine timeout\n");
+		octeontx_bch_reset();
+		goto error;
+	}
+
+	if (r->s.erased) {
+		debug("Info: BCH block is erased\n");
+		return 0;
+	}
+
+	if (r->s.uncorrectable) {
+		debug("Cannot correct NAND block, response: 0x%x\n",
+		      r->u16);
+		goto error;
+	}
+
+	return r->s.num_errors;
+
+error:
+	debug("Error performing bch correction\n");
+	return -1;
+}
+
+/*
+ * ecc.hwctl callback; the BCH engine needs no per-operation enable,
+ * so this is intentionally a no-op.
+ */
+void octeontx_nand_bch_hwctl(struct mtd_info *mtd, int mode)
+{
+	/* Do nothing. */
+}
+
+/*
+ * ecc.read_page callback: correct a page already read into the
+ * controller's DMA buffer by READ0, then copy the result to @buf.
+ *
+ * Returns the maximum number of bitflips seen in any ECC step, or a
+ * negative error from the OOB layout lookup.
+ */
+static int octeontx_nand_hw_bch_read_page(struct mtd_info *mtd,
+					  struct nand_chip *chip, u8 *buf,
+					  int oob_required, int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int i, eccsize = chip->ecc.size, ret;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	u8 *p;
+	u8 *ecc_code = chip->buffers->ecccode;
+	unsigned int max_bitflips = 0;
+
+	/* chip->read_buf() insists on sequential order, we do OOB first */
+	memcpy(chip->oob_poi, tn->buf.dmabuf + mtd->writesize, mtd->oobsize);
+
+	/* Use private buffer as input for ECC correction */
+	p = tn->buf.dmabuf;
+
+	/* Gather the stored ECC bytes out of the OOB area */
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	/* Correct each ECC step in place in the DMA buffer */
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		int stat;
+
+		/*
+		 * NOTE(review): 'p - buf' mixes the DMA buffer pointer with
+		 * the caller's buffer, so the printed offset is not
+		 * meaningful — debug output only.
+		 */
+		debug("Correcting block offset %lx, ecc offset %x\n",
+		      p - buf, i);
+		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+			debug("Cannot correct NAND page %d\n", page);
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+
+	/* Copy corrected data to caller's buffer now */
+	memcpy(buf, tn->buf.dmabuf, mtd->writesize);
+
+	return max_bitflips;
+}
+
+/*
+ * ecc.write_page callback: stage page data into the controller's DMA
+ * buffer, compute per-step BCH codes over it, and stage the resulting
+ * OOB as well.  The actual program is issued later by PAGEPROG.
+ *
+ * Fix vs. original: the loop declared an inner 'int ret' shadowing the
+ * outer one (-Wshadow); the outer variable is now reused.
+ */
+static int octeontx_nand_hw_bch_write_page(struct mtd_info *mtd,
+					   struct nand_chip *chip,
+					   const u8 *buf, int oob_required,
+					   int page)
+{
+	struct octeontx_nfc *tn = to_otx_nfc(chip->controller);
+	int i, eccsize = chip->ecc.size, ret;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	const u8 *p;
+	u8 *ecc_calc = chip->buffers->ecccalc;
+
+	debug("%s(buf?%p, oob%d p%x)\n",
+	      __func__, buf, oob_required, page);
+	for (i = 0; i < chip->ecc.total; i++)
+		ecc_calc[i] = 0xFF;
+
+	/* Copy the page data from caller's buffers to private buffer */
+	chip->write_buf(mtd, buf, mtd->writesize);
+	/* Use private date as source for ECC calculation */
+	p = tn->buf.dmabuf;
+
+	/* Hardware ECC calculation */
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		ret = chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+		if (ret < 0)
+			debug("calculate(mtd, p?%p, &ecc_calc[%d]?%p) returned %d\n",
+			      p, i, &ecc_calc[i], ret);
+
+		debug("block offset %lx, ecc offset %x\n", p - buf, i);
+	}
+
+	/* Scatter the computed ECC into the OOB layout */
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	/* Store resulting OOB into private buffer, will be sent to HW */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/**
+ * octeontx_nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int octeontx_nand_write_page_raw(struct mtd_info *mtd,
+					struct nand_chip *chip,
+					const u8 *buf, int oob_required,
+					int page)
+{
+	/* Stage the page data; append OOB only when the caller asks. */
+	chip->write_buf(mtd, buf, mtd->writesize);
+	if (!oob_required)
+		return 0;
+
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+/**
+ * octeontx_nand_write_oob_std - [REPLACEABLE] the most common OOB data write
+ *                             function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int octeontx_nand_write_oob_std(struct mtd_info *mtd,
+				       struct nand_chip *chip,
+				       int page)
+{
+	const u8 *oob = chip->oob_poi;
+
+	/* Seek past the data area of @page and stream out the OOB bytes */
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+	chip->write_buf(mtd, oob, mtd->oobsize);
+	/* Send command to program the OOB data */
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	if (chip->waitfunc(mtd, chip) & NAND_STATUS_FAIL)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * octeontx_nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int octeontx_nand_read_page_raw(struct mtd_info *mtd,
+				       struct nand_chip *chip,
+				       u8 *buf, int oob_required, int page)
+{
+	/* Pull the page data; fetch OOB only when requested. */
+	chip->read_buf(mtd, buf, mtd->writesize);
+	if (!oob_required)
+		return 0;
+
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+/* Read only the spare (OOB) area of @page into chip->oob_poi. */
+static int octeontx_nand_read_oob_std(struct mtd_info *mtd,
+				      struct nand_chip *chip,
+				      int page)
+{
+	u8 *oob = chip->oob_poi;
+
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, oob, mtd->oobsize);
+	return 0;
+}
+
+/*
+ * Pick the strongest supported BCH strength that both satisfies the
+ * chip's requested ecc->strength and fits in the per-step OOB budget
+ * (leaving 2 bytes per chunk for bad-block/other markers).
+ *
+ * Fix vs. original: the fitting loop was 'do { ... } while (index > 0);'
+ * with no decrement of index inside the body, so whenever the selected
+ * strength did not fit the OOB chunk the loop spun forever.  The loop
+ * now steps down through strengths[] and stops at index 0.
+ *
+ * Return 0 on success, -ENOMEM if the eccmask buffer cannot be allocated.
+ */
+static int octeontx_nand_calc_bch_ecc_strength(struct nand_chip *nand)
+{
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	int nsteps = mtd->writesize / ecc->size;
+	int oobchunk = mtd->oobsize / nsteps;
+
+	/* ecc->strength determines ecc_level and OOB's ecc_bytes. */
+	const u8 strengths[]  = {4, 8, 16, 24, 32, 40, 48, 56, 60, 64};
+	/* first set the desired ecc_level to match strengths[] */
+	int index = ARRAY_SIZE(strengths) - 1;
+	int need;
+
+	while (index > 0 && !(ecc->options & NAND_ECC_MAXIMIZE) &&
+	       strengths[index - 1] >= ecc->strength)
+		index--;
+
+	/* Step down until the ECC bytes fit the OOB chunk (15 bits/bit) */
+	for (;;) {
+		need = DIV_ROUND_UP(15 * strengths[index], 8);
+		if (need <= oobchunk - 2 || index == 0)
+			break;
+		index--;
+	}
+
+	debug("%s: steps ds: %d, strength ds: %d\n", __func__,
+	      nand->ecc_step_ds, nand->ecc_strength_ds);
+	ecc->strength = strengths[index];
+	ecc->bytes = need;
+	debug("%s: strength: %d, bytes: %d\n", __func__, ecc->strength,
+	      ecc->bytes);
+
+	if (!tn->eccmask)
+		tn->eccmask = devm_kzalloc(tn->dev, ecc->bytes, GFP_KERNEL);
+	if (!tn->eccmask)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* sample the BCH signature of an erased (all 0xff) page,
+ * to XOR into all page traffic, so erased pages have no ECC errors
+ *
+ * Fix vs. original: on encode failure the mask was still built from the
+ * zeroed erased_ecc[] buffer, yielding an all-0xff mask that would
+ * corrupt every subsequent page's ECC.  The mask is now only updated
+ * when the encode succeeded.
+ */
+static int octeontx_bch_save_empty_eccmask(struct nand_chip *nand)
+{
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+	unsigned int eccsize = nand->ecc.size;
+	unsigned int eccbytes = nand->ecc.bytes;
+	u8 erased_ecc[eccbytes]; /* VLA; eccbytes is bounded by OOB size */
+	unsigned long erased_handle;
+	unsigned char *erased_page = dma_alloc_coherent(eccsize,
+							&erased_handle);
+	int i;
+	int rc = 0;
+
+	if (!erased_page)
+		return -ENOMEM;
+
+	memset(erased_page, 0xff, eccsize);
+	memset(erased_ecc, 0, eccbytes);
+
+	/* Encode an all-0xff page and capture its ECC signature */
+	rc = octeontx_nand_bch_calculate_ecc_internal(mtd,
+						      (dma_addr_t)erased_handle,
+						      erased_ecc);
+
+	free(erased_page);
+
+	if (rc < 0)
+		return rc;
+
+	/* Mask chosen so an erased page XORed with it decodes error-free */
+	for (i = 0; i < eccbytes; i++)
+		tn->eccmask[i] = erased_ecc[i] ^ 0xff;
+
+	return rc;
+}
+
+/*
+ * Derive addressing geometry and ECC policy for a freshly identified
+ * chip, and wire up the hardware-BCH callbacks when available.
+ * Called between nand_scan_ident() and nand_scan_tail().
+ */
+static void octeontx_nfc_chip_sizing(struct nand_chip *nand)
+{
+	struct octeontx_nand_chip *chip = to_otx_nand(nand);
+	struct mtd_info *mtd = nand_to_mtd(nand);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+	/* ONFI packs row/column address cycle counts into one byte */
+	chip->row_bytes = nand->onfi_params.addr_cycles & 0xf;
+	chip->col_bytes = nand->onfi_params.addr_cycles >> 4;
+	debug("%s(%p) row bytes: %d, col bytes: %d, ecc mode: %d\n",
+	      __func__, nand, chip->row_bytes, chip->col_bytes, ecc->mode);
+
+	/*
+	 * HW_BCH using OcteonTX BCH engine, or SOFT_BCH laid out in
+	 * HW_BCH-compatible fashion, depending on devtree advice
+	 * and kernel config.
+	 * BCH/NFC hardware capable of subpage ops, not implemented.
+	 */
+	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+	nand->options |= NAND_NO_SUBPAGE_WRITE;
+	debug("%s: start steps: %d, size: %d, bytes: %d\n",
+	      __func__, ecc->steps, ecc->size, ecc->bytes);
+	debug("%s: step ds: %d, strength ds: %d\n", __func__,
+	      nand->ecc_step_ds, nand->ecc_strength_ds);
+
+	if (ecc->mode != NAND_ECC_NONE) {
+		int nsteps = ecc->steps ? ecc->steps : 1;
+
+		/* Prefer the declared step size; else default, if it divides
+		 * the page size evenly
+		 */
+		if (ecc->size && ecc->size != mtd->writesize)
+			nsteps = mtd->writesize / ecc->size;
+		else if (mtd->writesize > def_ecc_size &&
+			 !(mtd->writesize & (def_ecc_size - 1)))
+			nsteps = mtd->writesize / def_ecc_size;
+		ecc->steps = nsteps;
+		ecc->size = mtd->writesize / nsteps;
+		ecc->bytes = mtd->oobsize / nsteps;
+
+		/* Chip datasheet (ONFI) values override the derived ones */
+		if (nand->ecc_strength_ds)
+			ecc->strength = nand->ecc_strength_ds;
+		if (nand->ecc_step_ds)
+			ecc->size = nand->ecc_step_ds;
+		/*
+		 * no subpage ops, but set subpage-shift to match ecc->steps
+		 * so mtd_nandbiterrs tests appropriate boundaries
+		 */
+		if (!mtd->subpage_sft && !(ecc->steps & (ecc->steps - 1)))
+			mtd->subpage_sft = fls(ecc->steps) - 1;
+
+		if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
+			debug("%s: ecc mode: %d\n", __func__, ecc->mode);
+			if (ecc->mode != NAND_ECC_SOFT &&
+			    !octeontx_nand_calc_bch_ecc_strength(nand)) {
+				struct octeontx_nfc *tn =
+					to_otx_nfc(nand->controller);
+
+				debug("Using hardware BCH engine support\n");
+				ecc->mode = NAND_ECC_HW_SYNDROME;
+				ecc->read_page = octeontx_nand_hw_bch_read_page;
+				ecc->write_page =
+					octeontx_nand_hw_bch_write_page;
+				ecc->read_page_raw =
+					octeontx_nand_read_page_raw;
+				ecc->write_page_raw =
+					octeontx_nand_write_page_raw;
+				ecc->read_oob = octeontx_nand_read_oob_std;
+				ecc->write_oob = octeontx_nand_write_oob_std;
+
+				ecc->calculate = octeontx_nand_bch_calculate;
+				ecc->correct = octeontx_nand_bch_correct;
+				ecc->hwctl = octeontx_nand_bch_hwctl;
+
+				debug("NAND chip %d using hw_bch\n",
+				      tn->selected_chip);
+				debug(" %d bytes ECC per %d byte block\n",
+				      ecc->bytes, ecc->size);
+				debug(" for %d bits of correction per block.",
+				      ecc->strength);
+				octeontx_nand_calc_ecc_layout(nand);
+				octeontx_bch_save_empty_eccmask(nand);
+			}
+		}
+	}
+}
+
+/*
+ * Initialize one NAND chip described by devicetree @node: read its
+ * chip select, hook up the nand_chip callbacks, scan/identify the
+ * device, size its ECC, and register the resulting MTD.
+ *
+ * Fix vs. original: the return value of nand_register() was assigned
+ * but never checked; a registration failure is now propagated.
+ *
+ * Return 0 on success, negative error code otherwise.
+ */
+static int octeontx_nfc_chip_init(struct octeontx_nfc *tn, struct udevice *dev,
+				  ofnode node)
+{
+	struct octeontx_nand_chip *chip;
+	struct nand_chip *nand;
+	struct mtd_info *mtd;
+	int ret;
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	debug("%s: Getting chip select\n", __func__);
+	ret = ofnode_read_s32(node, "reg", &chip->cs);
+	if (ret) {
+		dev_err(dev, "could not retrieve reg property: %d\n", ret);
+		return ret;
+	}
+
+	if (chip->cs >= NAND_MAX_CHIPS) {
+		dev_err(dev, "invalid reg value: %u (max CS = 7)\n", chip->cs);
+		return -EINVAL;
+	}
+	debug("%s: chip select: %d\n", __func__, chip->cs);
+	nand = &chip->nand;
+	nand->controller = &tn->controller;
+	if (!tn->controller.active)
+		tn->controller.active = nand;
+
+	debug("%s: Setting flash node\n", __func__);
+	nand_set_flash_node(nand, node);
+
+	nand->options = 0;
+	nand->select_chip = octeontx_nand_select_chip;
+	nand->cmdfunc = octeontx_nand_cmdfunc;
+	nand->waitfunc = octeontx_nand_waitfunc;
+	nand->read_byte = octeontx_nand_read_byte;
+	nand->read_buf = octeontx_nand_read_buf;
+	nand->write_buf = octeontx_nand_write_buf;
+	nand->onfi_set_features = octeontx_nand_set_features;
+	nand->onfi_get_features = octeontx_nand_get_features;
+	nand->setup_data_interface = octeontx_nand_setup_dat_intf;
+
+	mtd = nand_to_mtd(nand);
+	debug("%s: mtd: %p\n", __func__, mtd);
+	mtd->dev->parent = dev;
+
+	debug("%s: NDF_MISC: 0x%llx\n", __func__,
+	      readq(tn->base + NDF_MISC));
+
+	/* TODO: support more than 1 chip */
+	debug("%s: Scanning identification\n", __func__);
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret)
+		return ret;
+
+	debug("%s: Sizing chip\n", __func__);
+	octeontx_nfc_chip_sizing(nand);
+
+	debug("%s: Scanning tail\n", __func__);
+	ret = nand_scan_tail(mtd);
+	if (ret) {
+		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+		return ret;
+	}
+
+	debug("%s: Registering mtd\n", __func__);
+	ret = nand_register(0, mtd);
+	if (ret) {
+		dev_err(dev, "nand_register failed: %d\n", ret);
+		return ret;
+	}
+
+	debug("%s: Adding tail\n", __func__);
+	list_add_tail(&chip->node, &tn->chips);
+	return 0;
+}
+
+/*
+ * Walk the controller's devicetree children and initialize every
+ * described NAND chip.  Returns 0 on success, -ENODEV when no chips
+ * are described, or the first chip-init error encountered.
+ */
+static int octeontx_nfc_chips_init(struct octeontx_nfc *tn)
+{
+	struct udevice *dev = tn->dev;
+	ofnode node = dev->node;
+	ofnode child;
+	int nr_chips = of_get_child_count(node);
+	int rc;
+
+	debug("%s: node: %s\n", __func__, ofnode_get_name(node));
+	debug("%s: %d chips\n", __func__, nr_chips);
+
+	if (nr_chips > NAND_MAX_CHIPS) {
+		dev_err(dev, "too many NAND chips: %d\n", nr_chips);
+		return -EINVAL;
+	}
+	if (!nr_chips) {
+		debug("no DT NAND chips found\n");
+		return -ENODEV;
+	}
+
+	pr_info("%s: scanning %d chips DTs\n", __func__, nr_chips);
+
+	ofnode_for_each_subnode(child, node) {
+		debug("%s: Calling octeontx_nfc_chip_init(%p, %s, %ld)\n",
+		      __func__, tn, dev->name, child.of_offset);
+		rc = octeontx_nfc_chip_init(tn, dev, child);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+/* Reset NFC and initialize registers. */
+static int octeontx_nfc_init(struct octeontx_nfc *tn)
+{
+	const struct nand_sdr_timings *timings;
+	u64 misc;
+
+	/* Enable execution, disable boot mode, and hold the fifo in reset */
+	misc = readq(tn->base + NDF_MISC);
+	misc &= ~NDF_MISC_EX_DIS;
+	misc |= (NDF_MISC_BT_DIS | NDF_MISC_RST_FF);
+	writeq(misc, tn->base + NDF_MISC);
+	debug("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC));
+
+	/* Release the fifo from reset */
+	misc &= ~(NDF_MISC_RST_FF);
+
+	/* Maximum of co-processor cycles for glitch filtering */
+	misc |= FIELD_PREP(NDF_MISC_WAIT_CNT, 0x3f);
+
+	writeq(misc, tn->base + NDF_MISC);
+
+	/* Set timing parameters to onfi mode 0 for probing */
+	timings = onfi_async_timing_mode_to_sdr_timings(0);
+	if (IS_ERR(timings))
+		return PTR_ERR(timings);
+
+	return set_default_timings(tn, timings);
+}
+
+/*
+ * Probe the NAND controller PCI function.  When hardware ECC is
+ * enabled and the BCH VF has not been probed yet, the device is queued
+ * for deferred probing instead (see octeontx_pci_nand_deferred_probe).
+ *
+ * Fix vs. original: the return value of octeontx_nfc_init() was
+ * ignored; a controller-init failure is now propagated.
+ *
+ * NOTE(review): probe_done is tested but never set, and the
+ * unclk/release labels perform no cleanup — presumably leftovers from
+ * the kernel driver this was ported from; confirm before relying on
+ * them.
+ */
+static int octeontx_pci_nand_probe(struct udevice *dev)
+{
+	struct octeontx_nfc *tn = dev_get_priv(dev);
+	int ret;
+	static bool probe_done;
+
+	debug("%s(%s) tn: %p\n", __func__, dev->name, tn);
+	if (probe_done)
+		return 0;
+
+	if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
+		bch_vf = octeontx_bch_getv();
+		if (!bch_vf) {
+			struct octeontx_probe_device *probe_dev;
+
+			debug("%s: bch not yet initialized\n", __func__);
+			probe_dev = calloc(sizeof(*probe_dev), 1);
+			if (!probe_dev) {
+				printf("%s: Out of memory\n", __func__);
+				return -ENOMEM;
+			}
+			probe_dev->dev = dev;
+			INIT_LIST_HEAD(&probe_dev->list);
+			list_add_tail(&probe_dev->list,
+				      &octeontx_pci_nand_deferred_devices);
+			debug("%s: Deferring probe until after BCH initialization\n",
+			      __func__);
+			return 0;
+		}
+	}
+
+	tn->dev = dev;
+	INIT_LIST_HEAD(&tn->chips);
+
+	tn->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+	if (!tn->base) {
+		ret = -EINVAL;
+		goto release;
+	}
+	debug("%s: bar at %p\n", __func__, tn->base);
+	tn->buf.dmabuflen = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
+	tn->buf.dmabuf = dma_alloc_coherent(tn->buf.dmabuflen,
+					    (unsigned long *)&tn->buf.dmaaddr);
+	if (!tn->buf.dmabuf) {
+		ret = -ENOMEM;
+		debug("%s: Could not allocate DMA buffer\n", __func__);
+		goto unclk;
+	}
+
+	/* one hw-bch response, for one outstanding transaction */
+	tn->bch_resp = dma_alloc_coherent(sizeof(*tn->bch_resp),
+					  (unsigned long *)&tn->bch_rhandle);
+
+	tn->stat = dma_alloc_coherent(8, (unsigned long *)&tn->stat_addr);
+	if (!tn->stat || !tn->bch_resp) {
+		debug("%s: Could not allocate bch status or response\n",
+		      __func__);
+		ret = -ENOMEM;
+		goto unclk;
+	}
+
+	debug("%s: Calling octeontx_nfc_init()\n", __func__);
+	ret = octeontx_nfc_init(tn);
+	if (ret) {
+		dev_err(dev, "octeontx_nfc_init() failed: %d\n", ret);
+		goto unclk;
+	}
+	debug("%s: Initializing chips\n", __func__);
+	ret = octeontx_nfc_chips_init(tn);
+	debug("%s: init chips ret: %d\n", __func__, ret);
+	if (ret) {
+		if (ret != -ENODEV)
+			dev_err(dev, "failed to init nand chips\n");
+		goto unclk;
+	}
+	dev_info(dev, "probed\n");
+	return 0;
+
+unclk:
+release:
+	return ret;
+}
+
+/*
+ * Driver remove hook: quiesce the controller before handing off to the
+ * OS (DM_FLAG_OS_PREPARE).  The register write order below matters:
+ * DMA is stopped and cleared first, then command execution is disabled
+ * while the fifo is pulsed through reset, and finally all interrupt
+ * state is cleared.
+ */
+int octeontx_pci_nand_disable(struct udevice *dev)
+{
+	struct octeontx_nfc *tn = dev_get_priv(dev);
+	u64 dma_cfg;
+	u64 ndf_misc;
+
+	debug("%s: Disabling NAND device %s\n", __func__, dev->name);
+	dma_cfg = readq(tn->base + NDF_DMA_CFG);
+	dma_cfg &= ~NDF_DMA_CFG_EN;
+	dma_cfg |= NDF_DMA_CFG_CLR;
+	writeq(dma_cfg, tn->base + NDF_DMA_CFG);
+
+	/* Disable execution and put FIFO in reset mode */
+	ndf_misc = readq(tn->base + NDF_MISC);
+	ndf_misc |= NDF_MISC_EX_DIS | NDF_MISC_RST_FF;
+	writeq(ndf_misc, tn->base + NDF_MISC);
+	/* Release the FIFO from reset */
+	ndf_misc &= ~NDF_MISC_RST_FF;
+	writeq(ndf_misc, tn->base + NDF_MISC);
+#ifdef DEBUG
+	printf("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC));
+#endif
+	/* Clear any interrupts and enable bits */
+	writeq(~0ull, tn->base + NDF_INT_ENA_W1C);
+	writeq(~0ull, tn->base + NDF_INT);
+	debug("%s: NDF_ST_REG: 0x%llx\n", __func__,
+	      readq(tn->base + NDF_ST_REG));
+	return 0;
+}
+
+/**
+ * Since it's possible (and even likely) that the NAND device will be probed
+ * before the BCH device has been probed, we may need to defer the probing.
+ *
+ * In this case, the initial probe returns success but the actual probing
+ * is deferred until the BCH VF has been probed.
+ *
+ * @return	0 for success, otherwise error
+ */
+int octeontx_pci_nand_deferred_probe(void)
+{
+	int rc = 0;
+	struct octeontx_probe_device *pdev;
+
+	debug("%s: Performing deferred probing\n", __func__);
+	list_for_each_entry(pdev, &octeontx_pci_nand_deferred_devices, list) {
+		debug("%s: Probing %s\n", __func__, pdev->dev->name);
+		/* Clear ACTIVATED so device_probe() runs the probe again */
+		pdev->dev->flags &= ~DM_FLAG_ACTIVATED;
+		rc = device_probe(pdev->dev);
+		if (rc && rc != -ENODEV) {
+			printf("%s: Error %d with deferred probe of %s\n",
+			       __func__, rc, pdev->dev->name);
+			break;
+		}
+	}
+	return rc;
+}
+
+/* PCI IDs matched by this driver; 0xA04F is presumably the Cavium NDF
+ * (NAND flash controller) function — confirm against the SoC HRM.
+ */
+static const struct pci_device_id octeontx_nfc_pci_id_table[] = {
+	{ PCI_VDEVICE(CAVIUM, 0xA04F) },
+	{}
+};
+
+/*
+ * No platform data to extract; per-chip devicetree parsing happens in
+ * octeontx_nfc_chip_init() during probe.
+ */
+static int octeontx_nand_ofdata_to_platdata(struct udevice *dev)
+{
+	return 0;
+}
+
+/* Devicetree compatibles matched by this driver */
+static const struct udevice_id octeontx_nand_ids[] = {
+	{ .compatible = "cavium,cn8130-nand" },
+	{ },
+};
+
+/*
+ * Driver-model binding.  DM_FLAG_OS_PREPARE ensures .remove (which
+ * quiesces DMA and the fifo) runs before the OS is booted.
+ */
+U_BOOT_DRIVER(octeontx_pci_nand) = {
+	.name	= OCTEONTX_NAND_DRIVER_NAME,
+	.id	= UCLASS_MTD,
+	.of_match = of_match_ptr(octeontx_nand_ids),
+	.ofdata_to_platdata = octeontx_nand_ofdata_to_platdata,
+	.probe = octeontx_pci_nand_probe,
+	.priv_auto_alloc_size = sizeof(struct octeontx_nfc),
+	.remove = octeontx_pci_nand_disable,
+	.flags = DM_FLAG_OS_PREPARE,
+};
+
+/* Bind the driver to its PCI IDs */
+U_BOOT_PCI_DEVICE(octeontx_pci_nand, octeontx_nfc_pci_id_table);
+
+/*
+ * Board hook: trigger probing of the BCH PF/VF (when HW ECC is
+ * configured) and then the NAND controller itself.  -ENODEV simply
+ * means the hardware is absent and is not reported as an error.
+ */
+void board_nand_init(void)
+{
+	struct udevice *dev;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
+		/* BCH PF must come up before the VF that the NAND path uses */
+		ret = uclass_get_device_by_driver(UCLASS_MISC,
+						  DM_GET_DRIVER(octeontx_pci_bchpf),
+						  &dev);
+		if (ret && ret != -ENODEV) {
+			pr_err("Failed to initialize OcteonTX BCH PF controller. (error %d)\n",
+			       ret);
+		}
+		ret = uclass_get_device_by_driver(UCLASS_MISC,
+						  DM_GET_DRIVER(octeontx_pci_bchvf),
+						  &dev);
+		if (ret && ret != -ENODEV) {
+			pr_err("Failed to initialize OcteonTX BCH VF controller. (error %d)\n",
+			       ret);
+		}
+	}
+
+	ret = uclass_get_device_by_driver(UCLASS_MTD,
+					  DM_GET_DRIVER(octeontx_pci_nand),
+					  &dev);
+	if (ret && ret != -ENODEV)
+		pr_err("Failed to initialize OcteonTX NAND controller. (error %d)\n",
+		       ret);
+}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a0d2d21..2beec2d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -407,6 +407,37 @@
 	  The MediaTek MT7628 ethernet interface is used on MT7628 and
 	  MT7688 based boards.
 
+config NET_OCTEONTX
+	bool "OcteonTX Ethernet support"
+	depends on ARCH_OCTEONTX
+	depends on PCI_SRIOV
+	help
+	  You must select Y to enable network device support for
+	  OcteonTX SoCs. If unsure, say n
+
+config NET_OCTEONTX2
+	bool "OcteonTX2 Ethernet support"
+	depends on ARCH_OCTEONTX2
+	select OCTEONTX2_CGX_INTF
+	help
+	  You must select Y to enable network device support for
+	  OcteonTX2 SoCs. If unsure, say n
+
+config OCTEONTX_SMI
+	bool "OcteonTX SMI Device support"
+	depends on ARCH_OCTEONTX || ARCH_OCTEONTX2
+	help
+	  You must select Y to enable SMI controller support for
+	  OcteonTX or OcteonTX2 SoCs. If unsure, say n
+
+config OCTEONTX2_CGX_INTF
+	bool "OcteonTX2 CGX ATF interface support"
+	depends on ARCH_OCTEONTX2
+	default y if ARCH_OCTEONTX2
+	help
+	  You must select Y to enable CGX ATF interface support for
+	  OcteonTX2 SoCs. If unsure, say n
+
 config PCH_GBE
 	bool "Intel Platform Controller Hub EG20T GMAC driver"
 	depends on DM_ETH && DM_PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 03f0192..9f7a79e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -65,6 +65,10 @@
 obj-$(CONFIG_SMC91111) += smc91111.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_TSEC_ENET) += tsec.o fsl_mdio.o
+obj-$(CONFIG_NET_OCTEONTX) += octeontx/
+obj-$(CONFIG_NET_OCTEONTX2) += octeontx2/
+obj-$(CONFIG_OCTEONTX_SMI) += octeontx/smi.o
+obj-$(CONFIG_OCTEONTX2_CGX_INTF) += octeontx2/cgx_intf.o
 obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o
 obj-$(CONFIG_ULI526X) += uli526x.o
 obj-$(CONFIG_VSC7385_ENET) += vsc7385.o
diff --git a/drivers/net/e1000.c b/drivers/net/e1000.c
index 49be766..8e6c755 100644
--- a/drivers/net/e1000.c
+++ b/drivers/net/e1000.c
@@ -1644,6 +1644,11 @@
 	E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP);
 	E1000_WRITE_FLUSH(hw);
 
+	if (hw->mac_type == e1000_igb) {
+		E1000_WRITE_REG(hw, RXPBS, I210_RXPBSIZE_DEFAULT);
+		E1000_WRITE_REG(hw, TXPBS, I210_TXPBSIZE_DEFAULT);
+	}
+
 	/* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
 	hw->tbi_compatibility_on = false;
 
diff --git a/drivers/net/e1000.h b/drivers/net/e1000.h
index 19ed477..072851b 100644
--- a/drivers/net/e1000.h
+++ b/drivers/net/e1000.h
@@ -735,6 +735,7 @@
 #define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
 #define E1000_FCRTL    0x02160	/* Flow Control Receive Threshold Low - RW */
 #define E1000_FCRTH    0x02168	/* Flow Control Receive Threshold High - RW */
+#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
 #define E1000_RDBAL    0x02800	/* RX Descriptor Base Address Low - RW */
 #define E1000_RDBAH    0x02804	/* RX Descriptor Base Address High - RW */
 #define E1000_RDLEN    0x02808	/* RX Descriptor Length - RW */
@@ -745,6 +746,7 @@
 #define E1000_RADV     0x0282C	/* RX Interrupt Absolute Delay Timer - RW */
 #define E1000_RSRPD    0x02C00	/* RX Small Packet Detect - RW */
 #define E1000_TXDMAC   0x03000	/* TX DMA Control - RW */
+#define E1000_TXPBS	   0x03404  /* Tx Packet Buffer Size - RW */
 #define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
 #define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
 #define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
@@ -2589,4 +2591,8 @@
 
 #define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers
 							after IMS clear */
+
+#define I210_RXPBSIZE_DEFAULT          0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT          0x04000014 /* TXPBSIZE default */
+
 #endif	/* _E1000_HW_H_ */
diff --git a/drivers/net/octeontx/Makefile b/drivers/net/octeontx/Makefile
new file mode 100644
index 0000000..d4adb7c
--- /dev/null
+++ b/drivers/net/octeontx/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier:    GPL-2.0
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+
+obj-$(CONFIG_NET_OCTEONTX) += bgx.o nic_main.o nicvf_queues.o nicvf_main.o \
+				xcv.o
diff --git a/drivers/net/octeontx/bgx.c b/drivers/net/octeontx/bgx.c
new file mode 100644
index 0000000..fbe2e2c
--- /dev/null
+++ b/drivers/net/octeontx/bgx.c
@@ -0,0 +1,1565 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <config.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdt_support.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <misc.h>
+#include <net.h>
+#include <netdev.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <asm/io.h>
+#include <asm/arch/board.h>
+#include <linux/delay.h>
+#include <linux/libfdt.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "bgx.h"
+
+static const phy_interface_t if_mode[] = {
+	[QLM_MODE_SGMII]  = PHY_INTERFACE_MODE_SGMII,
+	[QLM_MODE_RGMII]  = PHY_INTERFACE_MODE_RGMII,
+	[QLM_MODE_QSGMII] = PHY_INTERFACE_MODE_QSGMII,
+	[QLM_MODE_XAUI]   = PHY_INTERFACE_MODE_XAUI,
+	[QLM_MODE_RXAUI]  = PHY_INTERFACE_MODE_RXAUI,
+};
+
+struct lmac {
+	struct bgx		*bgx;
+	int			dmac;
+	u8			mac[6];
+	bool			link_up;
+	bool			init_pend;
+	int			lmacid; /* ID within BGX */
+	int			phy_addr; /* ID on board */
+	struct udevice		*dev;
+	struct mii_dev		*mii_bus;
+	struct phy_device	*phydev;
+	unsigned int		last_duplex;
+	unsigned int		last_link;
+	unsigned int		last_speed;
+	int			lane_to_sds;
+	int			use_training;
+	int			lmac_type;
+	u8			qlm_mode;
+	int			qlm;
+	bool			is_1gx;
+};
+
+struct bgx {
+	u8			bgx_id;
+	int			node;
+	struct	lmac		lmac[MAX_LMAC_PER_BGX];
+	int			lmac_count;
+	u8			max_lmac;
+	void __iomem		*reg_base;
+	struct pci_dev		*pdev;
+	bool			is_rgx;
+};
+
+struct bgx_board_info bgx_board_info[MAX_BGX_PER_NODE];
+
+struct bgx *bgx_vnic[MAX_BGX_PER_NODE];
+
+/* APIs to read/write BGXX CSRs */
+static u64 bgx_reg_read(struct bgx *bgx, uint8_t lmac, u64 offset)
+{
+	u64 addr = (uintptr_t)bgx->reg_base +
+				((uint32_t)lmac << 20) + offset;
+
+	return readq((void *)addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, uint8_t lmac,
+			  u64 offset, u64 val)
+{
+	u64 addr = (uintptr_t)bgx->reg_base +
+				((uint32_t)lmac << 20) + offset;
+
+	writeq(val, (void *)addr);
+}
+
+static void bgx_reg_modify(struct bgx *bgx, uint8_t lmac,
+			   u64 offset, u64 val)
+{
+	u64 addr = (uintptr_t)bgx->reg_base +
+				((uint32_t)lmac << 20) + offset;
+
+	writeq(val | bgx_reg_read(bgx, lmac, offset), (void *)addr);
+}
+
+static int bgx_poll_reg(struct bgx *bgx, uint8_t lmac,
+			u64 reg, u64 mask, bool zero)
+{
+	int timeout = 200;
+	u64 reg_val;
+
+	while (timeout) {
+		reg_val = bgx_reg_read(bgx, lmac, reg);
+		if (zero && !(reg_val & mask))
+			return 0;
+		if (!zero && (reg_val & mask))
+			return 0;
+		mdelay(1);
+		timeout--;
+	}
+	return 1;
+}
+
+static int gser_poll_reg(u64 reg, int bit, u64 mask, u64 expected_val,
+			 int timeout)
+{
+	u64 reg_val;
+
+	debug("%s reg = %#llx, mask = %#llx,", __func__, reg, mask);
+	debug(" expected_val = %#llx, bit = %d\n", expected_val, bit);
+	while (timeout) {
+		reg_val = readq(reg) >> bit;
+		if ((reg_val & mask) == (expected_val))
+			return 0;
+		mdelay(1);
+		timeout--;
+	}
+	return 1;
+}
+
+static bool is_bgx_port_valid(int bgx, int lmac)
+{
+	debug("%s bgx %d lmac %d valid %d\n", __func__, bgx, lmac,
+	      bgx_board_info[bgx].lmac_reg[lmac]);
+
+	if (bgx_board_info[bgx].lmac_reg[lmac])
+		return 1;
+	else
+		return 0;
+}
+
+struct lmac *bgx_get_lmac(int node, int bgx_idx, int lmacid)
+{
+	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+
+	if (bgx)
+		return &bgx->lmac[lmacid];
+
+	return NULL;
+}
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+
+	if (bgx)
+		return bgx->lmac[lmacid].mac;
+
+	return NULL;
+}
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+
+	if (!bgx)
+		return;
+
+	memcpy(bgx->lmac[lmacid].mac, mac, 6);
+}
+
+/* Return number of BGX present in HW */
+void bgx_get_count(int node, int *bgx_count)
+{
+	int i;
+	struct bgx *bgx;
+
+	*bgx_count = 0;
+	for (i = 0; i < MAX_BGX_PER_NODE; i++) {
+		bgx = bgx_vnic[node * MAX_BGX_PER_NODE + i];
+		debug("bgx_vnic[%u]: %p\n", node * MAX_BGX_PER_NODE + i,
+		      bgx);
+		if (bgx)
+			*bgx_count |= (1 << i);
+	}
+}
+
+/* Return number of LMAC configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+	struct bgx *bgx;
+
+	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	if (bgx)
+		return bgx->lmac_count;
+
+	return 0;
+}
+
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+{
+	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	u64 cfg;
+
+	if (!bgx)
+		return;
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	if (enable)
+		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+	else
+		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+}
+
+static void bgx_flush_dmac_addrs(struct bgx *bgx, u64 lmac)
+{
+	u64 dmac = 0x00;
+	u64 offset, addr;
+
+	while (bgx->lmac[lmac].dmac > 0) {
+		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(dmac)) +
+			(lmac * MAX_DMAC_PER_LMAC * sizeof(dmac));
+		addr = (uintptr_t)bgx->reg_base +
+				BGX_CMR_RX_DMACX_CAM + offset;
+		writeq(dmac, (void *)addr);
+		bgx->lmac[lmac].dmac--;
+	}
+}
+
+/* Configure BGX LMAC in internal loopback mode */
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+				int lmac_idx, bool enable)
+{
+	struct bgx *bgx;
+	struct lmac *lmac;
+	u64    cfg;
+
+	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmac_idx];
+	if (lmac->qlm_mode == QLM_MODE_SGMII) {
+		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
+		if (enable)
+			cfg |= PCS_MRX_CTL_LOOPBACK1;
+		else
+			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
+		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
+	} else {
+		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
+		if (enable)
+			cfg |= SPU_CTL_LOOPBACK;
+		else
+			cfg &= ~SPU_CTL_LOOPBACK;
+		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
+	}
+}
+
+/* Return the DLM used for the BGX */
+static int get_qlm_for_bgx(int node, int bgx_id, int index)
+{
+	int qlm = 0;
+	u64 cfg;
+
+	if (otx_is_soc(CN81XX)) {
+		qlm = (bgx_id) ? 2 : 0;
+		qlm += (index >= 2) ? 1 : 0;
+	} else if (otx_is_soc(CN83XX)) {
+		switch (bgx_id) {
+		case 0:
+			qlm = 2;
+			break;
+		case 1:
+			qlm = 3;
+			break;
+		case 2:
+			if (index >= 2)
+				qlm = 6;
+			else
+				qlm = 5;
+			break;
+		case 3:
+			qlm = 4;
+			break;
+		}
+	}
+
+	cfg = readq(GSERX_CFG(qlm)) & GSERX_CFG_BGX;
+	debug("%s:qlm%d: cfg = %lld\n", __func__, qlm, cfg);
+
+	/* Check if DLM is configured as BGX# */
+	if (cfg) {
+		if (readq(GSERX_PHY_CTL(qlm)))
+			return -1;
+		return qlm;
+	}
+	return -1;
+}
+
+static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+{
+	u64 cfg;
+	struct lmac *lmac;
+
+	lmac = &bgx->lmac[lmacid];
+
+	debug("%s:bgx_id = %d, lmacid = %d\n", __func__, bgx->bgx_id, lmacid);
+
+	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+	/* max packet size */
+	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+	/* Disable frame alignment if using preamble */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+	if (cfg & 1)
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+	/* Enable lmac */
+	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+	/* PCS reset */
+	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+			 PCS_MRX_CTL_RESET, true)) {
+		printf("BGX PCS reset not completed\n");
+		return -1;
+	}
+
+	/* power down, reset autoneg, autoneg enable */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+	cfg &= ~PCS_MRX_CTL_PWR_DN;
+
+	if (bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis)
+		cfg |= (PCS_MRX_CTL_RST_AN);
+	else
+		cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+	/* Disable disparity for QSGMII mode, to prevent propagation across
+	 * ports.
+	 */
+
+	if (lmac->qlm_mode == QLM_MODE_QSGMII) {
+		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
+		cfg &= ~PCS_MISCX_CTL_DISP_EN;
+		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
+		return 0; /* Skip checking AN_CPT */
+	}
+
+	if (lmac->is_1gx) {
+		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
+		cfg |= PCS_MISC_CTL_MODE;
+		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
+	}
+
+	if (lmac->qlm_mode == QLM_MODE_SGMII) {
+		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+				 PCS_MRX_STATUS_AN_CPT, false)) {
+			printf("BGX AN_CPT not completed\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int bgx_lmac_sgmii_set_link_speed(struct lmac *lmac)
+{
+	u64 prtx_cfg;
+	u64 pcs_miscx_ctl;
+	u64 cfg;
+	struct bgx *bgx = lmac->bgx;
+	unsigned int lmacid = lmac->lmacid;
+
+	debug("%s: lmacid %d\n", __func__, lmac->lmacid);
+
+	/* Disable LMAC before setting up speed */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	/* Read GMX CFG */
+	prtx_cfg = bgx_reg_read(bgx, lmacid,
+				BGX_GMP_GMI_PRTX_CFG);
+	/* Read PCS MISCS CTL */
+	pcs_miscx_ctl = bgx_reg_read(bgx, lmacid,
+				     BGX_GMP_PCS_MISCX_CTL);
+
+	/* Use GMXENO to force the link down */
+	if (lmac->link_up) {
+		pcs_miscx_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+		/* change the duplex setting if the link is up */
+		prtx_cfg |= GMI_PORT_CFG_DUPLEX;
+	} else {
+		pcs_miscx_ctl |= PCS_MISC_CTL_GMX_ENO;
+	}
+
+	/* speed based setting for GMX */
+	switch (lmac->last_speed) {
+	case 10:
+		prtx_cfg &= ~GMI_PORT_CFG_SPEED;
+		prtx_cfg |= GMI_PORT_CFG_SPEED_MSB;
+		prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME;
+		pcs_miscx_ctl |= 50; /* sampling point */
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40);
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+		break;
+	case 100:
+		prtx_cfg &= ~GMI_PORT_CFG_SPEED;
+		prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB;
+		prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME;
+		pcs_miscx_ctl |= 0x5; /* sampling point */
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40);
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+		break;
+	case 1000:
+		prtx_cfg |= GMI_PORT_CFG_SPEED;
+		prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB;
+		prtx_cfg |= GMI_PORT_CFG_SLOT_TIME;
+		pcs_miscx_ctl |= 0x1; /* sampling point */
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x200);
+		if (lmac->last_duplex)
+			bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+		else /* half duplex */
+			bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST,
+				      0x2000);
+		break;
+	default:
+		break;
+	}
+
+	/* write back the new PCS misc and GMX settings */
+	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, pcs_miscx_ctl);
+	bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG, prtx_cfg);
+
+	/* read back GMX CFG again to check config completion */
+	bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG);
+
+	/* enable BGX back */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg |= CMR_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	return 0;
+}
+
+static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+{
+	u64 cfg;
+	struct lmac *lmac;
+
+	lmac = &bgx->lmac[lmacid];
+
+	/* Reset SPU */
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+		printf("BGX SPU reset not completed\n");
+		return -1;
+	}
+
+	/* Disable LMAC */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+	/* Set interleaved running disparity for RXAUI */
+	if (lmac->qlm_mode != QLM_MODE_RXAUI)
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+	else
+		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+
+	/* clear all interrupts */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+	if (lmac->use_training) {
+		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+		/* training enable */
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+	}
+
+	/* Append FCS to each packet */
+	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+	/* Disable forward error correction */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+	cfg &= ~SPU_FEC_CTL_FEC_EN;
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+	/* Disable autoneg */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+	cfg = cfg & ~(SPU_AN_CTL_XNP_EN);
+	if (lmac->use_training)
+		cfg = cfg | (SPU_AN_CTL_AN_EN);
+	else
+		cfg = cfg & ~(SPU_AN_CTL_AN_EN);
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
+	/* Clear all KR bits, configure according to the mode */
+	cfg &= ~((0xfULL << 22) | (1ULL << 12));
+	if (lmac->qlm_mode == QLM_MODE_10G_KR)
+		cfg |= (1 << 23);
+	else if (lmac->qlm_mode == QLM_MODE_40G_KR4)
+		cfg |= (1 << 24);
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+	if (lmac->use_training)
+		cfg |= SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+	else
+		cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+	/* Enable lmac */
+	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+	cfg &= ~SPU_CTL_LOW_POWER;
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+	cfg &= ~SMU_TX_CTL_UNI_EN;
+	cfg |= SMU_TX_CTL_DIC_EN;
+	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+	/* take lmac_count into account */
+	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+	/* max packet size */
+	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+	debug("xaui_init: lmacid = %d, qlm = %d, qlm_mode = %d\n",
+	      lmacid, lmac->qlm, lmac->qlm_mode);
+	/* RXAUI with Marvell PHY requires some tweaking */
+	if (lmac->qlm_mode == QLM_MODE_RXAUI) {
+		char mii_name[20];
+		struct phy_info *phy;
+
+		phy = &bgx_board_info[bgx->bgx_id].phy_info[lmacid];
+		snprintf(mii_name, sizeof(mii_name), "smi%d", phy->mdio_bus);
+
+		debug("mii_name: %s\n", mii_name);
+		lmac->mii_bus = miiphy_get_dev_by_name(mii_name);
+		lmac->phy_addr = phy->phy_addr;
+		rxaui_phy_xs_init(lmac->mii_bus, lmac->phy_addr);
+	}
+
+	return 0;
+}
+
+/* Get max number of lanes present in a given QLM/DLM */
+static int get_qlm_lanes(int qlm)
+{
+	if (otx_is_soc(CN81XX))
+		return 2;
+	else if (otx_is_soc(CN83XX))
+		return (qlm >= 5) ? 2 : 4;
+	else
+		return -1;
+}
+
+int __rx_equalization(int qlm, int lane)
+{
+	int max_lanes = get_qlm_lanes(qlm);
+	int l;
+	int fail = 0;
+
+	/* Before completing Rx equalization wait for
+	 * GSERx_RX_EIE_DETSTS[CDRLOCK] to be set
+	 * This ensures the rx data is valid
+	 */
+	if (lane == -1) {
+		if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK, 0xf,
+				  (1 << max_lanes) - 1, 100)) {
+			debug("ERROR: CDR Lock not detected");
+			debug(" on DLM%d for 2 lanes\n", qlm);
+			return -1;
+		}
+	} else {
+		if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK,
+				  (0xf & (1 << lane)), (1 << lane), 100)) {
+			debug("ERROR: DLM%d: CDR Lock not detected", qlm);
+			debug(" on %d lane\n", lane);
+			return -1;
+		}
+	}
+
+	for (l = 0; l < max_lanes; l++) {
+		u64 rctl, reer;
+
+		if (lane != -1 && lane != l)
+			continue;
+
+		/* Enable software control */
+		rctl = readq(GSER_BR_RXX_CTL(qlm, l));
+		rctl |= GSER_BR_RXX_CTL_RXT_SWM;
+		writeq(rctl, GSER_BR_RXX_CTL(qlm, l));
+
+		/* Clear the completion flag and initiate a new request */
+		reer = readq(GSER_BR_RXX_EER(qlm, l));
+		reer &= ~GSER_BR_RXX_EER_RXT_ESV;
+		reer |= GSER_BR_RXX_EER_RXT_EER;
+		writeq(reer, GSER_BR_RXX_EER(qlm, l));
+	}
+
+	/* Wait for RX equalization to complete */
+	for (l = 0; l < max_lanes; l++) {
+		u64 rctl, reer;
+
+		if (lane != -1 && lane != l)
+			continue;
+
+		gser_poll_reg(GSER_BR_RXX_EER(qlm, l), EER_RXT_ESV, 1, 1, 200);
+		reer = readq(GSER_BR_RXX_EER(qlm, l));
+
+		/* Switch back to hardware control */
+		rctl = readq(GSER_BR_RXX_CTL(qlm, l));
+		rctl &= ~GSER_BR_RXX_CTL_RXT_SWM;
+		writeq(rctl, GSER_BR_RXX_CTL(qlm, l));
+
+		if (reer & GSER_BR_RXX_EER_RXT_ESV) {
+			debug("Rx equalization completed on DLM%d", qlm);
+			debug(" QLM%d rxt_esm = 0x%llx\n", l, (reer & 0x3fff));
+		} else {
+			debug("Rx equalization timedout on DLM%d", qlm);
+			debug(" lane %d\n", l);
+			fail = 1;
+		}
+	}
+
+	return (fail) ? -1 : 0;
+}
+
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+	struct bgx *bgx = lmac->bgx;
+	int lmacid = lmac->lmacid;
+	int lmac_type = lmac->lmac_type;
+	u64 cfg;
+
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+
+	/* check if auto negotiation is complete */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+	if (cfg & SPU_AN_CTL_AN_EN) {
+		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_STATUS);
+		if (!(cfg & SPU_AN_STS_AN_COMPLETE)) {
+			/* Restart autonegotiation */
+			debug("restarting auto-neg\n");
+			bgx_reg_modify(bgx, lmacid, BGX_SPUX_AN_CONTROL,
+				       SPU_AN_CTL_AN_RESTART);
+			return -1;
+		}
+	}
+
+	debug("%s link use_training %d\n", __func__, lmac->use_training);
+	if (lmac->use_training) {
+		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+		if (!(cfg & (1ull << 13))) {
+			debug("waiting for link training\n");
+			/* Clear the training interrupts (W1C) */
+			cfg = (1ull << 13) | (1ull << 14);
+			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+			udelay(2000);
+			/* Restart training */
+			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+			cfg |= (1ull << 0);
+			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+			return -1;
+		}
+	}
+
+	/* Perform RX Equalization. Applies to non-KR interfaces for speeds
+	 * >= 6.25Gbps.
+	 */
+	if (!lmac->use_training) {
+		int qlm;
+		bool use_dlm = 0;
+
+		if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) &&
+					   bgx->bgx_id == 2))
+			use_dlm = 1;
+		switch (lmac->lmac_type) {
+		default:
+		case BGX_MODE_SGMII:
+		case BGX_MODE_RGMII:
+		case BGX_MODE_XAUI:
+			/* Nothing to do */
+			break;
+		case BGX_MODE_XLAUI:
+			if (use_dlm) {
+				if (__rx_equalization(lmac->qlm, -1) ||
+				    __rx_equalization(lmac->qlm + 1, -1)) {
+					printf("BGX%d:%d", bgx->bgx_id, lmacid);
+					printf(" Waiting for RX Equalization");
+					printf(" on DLM%d/DLM%d\n",
+					       lmac->qlm, lmac->qlm + 1);
+					return -1;
+				}
+			} else {
+				if (__rx_equalization(lmac->qlm, -1)) {
+					printf("BGX%d:%d", bgx->bgx_id, lmacid);
+					printf(" Waiting for RX Equalization");
+					printf(" on QLM%d\n", lmac->qlm);
+					return -1;
+				}
+			}
+			break;
+		case BGX_MODE_RXAUI:
+			/* RXAUI0 uses LMAC0:QLM0/QLM2 and RXAUI1 uses
+			 * LMAC1:QLM1/QLM3. RXAUI requires 2 lanes
+			 * for each interface
+			 */
+			qlm = lmac->qlm;
+			if (__rx_equalization(qlm, 0)) {
+				printf("BGX%d:%d", bgx->bgx_id, lmacid);
+				printf(" Waiting for RX Equalization");
+				printf(" on QLM%d, Lane0\n", qlm);
+				return -1;
+			}
+			if (__rx_equalization(qlm, 1)) {
+				printf("BGX%d:%d", bgx->bgx_id, lmacid);
+				printf(" Waiting for RX Equalization");
+				printf(" on QLM%d, Lane1\n", qlm);
+				return -1;
+			}
+			break;
+		case BGX_MODE_XFI:
+			{
+				int lid;
+				bool altpkg = otx_is_altpkg();
+
+				if (bgx->bgx_id == 0 && altpkg && lmacid)
+					lid = 0;
+				else if ((lmacid >= 2) && use_dlm)
+					lid = lmacid - 2;
+				else
+					lid = lmacid;
+
+				if (__rx_equalization(lmac->qlm, lid)) {
+					printf("BGX%d:%d", bgx->bgx_id, lid);
+					printf(" Waiting for RX Equalization");
+					printf(" on QLM%d\n", lmac->qlm);
+				}
+			}
+			break;
+		}
+	}
+
+	/* wait for PCS to come out of reset */
+	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+		printf("BGX SPU reset not completed\n");
+		return -1;
+	}
+
+	if (lmac_type == 3 || lmac_type == 4) {
+		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+				 SPU_BR_STATUS_BLK_LOCK, false)) {
+			printf("SPU_BR_STATUS_BLK_LOCK not completed\n");
+			return -1;
+		}
+	} else {
+		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+				 SPU_BX_STATUS_RX_ALIGN, false)) {
+			printf("SPU_BX_STATUS_RX_ALIGN not completed\n");
+			return -1;
+		}
+	}
+
+	/* Clear rcvflt bit (latching high) and read it back */
+	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+		printf("Receive fault, retry training\n");
+		if (lmac->use_training) {
+			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+			if (!(cfg & (1ull << 13))) {
+				cfg = (1ull << 13) | (1ull << 14);
+				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+				cfg = bgx_reg_read(bgx, lmacid,
+						   BGX_SPUX_BR_PMD_CRTL);
+				cfg |= (1ull << 0);
+				bgx_reg_write(bgx, lmacid,
+					      BGX_SPUX_BR_PMD_CRTL, cfg);
+				return -1;
+			}
+		}
+		return -1;
+	}
+
+	/* Wait for MAC RX to be ready */
+	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+			 SMU_RX_CTL_STATUS, true)) {
+		printf("SMU RX link not okay\n");
+		return -1;
+	}
+
+	/* Wait for BGX RX to be idle */
+	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+		printf("SMU RX not idle\n");
+		return -1;
+	}
+
+	/* Wait for BGX TX to be idle */
+	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+		printf("SMU TX not idle\n");
+		return -1;
+	}
+
+	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+		printf("Receive fault\n");
+		return -1;
+	}
+
+	/* Receive link is latching low. Force it high and verify it */
+	if (!(bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS1) &
+	    SPU_STATUS1_RCV_LNK))
+		bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1,
+			       SPU_STATUS1_RCV_LNK);
+	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+			 SPU_STATUS1_RCV_LNK, false)) {
+		printf("SPU receive link down\n");
+		return -1;
+	}
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+	cfg &= ~SPU_MISC_CTL_RX_DIS;
+	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+	return 0;
+}
+
+static int bgx_lmac_enable(struct bgx *bgx, int8_t lmacid)
+{
+	struct lmac *lmac;
+	u64 cfg;
+
+	lmac = &bgx->lmac[lmacid];
+
+	debug("%s: lmac: %p, lmacid = %d\n", __func__, lmac, lmacid);
+
+	if (lmac->qlm_mode == QLM_MODE_SGMII ||
+	    lmac->qlm_mode == QLM_MODE_RGMII ||
+	    lmac->qlm_mode == QLM_MODE_QSGMII) {
+		if (bgx_lmac_sgmii_init(bgx, lmacid)) {
+			debug("bgx_lmac_sgmii_init failed\n");
+			return -1;
+		}
+		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+	} else {
+		if (bgx_lmac_xaui_init(bgx, lmacid, lmac->lmac_type))
+			return -1;
+		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+	}
+
+	/* Enable lmac */
+	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
+		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+	return 0;
+}
+
+int bgx_poll_for_link(int node, int bgx_idx, int lmacid)
+{
+	int ret;
+	struct lmac *lmac = bgx_get_lmac(node, bgx_idx, lmacid);
+	char mii_name[10];
+	struct phy_info *phy;
+
+	if (!lmac) {
+		printf("LMAC %d/%d/%d is disabled or doesn't exist\n",
+		       node, bgx_idx, lmacid);
+		return 0;
+	}
+
+	debug("%s: %d, lmac: %d/%d/%d %p\n",
+	      __FILE__, __LINE__,
+	      node, bgx_idx, lmacid, lmac);
+	if (lmac->init_pend) {
+		ret = bgx_lmac_enable(lmac->bgx, lmacid);
+		if (ret < 0) {
+			printf("BGX%d LMAC%d lmac_enable failed\n", bgx_idx,
+			       lmacid);
+			return ret;
+		}
+		lmac->init_pend = 0;
+		mdelay(100);
+	}
+	if (lmac->qlm_mode == QLM_MODE_SGMII ||
+	    lmac->qlm_mode == QLM_MODE_RGMII ||
+	    lmac->qlm_mode == QLM_MODE_QSGMII) {
+		if (bgx_board_info[bgx_idx].phy_info[lmacid].phy_addr == -1) {
+			lmac->link_up = 1;
+			lmac->last_speed = 1000;
+			lmac->last_duplex = 1;
+			printf("BGX%d:LMAC %u link up\n", bgx_idx, lmacid);
+			return lmac->link_up;
+		}
+		snprintf(mii_name, sizeof(mii_name), "smi%d",
+			 bgx_board_info[bgx_idx].phy_info[lmacid].mdio_bus);
+
+		debug("mii_name: %s\n", mii_name);
+
+		lmac->mii_bus = miiphy_get_dev_by_name(mii_name);
+		phy = &bgx_board_info[bgx_idx].phy_info[lmacid];
+		lmac->phy_addr = phy->phy_addr;
+
+		debug("lmac->mii_bus: %p\n", lmac->mii_bus);
+		if (!lmac->mii_bus) {
+			printf("MDIO device %s not found\n", mii_name);
+			ret = -ENODEV;
+			return ret;
+		}
+
+		lmac->phydev = phy_connect(lmac->mii_bus, lmac->phy_addr,
+					   lmac->dev,
+					   if_mode[lmac->qlm_mode]);
+
+		if (!lmac->phydev) {
+			printf("%s: No PHY device\n", __func__);
+			return -1;
+		}
+
+		ret = phy_config(lmac->phydev);
+		if (ret) {
+			printf("%s: Could not initialize PHY %s\n",
+			       __func__, lmac->phydev->dev->name);
+			return ret;
+		}
+
+		ret = phy_startup(lmac->phydev);
+		debug("%s: %d\n", __FILE__, __LINE__);
+		if (ret) {
+			printf("%s: Could not initialize PHY %s\n",
+			       __func__, lmac->phydev->dev->name);
+		}
+
+#ifdef OCTEONTX_XCV
+		if (lmac->qlm_mode == QLM_MODE_RGMII)
+			xcv_setup_link(lmac->phydev->link, lmac->phydev->speed);
+#endif
+
+		lmac->link_up = lmac->phydev->link;
+		lmac->last_speed = lmac->phydev->speed;
+		lmac->last_duplex = lmac->phydev->duplex;
+
+		debug("%s qlm_mode %d phy link status 0x%x,last speed 0x%x,",
+		      __func__, lmac->qlm_mode, lmac->link_up,
+		      lmac->last_speed);
+		debug(" duplex 0x%x\n", lmac->last_duplex);
+
+		if (lmac->qlm_mode != QLM_MODE_RGMII)
+			bgx_lmac_sgmii_set_link_speed(lmac);
+
+	} else {
+		u64 status1;
+		u64 tx_ctl;
+		u64 rx_ctl;
+
+		status1 = bgx_reg_read(lmac->bgx, lmac->lmacid,
+				       BGX_SPUX_STATUS1);
+		tx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_TX_CTL);
+		rx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+		debug("BGX%d LMAC%d BGX_SPUX_STATUS2: %lx\n", bgx_idx, lmacid,
+		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
+						  BGX_SPUX_STATUS2));
+		debug("BGX%d LMAC%d BGX_SPUX_STATUS1: %lx\n", bgx_idx, lmacid,
+		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
+						  BGX_SPUX_STATUS1));
+		debug("BGX%d LMAC%d BGX_SMUX_RX_CTL: %lx\n", bgx_idx, lmacid,
+		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
+						  BGX_SMUX_RX_CTL));
+		debug("BGX%d LMAC%d BGX_SMUX_TX_CTL: %lx\n", bgx_idx, lmacid,
+		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
+						  BGX_SMUX_TX_CTL));
+
+		if ((status1 & SPU_STATUS1_RCV_LNK) &&
+		    ((tx_ctl & SMU_TX_CTL_LNK_STATUS) == 0) &&
+		    ((rx_ctl & SMU_RX_CTL_STATUS) == 0)) {
+			lmac->link_up = 1;
+			if (lmac->lmac_type == 4)
+				lmac->last_speed = 40000;
+			else
+				lmac->last_speed = 10000;
+			lmac->last_duplex = 1;
+		} else {
+			lmac->link_up = 0;
+			lmac->last_speed = 0;
+			lmac->last_duplex = 0;
+			return bgx_xaui_check_link(lmac);
+		}
+
+		lmac->last_link = lmac->link_up;
+	}
+
+	printf("BGX%d:LMAC %u link %s\n", bgx_idx, lmacid,
+	       (lmac->link_up) ? "up" : "down");
+
+	return lmac->link_up;
+}
+
+void bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
+{
+	struct lmac *lmac;
+	u64 cmrx_cfg;
+
+	lmac = &bgx->lmac[lmacid];
+
+	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cmrx_cfg &= ~(1 << 15);
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+	bgx_flush_dmac_addrs(bgx, lmacid);
+
+	if (lmac->phydev)
+		phy_shutdown(lmac->phydev);
+
+	lmac->phydev = NULL;
+}
+
+/* Program BGXX_CMRX_CONFIG.{lmac_type,lane_to_sds} for each interface.
+ * And the number of LMACs used by this interface. Each lmac can be in
+ * programmed in a different mode, so parse each lmac one at a time.
+ */
+static void bgx_init_hw(struct bgx *bgx)
+{
+	struct lmac *lmac;
+	int i, lmacid, count = 0, inc = 0;
+	char buf[40];
+	static int qsgmii_configured;
+
+	for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
+		struct lmac *tlmac;
+
+		lmac = &bgx->lmac[lmacid];
+		debug("%s: lmacid = %d, qlm = %d, mode = %d\n",
+		      __func__, lmacid, lmac->qlm, lmac->qlm_mode);
+		/* If QLM is not programmed, skip */
+		if (lmac->qlm == -1)
+			continue;
+
+		switch (lmac->qlm_mode) {
+		case QLM_MODE_SGMII:
+		{
+			/* EBB8000 (alternative pkg) has only lane0 present on
+			 * DLM0 and DLM1, skip configuring other lanes
+			 */
+			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+				if (lmacid % 2)
+					continue;
+			}
+			lmac->lane_to_sds = lmacid;
+			lmac->lmac_type = 0;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: %s\n",
+				 bgx->bgx_id, lmac->qlm, lmacid,
+				 lmac->is_1gx ? "1000Base-X" : "SGMII");
+			break;
+		}
+		case QLM_MODE_XAUI:
+			if (lmacid != 0)
+				continue;
+			lmac->lmac_type = 1;
+			lmac->lane_to_sds = 0xE4;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: XAUI\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_RXAUI:
+			if (lmacid == 0) {
+				lmac->lmac_type = 2;
+				lmac->lane_to_sds = 0x4;
+			} else if (lmacid == 1) {
+				struct lmac *tlmac;
+
+				tlmac = &bgx->lmac[2];
+				if (tlmac->qlm_mode == QLM_MODE_RXAUI) {
+					lmac->lmac_type = 2;
+					lmac->lane_to_sds = 0xe;
+					lmac->qlm = tlmac->qlm;
+				}
+			} else {
+				continue;
+			}
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: RXAUI\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_XFI:
+			/* EBB8000 (alternative pkg) has only lane0 present on
+			 * DLM0 and DLM1, skip configuring other lanes
+			 */
+			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+				if (lmacid % 2)
+					continue;
+			}
+			lmac->lane_to_sds = lmacid;
+			lmac->lmac_type = 3;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: XFI\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_XLAUI:
+			if (lmacid != 0)
+				continue;
+			lmac->lmac_type = 4;
+			lmac->lane_to_sds = 0xE4;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: XLAUI\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_10G_KR:
+			/* EBB8000 (alternative pkg) has only lane0 present on
+			 * DLM0 and DLM1, skip configuring other lanes
+			 */
+			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+				if (lmacid % 2)
+					continue;
+			}
+			lmac->lane_to_sds = lmacid;
+			lmac->lmac_type = 3;
+			lmac->use_training = 1;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: 10G-KR\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_40G_KR4:
+			if (lmacid != 0)
+				continue;
+			lmac->lmac_type = 4;
+			lmac->lane_to_sds = 0xE4;
+			lmac->use_training = 1;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d QLM%d LMAC%d mode: 40G-KR4\n",
+				 bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case QLM_MODE_RGMII:
+			if (lmacid != 0)
+				continue;
+			lmac->lmac_type = 5;
+			lmac->lane_to_sds = 0xE4;
+			snprintf(buf, sizeof(buf),
+				 "BGX%d LMAC%d mode: RGMII\n",
+				 bgx->bgx_id, lmacid);
+			break;
+		case QLM_MODE_QSGMII:
+			if (qsgmii_configured)
+				continue;
+			if (lmacid == 0 || lmacid == 2) {
+				count = 4;
+				printf("BGX%d QLM%d LMAC%d mode: QSGMII\n",
+				       bgx->bgx_id, lmac->qlm, lmacid);
+				for (i = 0; i < count; i++) {
+					struct lmac *l;
+					int type;
+
+					l = &bgx->lmac[i];
+					l->lmac_type = 6;
+					type = l->lmac_type;
+					l->qlm_mode = QLM_MODE_QSGMII;
+					l->lane_to_sds = lmacid + i;
+					if (is_bgx_port_valid(bgx->bgx_id, i))
+						bgx_reg_write(bgx, i,
+							      BGX_CMRX_CFG,
+							      (type << 8) |
+							      l->lane_to_sds);
+				}
+				qsgmii_configured = 1;
+			}
+			continue;
+		default:
+			continue;
+		}
+
+		/* Reset lmac to the unused slot */
+		if (is_bgx_port_valid(bgx->bgx_id, count) &&
+		    lmac->qlm_mode != QLM_MODE_QSGMII) {
+			int lmac_en = 0;
+			int tmp, idx;
+
+			tlmac = &bgx->lmac[count];
+			tlmac->lmac_type = lmac->lmac_type;
+			idx = bgx->bgx_id;
+			tmp = count + inc;
+			/* Adjust lane_to_sds based on BGX-ENABLE */
+			for (; tmp < MAX_LMAC_PER_BGX; inc++) {
+				lmac_en = bgx_board_info[idx].lmac_enable[tmp];
+				if (lmac_en)
+					break;
+				tmp = count + inc;
+			}
+
+			if (inc != 0 && inc < MAX_LMAC_PER_BGX &&
+			    lmac_en && inc != count)
+				tlmac->lane_to_sds =
+					lmac->lane_to_sds + abs(inc - count);
+			else
+				tlmac->lane_to_sds = lmac->lane_to_sds;
+			tlmac->qlm = lmac->qlm;
+			tlmac->qlm_mode = lmac->qlm_mode;
+
+			printf("%s", buf);
+			/* Initialize lmac_type and lane_to_sds */
+			bgx_reg_write(bgx, count, BGX_CMRX_CFG,
+				      (tlmac->lmac_type << 8) |
+				      tlmac->lane_to_sds);
+
+			if (tlmac->lmac_type == BGX_MODE_SGMII) {
+				if (tlmac->is_1gx) {
+					/* This is actually 1000BASE-X, so
+					 * mark the LMAC as such.
+					 */
+					bgx_reg_modify(bgx, count,
+						       BGX_GMP_PCS_MISCX_CTL,
+						       PCS_MISC_CTL_MODE);
+				}
+
+				if (!bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis) {
+					/* The Linux DTS does not disable
+					 * autoneg for this LMAC (in SGMII or
+					 * 1000BASE-X mode), so that means
+					 * enable autoneg.
+					 */
+					bgx_reg_modify(bgx, count,
+						       BGX_GMP_PCS_MRX_CTL,
+						       PCS_MRX_CTL_AN_EN);
+				}
+			}
+
+			count += 1;
+		}
+	}
+
+	/* Done probing all 4 lmacs, now clear qsgmii_configured */
+	qsgmii_configured = 0;
+
+	printf("BGX%d LMACs: %d\n", bgx->bgx_id, count);
+	bgx->lmac_count = count;
+	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, count);
+	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, count);
+
+	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+		printf("BGX%d BIST failed\n", bgx->bgx_id);
+
+	/* Set the backpressure AND mask */
+	for (i = 0; i < bgx->lmac_count; i++)
+		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+				(i * MAX_BGX_CHANS_PER_LMAC));
+
+	/* Disable all MAC filtering */
+	for (i = 0; i < RX_DMAC_COUNT; i++)
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+	/* Disable MAC steering (NCSI traffic) */
+	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+}
+
+/**
+ * bgx_get_qlm_mode() - derive the QLM mode of every LMAC of a BGX
+ * @bgx: BGX block to probe
+ *
+ * Reads the LMAC type programmed by low level firmware into
+ * BGX_CMRX_CFG[10:8] (plus the link-training bits mirrored in
+ * GSERX_SCRATCH) and translates it into the driver's qlm_mode
+ * enumeration for each LMAC.  LMACs without an assigned QLM
+ * (qlm == -1) are skipped.
+ */
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+	struct lmac *lmac;
+	int lmacid;
+
+	/* Read LMACx type to figure out QLM mode
+	 * This is configured by low level firmware
+	 */
+	for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
+		int lmac_type;
+		int train_en;
+		int index = 0;
+
+		/* On DLM-based interfaces two LMACs share one DLM; read the
+		 * configuration from the first LMAC of each pair.
+		 */
+		if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) &&
+					   bgx->bgx_id == 2))
+			index = (lmacid < 2) ? 0 : 2;
+
+		lmac = &bgx->lmac[lmacid];
+
+		/* check if QLM is programmed, if not, skip */
+		if (lmac->qlm == -1)
+			continue;
+
+		lmac_type = bgx_reg_read(bgx, index, BGX_CMRX_CFG);
+		lmac->lmac_type = (lmac_type >> 8) & 0x07;
+		debug("%s:%d:%d: lmac_type = %d, altpkg = %d\n", __func__,
+		      bgx->bgx_id, lmacid, lmac->lmac_type, otx_is_altpkg());
+
+		train_en = (readq(GSERX_SCRATCH(lmac->qlm))) & 0xf;
+		lmac->is_1gx = bgx_reg_read(bgx, index, BGX_GMP_PCS_MISCX_CTL)
+				& (PCS_MISC_CTL_MODE) ? true : false;
+
+		switch (lmac->lmac_type) {
+		case BGX_MODE_SGMII:
+			if (bgx->is_rgx) {
+				if (lmacid == 0) {
+					lmac->qlm_mode = QLM_MODE_RGMII;
+					debug("BGX%d LMAC%d mode: RGMII\n",
+					      bgx->bgx_id, lmacid);
+				}
+				continue;
+			} else {
+				/* Alternative package bonds out only lane0
+				 * of DLM0/DLM1 for BGX0
+				 */
+				if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+					if (lmacid % 2)
+						continue;
+				}
+				lmac->qlm_mode = QLM_MODE_SGMII;
+				debug("BGX%d QLM%d LMAC%d mode: %s\n",
+				      bgx->bgx_id, lmac->qlm, lmacid,
+				      lmac->is_1gx ? "1000Base-X" : "SGMII");
+			}
+			break;
+		case BGX_MODE_XAUI:
+			if (bgx->bgx_id == 0 && otx_is_altpkg())
+				continue;
+			lmac->qlm_mode = QLM_MODE_XAUI;
+			if (lmacid != 0)
+				continue;
+			debug("BGX%d QLM%d LMAC%d mode: XAUI\n",
+			      bgx->bgx_id, lmac->qlm, lmacid);
+			break;
+		case BGX_MODE_RXAUI:
+			if (bgx->bgx_id == 0 && otx_is_altpkg())
+				continue;
+			lmac->qlm_mode = QLM_MODE_RXAUI;
+			if (index == lmacid) {
+				debug("BGX%d QLM%d LMAC%d mode: RXAUI\n",
+				      bgx->bgx_id, lmac->qlm, (index ? 1 : 0));
+			}
+			break;
+		case BGX_MODE_XFI:
+			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
+				if (lmacid % 2)
+					continue;
+			}
+			/* Training enabled in GSER scratch means 10GBASE-KR
+			 * rather than plain XFI.  Guard the lmacid >= 2 case
+			 * explicitly: the previous expression shifted by
+			 * (lmacid - 2) even for lmacid 0/1, which is a
+			 * negative-count shift (undefined behavior).
+			 */
+			if ((lmacid < 2 && (train_en & (1 << lmacid))) ||
+			    (lmacid >= 2 &&
+			     (train_en & (1 << (lmacid - 2))))) {
+				lmac->qlm_mode = QLM_MODE_10G_KR;
+				debug("BGX%d QLM%d LMAC%d mode: 10G_KR\n",
+				      bgx->bgx_id, lmac->qlm, lmacid);
+			} else {
+				lmac->qlm_mode = QLM_MODE_XFI;
+				debug("BGX%d QLM%d LMAC%d mode: XFI\n",
+				      bgx->bgx_id, lmac->qlm, lmacid);
+			}
+			break;
+		case BGX_MODE_XLAUI:
+			if (bgx->bgx_id == 0 && otx_is_altpkg())
+				continue;
+			if (train_en) {
+				lmac->qlm_mode = QLM_MODE_40G_KR4;
+				if (lmacid != 0)
+					break;
+				debug("BGX%d QLM%d LMAC%d mode: 40G_KR4\n",
+				      bgx->bgx_id, lmac->qlm, lmacid);
+			} else {
+				lmac->qlm_mode = QLM_MODE_XLAUI;
+				if (lmacid != 0)
+					break;
+				debug("BGX%d QLM%d LMAC%d mode: XLAUI\n",
+				      bgx->bgx_id, lmac->qlm, lmacid);
+			}
+			break;
+		case BGX_MODE_QSGMII:
+			/* If QLM is configured as QSGMII, use lmac0 */
+			if (otx_is_soc(CN83XX) && lmacid == 2 &&
+			    bgx->bgx_id != 2)
+				continue;
+
+			if (lmacid == 0 || lmacid == 2) {
+				lmac->qlm_mode = QLM_MODE_QSGMII;
+				debug("BGX%d QLM%d LMAC%d mode: QSGMII\n",
+				      bgx->bgx_id, lmac->qlm, lmacid);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/**
+ * bgx_set_board_info() - record per-LMAC board wiring for one BGX
+ * @bgx_id:      BGX index to fill in
+ * @mdio_bus:    per-LMAC MDIO bus numbers
+ * @phy_addr:    per-LMAC PHY addresses
+ * @autoneg_dis: per-LMAC autonegotiation-disabled flags
+ * @lmac_reg:    per-LMAC "registered" flags
+ * @lmac_enable: per-LMAC enable flags
+ *
+ * Copies the board description arrays into bgx_board_info[] for later
+ * use during LMAC bring-up.
+ */
+void bgx_set_board_info(int bgx_id, int *mdio_bus,
+			int *phy_addr, bool *autoneg_dis, bool *lmac_reg,
+			bool *lmac_enable)
+{
+	struct bgx_board_info *info = &bgx_board_info[bgx_id];
+	unsigned int i;
+
+	for (i = 0; i < MAX_LMAC_PER_BGX; i++) {
+		struct phy_info *phy = &info->phy_info[i];
+
+		phy->phy_addr = phy_addr[i];
+		phy->mdio_bus = mdio_bus[i];
+		phy->autoneg_dis = autoneg_dis[i];
+		info->lmac_reg[i] = lmac_reg[i];
+		info->lmac_enable[i] = lmac_enable[i];
+		debug("%s bgx_id %d lmac %d\n", __func__, bgx_id, i);
+		debug("phy addr %x mdio bus %d autoneg_dis %d lmac_reg %d\n",
+		      phy->phy_addr, phy->mdio_bus, phy->autoneg_dis,
+		      info->lmac_reg[i]);
+		debug("lmac_enable = %x\n", info->lmac_enable[i]);
+	}
+}
+
+/**
+ * octeontx_bgx_remove() - quiesce all LMACs of a BGX block
+ * @dev: BGX device being removed
+ *
+ * Stops packet RX/TX on each LMAC, powers down the PCS for 1G modes
+ * and disables the LMACs.  Runs before OS handoff via the driver's
+ * DM_FLAG_OS_PREPARE flag.
+ *
+ * Return: always 0.
+ */
+int octeontx_bgx_remove(struct udevice *dev)
+{
+	int lmacid;
+	u64 cfg;
+	int count = MAX_LMAC_PER_BGX;
+	struct bgx *bgx = dev_get_priv(dev);
+
+	/* Probe never mapped the registers - nothing to disable */
+	if (!bgx->reg_base)
+		return 0;
+
+	/* The fake RGX BGX exposes a single LMAC only */
+	if (bgx->is_rgx)
+		count = 1;
+
+	for (lmacid = 0; lmacid < count; lmacid++) {
+		struct lmac *lmac;
+
+		lmac = &bgx->lmac[lmacid];
+		/* Stop packet reception and transmission first */
+		cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+		bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+		/* Disable PCS for 1G interface */
+		if (lmac->lmac_type == BGX_MODE_SGMII ||
+		    lmac->lmac_type == BGX_MODE_QSGMII) {
+			cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+			cfg |= PCS_MRX_CTL_PWR_DN;
+			bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+		}
+
+		debug("%s disabling bgx%d lmacid%d\n", __func__, bgx->bgx_id,
+		      lmacid);
+		bgx_lmac_disable(bgx, lmacid);
+	}
+	return 0;
+}
+
+/**
+ * octeontx_bgx_probe() - map and bring up one BGX (or RGX) block
+ * @dev: BGX PCI device
+ *
+ * Maps BAR0, determines which QLM/DLM serdes feeds each LMAC, derives
+ * the per-LMAC modes and initializes the BGX hardware.  The LMACs
+ * themselves are only flagged init_pend here; their actual bring-up
+ * happens later.
+ *
+ * Return: 0 on success (also when BAR0 cannot be mapped), -ENODEV when
+ * neither DLM of a CN81XX BGX is configured for BGX use.
+ */
+int octeontx_bgx_probe(struct udevice *dev)
+{
+	struct bgx *bgx = dev_get_priv(dev);
+	u8 lmac = 0;
+	int qlm[4] = {-1, -1, -1, -1};
+	int bgx_idx, node;
+	int inc = 1;
+
+	bgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+				       PCI_REGION_MEM);
+	if (!bgx->reg_base) {
+		debug("No PCI region found\n");
+		return 0;
+	}
+
+#ifdef OCTEONTX_XCV
+	/* Use FAKE BGX2 for RGX interface */
+	if ((((uintptr_t)bgx->reg_base >> 24) & 0xf) == 0x8) {
+		bgx->bgx_id = 2;
+		bgx->is_rgx = true;
+		/* Only LMAC0 exists on RGX; mark the others unused */
+		for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
+			if (lmac == 0) {
+				bgx->lmac[lmac].lmacid = 0;
+				bgx->lmac[lmac].qlm = 0;
+			} else {
+				bgx->lmac[lmac].qlm = -1;
+			}
+		}
+		xcv_init_hw();
+		goto skip_qlm_config;
+	}
+#endif
+
+	/* Node and BGX index are encoded in the BAR address */
+	node = node_id(bgx->reg_base);
+	bgx_idx = ((uintptr_t)bgx->reg_base >> 24) & 3;
+	bgx->bgx_id = (node * MAX_BGX_PER_NODE) + bgx_idx;
+	/* CN81XX (and BGX2 of CN83XX) use 2-lane DLMs, not 4-lane QLMs */
+	if (otx_is_soc(CN81XX))
+		inc = 2;
+	else if (otx_is_soc(CN83XX) && (bgx_idx == 2))
+		inc = 2;
+
+	for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac += inc) {
+		/* BGX3 (DLM4), has only 2 lanes */
+		if (otx_is_soc(CN83XX) && bgx_idx == 3 && lmac >= 2)
+			continue;
+		qlm[lmac + 0] = get_qlm_for_bgx(node, bgx_idx, lmac);
+		/* Each DLM has 2 lanes, configure both lanes with
+		 * same qlm configuration
+		 */
+		if (inc == 2)
+			qlm[lmac + 1] = qlm[lmac];
+		debug("qlm[%d] = %d\n", lmac, qlm[lmac]);
+	}
+
+	/* A BGX can take 1 or 2 DLMs, if both the DLMs are not configured
+	 * as BGX, then return, nothing to initialize
+	 */
+	if (otx_is_soc(CN81XX))
+		if ((qlm[0] == -1) && (qlm[2] == -1))
+			return -ENODEV;
+
+	/* MAP configuration registers */
+	for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
+		bgx->lmac[lmac].qlm = qlm[lmac];
+		bgx->lmac[lmac].lmacid = lmac;
+	}
+
+#ifdef OCTEONTX_XCV
+skip_qlm_config:
+#endif
+	bgx_vnic[bgx->bgx_id] = bgx;
+	bgx_get_qlm_mode(bgx);
+	debug("bgx_vnic[%u]: %p\n", bgx->bgx_id, bgx);
+
+	bgx_init_hw(bgx);
+
+	/* Init LMACs */
+	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+		struct lmac *tlmac = &bgx->lmac[lmac];
+
+		tlmac->dev = dev;
+		tlmac->init_pend = 1;	/* defer actual LMAC bring-up */
+		tlmac->bgx = bgx;
+	}
+
+	return 0;
+}
+
+/* Bind as a MISC uclass driver.  DM_FLAG_OS_PREPARE ensures
+ * octeontx_bgx_remove() quiesces the hardware before an OS boots.
+ */
+U_BOOT_DRIVER(octeontx_bgx) = {
+	.name	= "octeontx_bgx",
+	.id	= UCLASS_MISC,
+	.probe	= octeontx_bgx_probe,
+	.remove	= octeontx_bgx_remove,
+	.priv_auto_alloc_size = sizeof(struct bgx),
+	.flags  = DM_FLAG_OS_PREPARE,
+};
+
+/* PCI IDs handled by this driver: the BGX Ethernet block and the RGX
+ * (RGMII) block.
+ * NOTE(review): bgx.h also defines PCI_DEVICE_ID_OCTEONTX_BGX/RGX which
+ * appear unused here - confirm whether the CAVIUM ids from pci_ids.h
+ * are the intended match.
+ */
+static struct pci_device_id octeontx_bgx_supported[] = {
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BGX) },
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RGX) },
+	{}	/* sentinel */
+};
+
+U_BOOT_PCI_DEVICE(octeontx_bgx, octeontx_bgx_supported);
diff --git a/drivers/net/octeontx/bgx.h b/drivers/net/octeontx/bgx.h
new file mode 100644
index 0000000..8402630
--- /dev/null
+++ b/drivers/net/octeontx/bgx.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef BGX_H
+#define BGX_H
+
+#include <asm/arch/board.h>
+
+/* PCI device IDs */
+#define	PCI_DEVICE_ID_OCTEONTX_BGX	0xA026
+#define	PCI_DEVICE_ID_OCTEONTX_RGX	0xA054
+
+#define    MAX_LMAC_PER_BGX			4
+#define    MAX_BGX_CHANS_PER_LMAC		16
+#define    MAX_DMAC_PER_LMAC			8
+#define    MAX_FRAME_SIZE			9216
+
+#define    MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE	2
+
+#define    MAX_LMAC	(MAX_BGX_PER_NODE * MAX_LMAC_PER_BGX)
+
+#define    NODE_ID_MASK				0x300000000000
+#define    NODE_ID(x)				(((x) & NODE_ID_MASK) >> 44)
+
+/* Registers */
+#define GSERX_CFG(x)		(0x87E090000080ull + (x) * 0x1000000ull)
+#define GSERX_SCRATCH(x)	(0x87E090000020ull + (x) * 0x1000000ull)
+#define GSERX_PHY_CTL(x)	(0x87E090000000ull + (x) * 0x1000000ull)
+#define GSERX_CFG_BGX		BIT(2)
+#define GSER_RX_EIE_DETSTS(x)	(0x87E090000150ull + (x) * 0x1000000ull)
+#define GSER_CDRLOCK		(8)
+#define GSER_BR_RXX_CTL(x, y)	(0x87E090000400ull + (x) * 0x1000000ull + \
+				(y) * 0x80)
+#define GSER_BR_RXX_CTL_RXT_SWM	BIT(2)
+#define GSER_BR_RXX_EER(x, y)	(0x87E090000418ull + (x) * 0x1000000ull + \
+				(y) * 0x80)
+#define GSER_BR_RXX_EER_RXT_ESV BIT(14)
+#define GSER_BR_RXX_EER_RXT_EER BIT(15)
+#define EER_RXT_ESV		(14)
+
+#define BGX_CMRX_CFG			0x00
+#define CMR_PKT_TX_EN				BIT_ULL(13)
+#define CMR_PKT_RX_EN				BIT_ULL(14)
+#define CMR_EN					BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG		0x08
+#define CMR_GLOBAL_CFG_FCS_STRIP		BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP		0x60
+#define BGX_CMRX_RX_STAT0		0x70
+#define BGX_CMRX_RX_STAT1		0x78
+#define BGX_CMRX_RX_STAT2		0x80
+#define BGX_CMRX_RX_STAT3		0x88
+#define BGX_CMRX_RX_STAT4		0x90
+#define BGX_CMRX_RX_STAT5		0x98
+#define BGX_CMRX_RX_STAT6		0xA0
+#define BGX_CMRX_RX_STAT7		0xA8
+#define BGX_CMRX_RX_STAT8		0xB0
+#define BGX_CMRX_RX_STAT9		0xB8
+#define BGX_CMRX_RX_STAT10		0xC0
+#define BGX_CMRX_RX_BP_DROP		0xC8
+#define BGX_CMRX_RX_DMAC_CTL		0x0E8
+#define BGX_CMR_RX_DMACX_CAM		0x200
+#define RX_DMACX_CAM_EN				BIT_ULL(48)
+#define RX_DMACX_CAM_LMACID(x)			((x) << 49)
+#define RX_DMAC_COUNT			32
+#define BGX_CMR_RX_STREERING		0x300
+#define RX_TRAFFIC_STEER_RULE_COUNT	8
+#define BGX_CMR_CHAN_MSK_AND		0x450
+#define BGX_CMR_BIST_STATUS		0x460
+#define BGX_CMR_RX_LMACS		0x468
+#define BGX_CMRX_TX_STAT0		0x600
+#define BGX_CMRX_TX_STAT1		0x608
+#define BGX_CMRX_TX_STAT2		0x610
+#define BGX_CMRX_TX_STAT3		0x618
+#define BGX_CMRX_TX_STAT4		0x620
+#define BGX_CMRX_TX_STAT5		0x628
+#define BGX_CMRX_TX_STAT6		0x630
+#define BGX_CMRX_TX_STAT7		0x638
+#define BGX_CMRX_TX_STAT8		0x640
+#define BGX_CMRX_TX_STAT9		0x648
+#define BGX_CMRX_TX_STAT10		0x650
+#define BGX_CMRX_TX_STAT11		0x658
+#define BGX_CMRX_TX_STAT12		0x660
+#define BGX_CMRX_TX_STAT13		0x668
+#define BGX_CMRX_TX_STAT14		0x670
+#define BGX_CMRX_TX_STAT15		0x678
+#define BGX_CMRX_TX_STAT16		0x680
+#define BGX_CMRX_TX_STAT17		0x688
+#define BGX_CMR_TX_LMACS		0x1000
+
+#define BGX_SPUX_CONTROL1		0x10000
+#define SPU_CTL_LOW_POWER			BIT_ULL(11)
+#define SPU_CTL_LOOPBACK                        BIT_ULL(14)
+#define SPU_CTL_RESET				BIT_ULL(15)
+#define BGX_SPUX_STATUS1		0x10008
+#define SPU_STATUS1_RCV_LNK			BIT_ULL(2)
+#define BGX_SPUX_STATUS2		0x10020
+#define SPU_STATUS2_RCVFLT			BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS		0x10028
+#define SPU_BX_STATUS_RX_ALIGN                  BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1		0x10030
+#define SPU_BR_STATUS_BLK_LOCK			BIT_ULL(0)
+#define SPU_BR_STATUS_RCV_LNK			BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL		0x10068
+#define SPU_PMD_CRTL_TRAIN_EN			BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP		0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP		0x10088
+#define BGX_SPUX_BR_PMD_LD_REP		0x10090
+#define BGX_SPUX_FEC_CONTROL		0x100A0
+#define SPU_FEC_CTL_FEC_EN			BIT_ULL(0)
+#define SPU_FEC_CTL_ERR_EN			BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL		0x100C8
+#define SPU_AN_CTL_AN_EN			BIT_ULL(12)
+#define SPU_AN_CTL_XNP_EN			BIT_ULL(13)
+#define SPU_AN_CTL_AN_RESTART			BIT_ULL(15)
+#define BGX_SPUX_AN_STATUS		0x100D0
+#define SPU_AN_STS_AN_COMPLETE			BIT_ULL(5)
+#define BGX_SPUX_AN_ADV			0x100D8
+#define BGX_SPUX_MISC_CONTROL		0x10218
+#define SPU_MISC_CTL_INTLV_RDISP		BIT_ULL(10)
+#define SPU_MISC_CTL_RX_DIS			BIT_ULL(12)
+#define BGX_SPUX_INT			0x10220	/* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S		0x10228
+#define BGX_SPUX_INT_ENA_W1C		0x10230
+#define BGX_SPUX_INT_ENA_W1S		0x10238
+#define BGX_SPU_DBG_CONTROL		0x10300
+#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN		BIT_ULL(18)
+#define SPU_DBG_CTL_AN_NONCE_MCT_DIS		BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT			0x20000
+#define BGX_SMUX_RX_JABBER		0x20030
+#define BGX_SMUX_RX_CTL			0x20048
+#define SMU_RX_CTL_STATUS			(3ull << 0)
+#define BGX_SMUX_TX_APPEND		0x20100
+#define SMU_TX_APPEND_FCS_D			BIT_ULL(2)
+#define BGX_SMUX_TX_MIN_PKT		0x20118
+#define BGX_SMUX_TX_INT			0x20140
+#define BGX_SMUX_TX_CTL			0x20178
+#define SMU_TX_CTL_DIC_EN			BIT_ULL(0)
+#define SMU_TX_CTL_UNI_EN			BIT_ULL(1)
+#define SMU_TX_CTL_LNK_STATUS			(3ull << 4)
+#define BGX_SMUX_TX_THRESH		0x20180
+#define BGX_SMUX_CTL			0x20200
+#define SMU_CTL_RX_IDLE				BIT_ULL(0)
+#define SMU_CTL_TX_IDLE				BIT_ULL(1)
+
+#define BGX_GMP_PCS_MRX_CTL		0x30000
+#define	PCS_MRX_CTL_RST_AN			BIT_ULL(9)
+#define	PCS_MRX_CTL_PWR_DN			BIT_ULL(11)
+#define	PCS_MRX_CTL_AN_EN			BIT_ULL(12)
+#define PCS_MRX_CTL_LOOPBACK1                   BIT_ULL(14)
+#define	PCS_MRX_CTL_RESET			BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS		0x30008
+#define	PCS_MRX_STATUS_AN_CPT			BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_AN_RESULTS	0x30020
+#define BGX_GMP_PCS_SGM_AN_ADV		0x30068
+#define BGX_GMP_PCS_MISCX_CTL		0x30078
+#define PCS_MISCX_CTL_DISP_EN			BIT_ULL(13)
+#define PCS_MISC_CTL_GMX_ENO			BIT_ULL(11)
+#define PCS_MISC_CTL_SAMP_PT_MASK		0x7Full
+#define PCS_MISC_CTL_MODE			BIT_ULL(8)
+#define BGX_GMP_GMI_PRTX_CFG		0x38020
+#define GMI_PORT_CFG_SPEED			BIT_ULL(1)
+#define GMI_PORT_CFG_DUPLEX			BIT_ULL(2)
+#define GMI_PORT_CFG_SLOT_TIME			BIT_ULL(3)
+#define GMI_PORT_CFG_SPEED_MSB			BIT_ULL(8)
+#define BGX_GMP_GMI_RXX_JABBER		0x38038
+#define BGX_GMP_GMI_TXX_THRESH		0x38210
+#define BGX_GMP_GMI_TXX_APPEND		0x38218
+#define BGX_GMP_GMI_TXX_SLOT		0x38220
+#define BGX_GMP_GMI_TXX_BURST		0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT		0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL	0x38300
+
+#define BGX_MSIX_VEC_0_29_ADDR		0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL		0x400008
+#define BGX_MSIX_PBA_0			0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS	30
+#define BGX_LMAC_VEC_OFFSET	7
+#define BGX_MSIX_VEC_SHIFT	4
+
+#define CMRX_INT		0
+#define SPUX_INT		1
+#define SMUX_RX_INT		2
+#define SMUX_TX_INT		3
+#define GMPX_PCS_INT		4
+#define GMPX_GMI_RX_INT		5
+#define GMPX_GMI_TX_INT		6
+#define CMR_MEM_INT		28
+#define SPU_MEM_INT		29
+
+#define LMAC_INTR_LINK_UP	BIT(0)
+#define LMAC_INTR_LINK_DOWN	BIT(1)
+
+/* RX_DMAC_CTL multicast handling configuration */
+enum MCAST_MODE {
+	MCAST_MODE_REJECT,	/* drop all multicast frames */
+	MCAST_MODE_ACCEPT,	/* accept all multicast frames */
+	MCAST_MODE_CAM_FILTER,	/* filter multicast via the DMAC CAM */
+	RSVD			/* reserved encoding */
+};
+
+#define BCAST_ACCEPT	1
+#define CAM_ACCEPT	1
+
+int octeontx_bgx_initialize(unsigned int bgx_idx, unsigned int node);
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+void bgx_get_count(int node, int *bgx_count);
+int bgx_get_lmac_count(int node, int bgx);
+void bgx_print_stats(int bgx_idx, int lmac);
+void xcv_init_hw(void);
+void xcv_setup_link(bool link_up, int link_speed);
+
+#undef LINK_INTR_ENABLE
+
+/* Serdes (QLM/DLM) operating modes as seen by this driver */
+enum qlm_mode {
+	QLM_MODE_SGMII,		/* SGMII, each lane independent */
+	QLM_MODE_XAUI,		/* 1 XAUI or DXAUI, 4 lanes */
+	QLM_MODE_RXAUI,		/* 2 RXAUI, 2 lanes each */
+	QLM_MODE_XFI,		/* 4 XFI, 1 lane each */
+	QLM_MODE_XLAUI,		/* 1 XLAUI, 4 lanes each */
+	QLM_MODE_10G_KR,	/* 4 10GBASE-KR, 1 lane each */
+	QLM_MODE_40G_KR4,	/* 1 40GBASE-KR4, 4 lanes each */
+	QLM_MODE_QSGMII,	/* 4 QSGMII, each lane independent */
+	QLM_MODE_RGMII,		/* 1 RGX */
+};
+
+/* Board wiring of one LMAC's external PHY (filled by bgx_set_board_info()) */
+struct phy_info {
+	int mdio_bus;		/* MDIO bus the PHY is attached to */
+	int phy_addr;		/* PHY address on that bus */
+	bool autoneg_dis;	/* true: autonegotiation disabled for LMAC */
+};
+
+/* Per-BGX board description, populated by bgx_set_board_info() */
+struct bgx_board_info {
+	struct phy_info phy_info[MAX_LMAC_PER_BGX];	/* PHY wiring per LMAC */
+	bool lmac_reg[MAX_LMAC_PER_BGX];	/* LMAC registration flags */
+	bool lmac_enable[MAX_LMAC_PER_BGX];	/* LMAC enable flags */
+};
+
+/* Hardware LMAC_TYPE encodings (BGX_CMRX_CFG[10:8]).  Several logical
+ * modes deliberately share one encoding: XAUI/DXAUI, XFI/10G_KR and
+ * XLAUI/40G_KR differ only in lane speed or training, not in type.
+ */
+enum LMAC_TYPE {
+	BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+	BGX_MODE_XAUI = 1,  /* 4 lanes, 3.125 Gbaud */
+	BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+	BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+	BGX_MODE_XFI = 3,   /* 1 lane, 10.3125 Gbaud */
+	BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+	BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
+	BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
+	BGX_MODE_RGMII = 5,
+	BGX_MODE_QSGMII = 6,
+	BGX_MODE_INVALID = 7,
+};
+
+int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr);
+
+#endif /* BGX_H */
diff --git a/drivers/net/octeontx/nic.h b/drivers/net/octeontx/nic.h
new file mode 100644
index 0000000..af3576c
--- /dev/null
+++ b/drivers/net/octeontx/nic.h
@@ -0,0 +1,508 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef NIC_H
+#define	NIC_H
+
+#include <linux/netdevice.h>
+#include "bgx.h"
+
+#define	PCI_DEVICE_ID_CAVIUM_NICVF_1	0x0011
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF		0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF		0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF		0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF	0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF		0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF		0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF		0xA334
+
+#define	NIC_INTF_COUNT			2  /* Interfaces btw VNIC and TNS/BGX */
+#define	NIC_CHANS_PER_INF		128
+#define	NIC_MAX_CHANS			(NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+
+/* PCI BAR nos */
+#define	PCI_CFG_REG_BAR_NUM		0
+#define	PCI_MSIX_REG_BAR_NUM		4
+
+/* NIC SRIOV VF count */
+#define	MAX_NUM_VFS_SUPPORTED		128
+#define	DEFAULT_NUM_VF_ENABLED		8
+
+#define	NIC_TNS_BYPASS_MODE		0
+#define	NIC_TNS_MODE			1
+
+/* NIC priv flags */
+#define	NIC_SRIOV_ENABLED		BIT(0)
+#define	NIC_TNS_ENABLED			BIT(1)
+
+/* VNIC HW optimization features */
+#define	VNIC_RX_CSUM_OFFLOAD_SUPPORT
+#undef	VNIC_TX_CSUM_OFFLOAD_SUPPORT
+#undef	VNIC_SG_SUPPORT
+#undef	VNIC_TSO_SUPPORT
+#undef	VNIC_LRO_SUPPORT
+#undef  VNIC_RSS_SUPPORT
+
+/* TSO not supported in Thunder pass1 */
+#ifdef	VNIC_TSO_SUPPORT
+#define	VNIC_SW_TSO_SUPPORT
+#undef	VNIC_HW_TSO_SUPPORT
+#endif
+
+/* ETHTOOL enable or disable, undef this to disable */
+#define	NICVF_ETHTOOL_ENABLE
+
+/* Min/Max packet size */
+#define	NIC_HW_MIN_FRS			64
+#define	NIC_HW_MAX_FRS			9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define	NIC_MAX_PKIND			16
+
+/* Max when CPI_ALG is IP diffserv */
+#define	NIC_MAX_CPI_PER_LMAC		64
+
+/* NIC VF Interrupts */
+#define	NICVF_INTR_CQ			0
+#define	NICVF_INTR_SQ			1
+#define	NICVF_INTR_RBDR			2
+#define	NICVF_INTR_PKT_DROP		3
+#define	NICVF_INTR_TCP_TIMER	4
+#define	NICVF_INTR_MBOX			5
+#define	NICVF_INTR_QS_ERR		6
+
+#define	NICVF_INTR_CQ_SHIFT			0
+#define	NICVF_INTR_SQ_SHIFT			8
+#define	NICVF_INTR_RBDR_SHIFT		16
+#define	NICVF_INTR_PKT_DROP_SHIFT	20
+#define	NICVF_INTR_TCP_TIMER_SHIFT	21
+#define	NICVF_INTR_MBOX_SHIFT		22
+#define	NICVF_INTR_QS_ERR_SHIFT		23
+
+#define	NICVF_INTR_CQ_MASK		(0xFF << NICVF_INTR_CQ_SHIFT)
+#define	NICVF_INTR_SQ_MASK		(0xFF << NICVF_INTR_SQ_SHIFT)
+#define	NICVF_INTR_RBDR_MASK		(0x03 << NICVF_INTR_RBDR_SHIFT)
+#define	NICVF_INTR_PKT_DROP_MASK	BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define	NICVF_INTR_TCP_TIMER_MASK	BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define	NICVF_INTR_MBOX_MASK		BIT(NICVF_INTR_MBOX_SHIFT)
+#define	NICVF_INTR_QS_ERR_MASK		BIT(NICVF_INTR_QS_ERR_SHIFT)
+
+/* MSI-X interrupts */
+#define	NIC_PF_MSIX_VECTORS		10
+#define	NIC_VF_MSIX_VECTORS		20
+
+#define NIC_PF_INTR_ID_ECC0_SBE		0
+#define NIC_PF_INTR_ID_ECC0_DBE		1
+#define NIC_PF_INTR_ID_ECC1_SBE		2
+#define NIC_PF_INTR_ID_ECC1_DBE		3
+#define NIC_PF_INTR_ID_ECC2_SBE		4
+#define NIC_PF_INTR_ID_ECC2_DBE		5
+#define NIC_PF_INTR_ID_ECC3_SBE		6
+#define NIC_PF_INTR_ID_ECC3_DBE		7
+#define NIC_PF_INTR_ID_MBOX0		8
+#define NIC_PF_INTR_ID_MBOX1		9
+
+/* Global timer for CQ timer thresh interrupts
+ * Calculated for SCLK of 700Mhz
+ * value written should be a 1/16th of what is expected
+ *
+ * 1 tick per ms
+ */
+#define NICPF_CLK_PER_INT_TICK		43750
+
+struct nicvf_cq_poll {
+	u8	cq_idx;		/* Completion queue index */
+};
+
+#define NIC_MAX_RSS_HASH_BITS		8
+#define NIC_MAX_RSS_IDR_TBL_SIZE	BIT(NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE		5 /* 320 bit key */
+
+#ifdef VNIC_RSS_SUPPORT
+struct nicvf_rss_info {
+	bool enable;
+#define	RSS_L2_EXTENDED_HASH_ENA	BIT(0)
+#define	RSS_IP_HASH_ENA			BIT(1)
+#define	RSS_TCP_HASH_ENA		BIT(2)
+#define	RSS_TCP_SYN_DIS			BIT(3)
+#define	RSS_UDP_HASH_ENA		BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA	BIT(5)
+#define	RSS_ROCE_ENA			BIT(6)
+#define	RSS_L3_BI_DIRECTION_ENA		BIT(7)
+#define	RSS_L4_BI_DIRECTION_ENA		BIT(8)
+	u64 cfg;
+	u8  hash_bits;
+	u16 rss_size;
+	u8  ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+	u64 key[RSS_HASH_KEY_SIZE];
+};
+#endif
+
+enum rx_stats_reg_offset {
+	RX_OCTS = 0x0,
+	RX_UCAST = 0x1,
+	RX_BCAST = 0x2,
+	RX_MCAST = 0x3,
+	RX_RED = 0x4,
+	RX_RED_OCTS = 0x5,
+	RX_ORUN = 0x6,
+	RX_ORUN_OCTS = 0x7,
+	RX_FCS = 0x8,
+	RX_L2ERR = 0x9,
+	RX_DRP_BCAST = 0xa,
+	RX_DRP_MCAST = 0xb,
+	RX_DRP_L3BCAST = 0xc,
+	RX_DRP_L3MCAST = 0xd,
+	RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+	TX_OCTS = 0x0,
+	TX_UCAST = 0x1,
+	TX_BCAST = 0x2,
+	TX_MCAST = 0x3,
+	TX_DROP = 0x4,
+	TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+	u64 rx_bytes_ok;
+	u64 rx_ucast_frames_ok;
+	u64 rx_bcast_frames_ok;
+	u64 rx_mcast_frames_ok;
+	u64 rx_fcs_errors;
+	u64 rx_l2_errors;
+	u64 rx_drop_red;
+	u64 rx_drop_red_bytes;
+	u64 rx_drop_overrun;
+	u64 rx_drop_overrun_bytes;
+	u64 rx_drop_bcast;
+	u64 rx_drop_mcast;
+	u64 rx_drop_l3_bcast;
+	u64 rx_drop_l3_mcast;
+	u64 tx_bytes_ok;
+	u64 tx_ucast_frames_ok;
+	u64 tx_bcast_frames_ok;
+	u64 tx_mcast_frames_ok;
+	u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+	/* Rx */
+	u64 rx_frames_ok;
+	u64 rx_frames_64;
+	u64 rx_frames_127;
+	u64 rx_frames_255;
+	u64 rx_frames_511;
+	u64 rx_frames_1023;
+	u64 rx_frames_1518;
+	u64 rx_frames_jumbo;
+	u64 rx_drops;
+	/* Tx */
+	u64 tx_frames_ok;
+	u64 tx_drops;
+	u64 tx_busy;
+	u64 tx_tso;
+};
+
+struct hw_info {
+	u8		bgx_cnt;
+	u8		chans_per_lmac;
+	u8		chans_per_bgx; /* Rx/Tx chans */
+	u8		chans_per_rgx;
+	u8		chans_per_lbk;
+	u16		cpi_cnt;
+	u16		rssi_cnt;
+	u16		rss_ind_tbl_size;
+	u16		tl4_cnt;
+	u16		tl3_cnt;
+	u8		tl2_cnt;
+	u8		tl1_cnt;
+	bool		tl1_per_bgx; /* TL1 per BGX or per LMAC */
+	u8		model_id;
+};
+
+struct nicvf {
+	struct udevice		*dev;
+	u8			vf_id;
+	bool			sqs_mode:1;
+	bool			loopback_supported:1;
+	u8			tns_mode;
+	u8			node;
+	u16		mtu;
+	struct queue_set	*qs;
+#define		MAX_SQS_PER_VF_SINGLE_NODE	5
+#define		MAX_SQS_PER_VF			11
+	u8			num_qs;
+	void			*addnl_qs;
+	u16		vf_mtu;
+	void __iomem		*reg_base;
+#define	MAX_QUEUES_PER_QSET			8
+	struct nicvf_cq_poll	*napi[8];
+
+	u8			cpi_alg;
+
+	struct nicvf_hw_stats	stats;
+	struct nicvf_drv_stats	drv_stats;
+
+	struct nicpf		*nicpf;
+
+	/* VF <-> PF mailbox communication */
+	bool			pf_acked;
+	bool			pf_nacked;
+	bool			set_mac_pending;
+
+	bool			link_up;
+	u8			duplex;
+	u32		speed;
+	u8			rev_id;
+	u8			rx_queues;
+	u8			tx_queues;
+
+	bool			open;
+	bool			rb_alloc_fail;
+	void			*rcv_buf;
+	bool			hw_tso;
+};
+
+/* Extract the OCTEON TX node number (address bits 45:44) from a
+ * register address.
+ */
+static inline int node_id(void *addr)
+{
+	uintptr_t pa = (uintptr_t)addr;
+
+	return (int)((pa >> 44) & 0x3);
+}
+
+struct nicpf {
+	struct udevice		*udev;
+	struct hw_info		*hw;
+	u8			node;
+	unsigned int		flags;
+	u16			total_vf_cnt;	/* Total num of VF supported */
+	u16			num_vf_en;	/* No of VF enabled */
+	void __iomem		*reg_base;	/* Register start address */
+	u16			rss_ind_tbl_size;
+	u8			num_sqs_en;	/* Secondary qsets enabled */
+	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
+	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
+	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
+	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
+	struct pkind_cfg	pkind;
+	u8			bgx_cnt;
+	u8			rev_id;
+#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
+#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	(((map) >> 4) & 0xF)
+#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	((map) & 0xF)
+	u8			vf_lmac_map[MAX_LMAC];
+	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
+	u64			mac[MAX_NUM_VFS_SUPPORTED];
+	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];
+	u8			link[MAX_LMAC];
+	u8			duplex[MAX_LMAC];
+	u32			speed[MAX_LMAC];
+	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
+	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];
+	u8			lmac_cnt;
+};
+
+/* PF <--> VF Mailbox communication
+ * Eight 64bit registers are shared between PF and VF.
+ * Separate set for each VF.
+ * Writing '1' into last register mbx7 means end of message.
+ */
+
+/* PF <--> VF mailbox communication */
+#define	NIC_PF_VF_MAILBOX_SIZE		2
+#define	NIC_PF_VF_MBX_TIMEOUT		2000 /* ms */
+
+/* Mailbox message types */
+#define	NIC_MBOX_MSG_READY		0x01	/* Is PF ready to rcv msgs */
+#define	NIC_MBOX_MSG_ACK		0x02	/* ACK the message received */
+#define	NIC_MBOX_MSG_NACK		0x03	/* NACK the message received */
+#define	NIC_MBOX_MSG_QS_CFG		0x04	/* Configure Qset */
+#define	NIC_MBOX_MSG_RQ_CFG		0x05	/* Configure receive queue */
+#define	NIC_MBOX_MSG_SQ_CFG		0x06	/* Configure Send queue */
+#define	NIC_MBOX_MSG_RQ_DROP_CFG	0x07	/* Configure receive queue */
+#define	NIC_MBOX_MSG_SET_MAC		0x08	/* Add MAC ID to DMAC filter */
+#define	NIC_MBOX_MSG_SET_MAX_FRS	0x09	/* Set max frame size */
+#define	NIC_MBOX_MSG_CPI_CFG		0x0A	/* Config CPI, RSSI */
+#define	NIC_MBOX_MSG_RSS_SIZE		0x0B	/* Get RSS indir_tbl size */
+#define	NIC_MBOX_MSG_RSS_CFG		0x0C	/* Config RSS table */
+#define	NIC_MBOX_MSG_RSS_CFG_CONT	0x0D	/* RSS config continuation */
+#define	NIC_MBOX_MSG_RQ_BP_CFG		0x0E	/* RQ backpressure config */
+#define	NIC_MBOX_MSG_RQ_SW_SYNC		0x0F	/* Flush inflight pkts to RQ */
+#define	NIC_MBOX_MSG_BGX_STATS		0x10	/* Get stats from BGX */
+#define	NIC_MBOX_MSG_BGX_LINK_CHANGE	0x11	/* BGX:LMAC link status */
+#define	NIC_MBOX_MSG_ALLOC_SQS		0x12	/* Allocate secondary Qset */
+#define	NIC_MBOX_MSG_NICVF_PTR		0x13	/* Send nicvf ptr to PF */
+#define	NIC_MBOX_MSG_PNICVF_PTR		0x14	/* Get primary qset nicvf ptr */
+#define	NIC_MBOX_MSG_SNICVF_PTR		0x15	/* Send sqet nicvf ptr to PVF */
+#define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
+#define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
+#define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
+
+struct nic_cfg_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    node_id;
+	bool  tns_mode:1;
+	bool  sqs_mode:1;
+	bool  loopback_supported:1;
+	u8    mac_addr[6];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+	u8    msg;
+	u8    num;
+	u8    sqs_count;
+	u64   cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+	u8    msg;
+	u8    qs_num;
+	u8    rq_num;
+	u64   cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+	u8    msg;
+	u8    qs_num;
+	u8    sq_num;
+	bool  sqs_mode;
+	u64   cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    mac_addr[6];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+	u8    msg;
+	u8    vf_id;
+	u16   max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    rq_cnt;
+	u8    cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+	u8    msg;
+	u8    vf_id;
+	u16   ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    hash_bits;
+	u8    tbl_len;
+	u8    tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG	8
+	u8    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+	u8    msg;
+	u8    vf_id;
+	u8    rx;
+	u8    idx;
+	u64   stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+	u8    msg;
+	u8    link_up;
+	u8    duplex;
+	u32   speed;
+};
+
+#ifdef VNIC_MULTI_QSET_SUPPORT
+/* Get Extra Qset IDs */
+struct sqs_alloc {
+	u8    msg;
+	u8    vf_id;
+	u8    qs_count;
+};
+
+struct nicvf_ptr {
+	u8    msg;
+	u8    vf_id;
+	bool  sqs_mode;
+	u8    sqs_id;
+	u64   nicvf;
+};
+#endif
+
+/* Set interface in loopback mode */
+struct set_loopback {
+	u8    msg;
+	u8    vf_id;
+	bool  enable;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+	struct { u8 msg; }	msg;
+	struct nic_cfg_msg	nic_cfg;
+	struct qs_cfg_msg	qs;
+	struct rq_cfg_msg	rq;
+	struct sq_cfg_msg	sq;
+	struct set_mac_msg	mac;
+	struct set_frs_msg	frs;
+	struct cpi_cfg_msg	cpi_cfg;
+	struct rss_sz_msg	rss_size;
+	struct rss_cfg_msg	rss_cfg;
+	struct bgx_stats_msg    bgx_stats;
+	struct bgx_link_status  link_status;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+	struct sqs_alloc        sqs_alloc;
+	struct nicvf_ptr	nicvf;
+#endif
+	struct set_loopback	lbk;
+};
+
+int nicvf_set_real_num_queues(struct udevice *dev,
+			      int tx_queues, int rx_queues);
+int nicvf_open(struct udevice *dev);
+void nicvf_stop(struct udevice *dev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_update_stats(struct nicvf *nic);
+
+void nic_handle_mbx_intr(struct nicpf *nic, int vf);
+
+int bgx_poll_for_link(int node, int bgx_idx, int lmacid);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+				int lmac_idx, bool enable);
+
+/* True for pass-1 (revision < 8) silicon of the CN88XX (model 0x88) */
+static inline bool pass1_silicon(unsigned int revision, int model_id)
+{
+	if (model_id != 0x88)
+		return false;
+
+	return revision < 8;
+}
+
+/* True for pass-2 (revision >= 8) silicon of the CN88XX (model 0x88) */
+static inline bool pass2_silicon(unsigned int revision, int model_id)
+{
+	if (model_id != 0x88)
+		return false;
+
+	return revision >= 8;
+}
+
+#endif /* NIC_H */
diff --git a/drivers/net/octeontx/nic_main.c b/drivers/net/octeontx/nic_main.c
new file mode 100644
index 0000000..1a805f7
--- /dev/null
+++ b/drivers/net/octeontx/nic_main.c
@@ -0,0 +1,778 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <config.h>
+#include <net.h>
+#include <netdev.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <dm.h>
+#include <misc.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+
/*
 * Round @n down to the nearest power of two; returns 0 for n == 0.
 *
 * The bit smears propagate the highest set bit of @n into every lower
 * position, giving a mask 2^k - 1 + 2^k; subtracting the mask shifted
 * right by one leaves just the top bit, i.e. the largest power of two
 * that is <= @n.
 *
 * The previous implementation returned (n + 1), which rounds *up* and
 * doubles values that are already powers of two (e.g. 64 -> 128),
 * contradicting the function's name and inflating rss_ind_tbl_size.
 */
unsigned long rounddown_pow_of_two(unsigned long n)
{
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	/* Split the 32-bit smear so the shift count stays below the
	 * type width even where 'unsigned long' is 32 bits (a direct
	 * 'n >> 32' would be undefined behavior there).
	 */
	n |= (n >> 16) >> 16;

	return n - (n >> 1);
}
+
/* Forward declarations for helpers called from the mailbox handler */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg);
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq);
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf);
static int nic_rcv_queue_sw_sync(struct nicpf *nic);
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+	writeq(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+	return readq(nic->reg_base + offset);
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
+
/* Copy a 128-bit mailbox message into VF @vf's PF->VF mailbox CSRs.
 * The two 64-bit words must be written in pass-dependent order so the
 * interrupt-triggering word is written last (see comment below).
 */
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = (void *)(nic->reg_base + nic_get_mbx_addr(vf));
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, mbox interrupt is triggerred
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions above
		 */
		writeq(msg[0], mbx_addr);
		writeq(msg[1], mbx_addr + 8);
	} else {
		writeq(msg[1], mbx_addr + 8);
		writeq(msg[0], mbx_addr);
	}
}
+
/* Reply to a VF's NIC_MBOX_MSG_READY handshake: fill in TNS mode,
 * MAC address, node id and loopback capability, polling up to
 * 5 * 2s for the backing BGX link to come up first.
 * @vf: VF that sent the READY message
 */
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac, timeout = 5, link = -1;
	const u8 *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	if (nic->flags & NIC_TNS_ENABLED)
		mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
	else
		mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	/* Only primary VFs (one per LMAC) have a physical MAC behind them */
	if (vf < nic->num_vf_en) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			memcpy((u8 *)&mbx.nic_cfg.mac_addr, mac, 6);

		/* Each failed link poll waits 2 seconds before retrying */
		while (timeout-- && (link <= 0)) {
			link = bgx_poll_for_link(nic->node, bgx_idx, lmac);
			debug("Link status: %d\n", link);
			if (link <= 0)
				mdelay(2000);
		}
	}
#ifdef VNIC_MULTI_QSET_SUPPORT
	/* VFs beyond num_vf_en act as secondary Qsets */
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
#endif
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;

	nic_send_msg_to_vf(nic, vf, &mbx);
}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which ACK to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_ACK;
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message that PF is not able to
+ * complete the action
+ * @vf: VF to which ACK to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_NACK;
+	nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+{
+	int bgx_idx, lmac_idx;
+
+	if (lbk->vf_id > nic->num_vf_en)
+		return -1;
+
+	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+
+	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+
+	return 0;
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
/* Reads the 128-bit message VF @vf wrote into its mailbox, dispatches
 * on the message id, and finishes with an ACK (ret == 0) or NACK
 * (ret != 0).  Cases that send their own reply jump to 'unlock';
 * READY suppresses the generic reply via ret = 1.
 */
void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = true;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	/* Copy the mailbox into the local union one 64-bit word at a time */
	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	debug("%s: Mailbox msg %d from VF%d\n", __func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < nic->num_vf_en) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		/* READY already carried its own reply; skip the ACK below */
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
#ifdef VNIC_MULTI_QSET_SUPPORT
		/* Check if its a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
#endif
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		/* Enable CQE_RX2_S extension in CQE_RX descriptor.
		 * This gets appended by default on 81xx/83xx chips,
		 * for consistency enabling the same on 88xx pass2
		 * where this is introduced.
		 */
		if (pass2_silicon(nic->rev_id, nic->hw->model_id))
			nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num,
				   (struct sq_cfg_msg *)&mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
#ifdef VNIC_MULTI_QSET_SUPPORT
		/* Secondary Qset VFs have no LMAC of their own */
		if (vf >= nic->num_vf_en)
			break;
#endif
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
#ifdef VNIC_RSS_SUPPORT
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
#endif
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic->vf_enabled[vf] = true;
		if (vf >= nic->lmac_cnt)
			goto unlock;

		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		nic->vf_enabled[vf] = false;
#ifdef VNIC_MULTI_QSET_SUPPORT
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
#endif
		if (vf >= nic->lmac_cnt)
			break;

		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
		break;
#ifdef VNIC_MULTI_QSET_SUPPORT
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
#endif
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		printf("Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	/* ret == 0: success -> ACK; otherwise NACK (READY excluded,
	 * it already answered above)
	 */
	if (!ret)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = false;
}
+
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+	int timeout = 20;
+
+	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+	while (timeout) {
+		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+			break;
+		udelay(2000);
+		timeout--;
+	}
+	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+	if (!timeout) {
+		printf("Recevie queue software sync failed");
+		return 1;
+	}
+	return 0;
+}
+
/* Validate and apply a VF's max-frame-size request.
 * Returns 0 on success (or when no change is needed), 1 when the
 * requested size is outside [NIC_HW_MIN_FRS, NIC_HW_MAX_FRS].
 */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	/* The pkind bitfield struct is pushed to hardware as one raw u64 */
	u64 *pkind = (u64 *)&nic->pkind;

	if (new_frs > NIC_HW_MAX_FRS || new_frs < NIC_HW_MIN_FRS) {
		printf("Invalid MTU setting from VF%d rejected,", vf);
		printf(" should be between %d and %d\n", NIC_HW_MIN_FRS,
		       NIC_HW_MAX_FRS);
		return 1;
	}
	/* Account for the Ethernet header on top of the requested size */
	new_frs += ETH_HLEN;
	/* maxlen only ever grows; smaller requests are already covered */
	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;

	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *pkind);
	return 0;
}
+
/* Set minimum transmit packet size */
/* Frames shorter than @size are padded by the LMAC; the hardware field
 * stores size/4 in bits [5:2] of NIC_PF_LMAC_0_7_CFG.
 */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	u64 lmac_cfg;
	struct hw_info *hw = nic->hw;
	int max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;

	/* Max value that can be set is 60 */
	/* NOTE(review): the cap below is 52, not the 60 the comment
	 * claims — presumably deliberate headroom; confirm against HRM.
	 */
	if (size > 52)
		size = 52;

	/* CN81XX has RGX configured as FAKE BGX, adjust mac_lmac accordingly */
	if (hw->chans_per_rgx)
		max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;

	/* Read-modify-write the min-size field of every LMAC config */
	for (lmac = 0; lmac < max_lmac; lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}
+
/* Function to check number of LMACs present and set VF to LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	int bgx, bgx_count, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;
	if (nic->flags & NIC_TNS_ENABLED) {
		/* With TNS the VF count is fixed; no LMAC walk needed */
		nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
		return;
	}

	/* bgx_count comes back as a bitmap of present BGX blocks */
	bgx_get_count(nic->node, &bgx_count);
	debug("bgx_count: %d\n", bgx_count);

	for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
		if (!(bgx_count & (1 << bgx)))
			continue;
		nic->bgx_cnt++;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		debug("lmac_cnt: %d for BGX%d\n", lmac_cnt, bgx);
		/* One primary VF per LMAC */
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
						NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1); /* channel credit enable */
		lmac_credit |= (0x1ff << 2);
		/* Split 48K of buffering evenly across this BGX's LMACs */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);
	}
}
+
/* Fill nic->hw with per-SoC capability limits (channel, CPI, RSSI and
 * traffic-limiter counts) keyed off the PCI subsystem device id.
 * NOTE(review): an unrecognized id leaves hw zeroed (it is calloc'd by
 * the caller) apart from tl4_cnt — confirm all supported parts appear.
 */
static void nic_get_hw_info(struct nicpf *nic)
{
	u16 sdevid;
	struct hw_info *hw = nic->hw;

	dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);

	switch (sdevid) {
	case PCI_SUBSYS_DEVID_88XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_NODE;
		hw->chans_per_lmac = 16;
		hw->chans_per_bgx = 128;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 4096;
		hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 2;
		hw->tl1_per_bgx = true;
		hw->model_id = 0x88;
		break;
	case PCI_SUBSYS_DEVID_81XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_NODE;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_rgx = 8;
		hw->chans_per_lbk = 24;
		hw->cpi_cnt = 512;
		hw->rssi_cnt = 256;
		hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 64;
		hw->tl2_cnt = 16;
		hw->tl1_cnt = 10;
		hw->tl1_per_bgx = false;
		hw->model_id = 0x81;
		break;
	case PCI_SUBSYS_DEVID_83XX_NIC_PF:
		hw->bgx_cnt = MAX_BGX_PER_NODE;
		hw->chans_per_lmac = 8;
		hw->chans_per_bgx = 32;
		hw->chans_per_lbk = 64;
		hw->cpi_cnt = 2048;
		hw->rssi_cnt = 1024;
		hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
		hw->tl3_cnt = 256;
		hw->tl2_cnt = 64;
		hw->tl1_cnt = 18;
		hw->tl1_per_bgx = false;
		hw->model_id = 0x83;
		break;
	}

	/* TL4 nodes scale with the number of VFs the device can expose */
	hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->udev);
}
+
/* One-time NIC PF hardware bring-up: block enable, backpressure,
 * TNS mode/bypass, default port-kind and minimum Tx packet padding.
 */
static void nic_init_hw(struct nicpf *nic)
{
	int i;
	u64 reg;
	/* pkind bitfield struct is written to hardware as a raw u64 */
	u64 *pkind = (u64 *)&nic->pkind;

	/* Get HW capability info */
	nic_get_hw_info(nic);

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, (1ULL << 63) | 0x08);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | 0x09);

	/* Enable Tx backpressure on every channel */
	for (i = 0; i < NIC_MAX_CHANS; i++)
		nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (i << 3), 1);

	if (nic->flags & NIC_TNS_ENABLED) {
		reg = NIC_TNS_MODE << 7;
		reg |= 0x06;
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
		reg &= ~0xFull;
		reg |= 0x07;
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
	} else {
		/* Disable TNS mode on both interfaces */
		reg = NIC_TNS_BYPASS_MODE << 7;
		reg |= 0x08; /* Block identifier */
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
		reg &= ~0xFull;
		reg |= 0x09;
		nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
	}

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	/* Same default port-kind for all entries */
	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3), *pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
}
+
/* Channel parse index configuration */
/* Program the steering tables for a VF's receive traffic:
 * Rx channel -> CPI range -> RSSI -> (Qset, RQ).
 * @cfg: VF id, CPI algorithm and receive queue count sent by the VF.
 */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	struct hw_info *hw = nic->hw;
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8  qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	/* Per-VF base channel, CPI and RSSI indices */
	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
	cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
	rssi_base = vnic * hw->rss_ind_tbl_size;

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	/* The number of CPIs consumed depends on the parse algorithm */
	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		/* Spread the remaining CPIs across the VF's receive queues */
		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	/* Remember the per-VF bases for later lookups */
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}
+
+/* Transmit channel configuration (TL4 -> TL3 -> Chan)
+ * VNIC0-SQ0 -> TL4(0)  -> TL4A(0) -> TL3[0] -> BGX0/LMAC0/Chan0
+ * VNIC1-SQ0 -> TL4(8)  -> TL4A(2) -> TL3[2] -> BGX0/LMAC1/Chan0
+ * VNIC2-SQ0 -> TL4(16) -> TL4A(4) -> TL3[4] -> BGX0/LMAC2/Chan0
+ * VNIC3-SQ0 -> TL4(32) -> TL4A(6) -> TL3[6] -> BGX0/LMAC3/Chan0
+ * VNIC4-SQ0 -> TL4(512)  -> TL4A(128) -> TL3[128] -> BGX1/LMAC0/Chan0
+ * VNIC5-SQ0 -> TL4(520)  -> TL4A(130) -> TL3[130] -> BGX1/LMAC1/Chan0
+ * VNIC6-SQ0 -> TL4(528)  -> TL4A(132) -> TL3[132] -> BGX1/LMAC2/Chan0
+ * VNIC7-SQ0 -> TL4(536)  -> TL4A(134) -> TL3[134] -> BGX1/LMAC3/Chan0
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+			       struct sq_cfg_msg *sq)
+{
+	struct hw_info *hw = nic->hw;
+	u32 bgx, lmac, chan;
+	u32 tl2, tl3, tl4;
+	u32 rr_quantum;
+	u8 sq_idx = sq->sq_num;
+	u8 pqs_vnic = vnic;
+	int svf;
+	u16 sdevid;
+
+	dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);
+
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+
+	/* 24 bytes for FCS, IPG and preamble */
+	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+	/* For 88xx 0-511 TL4 transmits via BGX0 and
+	 * 512-1023 TL4s transmit via BGX1.
+	 */
+	if (hw->tl1_per_bgx) {
+		tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
+		if (!sq->sqs_mode) {
+			tl4 += (lmac * MAX_QUEUES_PER_QSET);
+		} else {
+			for (svf = 0; svf < MAX_SQS_PER_VF_SINGLE_NODE; svf++) {
+				if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+					break;
+			}
+			tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
+			tl4 += (lmac * MAX_QUEUES_PER_QSET *
+				MAX_SQS_PER_VF_SINGLE_NODE);
+			tl4 += (svf * MAX_QUEUES_PER_QSET);
+		}
+	} else {
+		tl4 = (vnic * MAX_QUEUES_PER_QSET);
+	}
+
+	tl4 += sq_idx;
+
+	tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
+	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+		      ((u64)vnic << NIC_QS_ID_SHIFT) |
+		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+
+	/* On 88xx 0-127 channels are for BGX0 and
+	 * 127-255 channels for BGX1.
+	 *
+	 * On 81xx/83xx TL3_CHAN reg should be configured with channel
+	 * within LMAC i.e 0-7 and not the actual channel number like on 88xx
+	 */
+	chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+	if (hw->tl1_per_bgx)
+		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+	else
+		nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
+
+	/* Enable backpressure on the channel */
+	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
+	tl2 = tl3 >> 2;
+	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+	/* No priorities as of now */
+	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+
+	/* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1'
+	 * on 81xx/83xx TL2 needs to be configured to transmit to one of the
+	 * possible LMACs.
+	 *
+	 * This register doesn't exist on 88xx.
+	 */
+	if (!hw->tl1_per_bgx)
+		nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
+			      lmac + (bgx * MAX_LMAC_PER_BGX));
+}
+
/* Map BAR0, read the silicon revision, initialize the hardware and
 * build the VF->LMAC mapping.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when
 * BAR0 cannot be mapped (hw_info is freed on that path).
 */
int nic_initialize(struct udevice *dev)
{
	struct nicpf *nic = dev_get_priv(dev);

	nic->udev = dev;
	nic->hw = calloc(1, sizeof(struct hw_info));
	if (!nic->hw)
		return -ENOMEM;

	/* MAP PF's configuration registers */
	nic->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
				       PCI_REGION_MEM);
	if (!nic->reg_base) {
		printf("Cannot map config register space, aborting\n");
		goto exit;
	}

	nic->node = node_id(nic->reg_base);
	dm_pci_read_config8(dev, PCI_REVISION_ID, &nic->rev_id);

	/* By default set NIC in TNS bypass mode */
	nic->flags &= ~NIC_TNS_ENABLED;

	/* Initialize hardware */
	nic_init_hw(nic);

	nic_set_lmac_vf_mapping(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	nic->rss_ind_tbl_size = rounddown_pow_of_two(nic->rss_ind_tbl_size);

	return 0;
exit:
	free(nic->hw);
	return -ENODEV;
}
+
/* Driver-model probe: initialize the NIC PF and enable one SR-IOV VF
 * per discovered LMAC.
 * Returns 0 on success or a negative error code.
 */
int octeontx_nic_probe(struct udevice *dev)
{
	int ret = 0;
	struct nicpf *nicpf = dev_get_priv(dev);

	nicpf->udev = dev;
	ret = nic_initialize(dev);
	if (ret < 0) {
		printf("couldn't initialize NIC PF\n");
		return ret;
	}

	/* num_vf_en was computed by nic_set_lmac_vf_mapping() */
	ret = pci_sriov_init(dev, nicpf->num_vf_en);
	if (ret < 0)
		printf("enabling SRIOV failed for num VFs %d\n",
		       nicpf->num_vf_en);

	return ret;
}
+
/* Register the NIC PF with the driver model as a MISC-class device */
U_BOOT_DRIVER(octeontx_nic) = {
	.name	= "octeontx_nic",
	.id	= UCLASS_MISC,
	.probe	= octeontx_nic_probe,
	.priv_auto_alloc_size = sizeof(struct nicpf),
};

/* PCI vendor/device ids this driver binds to */
static struct pci_device_id octeontx_nic_supported[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NIC) },
	{}
};

U_BOOT_PCI_DEVICE(octeontx_nic, octeontx_nic_supported);
+
diff --git a/drivers/net/octeontx/nic_reg.h b/drivers/net/octeontx/nic_reg.h
new file mode 100644
index 0000000..c214ebb
--- /dev/null
+++ b/drivers/net/octeontx/nic_reg.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define   NIC_PF_REG_COUNT			29573
+#define   NIC_VF_REG_COUNT			249
+
+/* Physical function register offsets */
+#define   NIC_PF_CFG				(0x0000)
+#define   NIC_PF_STATUS				(0x0010)
+
+#define   NIC_PF_INTR_TIMER_CFG			(0x0030)
+#define   NIC_PF_BIST_STATUS			(0x0040)
+#define   NIC_PF_SOFT_RESET			(0x0050)
+
+#define   NIC_PF_TCP_TIMER			(0x0060)
+#define   NIC_PF_BP_CFG				(0x0080)
+#define   NIC_PF_RRM_CFG			(0x0088)
+#define   NIC_PF_CQM_CF				(0x00A0)
+#define   NIC_PF_CNM_CF				(0x00A8)
+#define   NIC_PF_CNM_STATUS			(0x00B0)
+#define   NIC_PF_CQ_AVG_CFG			(0x00C0)
+#define   NIC_PF_RRM_AVG_CFG			(0x00C8)
+
+#define   NIC_PF_INTF_0_1_SEND_CFG		(0x0200)
+#define   NIC_PF_INTF_0_1_BP_CFG		(0x0208)
+#define   NIC_PF_INTF_0_1_BP_DIS_0_1		(0x0210)
+#define   NIC_PF_INTF_0_1_BP_SW_0_1		(0x0220)
+#define   NIC_PF_RBDR_BP_STATE_0_3		(0x0240)
+
+#define   NIC_PF_MAILBOX_INT			(0x0410)
+#define   NIC_PF_MAILBOX_INT_W1S		(0x0430)
+#define   NIC_PF_MAILBOX_ENA_W1C		(0x0450)
+#define   NIC_PF_MAILBOX_ENA_W1S		(0x0470)
+
+#define   NIC_PF_RX_ETYPE_0_7			(0x0500)
+#define   NIC_PF_RX_CFG				(0x05D0)
+#define   NIC_PF_PKIND_0_15_CFG			(0x0600)
+
+#define   NIC_PF_ECC0_FLIP0			(0x1000)
+#define   NIC_PF_ECC1_FLIP0			(0x1008)
+#define   NIC_PF_ECC2_FLIP0			(0x1010)
+#define   NIC_PF_ECC3_FLIP0			(0x1018)
+#define   NIC_PF_ECC0_FLIP1			(0x1080)
+#define   NIC_PF_ECC1_FLIP1			(0x1088)
+#define   NIC_PF_ECC2_FLIP1			(0x1090)
+#define   NIC_PF_ECC3_FLIP1			(0x1098)
+#define   NIC_PF_ECC0_CDIS			(0x1100)
+#define   NIC_PF_ECC1_CDIS			(0x1108)
+#define   NIC_PF_ECC2_CDIS			(0x1110)
+#define   NIC_PF_ECC3_CDIS			(0x1118)
+#define   NIC_PF_BIST0_STATUS			(0x1280)
+#define   NIC_PF_BIST1_STATUS			(0x1288)
+#define   NIC_PF_BIST2_STATUS			(0x1290)
+#define   NIC_PF_BIST3_STATUS			(0x1298)
+
+#define   NIC_PF_ECC0_SBE_INT			(0x2000)
+#define   NIC_PF_ECC0_SBE_INT_W1S		(0x2008)
+#define   NIC_PF_ECC0_SBE_ENA_W1C		(0x2010)
+#define   NIC_PF_ECC0_SBE_ENA_W1S		(0x2018)
+#define   NIC_PF_ECC0_DBE_INT			(0x2100)
+#define   NIC_PF_ECC0_DBE_INT_W1S		(0x2108)
+#define   NIC_PF_ECC0_DBE_ENA_W1C		(0x2110)
+#define   NIC_PF_ECC0_DBE_ENA_W1S		(0x2118)
+
+#define   NIC_PF_ECC1_SBE_INT			(0x2200)
+#define   NIC_PF_ECC1_SBE_INT_W1S		(0x2208)
+#define   NIC_PF_ECC1_SBE_ENA_W1C		(0x2210)
+#define   NIC_PF_ECC1_SBE_ENA_W1S		(0x2218)
+#define   NIC_PF_ECC1_DBE_INT			(0x2300)
+#define   NIC_PF_ECC1_DBE_INT_W1S		(0x2308)
+#define   NIC_PF_ECC1_DBE_ENA_W1C		(0x2310)
+#define   NIC_PF_ECC1_DBE_ENA_W1S		(0x2318)
+
+#define   NIC_PF_ECC2_SBE_INT			(0x2400)
+#define   NIC_PF_ECC2_SBE_INT_W1S		(0x2408)
+#define   NIC_PF_ECC2_SBE_ENA_W1C		(0x2410)
+#define   NIC_PF_ECC2_SBE_ENA_W1S		(0x2418)
+#define   NIC_PF_ECC2_DBE_INT			(0x2500)
+#define   NIC_PF_ECC2_DBE_INT_W1S		(0x2508)
+#define   NIC_PF_ECC2_DBE_ENA_W1C		(0x2510)
+#define   NIC_PF_ECC2_DBE_ENA_W1S		(0x2518)
+
+#define   NIC_PF_ECC3_SBE_INT			(0x2600)
+#define   NIC_PF_ECC3_SBE_INT_W1S		(0x2608)
+#define   NIC_PF_ECC3_SBE_ENA_W1C		(0x2610)
+#define   NIC_PF_ECC3_SBE_ENA_W1S		(0x2618)
+#define   NIC_PF_ECC3_DBE_INT			(0x2700)
+#define   NIC_PF_ECC3_DBE_INT_W1S		(0x2708)
+#define   NIC_PF_ECC3_DBE_ENA_W1C		(0x2710)
+#define   NIC_PF_ECC3_DBE_ENA_W1S		(0x2718)
+
+#define   NIC_PF_CPI_0_2047_CFG			(0x200000)
+#define   NIC_PF_MPI_0_2047_CFG			(0x210000)
+#define   NIC_PF_RSSI_0_4097_RQ			(0x220000)
+#define   NIC_PF_LMAC_0_7_CFG			(0x240000)
+#define   NIC_PF_LMAC_0_7_SW_XOFF		(0x242000)
+#define   NIC_PF_LMAC_0_7_CREDIT		(0x244000)
+#define   NIC_PF_CHAN_0_255_TX_CFG		(0x400000)
+#define   NIC_PF_CHAN_0_255_RX_CFG		(0x420000)
+#define   NIC_PF_CHAN_0_255_SW_XOFF		(0x440000)
+#define   NIC_PF_CHAN_0_255_CREDIT		(0x460000)
+#define   NIC_PF_CHAN_0_255_RX_BP_CFG		(0x480000)
+
+#define   NIC_PF_SW_SYNC_RX			(0x490000)
+
+#define   NIC_PF_SW_SYNC_RX_DONE		(0x490008)
+#define   NIC_PF_TL2_0_63_CFG			(0x500000)
+#define   NIC_PF_TL2_0_63_PRI			(0x520000)
+#define   NIC_PF_TL2_LMAC			(0x540000)
+#define   NIC_PF_TL2_0_63_SH_STATUS		(0x580000)
+#define   NIC_PF_TL3A_0_63_CFG			(0x5F0000)
+#define   NIC_PF_TL3_0_255_CFG			(0x600000)
+#define   NIC_PF_TL3_0_255_CHAN			(0x620000)
+#define   NIC_PF_TL3_0_255_PIR			(0x640000)
+#define   NIC_PF_TL3_0_255_SW_XOFF		(0x660000)
+#define   NIC_PF_TL3_0_255_CNM_RATE		(0x680000)
+#define   NIC_PF_TL3_0_255_SH_STATUS		(0x6A0000)
+#define   NIC_PF_TL4A_0_255_CFG			(0x6F0000)
+#define   NIC_PF_TL4_0_1023_CFG			(0x800000)
+#define   NIC_PF_TL4_0_1023_SW_XOFF		(0x820000)
+#define   NIC_PF_TL4_0_1023_SH_STATUS		(0x840000)
+#define   NIC_PF_TL4A_0_1023_CNM_RATE		(0x880000)
+#define   NIC_PF_TL4A_0_1023_CNM_STATUS		(0x8A0000)
+
+#define   NIC_PF_VF_0_127_MAILBOX_0_1		(0x20002030)
+#define   NIC_PF_VNIC_0_127_TX_STAT_0_4		(0x20004000)
+#define   NIC_PF_VNIC_0_127_RX_STAT_0_13	(0x20004100)
+#define   NIC_PF_QSET_0_127_LOCK_0_15		(0x20006000)
+#define   NIC_PF_QSET_0_127_CFG			(0x20010000)
+#define   NIC_PF_QSET_0_127_RQ_0_7_CFG		(0x20010400)
+#define   NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG	(0x20010420)
+#define   NIC_PF_QSET_0_127_RQ_0_7_BP_CFG	(0x20010500)
+#define   NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1	(0x20010600)
+#define   NIC_PF_QSET_0_127_SQ_0_7_CFG		(0x20010C00)
+#define   NIC_PF_QSET_0_127_SQ_0_7_CFG2		(0x20010C08)
+#define   NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1	(0x20010D00)
+
+#define   NIC_PF_MSIX_VEC_0_18_ADDR		(0x000000)
+#define   NIC_PF_MSIX_VEC_0_CTL			(0x000008)
+#define   NIC_PF_MSIX_PBA_0			(0x0F0000)
+
+/* Virtual function register offsets */
+#define   NIC_VNIC_CFG				(0x000020)
+#define   NIC_VF_PF_MAILBOX_0_1			(0x000130)
+#define   NIC_VF_INT				(0x000200)
+#define   NIC_VF_INT_W1S			(0x000220)
+#define   NIC_VF_ENA_W1C			(0x000240)
+#define   NIC_VF_ENA_W1S			(0x000260)
+
+#define   NIC_VNIC_RSS_CFG			(0x0020E0)
+#define   NIC_VNIC_RSS_KEY_0_4			(0x002200)
+#define   NIC_VNIC_TX_STAT_0_4			(0x004000)
+#define   NIC_VNIC_RX_STAT_0_13			(0x004100)
+#define   NIC_QSET_RQ_GEN_CFG			(0x010010)
+
+#define   NIC_QSET_CQ_0_7_CFG			(0x010400)
+#define   NIC_QSET_CQ_0_7_CFG2			(0x010408)
+#define   NIC_QSET_CQ_0_7_THRESH		(0x010410)
+#define   NIC_QSET_CQ_0_7_BASE			(0x010420)
+#define   NIC_QSET_CQ_0_7_HEAD			(0x010428)
+#define   NIC_QSET_CQ_0_7_TAIL			(0x010430)
+#define   NIC_QSET_CQ_0_7_DOOR			(0x010438)
+#define   NIC_QSET_CQ_0_7_STATUS		(0x010440)
+#define   NIC_QSET_CQ_0_7_STATUS2		(0x010448)
+#define   NIC_QSET_CQ_0_7_DEBUG			(0x010450)
+
+#define   NIC_QSET_RQ_0_7_CFG			(0x010600)
+#define   NIC_QSET_RQ_0_7_STAT_0_1		(0x010700)
+
+#define   NIC_QSET_SQ_0_7_CFG			(0x010800)
+#define   NIC_QSET_SQ_0_7_THRESH		(0x010810)
+#define   NIC_QSET_SQ_0_7_BASE			(0x010820)
+#define   NIC_QSET_SQ_0_7_HEAD			(0x010828)
+#define   NIC_QSET_SQ_0_7_TAIL			(0x010830)
+#define   NIC_QSET_SQ_0_7_DOOR			(0x010838)
+#define   NIC_QSET_SQ_0_7_STATUS		(0x010840)
+#define   NIC_QSET_SQ_0_7_DEBUG			(0x010848)
+#define   NIC_QSET_SQ_0_7_CNM_CHG		(0x010860)
+#define   NIC_QSET_SQ_0_7_STAT_0_1		(0x010900)
+
+#define   NIC_QSET_RBDR_0_1_CFG			(0x010C00)
+#define   NIC_QSET_RBDR_0_1_THRESH		(0x010C10)
+#define   NIC_QSET_RBDR_0_1_BASE		(0x010C20)
+#define   NIC_QSET_RBDR_0_1_HEAD		(0x010C28)
+#define   NIC_QSET_RBDR_0_1_TAIL		(0x010C30)
+#define   NIC_QSET_RBDR_0_1_DOOR		(0x010C38)
+#define   NIC_QSET_RBDR_0_1_STATUS0		(0x010C40)
+#define   NIC_QSET_RBDR_0_1_STATUS1		(0x010C48)
+#define   NIC_QSET_RBDR_0_1_PREFETCH_STATUS	(0x010C50)
+
+#define   NIC_VF_MSIX_VECTOR_0_19_ADDR		(0x000000)
+#define   NIC_VF_MSIX_VECTOR_0_19_CTL		(0x000008)
+#define   NIC_VF_MSIX_PBA			(0x0F0000)
+
+/* Offsets within registers */
+#define   NIC_MSIX_VEC_SHIFT			4
+#define   NIC_Q_NUM_SHIFT			18
+#define   NIC_QS_ID_SHIFT			21
+#define   NIC_VF_NUM_SHIFT			21
+
/* Port kind configuration register */
/* Bit layout of NIC_PF_PKIND_0_15_CFG; instances of this struct are
 * written to hardware as a raw 64-bit word.
 */
struct pkind_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	uint64_t reserved_42_63:22;
	uint64_t hdr_sl:5;	/* Header skip length */
	uint64_t rx_hdr:3;	/* TNS Receive header present */
	uint64_t lenerr_en:1;	/* L2 length error check enable */
	uint64_t reserved_32_32:1;
	uint64_t maxlen:16;	/* Max frame size */
	uint64_t minlen:16;	/* Min frame size */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	uint64_t minlen:16;	/* Min frame size */
	uint64_t maxlen:16;	/* Max frame size */
	uint64_t reserved_32_32:1;
	uint64_t lenerr_en:1;	/* L2 length error check enable */
	uint64_t rx_hdr:3;	/* TNS Receive header present */
	uint64_t hdr_sl:5;	/* Header skip length */
	uint64_t reserved_42_63:22;
#endif
};
+
/* Physical base address of BGX(param1) PF BAR0; param1 must be 0 or 1. */
static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
				     __attribute__ ((pure, always_inline));
static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
{
	uint64_t bar = 0x87E0E0000000ULL;

	assert(param1 <= 1);
	bar += (uint64_t)param1 << 24;

	return bar;
}
+
+#define BGXX_PF_BAR0_SIZE 0x400000
+#define NIC_PF_BAR0 0x843000000000
+#define NIC_PF_BAR0_SIZE 0x40000000
+
/* Physical base address of NIC VF(param1) BAR0; param1 is 0..127. */
static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
				     __attribute__ ((pure, always_inline));
static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
{
	const uint64_t stride = 1ULL << 21;	/* 2 MiB per VF */

	assert(param1 <= 127);

	return 0x8430A0000000ULL + param1 * stride;
}
+
+#define NIC_VFX_BAR0_SIZE 0x200000
+
+#endif /* NIC_REG_H */
diff --git a/drivers/net/octeontx/nicvf_main.c b/drivers/net/octeontx/nicvf_main.c
new file mode 100644
index 0000000..e13c8b9
--- /dev/null
+++ b/drivers/net/octeontx/nicvf_main.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <phy.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+
+/* Register read/write APIs */
+/* Write a 64-bit value to the VF register at @offset */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+	void *addr = nic->reg_base + offset;
+
+	writeq(val, addr);
+}
+
+/* Read the 64-bit VF register at @offset */
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+	void *addr = nic->reg_base + offset;
+
+	return readq(addr);
+}
+
+/* Write a per-queue register; @qidx selects the queue instance via the
+ * queue-number field of the register address.
+ */
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+			   u64 qidx, u64 val)
+{
+	writeq(val, nic->reg_base + offset + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+/* Read a per-queue register; @qidx selects the queue instance */
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+	return readq(nic->reg_base + offset + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+static void  nicvf_handle_mbx_intr(struct nicvf *nic);
+
+/* VF -> PF mailbox communication */
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+	u64 *data = (u64 *)mbx;
+
+	/* The mailbox is a pair of consecutive 64-bit registers */
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, data[0]);
+	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + sizeof(u64), data[1]);
+}
+
+/* Send a mailbox message to the PF and poll for its (N)ACK.
+ *
+ * There are no interrupts in U-Boot, so the PF-side handler is invoked
+ * directly and the VF-side handler is polled until pf_acked/pf_nacked
+ * is set by nicvf_handle_mbx_intr().
+ *
+ * Return: 0 on ACK, -1 on NACK or timeout (NIC_PF_VF_MBX_TIMEOUT ms).
+ */
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+	int timeout = NIC_PF_VF_MBX_TIMEOUT;
+	int sleep = 10;
+
+	nic->pf_acked = false;
+	nic->pf_nacked = false;
+
+	nicvf_write_to_mbx(nic, mbx);
+
+	nic_handle_mbx_intr(nic->nicpf, nic->vf_id);
+
+	/* Wait for the message to be acked */
+	while (!nic->pf_acked) {
+		if (nic->pf_nacked)
+			return -1;
+		mdelay(sleep);
+		nicvf_handle_mbx_intr(nic);
+
+		if (nic->pf_acked)
+			break;
+		timeout -= sleep;
+		/* '<= 0' guards against skipping zero (and looping
+		 * forever) if the timeout is not an exact multiple of
+		 * the polling interval.
+		 */
+		if (timeout <= 0) {
+			printf("PF didn't ack to mbox msg %d from VF%d\n",
+			       (mbx->msg.msg & 0xFF), nic->vf_id);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Check whether the VF is able to communicate with the PF by sending a
+ * READY message; the PF's reply also carries the VNIC number this VF is
+ * associated to.
+ *
+ * Return: 1 when the PF responded, 0 otherwise.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.msg.msg = NIC_MBOX_MSG_READY;
+	if (!nicvf_send_msg_to_pf(nic, &mbx))
+		return 1;
+
+	printf("PF didn't respond to READY msg\n");
+	return 0;
+}
+
+/* Read the mailbox registers from the VF register space and dispatch on
+ * the message type.  Updates the link/config state cached in @nic and the
+ * pf_acked/pf_nacked flags polled by nicvf_send_msg_to_pf().
+ */
+static void  nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+	struct eth_pdata *pdata = dev_get_platdata(nic->dev);
+	u64 *mbx_data;
+	u64 mbx_addr;
+	int i;
+
+	/* Copy the whole mailbox (NIC_PF_VF_MAILBOX_SIZE qwords) into mbx */
+	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+	mbx_data = (u64 *)&mbx;
+
+	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+		*mbx_data = nicvf_reg_read(nic, mbx_addr);
+		mbx_data++;
+		mbx_addr += sizeof(u64);
+	}
+
+	debug("Mbox message: msg: 0x%x\n", mbx.msg.msg);
+	switch (mbx.msg.msg) {
+	case NIC_MBOX_MSG_READY:
+		/* PF handshake reply: cache VF/node config and MAC */
+		nic->pf_acked = true;
+		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+		nic->node = mbx.nic_cfg.node_id;
+		/* Keep the locally requested MAC if an update is pending */
+		if (!nic->set_mac_pending)
+			memcpy(pdata->enetaddr,
+			       mbx.nic_cfg.mac_addr, 6);
+		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
+		nic->link_up = false;
+		nic->duplex = 0;
+		nic->speed = 0;
+		break;
+	case NIC_MBOX_MSG_ACK:
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_NACK:
+		nic->pf_nacked = true;
+		break;
+	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+		nic->pf_acked = true;
+		nic->link_up = mbx.link_status.link_up;
+		nic->duplex = mbx.link_status.duplex;
+		nic->speed = mbx.link_status.speed;
+		if (nic->link_up) {
+			printf("%s: Link is Up %d Mbps %s\n",
+			       nic->dev->name, nic->speed,
+			       nic->duplex == 1 ?
+			       "Full duplex" : "Half duplex");
+		} else {
+			printf("%s: Link is Down\n", nic->dev->name);
+		}
+		break;
+	default:
+		printf("Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+		break;
+	}
+
+	/* Acknowledge the mailbox interrupt bit */
+	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+/* Push the platform-data MAC address to the PF via a SET_MAC mailbox
+ * message.
+ *
+ * Return: 0 on ACK from the PF, -1 otherwise.
+ */
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct udevice *dev)
+{
+	struct eth_pdata *pdata = dev_get_platdata(dev);
+	union nic_mbx mbx = {};
+
+	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+	mbx.mac.vf_id = nic->vf_id;
+	memcpy(mbx.mac.mac_addr, pdata->enetaddr, 6);
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+/* Ask the PF to configure channel-to-CPI mapping for this VF's receive
+ * queues using the algorithm selected in nic->cpi_alg.
+ */
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+	mbx.cpi_cfg.vf_id = nic->vf_id;
+	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+/* Enable the hardware Qset and bring up all queues for data transfer.
+ *
+ * Return: 0 on success, negative error from queue configuration.
+ */
+static int nicvf_init_resources(struct nicvf *nic)
+{
+	int ret;
+
+	nic->num_qs = 1;
+
+	/* Enable Qset */
+	nicvf_qset_config(nic, true);
+
+	/* Initialize queues and HW for data transfer */
+	ret = nicvf_config_data_transfer(nic, true);
+	if (ret)
+		printf("Failed to alloc/config VF's QSet resources\n");
+
+	return ret;
+}
+
+/* Handle a Tx completion CQE: locate the header subdescriptor of the
+ * completed packet, record any Tx errors and return the used SQ
+ * descriptors to the free pool.
+ */
+static void nicvf_snd_pkt_handler(struct nicvf *nic,
+				  struct cmp_queue *cq,
+				  void *cq_desc, int cqe_type)
+{
+	struct cqe_send_t *cqe_tx;
+	struct snd_queue *sq;
+	struct sq_hdr_subdesc *hdr;
+
+	cqe_tx = (struct cqe_send_t *)cq_desc;
+	sq = &nic->qs->sq[cqe_tx->sq_idx];
+
+	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+	/* Completions must point at a HEADER subdescriptor */
+	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+		return;
+
+	nicvf_check_cqe_tx_errs(nic, cq, cq_desc);
+	/* Free the header plus all subdescriptors that followed it */
+	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+}
+
+/* Handle an Rx completion CQE: validate it, extract the received packet
+ * from the receive buffers and hand it back through @ppkt.
+ *
+ * Return: packet length on success, -1 on CQE error or missing buffer.
+ */
+static int nicvf_rcv_pkt_handler(struct nicvf *nic,
+				 struct cmp_queue *cq, void *cq_desc,
+				 void **ppkt, int cqe_type)
+{
+	struct cqe_rx_t *cqe_rx = (struct cqe_rx_t *)cq_desc;
+	size_t pkt_len;
+	void *pkt;
+	int err;
+
+	/* Check for errors */
+	err = nicvf_check_cqe_rx_errs(nic, cq, cq_desc);
+	if (err && !cqe_rx->rb_cnt)
+		return -1;
+
+	pkt = nicvf_get_rcv_pkt(nic, cq_desc, &pkt_len);
+	if (!pkt) {
+		debug("Packet not received\n");
+		return -1;
+	}
+
+	/* pkt is known non-NULL here; the old re-check was redundant */
+	*ppkt = pkt;
+
+	return pkt_len;
+}
+
+/* Process at most one pending completion queue entry on CQ 0.
+ *
+ * @ppkt:    set to the received packet for an Rx completion
+ * @pkt_len: set to the Rx handler's result (length or -1 on error)
+ *
+ * Return: non-zero if an Rx or Tx completion was processed, 0 if the CQ
+ * was empty.
+ */
+int nicvf_cq_handler(struct nicvf *nic, void **ppkt, int *pkt_len)
+{
+	int cq_qnum = 0;
+	int processed_sq_cqe = 0;
+	int processed_rq_cqe = 0;
+	int processed_cqe = 0;
+
+	unsigned long cqe_count, cqe_head;
+	struct queue_set *qs = nic->qs;
+	struct cmp_queue *cq = &qs->cq[cq_qnum];
+	struct cqe_rx_t *cq_desc;
+
+	/* Get number of valid CQ entries to process */
+	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_qnum);
+	cqe_count &= 0xFFFF;
+	if (!cqe_count)
+		return 0;
+
+	/* Get head of the valid CQ entries */
+	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_qnum);
+	cqe_head >>= 9;
+	cqe_head &= 0xFFFF;
+
+	if (cqe_count) {
+		/* Get the CQ descriptor */
+		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+		cqe_head++;
+		cqe_head &= (cq->dmem.q_len - 1);
+		/* Initiate prefetch for next descriptor */
+		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
+		switch (cq_desc->cqe_type) {
+		case CQE_TYPE_RX:
+			debug("%s: Got Rx CQE\n", nic->dev->name);
+			*pkt_len = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
+							 ppkt, CQE_TYPE_RX);
+			processed_rq_cqe++;
+			break;
+		case CQE_TYPE_SEND:
+			debug("%s: Got Tx CQE\n", nic->dev->name);
+			nicvf_snd_pkt_handler(nic, cq, cq_desc, CQE_TYPE_SEND);
+			processed_sq_cqe++;
+			break;
+		default:
+			debug("%s: Got CQ type %u\n", nic->dev->name,
+			      cq_desc->cqe_type);
+			break;
+		}
+		processed_cqe++;
+	}
+
+	/* Dequeue CQE: ring the doorbell with the number consumed */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+			      cq_qnum, processed_cqe);
+
+	/* Ensure the doorbell write is ordered before returning */
+	asm volatile ("dsb sy");
+
+	return (processed_sq_cqe | processed_rq_cqe);
+}
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+void nicvf_handle_qs_err(struct nicvf *nic)
+{
+	struct queue_set *qs = nic->qs;
+	int qidx;
+	u64 status;
+
+	/* Check if it is CQ err */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+					      qidx);
+		if (!(status & CQ_ERR_MASK))
+			continue;
+		/* Process already queued CQEs and reconfig CQ:
+		 * stop the paired SQ, reset the CQ, drain the SQ's
+		 * consumed descriptors and restart the SQ.
+		 */
+		nicvf_sq_disable(nic, qidx);
+		nicvf_cmp_queue_config(nic, qs, qidx, true);
+		nicvf_sq_free_used_descs(nic->dev, &qs->sq[qidx], qidx);
+		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+	}
+}
+
+/* DM eth free_pkt hook: release the buffer handed out by nicvf_recv()
+ * and top up the receive buffer descriptor ring.
+ */
+static int nicvf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
+{
+	struct nicvf *nic = dev_get_priv(dev);
+
+	if (pkt_len && pkt)
+		free(pkt);
+
+	nicvf_refill_rbdr(nic);
+
+	return 0;
+}
+
+/* DM eth send hook: queue one packet on the SQ and poll the CQ until the
+ * Tx completion (or an Rx completion) is seen.
+ *
+ * Return: 0 on success or poll timeout, -1 when the Tx ring is full.
+ * NOTE(review): a poll timeout still returns 0 — confirm this silent
+ * best-effort behavior is intended.
+ */
+static int nicvf_xmit(struct udevice *dev, void *pkt, int pkt_len)
+{
+	struct nicvf *nic = dev_get_priv(dev);
+	int ret = 0;
+	int rcv_len = 0;
+	unsigned int timeout = 5000;
+	void *rpkt = NULL;
+
+	if (!nicvf_sq_append_pkt(nic, pkt, pkt_len)) {
+		printf("VF%d: TX ring full\n", nic->vf_id);
+		return -1;
+	}
+
+	/* check and update CQ for pkt sent */
+	while (!ret && timeout--) {
+		ret = nicvf_cq_handler(nic, &rpkt, &rcv_len);
+		if (!ret) {
+			debug("%s: %d, Not sent\n", __func__, __LINE__);
+			udelay(10);
+		}
+	}
+
+	return 0;
+}
+
+/* DM eth recv hook: poll the CQ once and hand back a received packet.
+ *
+ * Return: packet length, 0 when nothing was received, or a negative
+ * value on CQE error (in which case *packetp is left untouched).
+ */
+static int nicvf_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+	struct nicvf *nic = dev_get_priv(dev);
+	/* Initialize: on CQE errors pkt_len becomes negative while pkt is
+	 * never written, so it must not be an indeterminate pointer.
+	 */
+	void *pkt = NULL;
+	int pkt_len = 0;
+#ifdef DEBUG
+	u8 *dpkt;
+	int i, j;
+#endif
+
+	nicvf_cq_handler(nic, &pkt, &pkt_len);
+
+	/* Hand out a buffer only on a successful reception */
+	if (pkt_len > 0) {
+#ifdef DEBUG
+		dpkt = pkt;
+		printf("RX packet contents:\n");
+		for (i = 0; i < 8; i++) {
+			puts("\t");
+			for (j = 0; j < 10; j++)
+				printf("%02x ", dpkt[i * 10 + j]);
+			puts("\n");
+		}
+#endif
+		*packetp = pkt;
+	}
+
+	return pkt_len;
+}
+
+/* DM eth stop hook: tear down queues and disable the hardware Qset.
+ * Safe to call when the interface was never opened.
+ */
+void nicvf_stop(struct udevice *dev)
+{
+	struct nicvf *nic = dev_get_priv(dev);
+
+	if (!nic->open)
+		return;
+
+	/* Free resources */
+	nicvf_config_data_transfer(nic, false);
+
+	/* Disable HW Qset */
+	nicvf_qset_config(nic, false);
+
+	nic->open = false;
+}
+
+/* DM eth start hook: program the MAC, configure CPI, bring up the queue
+ * set and verify PF communication.
+ *
+ * Return: 0 on success, -1 on queue init or PF handshake failure.
+ */
+int nicvf_open(struct udevice *dev)
+{
+	int err;
+	struct nicvf *nic = dev_get_priv(dev);
+
+	nicvf_hw_set_mac_addr(nic, dev);
+
+	/* Configure CPI alorithm */
+	nic->cpi_alg = CPI_ALG_NONE;
+	nicvf_config_cpi(nic);
+
+	/* Initialize the queues */
+	err = nicvf_init_resources(nic);
+	if (err)
+		return -1;
+
+	/* NOTE(review): READY handshake runs after queue init here, even
+	 * though earlier mailbox messages already exercised the PF link —
+	 * confirm the ordering is intentional.
+	 */
+	if (!nicvf_check_pf_ready(nic))
+		return -1;
+
+	nic->open = true;
+
+	/* Make sure queue initialization is written */
+	asm volatile("dsb sy");
+
+	return 0;
+}
+
+/* DM eth write_hwaddr hook: keep the environment's ethNaddr and the
+ * hardware MAC in sync with the platform-data address.
+ */
+int nicvf_write_hwaddr(struct udevice *dev)
+{
+	unsigned char ethaddr[ARP_HLEN];
+	struct eth_pdata *pdata = dev_get_platdata(dev);
+	struct nicvf *nic = dev_get_priv(dev);
+
+	/* If lower level firmware fails to set proper MAC
+	 * u-boot framework updates MAC to random address.
+	 * Use this hook to update mac address in environment.
+	 */
+	if (!eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr)) {
+		eth_env_set_enetaddr_by_index("eth", dev->seq, pdata->enetaddr);
+		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
+	}
+	/* Re-read and push to hardware only if env and pdata disagree */
+	eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr);
+	if (memcmp(ethaddr, pdata->enetaddr, ARP_HLEN)) {
+		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
+		nicvf_hw_set_mac_addr(nic, dev);
+	}
+	return 0;
+}
+
+/* Probe the Cavium SMI/MDIO PCI device once, on the first VF init.
+ * Failure is non-fatal and only reported at debug level.
+ */
+static void nicvf_probe_mdio_devices(void)
+{
+	static int probed;
+	struct udevice *pdev;
+
+	if (probed)
+		return;
+	probed = 1;
+
+	if (dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+			       PCI_DEVICE_ID_CAVIUM_SMI, 0, &pdev))
+		debug("%s couldn't find SMI device\n", __func__);
+}
+
+/* One-time VF setup: locate the PF device, map BAR0, allocate the queue
+ * set bookkeeping, name the device "vnicN" and seed its MAC address from
+ * the board FDT.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int nicvf_initialize(struct udevice *dev)
+{
+	struct nicvf *nicvf = dev_get_priv(dev);
+	struct eth_pdata *pdata = dev_get_platdata(dev);
+	int    ret = 0, bgx, lmac;
+	char   name[16];
+	unsigned char ethaddr[ARP_HLEN];
+	struct udevice *pfdev;
+	struct nicpf *pf;
+	/* Monotonic VF id shared across all probed VF instances */
+	static int vfid;
+
+	if (dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+			       PCI_DEVICE_ID_CAVIUM_NIC, 0, &pfdev)) {
+		printf("%s NIC PF device not found..VF probe failed\n",
+		       __func__);
+		return -1;
+	}
+	pf = dev_get_priv(pfdev);
+	nicvf->vf_id = vfid++;
+	nicvf->dev = dev;
+	nicvf->nicpf = pf;
+
+	nicvf_probe_mdio_devices();
+
+	/* Enable TSO support */
+	nicvf->hw_tso = true;
+
+	nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+					 PCI_REGION_MEM);
+
+	debug("nicvf->reg_base: %p\n", nicvf->reg_base);
+
+	if (!nicvf->reg_base) {
+		printf("Cannot map config register space, aborting\n");
+		ret = -1;
+		goto fail;
+	}
+
+	ret = nicvf_set_qset_resources(nicvf);
+	if (ret)
+		return -1;
+
+	sprintf(name, "vnic%u", nicvf->vf_id);
+	debug("%s name %s\n", __func__, name);
+	device_set_name(dev, name);
+
+	/* Resolve which BGX block/LMAC this VF is mapped to by the PF */
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
+	debug("%s VF %d BGX %d LMAC %d\n", __func__, nicvf->vf_id, bgx, lmac);
+	debug("%s PF %p pfdev %p VF %p vfdev %p vf->pdata %p\n",
+	      __func__, nicvf->nicpf, nicvf->nicpf->udev, nicvf, nicvf->dev,
+	      pdata);
+
+	fdt_board_get_ethaddr(bgx, lmac, ethaddr);
+
+	debug("%s bgx %d lmac %d ethaddr %pM\n", __func__, bgx, lmac, ethaddr);
+
+	if (is_valid_ethaddr(ethaddr)) {
+		memcpy(pdata->enetaddr, ethaddr, ARP_HLEN);
+		eth_env_set_enetaddr_by_index("eth", dev->seq, ethaddr);
+	}
+	debug("%s enetaddr %pM ethaddr %pM\n", __func__,
+	      pdata->enetaddr, ethaddr);
+
+fail:
+	return ret;
+}
+
+/* Driver-model probe entry point: set up one VNIC VF instance */
+int octeontx_vnic_probe(struct udevice *dev)
+{
+	return nicvf_initialize(dev);
+}
+
+/* U-Boot driver-model Ethernet operations for the VNIC VF */
+static const struct eth_ops octeontx_vnic_ops = {
+	.start = nicvf_open,
+	.stop  = nicvf_stop,
+	.send  = nicvf_xmit,
+	.recv  = nicvf_recv,
+	.free_pkt = nicvf_free_pkt,
+	.write_hwaddr = nicvf_write_hwaddr,
+};
+
+/* Ethernet-uclass driver; priv holds the nicvf state, platdata the MAC */
+U_BOOT_DRIVER(octeontx_vnic) = {
+	.name	= "vnic",
+	.id	= UCLASS_ETH,
+	.probe	= octeontx_vnic_probe,
+	.ops	= &octeontx_vnic_ops,
+	.priv_auto_alloc_size = sizeof(struct nicvf),
+	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
+};
+
+/* PCI IDs bound to this driver: Cavium ThunderX NIC VF variants */
+static struct pci_device_id octeontx_vnic_supported[] = {
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF) },
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF_1) },
+	{}
+};
+
+U_BOOT_PCI_DEVICE(octeontx_vnic, octeontx_vnic_supported);
diff --git a/drivers/net/octeontx/nicvf_queues.c b/drivers/net/octeontx/nicvf_queues.c
new file mode 100644
index 0000000..c7f262f
--- /dev/null
+++ b/drivers/net/octeontx/nicvf_queues.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <cpu_func.h>
+#include <dm/device.h>
+#include <malloc.h>
+#include <net.h>
+#include <phy.h>
+#include <linux/delay.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+/* Poll a per-queue register until the field at @bit_pos (@bits wide)
+ * equals @val.  Polls up to 10 times with 2ms between reads (~20ms max).
+ *
+ * Return: 0 when the value matched, 1 on timeout.
+ */
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+			  u64 reg, int bit_pos, int bits, int val)
+{
+	u64 bit_mask;
+	u64 reg_val;
+	int timeout = 10;
+
+	bit_mask = (1ULL << bits) - 1;
+	bit_mask = (bit_mask << bit_pos);
+
+	while (timeout) {
+		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+		if (((reg_val & bit_mask) >> bit_pos) == val)
+			return 0;
+		udelay(2000);
+		timeout--;
+	}
+	printf("Poll on reg 0x%llx failed\n", reg);
+	return 1;
+}
+
+/* Allocate zeroed, aligned memory for a descriptor ring.
+ *
+ * @q_len:       number of descriptors
+ * @desc_size:   size of one descriptor in bytes
+ * @align_bytes: required alignment of the ring base
+ *
+ * Return: 0 on success, -1 on allocation failure.
+ */
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+				  int q_len, int desc_size, int align_bytes)
+{
+	dmem->q_len = q_len;
+	/* Over-allocate so the base can be aligned within the buffer */
+	dmem->size = (desc_size * q_len) + align_bytes;
+
+	/* Save unaligned address, needed while freeing; check the
+	 * allocation before deriving any addresses from it.
+	 */
+	dmem->unalign_base = calloc(1, dmem->size);
+	if (!dmem->unalign_base)
+		return -1;
+
+	dmem->dma = (uintptr_t)dmem->unalign_base;
+
+	/* Align memory address for 'align_bytes' */
+	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+
+	return 0;
+}
+
+/* Free descriptor ring memory allocated by nicvf_alloc_q_desc_mem() */
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+	if (!dmem)
+		return;
+
+	/* Free via the saved unaligned pointer, then clear both views */
+	free(dmem->unalign_base);
+	dmem->unalign_base = NULL;
+	dmem->base = NULL;
+}
+
+/* Convert a receive-buffer address into a packet pointer.  Buffers are
+ * identity-mapped in U-Boot, so this is a plain cast.
+ */
+static void *nicvf_rb_ptr_to_pkt(struct nicvf *nic, uintptr_t rb_ptr)
+{
+	return (void *)rb_ptr;
+}
+
+/* Allocate a receive buffer descriptor ring plus the receive buffers it
+ * points at, and populate every descriptor.
+ *
+ * Return: 0 on success, -1 on allocation failure (nothing leaked).
+ */
+static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+			   int ring_len, int buf_size)
+{
+	int idx;
+	uintptr_t rbuf;
+	struct rbdr_entry_t *desc;
+
+	if (nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+				   sizeof(struct rbdr_entry_t),
+				   NICVF_RCV_BUF_ALIGN_BYTES)) {
+		printf("Unable to allocate memory for rcv buffer ring\n");
+		return -1;
+	}
+
+	rbdr->desc = rbdr->dmem.base;
+	/* Buffer size has to be in multiples of 128 bytes */
+	rbdr->dma_size = buf_size;
+	rbdr->enable = true;
+	rbdr->thresh = RBDR_THRESH;
+
+	/* Cast to u64 so the value matches the %lld conversion */
+	debug("%s: %d: allocating %lld bytes for rcv buffers\n",
+	      __func__, __LINE__,
+	      (u64)ring_len * buf_size + NICVF_RCV_BUF_ALIGN_BYTES);
+	rbdr->buf_mem = (uintptr_t)calloc(1, ring_len * buf_size
+						+ NICVF_RCV_BUF_ALIGN_BYTES);
+
+	if (!rbdr->buf_mem) {
+		printf("Unable to allocate memory for rcv buffers\n");
+		/* Don't leak the descriptor ring on failure */
+		nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+		return -1;
+	}
+
+	rbdr->buffers = NICVF_ALIGNED_ADDR(rbdr->buf_mem,
+					   NICVF_RCV_BUF_ALIGN_BYTES);
+
+	debug("%s: %d: rbdr->buf_mem: %lx, rbdr->buffers: %lx\n",
+	      __func__, __LINE__, rbdr->buf_mem, rbdr->buffers);
+
+	for (idx = 0; idx < ring_len; idx++) {
+		rbuf = rbdr->buffers + DMA_BUFFER_LEN * idx;
+		desc = GET_RBDR_DESC(rbdr, idx);
+		desc->buf_addr = rbuf >> NICVF_RCV_BUF_ALIGN;
+		/* Flush the descriptor itself (sizeof(*desc), not the
+		 * pointer size) so the HW sees the buffer address.
+		 */
+		flush_dcache_range((uintptr_t)desc,
+				   (uintptr_t)desc + sizeof(*desc));
+	}
+	return 0;
+}
+
+/* Release the receive buffers and the RBDR descriptor ring itself.
+ * Safe to call on a ring that was never (fully) allocated.
+ */
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+	if (!rbdr)
+		return;
+
+	rbdr->enable = false;
+	if (!rbdr->dmem.base)
+		return;
+
+	debug("%s: %d: rbdr->buf_mem: %p\n", __func__,
+	      __LINE__, (void *)rbdr->buf_mem);
+	free((void *)rbdr->buf_mem);
+
+	/* Free RBDR ring */
+	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers.
+ * Called synchronously from the driver (U-Boot has no softirq context).
+ */
+void nicvf_refill_rbdr(struct nicvf *nic)
+{
+	struct queue_set *qs = nic->qs;
+	/* Only the last RBDR is refilled; none configured means nothing
+	 * to do.
+	 */
+	int rbdr_idx = qs->rbdr_cnt;
+	unsigned long qcount, head, tail, rb_cnt;
+	struct rbdr *rbdr;
+
+	if (!rbdr_idx)
+		return;
+	rbdr_idx--;
+	rbdr = &qs->rbdr[rbdr_idx];
+	/* Check if it's enabled */
+	if (!rbdr->enable) {
+		printf("Receive queue %d is disabled\n", rbdr_idx);
+		return;
+	}
+
+	/* check if valid descs reached or crossed threshold level */
+	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
+	head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, rbdr_idx);
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx);
+
+	qcount &= 0x7FFFF;
+
+	/* Number of free slots to hand back to the hardware */
+	rb_cnt = qs->rbdr_len - qcount - 1;
+
+	debug("%s: %d: qcount: %lu, head: %lx, tail: %lx, rb_cnt: %lu\n",
+	      __func__, __LINE__, qcount, head, tail, rb_cnt);
+
+	/* Notify HW */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, rbdr_idx, rb_cnt);
+
+	/* Order the doorbell write before continuing */
+	asm volatile ("dsb sy");
+}
+
+/* Allocate and initialize a completion queue. TBD: handling of full
+ * packets received in the CQ, i.e. conversion of buffers into network
+ * packets for the caller.
+ */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+				struct cmp_queue *cq, int q_len)
+{
+	if (nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len,
+				   CMP_QUEUE_DESC_SIZE,
+				   NICVF_CQ_BASE_ALIGN_BYTES)) {
+		printf("Unable to allocate memory for completion queue\n");
+		return -1;
+	}
+	cq->desc = cq->dmem.base;
+	/* Pass-1 silicon gets a zero CQE threshold as a workaround */
+	if (!pass1_silicon(nic->rev_id, nic->nicpf->hw->model_id))
+		cq->thresh = CMP_QUEUE_CQE_THRESH;
+	else
+		cq->thresh = 0;
+	cq->intr_timer_thresh = CMP_QUEUE_TIMER_THRESH;
+
+	return 0;
+}
+
+/* Release a completion queue's descriptor memory; a NULL or never
+ * allocated queue is a no-op.
+ */
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+	if (!cq || !cq->dmem.base)
+		return;
+
+	nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+/* Allocate a send queue: descriptor ring plus the per-descriptor packet
+ * pointer array used to release buffers on Tx completion.
+ *
+ * Return: 0 on success, -1 on allocation failure (nothing leaked).
+ */
+static int nicvf_init_snd_queue(struct nicvf *nic,
+				struct snd_queue *sq, int q_len)
+{
+	if (nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len,
+				   SND_QUEUE_DESC_SIZE,
+				   NICVF_SQ_BASE_ALIGN_BYTES)) {
+		printf("Unable to allocate memory for send queue\n");
+		return -1;
+	}
+
+	sq->desc = sq->dmem.base;
+	sq->skbuff = calloc(q_len, sizeof(u64));
+	/* Unchecked, this would be dereferenced later when queuing Tx */
+	if (!sq->skbuff) {
+		printf("Unable to allocate memory for sq skbuff\n");
+		nicvf_free_q_desc_mem(nic, &sq->dmem);
+		return -1;
+	}
+
+	sq->head = 0;
+	sq->tail = 0;
+	sq->free_cnt = q_len - 1;
+	sq->thresh = SND_QUEUE_THRESH;
+
+	return 0;
+}
+
+/* Release a send queue's packet-pointer array and descriptor memory;
+ * a NULL or never allocated queue is a no-op.
+ */
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+	if (!sq || !sq->dmem.base)
+		return;
+
+	debug("%s: %d\n", __func__, __LINE__);
+	free(sq->skbuff);
+
+	nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+/* Stop and reset a send queue: disable it, wait for the stopped status
+ * (bit 21 of SQ STATUS), then issue the reset.
+ */
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+				    struct queue_set *qs, int qidx)
+{
+	/* Disable send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+	/* Check if SQ is stopped */
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+		return;
+	/* Reset send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+/* Quiesce a receive queue by asking the PF to sync the Rx pipeline */
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+				    struct queue_set *qs, int qidx)
+{
+	union nic_mbx mbx = {};
+
+	/* Make sure all packets in the pipeline are written back into mem */
+	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+/* Stop and reset a completion queue */
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+				    struct queue_set *qs, int qidx)
+{
+	/* Disable timer threshold (doesn't get reset upon CQ reset) */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+	/* Disable completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+	/* Reset completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+/* Drain and reset a receive buffer descriptor ring, following the HW
+ * FIFO state machine (bits 63:62 of STATUS0).
+ */
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+			       struct rbdr *rbdr, int qidx)
+{
+	u64 tmp, fifo_state;
+	int timeout = 10;
+
+	/* Save head and tail pointers for freeing up buffers */
+	rbdr->head = nicvf_queue_reg_read(nic,
+					  NIC_QSET_RBDR_0_1_HEAD,
+					  qidx) >> 3;
+	rbdr->tail = nicvf_queue_reg_read(nic,
+					  NIC_QSET_RBDR_0_1_TAIL,
+					  qidx) >> 3;
+
+	/* If RBDR FIFO is in 'FAIL' state then do a reset first
+	 * before reclaiming.
+	 */
+	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+	if (((fifo_state >> 62) & 0x03) == 0x3)
+		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+				      qidx, NICVF_RBDR_RESET);
+
+	/* Disable RBDR */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+		return;
+	/* Wait until prefetch head catches up with the tail half of the
+	 * PREFETCH_STATUS register.
+	 * NOTE(review): mdelay(2000) waits 2s per iteration (20s worst
+	 * case) — confirm this is intended and not meant to be udelay.
+	 */
+	while (1) {
+		tmp = nicvf_queue_reg_read(nic,
+					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+					   qidx);
+		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+			break;
+		mdelay(2000);
+		timeout--;
+		if (!timeout) {
+			printf("Failed polling on prefetch status\n");
+			return;
+		}
+	}
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+			      qidx, NICVF_RBDR_RESET);
+
+	/* Reset -> wait for reset state -> clear -> wait for disabled */
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+		return;
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+		return;
+}
+
+/* Configure (or reclaim) receive queue @qidx: program the PF-side RQ,
+ * backpressure and drop configuration via mailbox, then enable the queue
+ * in the VF register space.
+ */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+				   int qidx, bool enable)
+{
+	union nic_mbx mbx = {};
+	struct rcv_queue *rq;
+	union {
+		struct rq_cfg s;
+		u64    u;
+	} rq_cfg;
+
+	rq = &qs->rq[qidx];
+	rq->enable = enable;
+
+	/* Disable receive queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+	if (!rq->enable) {
+		nicvf_reclaim_rcv_queue(nic, qs, qidx);
+		return;
+	}
+
+	rq->cq_qs = qs->vnic_id;
+	rq->cq_idx = qidx;
+	rq->start_rbdr_qs = qs->vnic_id;
+	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+	rq->cont_rbdr_qs = qs->vnic_id;
+	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+	/* all writes of RBDR data to be loaded into L2 Cache as well*/
+	rq->caching = 1;
+
+	/* Send a mailbox msg to PF to config RQ */
+	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+	mbx.rq.qs_num = qs->vnic_id;
+	mbx.rq.rq_num = qidx;
+	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+			  (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+			  (rq->cont_qs_rbdr_idx << 8) |
+			  (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	/* RQ drop config
+	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
+	 */
+	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+	nicvf_send_msg_to_pf(nic, &mbx);
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
+
+	/* Enable Receive queue: zero the whole register first so the
+	 * unset bits are not indeterminate stack garbage.
+	 */
+	rq_cfg.u = 0;
+	rq_cfg.s.ena = 1;
+	rq_cfg.s.tcp_ena = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.u);
+}
+
+/* Configure (or reclaim) completion queue @qidx: program its base
+ * address, size and interrupt thresholds in the VF register space.
+ */
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+			    int qidx, bool enable)
+{
+	struct cmp_queue *cq;
+	union {
+		u64 u;
+		struct cq_cfg s;
+	} cq_cfg;
+
+	cq = &qs->cq[qidx];
+	cq->enable = enable;
+
+	if (!cq->enable) {
+		nicvf_reclaim_cmp_queue(nic, qs, qidx);
+		return;
+	}
+
+	/* Reset completion queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+	/* Set completion queue base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+			      qidx, (u64)(cq->dmem.phys_base));
+
+	/* Enable Completion queue: zero the whole register first so the
+	 * unset bits are not indeterminate stack garbage.
+	 */
+	cq_cfg.u = 0;
+	cq_cfg.s.ena = 1;
+	cq_cfg.s.reset = 0;
+	cq_cfg.s.caching = 0;
+	cq_cfg.s.qsize = CMP_QSIZE;
+	cq_cfg.s.avg_con = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.u);
+
+	/* Set threshold value for interrupt generation */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
+			      cq->intr_timer_thresh);
+}
+
+/* Configure (or reclaim) send queue @qidx: notify the PF of the SQ->CQ
+ * binding via mailbox, then program base address, size and threshold in
+ * the VF register space.
+ */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+				   int qidx, bool enable)
+{
+	union nic_mbx mbx = {};
+	struct snd_queue *sq;
+
+	union {
+		struct sq_cfg s;
+		u64 u;
+	} sq_cfg;
+
+	sq = &qs->sq[qidx];
+	sq->enable = enable;
+
+	if (!sq->enable) {
+		nicvf_reclaim_snd_queue(nic, qs, qidx);
+		return;
+	}
+
+	/* Reset send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+	sq->cq_qs = qs->vnic_id;
+	sq->cq_idx = qidx;
+
+	/* Send a mailbox msg to PF to config SQ */
+	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+	mbx.sq.qs_num = qs->vnic_id;
+	mbx.sq.sq_num = qidx;
+	mbx.sq.sqs_mode = nic->sqs_mode;
+	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+	nicvf_send_msg_to_pf(nic, &mbx);
+
+	/* Set queue base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+			      qidx, (u64)(sq->dmem.phys_base));
+
+	/* Enable send queue & set queue size: zero the whole register
+	 * first so the unset bits are not indeterminate stack garbage.
+	 */
+	sq_cfg.u = 0;
+	sq_cfg.s.ena = 1;
+	sq_cfg.s.reset = 0;
+	sq_cfg.s.ldwb = 0;
+	sq_cfg.s.qsize = SND_QSIZE;
+	sq_cfg.s.tstmp_bgx_intf = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.u);
+
+	/* Set threshold value for interrupt generation */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+}
+
+/* Configure (or reclaim) receive buffer descriptor ring @qidx and hand
+ * all its buffers to the hardware via the doorbell.
+ */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+			      int qidx, bool enable)
+{
+	struct rbdr *rbdr;
+	union {
+		struct rbdr_cfg s;
+		u64 u;
+	} rbdr_cfg;
+
+	rbdr = &qs->rbdr[qidx];
+	nicvf_reclaim_rbdr(nic, rbdr, qidx);
+	if (!enable)
+		return;
+
+	/* Set descriptor base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+			      qidx, (u64)(rbdr->dmem.phys_base));
+
+	/* Enable RBDR & set queue size; buffer size is expressed in
+	 * multiples of 128 bytes.  Zero the whole register first so the
+	 * unset bits are not indeterminate stack garbage.
+	 */
+	rbdr_cfg.u = 0;
+	rbdr_cfg.s.ena = 1;
+	rbdr_cfg.s.reset = 0;
+	rbdr_cfg.s.ldwb = 0;
+	rbdr_cfg.s.qsize = RBDR_SIZE;
+	rbdr_cfg.s.avg_con = 0;
+	rbdr_cfg.s.lines = rbdr->dma_size / 128;
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+			      qidx, rbdr_cfg.u);
+
+	/* Notify HW */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+			      qidx, qs->rbdr_len - 1);
+
+	/* Set threshold value for interrupt generation */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+			      qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+	union nic_mbx mbx = {};
+	struct queue_set *qs = nic->qs;
+	struct qs_cfg *qs_cfg;
+
+	if (!qs) {
+		printf("Qset is still not allocated, don't init queues\n");
+		return;
+	}
+
+	qs->enable = enable;
+	qs->vnic_id = nic->vf_id;
+
+	/* Send a mailbox msg to PF to config Qset */
+	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+	mbx.qs.num = qs->vnic_id;
+#ifdef VNIC_MULTI_QSET_SUPPORT
+	mbx.qs.sqs_count = nic->sqs_count;
+#endif
+
+	/* Build the QS_CFG register image in-place inside the message */
+	mbx.qs.cfg = 0;
+	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+	if (qs->enable) {
+		qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+		qs_cfg->be = 1;
+#endif
+		qs_cfg->vnic = qs->vnic_id;
+	}
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+/* Free every queue of the Qset: RBDRs, then CQs, then SQs */
+static void nicvf_free_resources(struct nicvf *nic)
+{
+	struct queue_set *qs = nic->qs;
+	int i;
+
+	/* Free receive buffer descriptor rings */
+	for (i = 0; i < qs->rbdr_cnt; i++)
+		nicvf_free_rbdr(nic, &qs->rbdr[i]);
+
+	/* Free completion queues */
+	for (i = 0; i < qs->cq_cnt; i++)
+		nicvf_free_cmp_queue(nic, &qs->cq[i]);
+
+	/* Free send queues */
+	for (i = 0; i < qs->sq_cnt; i++)
+		nicvf_free_snd_queue(nic, &qs->sq[i]);
+}
+
+/* Allocate every queue of the Qset; on any failure, free whatever was
+ * already allocated and return -1.
+ */
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+	int qidx;
+	struct queue_set *qs = nic->qs;
+
+	/* Alloc receive buffer descriptor ring */
+	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+				    DMA_BUFFER_LEN))
+			goto alloc_fail;
+	}
+
+	/* Alloc send queue */
+	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+			goto alloc_fail;
+	}
+
+	/* Alloc completion queue */
+	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+			goto alloc_fail;
+	}
+
+	return 0;
+alloc_fail:
+	nicvf_free_resources(nic);
+	return -1;
+}
+
+/* Allocate the queue-set bookkeeping structure and fill in the fixed
+ * queue counts and lengths used by this driver.
+ *
+ * Return: 0 on success, -1 if the allocation fails.
+ */
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+	struct queue_set *qs = calloc(1, sizeof(struct queue_set));
+
+	if (!qs)
+		return -1;
+	nic->qs = qs;
+
+	/* Fixed queue counts */
+	qs->rbdr_cnt = RBDR_CNT;
+	qs->rq_cnt = 1;
+	qs->sq_cnt = SND_QUEUE_CNT;
+	qs->cq_cnt = CMP_QUEUE_CNT;
+
+	/* Fixed queue lengths */
+	qs->rbdr_len = RCV_BUF_COUNT;
+	qs->sq_len = SND_QUEUE_LEN;
+	qs->cq_len = CMP_QUEUE_LEN;
+
+	nic->rx_queues = qs->rq_cnt;
+	nic->tx_queues = qs->sq_cnt;
+
+	return 0;
+}
+
+/* Bring the whole Qset up or down.  The enable and disable paths walk
+ * the queue types in different orders on purpose (Rx is quiesced first
+ * on teardown).
+ *
+ * Return: 0 on success, -1 if queue allocation fails on enable.
+ */
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+	bool disable = false;
+	struct queue_set *qs = nic->qs;
+	int qidx;
+
+	if (!qs)
+		return 0;
+
+	if (enable) {
+		if (nicvf_alloc_resources(nic))
+			return -1;
+
+		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+			nicvf_snd_queue_config(nic, qs, qidx, enable);
+		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+			nicvf_cmp_queue_config(nic, qs, qidx, enable);
+		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+			nicvf_rbdr_config(nic, qs, qidx, enable);
+		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+			nicvf_rcv_queue_config(nic, qs, qidx, enable);
+	} else {
+		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+			nicvf_rcv_queue_config(nic, qs, qidx, disable);
+		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+			nicvf_rbdr_config(nic, qs, qidx, disable);
+		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+			nicvf_snd_queue_config(nic, qs, qidx, disable);
+		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+			nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+		nicvf_free_resources(nic);
+	}
+
+	return 0;
+}
+
+/* Reserve @desc_cnt contiguous descriptors from the SQ tail.
+ *
+ * Return: index of the first reserved descriptor.
+ */
+static int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+	int first = sq->tail;
+
+	sq->free_cnt -= desc_cnt;
+	/* Advance the tail, wrapping at the (power-of-two) queue length */
+	sq->tail = (sq->tail + desc_cnt) & (sq->dmem.q_len - 1);
+
+	return first;
+}
+
+/* Return @desc_cnt descriptors to the SQ free pool, advancing the head
+ * with wrap-around at the (power-of-two) queue length.
+ */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+	sq->free_cnt += desc_cnt;
+	sq->head = (sq->head + desc_cnt) & (sq->dmem.q_len - 1);
+}
+
+/* Return the SQ index following @qentry, wrapping at the queue length */
+static int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+	return (qentry + 1) & (sq->dmem.q_len - 1);
+}
+
+/* Re-enable a send queue and kick the hardware to resume processing */
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+	u64 sq_cfg;
+
+	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+	sq_cfg |= NICVF_SQ_EN;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+	/* Ring doorbell so that H/W restarts processing SQEs */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+/* Clear the enable bit in the SQ config register.  Descriptors still
+ * outstanding are not reclaimed here (see nicvf_sq_free_used_descs).
+ */
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+	u64 sq_cfg;
+
+	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+	sq_cfg &= ~NICVF_SQ_EN;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+/* Reclaim SQ descriptors already consumed by hardware: walk the
+ * software head forward until it matches the hardware HEAD register
+ * (>> 4 converts the register value to a 16-byte descriptor index),
+ * releasing a whole packet (header subdesc plus its subdesc_cnt
+ * followers) per HEADER entry; stray non-header entries are released
+ * one at a time.
+ */
+void nicvf_sq_free_used_descs(struct udevice *dev, struct snd_queue *sq,
+			      int qidx)
+{
+	u64 head;
+	struct nicvf *nic = dev_get_priv(dev);
+	struct sq_hdr_subdesc *hdr;
+
+	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+
+	while (sq->head != head) {
+		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+			nicvf_put_sq_desc(sq, 1);
+			continue;
+		}
+		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+	}
+}
+
+/* Get the number of SQ descriptors needed to xmit this skb.
+ * In this driver a packet always takes MIN_SQ_DESC_PER_PKT_XMIT
+ * descriptors (one header + one gather subdescriptor); no TSO or
+ * multi-fragment transmit is supported.
+ */
+static int nicvf_sq_subdesc_required(struct nicvf *nic)
+{
+	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+	return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.  Records the packet
+ * pointer in sq->skbuff[qentry] for later bookkeeping, requests a CQE
+ * on completion, and flushes the descriptor out of the data cache so
+ * the hardware DMA engine sees it.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
+			 int subdesc_cnt, void *pkt, size_t pkt_len)
+{
+	struct sq_hdr_subdesc *hdr;
+
+	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+	sq->skbuff[qentry] = (uintptr_t)pkt;
+
+	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+	/* Enable notification via CQE after processing SQE */
+	hdr->post_cqe = 1;
+	/* No of subdescriptors following this */
+	hdr->subdesc_cnt = subdesc_cnt;
+	hdr->tot_len = pkt_len;
+
+	flush_dcache_range((uintptr_t)hdr,
+			   (uintptr_t)hdr + sizeof(struct sq_hdr_subdesc));
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor.  Points the hardware at one contiguous
+ * segment of packet data ('data'/'size') and flushes the descriptor
+ * for DMA.
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+					       size_t size, uintptr_t data)
+{
+	struct sq_gather_subdesc *gather;
+
+	qentry &= (sq->dmem.q_len - 1);
+	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+	memset(gather, 0, SND_QUEUE_DESC_SIZE);
+	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
+	gather->size = size;
+	gather->addr = data;
+
+	/* NOTE(review): flush length uses sizeof(struct sq_hdr_subdesc);
+	 * both subdescriptor types are SND_QUEUE_DESC_SIZE (16) bytes so
+	 * the range is correct, but sizeof(*gather) would be clearer.
+	 */
+	flush_dcache_range((uintptr_t)gather,
+			   (uintptr_t)gather + sizeof(struct sq_hdr_subdesc));
+}
+
+/* Append a packet to SQ0 for transmission.
+ *
+ * Builds a header + gather subdescriptor pair, flushes the packet data
+ * out of the data cache for DMA, and rings the SQ doorbell with the
+ * number of subdescriptors added.  Returns 1 on success, 0 if there
+ * are not enough free descriptors.
+ */
+int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_size)
+{
+	int subdesc_cnt;
+	int sq_num = 0, qentry;
+	struct queue_set *qs;
+	struct snd_queue *sq;
+
+	qs = nic->qs;
+	sq = &qs->sq[sq_num];
+
+	subdesc_cnt = nicvf_sq_subdesc_required(nic);
+	if (subdesc_cnt > sq->free_cnt)
+		goto append_fail;
+
+	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+	/* Add SQ header subdesc */
+	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+				 pkt, pkt_size);
+
+	/* Add SQ gather subdescs */
+	qentry = nicvf_get_nxt_sqentry(sq, qentry);
+	nicvf_sq_add_gather_subdesc(sq, qentry, pkt_size, (uintptr_t)(pkt));
+
+	flush_dcache_range((uintptr_t)pkt,
+			   (uintptr_t)pkt + pkt_size);
+
+	/* make sure all memory stores are done before ringing doorbell */
+	asm volatile ("dsb sy");
+
+	/* Inform HW to xmit new packet */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+			      sq_num, subdesc_cnt);
+	return 1;
+
+append_fail:
+	printf("Not enough SQ descriptors to xmit pkt\n");
+	return 0;
+}
+
+/* Index of fragment 'i' within the CQE's rb_lens array.  On big-endian
+ * hosts the four 16-bit sizes within each 64-bit word are stored in
+ * reverse order (see rb0_sz..rb3_sz in struct cqe_rx_t), so remap the
+ * index within each group of four; little-endian uses it unchanged.
+ */
+static unsigned int frag_num(unsigned int i)
+{
+#ifdef __BIG_ENDIAN
+	return (i & ~3) + 3 - (i & 3);
+#else
+	return i;
+#endif
+}
+
+/* Reassemble a received packet from its CQE_RX fragments.
+ *
+ * Allocates a fresh buffer, copies every receive-buffer fragment into
+ * it and stores the total payload length in *pkt_len.  Returns the
+ * buffer (owned by the caller, to be released with free()) or NULL on
+ * allocation failure.
+ */
+void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len)
+{
+	int frag;
+	int payload_len = 0, tot_len;
+	void *pkt = NULL, *pkt_buf = NULL, *buffer;
+	struct cqe_rx_t *cqe_rx;
+	struct rbdr *rbdr;
+	struct rcv_queue *rq;
+	struct queue_set *qs = nic->qs;
+	u16 *rb_lens = NULL;
+	u64 *rb_ptrs = NULL;
+
+	cqe_rx = (struct cqe_rx_t *)cq_desc;
+
+	rq = &qs->rq[cqe_rx->rq_idx];
+	rbdr = &qs->rbdr[rq->start_qs_rbdr_idx];
+	/* fragment sizes live in words 3..5 of the CQE */
+	rb_lens = cq_desc + (3 * sizeof(u64)); /* Use offsetof */
+	/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
+	 * CQE_RX at word6, hence buffer pointers move by word
+	 *
+	 * Use existing 'hw_tso' flag which will be set for all chips
+	 * except 88xx pass1 instead of a additional cache line
+	 * access (or miss) by using pci dev's revision.
+	 */
+	if (!nic->hw_tso)
+		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+	else
+		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
+
+	/*
+	 * Figure out packet length to create packet buffer
+	 */
+	for (frag = 0; frag < cqe_rx->rb_cnt; frag++)
+		payload_len += rb_lens[frag_num(frag)];
+	*pkt_len = payload_len;
+	/* round up to a multiple of 8 (adds a full 8 when already aligned) */
+	tot_len = (payload_len & (~0x7)) + 8;
+	buffer = calloc(1, tot_len);
+	if (!buffer) {
+		printf("%s - Failed to allocate packet buffer\n", __func__);
+		return NULL;
+	}
+	pkt_buf = buffer;
+	debug("total pkt buf %p len %ld tot_len %d\n", pkt_buf, *pkt_len,
+	      tot_len);
+	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+		payload_len = rb_lens[frag_num(frag)];
+
+		invalidate_dcache_range((uintptr_t)(*rb_ptrs),
+					(uintptr_t)(*rb_ptrs) + rbdr->dma_size);
+
+		/* First fragment */
+		/* NOTE(review): align_pad is subtracted on every iteration,
+		 * not only for the first fragment, and re-added to 'pkt'
+		 * below — confirm against the kernel nicvf driver.
+		 */
+		*rb_ptrs = *rb_ptrs - cqe_rx->align_pad;
+
+		pkt = nicvf_rb_ptr_to_pkt(nic, *rb_ptrs);
+
+		invalidate_dcache_range((uintptr_t)pkt,
+					(uintptr_t)pkt + payload_len);
+
+		if (cqe_rx->align_pad)
+			pkt += cqe_rx->align_pad;
+		debug("pkt_buf %p, pkt %p payload_len %d\n", pkt_buf, pkt,
+		      payload_len);
+		memcpy(buffer, pkt, payload_len);
+		buffer += payload_len;
+		/* Next buffer pointer */
+		rb_ptrs++;
+	}
+	return pkt_buf;
+}
+
+/* Clear (acknowledge) an interrupt of the given type for queue q_idx
+ * by writing the matching bit to NIC_VF_INT.  On an unknown type an
+ * error is logged and 0 is written (presumably a no-op for a
+ * write-1-to-clear register — confirm against the HRM).
+ */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+	u64 reg_val = 0;
+
+	switch (int_type) {
+	case NICVF_INTR_CQ:
+		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+	break;
+	case NICVF_INTR_SQ:
+		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+	break;
+	case NICVF_INTR_RBDR:
+		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+	break;
+	case NICVF_INTR_PKT_DROP:
+		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+	break;
+	case NICVF_INTR_TCP_TIMER:
+		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+	break;
+	case NICVF_INTR_MBOX:
+		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+	break;
+	case NICVF_INTR_QS_ERR:
+		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+	break;
+	default:
+		printf("Failed to clear interrupt: unknown type\n");
+	break;
+	}
+
+	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+}
+
+/* Refresh the software byte/packet counters of RQ 'rq_idx' from the
+ * hardware NIC_QSET_RQ_0_7_STAT registers (one 8-byte register per
+ * RQ_SQ_STATS index, hence the (reg) << 3 offset).
+ */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+	struct rcv_queue *rq;
+
+#define GET_RQ_STATS(reg) \
+	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+			    (rq_idx << NIC_Q_NUM_SHIFT) | ((reg) << 3))
+
+	rq = &nic->qs->rq[rq_idx];
+	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Refresh the software byte/packet counters of SQ 'sq_idx' from the
+ * hardware NIC_QSET_SQ_0_7_STAT registers (mirror of
+ * nicvf_update_rq_stats for the send side).
+ */
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+	struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+			    (sq_idx << NIC_Q_NUM_SHIFT) | ((reg) << 3))
+
+	sq = &nic->qs->sq[sq_idx];
+	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive cmp.queue entry.
+ *
+ * Returns 0 when the CQE reports no error (good counter bumped);
+ * otherwise bumps the matching per-level (err_level) and per-opcode
+ * (err_opcode) software counters and returns 1.
+ */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+			    struct cmp_queue *cq, void *cq_desc)
+{
+	struct cqe_rx_t *cqe_rx;
+	struct cmp_queue_stats *stats = &cq->stats;
+
+	cqe_rx = (struct cqe_rx_t *)cq_desc;
+	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+		stats->rx.errop.good++;
+		return 0;
+	}
+
+	switch (cqe_rx->err_level) {
+	case CQ_ERRLVL_MAC:
+		stats->rx.errlvl.mac_errs++;
+	break;
+	case CQ_ERRLVL_L2:
+		stats->rx.errlvl.l2_errs++;
+	break;
+	case CQ_ERRLVL_L3:
+		stats->rx.errlvl.l3_errs++;
+	break;
+	case CQ_ERRLVL_L4:
+		stats->rx.errlvl.l4_errs++;
+	break;
+	}
+
+	switch (cqe_rx->err_opcode) {
+	case CQ_RX_ERROP_RE_PARTIAL:
+		stats->rx.errop.partial_pkts++;
+	break;
+	case CQ_RX_ERROP_RE_JABBER:
+		stats->rx.errop.jabber_errs++;
+	break;
+	case CQ_RX_ERROP_RE_FCS:
+		stats->rx.errop.fcs_errs++;
+	break;
+	case CQ_RX_ERROP_RE_TERMINATE:
+		stats->rx.errop.terminate_errs++;
+	break;
+	case CQ_RX_ERROP_RE_RX_CTL:
+		stats->rx.errop.bgx_rx_errs++;
+	break;
+	case CQ_RX_ERROP_PREL2_ERR:
+		stats->rx.errop.prel2_errs++;
+	break;
+	case CQ_RX_ERROP_L2_FRAGMENT:
+		stats->rx.errop.l2_frags++;
+	break;
+	case CQ_RX_ERROP_L2_OVERRUN:
+		stats->rx.errop.l2_overruns++;
+	break;
+	case CQ_RX_ERROP_L2_PFCS:
+		stats->rx.errop.l2_pfcs++;
+	break;
+	case CQ_RX_ERROP_L2_PUNY:
+		stats->rx.errop.l2_puny++;
+	break;
+	case CQ_RX_ERROP_L2_MAL:
+		stats->rx.errop.l2_hdr_malformed++;
+	break;
+	case CQ_RX_ERROP_L2_OVERSIZE:
+		stats->rx.errop.l2_oversize++;
+	break;
+	case CQ_RX_ERROP_L2_UNDERSIZE:
+		stats->rx.errop.l2_undersize++;
+	break;
+	case CQ_RX_ERROP_L2_LENMISM:
+		stats->rx.errop.l2_len_mismatch++;
+	break;
+	case CQ_RX_ERROP_L2_PCLP:
+		stats->rx.errop.l2_pclp++;
+	break;
+	case CQ_RX_ERROP_IP_NOT:
+		stats->rx.errop.non_ip++;
+	break;
+	case CQ_RX_ERROP_IP_CSUM_ERR:
+		stats->rx.errop.ip_csum_err++;
+	break;
+	case CQ_RX_ERROP_IP_MAL:
+		stats->rx.errop.ip_hdr_malformed++;
+	break;
+	case CQ_RX_ERROP_IP_MALD:
+		stats->rx.errop.ip_payload_malformed++;
+	break;
+	case CQ_RX_ERROP_IP_HOP:
+		stats->rx.errop.ip_hop_errs++;
+	break;
+	case CQ_RX_ERROP_L3_ICRC:
+		stats->rx.errop.l3_icrc_errs++;
+	break;
+	case CQ_RX_ERROP_L3_PCLP:
+		stats->rx.errop.l3_pclp++;
+	break;
+	case CQ_RX_ERROP_L4_MAL:
+		stats->rx.errop.l4_malformed++;
+	break;
+	case CQ_RX_ERROP_L4_CHK:
+		stats->rx.errop.l4_csum_errs++;
+	break;
+	case CQ_RX_ERROP_UDP_LEN:
+		stats->rx.errop.udp_len_err++;
+	break;
+	case CQ_RX_ERROP_L4_PORT:
+		stats->rx.errop.bad_l4_port++;
+	break;
+	case CQ_RX_ERROP_TCP_FLAG:
+		stats->rx.errop.bad_tcp_flag++;
+	break;
+	case CQ_RX_ERROP_TCP_OFFSET:
+		stats->rx.errop.tcp_offset_errs++;
+	break;
+	case CQ_RX_ERROP_L4_PCLP:
+		stats->rx.errop.l4_pclp++;
+	break;
+	case CQ_RX_ERROP_RBDR_TRUNC:
+		stats->rx.errop.pkt_truncated++;
+	break;
+	}
+
+	return 1;
+}
+
+/* Check for errors in the send cmp.queue entry.
+ *
+ * Returns 0 for CQ_TX_ERROP_GOOD (good counter bumped); otherwise
+ * bumps the software counter matching send_status and returns 1.
+ */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+			    struct cmp_queue *cq, void *cq_desc)
+{
+	struct cqe_send_t *cqe_tx;
+	struct cmp_queue_stats *stats = &cq->stats;
+
+	cqe_tx = (struct cqe_send_t *)cq_desc;
+	switch (cqe_tx->send_status) {
+	case CQ_TX_ERROP_GOOD:
+		stats->tx.good++;
+		return 0;
+	break;	/* not reached */
+	case CQ_TX_ERROP_DESC_FAULT:
+		stats->tx.desc_fault++;
+	break;
+	case CQ_TX_ERROP_HDR_CONS_ERR:
+		stats->tx.hdr_cons_err++;
+	break;
+	case CQ_TX_ERROP_SUBDC_ERR:
+		stats->tx.subdesc_err++;
+	break;
+	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+		stats->tx.imm_size_oflow++;
+	break;
+	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+		stats->tx.data_seq_err++;
+	break;
+	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+		stats->tx.mem_seq_err++;
+	break;
+	case CQ_TX_ERROP_LOCK_VIOL:
+		stats->tx.lock_viol++;
+	break;
+	case CQ_TX_ERROP_DATA_FAULT:
+		stats->tx.data_fault++;
+	break;
+	case CQ_TX_ERROP_TSTMP_CONFLICT:
+		stats->tx.tstmp_conflict++;
+	break;
+	case CQ_TX_ERROP_TSTMP_TIMEOUT:
+		stats->tx.tstmp_timeout++;
+	break;
+	case CQ_TX_ERROP_MEM_FAULT:
+		stats->tx.mem_fault++;
+	break;
+	case CQ_TX_ERROP_CK_OVERLAP:
+		stats->tx.csum_overlap++;
+	break;
+	case CQ_TX_ERROP_CK_OFLOW:
+		stats->tx.csum_overflow++;
+	break;
+	}
+
+	return 1;
+}
diff --git a/drivers/net/octeontx/nicvf_queues.h b/drivers/net/octeontx/nicvf_queues.h
new file mode 100644
index 0000000..833b2a1
--- /dev/null
+++ b/drivers/net/octeontx/nicvf_queues.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET			128
+#define MAX_RCV_QUEUES_PER_QS		8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
+#define MAX_SND_QUEUES_PER_QS		8
+#define MAX_CMP_QUEUES_PER_QS		8
+
+/* VF's queue interrupt ranges */
+#define	NICVF_INTR_ID_CQ		0
+#define	NICVF_INTR_ID_SQ		8
+#define	NICVF_INTR_ID_RBDR		16
+#define	NICVF_INTR_ID_MISC		18
+#define	NICVF_INTR_ID_QS_ERR		19
+
+#define RBDR_SIZE0		0ULL /* 8K entries */
+#define RBDR_SIZE1		1ULL /* 16K entries */
+#define RBDR_SIZE2		2ULL /* 32K entries */
+#define RBDR_SIZE3		3ULL /* 64K entries */
+#define RBDR_SIZE4		4ULL /* 126K entries */
+#define RBDR_SIZE5		5ULL /* 256K entries */
+#define RBDR_SIZE6		6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6		6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */
+
+/* Default queue count per QS, its lengths and threshold values */
+#define RBDR_CNT			1
+#define RCV_QUEUE_CNT		1
+#define SND_QUEUE_CNT		1
+#define CMP_QUEUE_CNT		1 /* Max of RCV and SND qcount */
+
+#define SND_QSIZE		SND_QUEUE_SIZE0
+#define SND_QUEUE_LEN		BIT_ULL((SND_QSIZE + 10))
+#define SND_QUEUE_THRESH	2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT	2
+#define MAX_CQE_PER_PKT_XMIT		2
+
+#define CMP_QSIZE		CMP_QUEUE_SIZE0
+#define CMP_QUEUE_LEN		BIT_ULL((CMP_QSIZE + 10))
+#define CMP_QUEUE_CQE_THRESH	0
+#define CMP_QUEUE_TIMER_THRESH	1 /* 1 ms */
+
+#define RBDR_SIZE		RBDR_SIZE0
+#define RCV_BUF_COUNT		BIT_ULL((RBDR_SIZE + 13))
+#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
+#define DMA_BUFFER_LEN		2048 /* In multiples of 128bytes */
+#define RCV_FRAG_LEN		DMA_BUFFER_LEN
+
+#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) *\
+				 MAX_CQE_PER_PKT_XMIT)
+#define RQ_CQ_DROP		((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
+
+/* Descriptor size */
+#define SND_QUEUE_DESC_SIZE	16   /* 128 bits */
+#define CMP_QUEUE_DESC_SIZE	512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN		7
+#define NICVF_RCV_BUF_ALIGN_BYTES	BIT_ULL(NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES	512  /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES	128  /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN            BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET		BIT_ULL(41)
+#define NICVF_SQ_RESET		BIT_ULL(17)
+#define NICVF_RBDR_RESET	BIT_ULL(43)
+
+/* Error level reported in CQE_RX[err_level] */
+enum CQ_RX_ERRLVL_E {
+	CQ_ERRLVL_MAC,
+	CQ_ERRLVL_L2,
+	CQ_ERRLVL_L3,
+	CQ_ERRLVL_L4,
+};
+
+/* Error opcode reported in CQE_RX[err_opcode] */
+enum CQ_RX_ERROP_E {
+	CQ_RX_ERROP_RE_NONE = 0x0,
+	CQ_RX_ERROP_RE_PARTIAL = 0x1,
+	CQ_RX_ERROP_RE_JABBER = 0x2,
+	CQ_RX_ERROP_RE_FCS = 0x7,
+	CQ_RX_ERROP_RE_TERMINATE = 0x9,
+	CQ_RX_ERROP_RE_RX_CTL = 0xb,
+	CQ_RX_ERROP_PREL2_ERR = 0x1f,
+	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+	CQ_RX_ERROP_L2_OVERRUN = 0x21,
+	CQ_RX_ERROP_L2_PFCS = 0x22,
+	CQ_RX_ERROP_L2_PUNY = 0x23,
+	CQ_RX_ERROP_L2_MAL = 0x24,
+	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+	CQ_RX_ERROP_L2_LENMISM = 0x27,
+	CQ_RX_ERROP_L2_PCLP = 0x28,
+	CQ_RX_ERROP_IP_NOT = 0x41,
+	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+	CQ_RX_ERROP_IP_MAL = 0x43,
+	CQ_RX_ERROP_IP_MALD = 0x44,
+	CQ_RX_ERROP_IP_HOP = 0x45,
+	CQ_RX_ERROP_L3_ICRC = 0x46,
+	CQ_RX_ERROP_L3_PCLP = 0x47,
+	CQ_RX_ERROP_L4_MAL = 0x61,
+	CQ_RX_ERROP_L4_CHK = 0x62,
+	CQ_RX_ERROP_UDP_LEN = 0x63,
+	CQ_RX_ERROP_L4_PORT = 0x64,
+	CQ_RX_ERROP_TCP_FLAG = 0x65,
+	CQ_RX_ERROP_TCP_OFFSET = 0x66,
+	CQ_RX_ERROP_L4_PCLP = 0x67,
+	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+/* Send completion status reported in CQE_SEND[send_status] */
+enum CQ_TX_ERROP_E {
+	CQ_TX_ERROP_GOOD = 0x0,
+	CQ_TX_ERROP_DESC_FAULT = 0x10,
+	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+	CQ_TX_ERROP_SUBDC_ERR = 0x12,
+	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+	CQ_TX_ERROP_LOCK_VIOL = 0x83,
+	CQ_TX_ERROP_DATA_FAULT = 0x84,
+	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+	CQ_TX_ERROP_MEM_FAULT = 0x87,
+	CQ_TX_ERROP_CK_OVERLAP = 0x88,
+	CQ_TX_ERROP_CK_OFLOW = 0x89,
+	CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+/* Software counters maintained by nicvf_check_cqe_rx_errs() /
+ * nicvf_check_cqe_tx_errs(), keyed by CQ_RX_ERROP_E and CQ_TX_ERROP_E.
+ */
+struct cmp_queue_stats {
+	struct rx_stats {
+		struct {
+			u64 mac_errs;
+			u64 l2_errs;
+			u64 l3_errs;
+			u64 l4_errs;
+		} errlvl;
+		struct {
+			u64 good;
+			u64 partial_pkts;
+			u64 jabber_errs;
+			u64 fcs_errs;
+			u64 terminate_errs;
+			u64 bgx_rx_errs;
+			u64 prel2_errs;
+			u64 l2_frags;
+			u64 l2_overruns;
+			u64 l2_pfcs;
+			u64 l2_puny;
+			u64 l2_hdr_malformed;
+			u64 l2_oversize;
+			u64 l2_undersize;
+			u64 l2_len_mismatch;
+			u64 l2_pclp;
+			u64 non_ip;
+			u64 ip_csum_err;
+			u64 ip_hdr_malformed;
+			u64 ip_payload_malformed;
+			u64 ip_hop_errs;
+			u64 l3_icrc_errs;
+			u64 l3_pclp;
+			u64 l4_malformed;
+			u64 l4_csum_errs;
+			u64 udp_len_err;
+			u64 bad_l4_port;
+			u64 bad_tcp_flag;
+			u64 tcp_offset_errs;
+			u64 l4_pclp;
+			u64 pkt_truncated;
+		} errop;
+	} rx;
+	struct tx_stats {
+		u64 good;
+		u64 desc_fault;
+		u64 hdr_cons_err;
+		u64 subdesc_err;
+		u64 imm_size_oflow;
+		u64 data_seq_err;
+		u64 mem_seq_err;
+		u64 lock_viol;
+		u64 data_fault;
+		u64 tstmp_conflict;
+		u64 tstmp_timeout;
+		u64 mem_fault;
+		u64 csum_overlap;
+		u64 csum_overflow;
+	} tx;
+};
+
+/* Counter index within the RQ/SQ hardware stats registers; used as
+ * ((reg) << 3) by GET_RQ_STATS/GET_SQ_STATS.
+ */
+enum RQ_SQ_STATS {
+	RQ_SQ_STATS_OCTS,
+	RQ_SQ_STATS_PKTS,
+};
+
+/* Per-queue byte/packet counters mirrored from hardware */
+struct rx_tx_queue_stats {
+	u64	bytes;
+	u64	pkts;
+};
+
+/* Backing memory bookkeeping for one descriptor ring */
+struct q_desc_mem {
+	uintptr_t	dma;
+	u64	size;
+	u16	q_len;
+	uintptr_t	phys_base;
+	void		*base;
+	void		*unalign_base;
+	bool		allocated;
+};
+
+/* Receive buffer descriptor ring */
+struct rbdr {
+	bool		enable;
+	u32	dma_size;
+	u32	thresh;      /* Threshold level for interrupt */
+	void		*desc;
+	u32	head;
+	u32	tail;
+	struct		q_desc_mem   dmem;
+	uintptr_t	buf_mem;
+	uintptr_t	buffers;
+};
+
+/* Receive queue */
+struct rcv_queue {
+	bool		enable;
+	struct	rbdr	*rbdr_start;
+	struct	rbdr	*rbdr_cont;
+	bool		en_tcp_reassembly;
+	u8		cq_qs;  /* CQ's QS to which this RQ is assigned */
+	u8		cq_idx; /* CQ index (0 to 7) in the QS */
+	u8		cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
+	u8		cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
+	u8		start_rbdr_qs;     /* First buffer ptrs - QS num */
+	u8		start_qs_rbdr_idx; /* RBDR idx in the above QS */
+	u8         caching;
+	struct		rx_tx_queue_stats stats;
+};
+
+/* Completion queue */
+struct cmp_queue {
+	bool		enable;
+	u16	intr_timer_thresh;
+	u16	thresh;
+	void		*desc;
+	struct q_desc_mem   dmem;
+	struct cmp_queue_stats	stats;
+};
+
+/* Send queue */
+struct snd_queue {
+	bool		enable;
+	u8		cq_qs;  /* CQ's QS to which this SQ is pointing */
+	u8		cq_idx; /* CQ index (0 to 7) in the above QS */
+	u16	thresh;
+	u32	free_cnt;
+	u32	head;
+	u32	tail;
+	u64	*skbuff;
+	void		*desc;
+	struct q_desc_mem   dmem;
+	struct rx_tx_queue_stats stats;
+};
+
+/* One VF queue set: its RQs, CQs, SQs and RBDRs plus their counts */
+struct queue_set {
+	bool		enable;
+	bool		be_en;
+	u8		vnic_id;
+	u8		rq_cnt;
+	u8		cq_cnt;
+	u64	cq_len;
+	u8		sq_cnt;
+	u64	sq_len;
+	u8		rbdr_cnt;
+	u64	rbdr_len;
+	struct	rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
+	struct	cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
+	struct	snd_queue	sq[MAX_SND_QUEUES_PER_QS];
+	struct	rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+};
+
+#define GET_RBDR_DESC(RING, idx)\
+		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+		(&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define	CQ_WR_FULL	BIT(26)
+#define	CQ_WR_DISABLE	BIT(25)
+#define	CQ_WR_FAULT	BIT(24)
+#define	CQ_CQE_COUNT	(0xFFFF << 0)
+
+#define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+			    int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct udevice *dev,
+			      struct snd_queue *sq, int qidx);
+int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_len);
+
+void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len);
+void nicvf_refill_rbdr(struct nicvf *nic);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+			   u64 qidx, u64 val);
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+			    struct cmp_queue *cq, void *cq_desc);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+			    struct cmp_queue *cq, void *cq_desc);
+#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/octeontx/q_struct.h b/drivers/net/octeontx/q_struct.h
new file mode 100644
index 0000000..87abb13
--- /dev/null
+++ b/drivers/net/octeontx/q_struct.h
@@ -0,0 +1,695 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+	NIC_SEND_LD_TYPE_E_LDD = 0x0,
+	NIC_SEND_LD_TYPE_E_LDT = 0x1,
+	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+/* EtherType parsing actions — values per the NIC HRM, TODO confirm */
+enum ether_type_algorithm {
+	ETYPE_ALG_NONE = 0x0,
+	ETYPE_ALG_SKIP = 0x1,
+	ETYPE_ALG_ENDPARSE = 0x2,
+	ETYPE_ALG_VLAN = 0x3,
+	ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+/* L3 protocol type as classified by the hardware parser */
+enum layer3_type {
+	L3TYPE_NONE = 0x00,
+	L3TYPE_GRH = 0x01,
+	L3TYPE_IPV4 = 0x04,
+	L3TYPE_IPV4_OPTIONS = 0x05,
+	L3TYPE_IPV6 = 0x06,
+	L3TYPE_IPV6_OPTIONS = 0x07,
+	L3TYPE_ET_STOP = 0x0D,
+	L3TYPE_OTHER = 0x0E,
+};
+
+/* L4 protocol type as classified by the hardware parser */
+enum layer4_type {
+	L4TYPE_NONE = 0x00,
+	L4TYPE_IPSEC_ESP = 0x01,
+	L4TYPE_IPFRAG = 0x02,
+	L4TYPE_IPCOMP = 0x03,
+	L4TYPE_TCP = 0x04,
+	L4TYPE_UDP = 0x05,
+	L4TYPE_SCTP = 0x06,
+	L4TYPE_GRE = 0x07,
+	L4TYPE_ROCE_BTH = 0x08,
+	L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+	CPI_ALG_NONE = 0x0,
+	CPI_ALG_VLAN = 0x1,
+	CPI_ALG_VLAN16 = 0x2,
+	CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+	RSS_ALG_NONE = 0x00,
+	RSS_ALG_PORT = 0x01,
+	RSS_ALG_IP = 0x02,
+	RSS_ALG_TCP_IP = 0x03,
+	RSS_ALG_UDP_IP = 0x04,
+	RSS_ALG_SCTP_IP = 0x05,
+	RSS_ALG_GRE_IP = 0x06,
+	RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+	RSS_HASH_L2ETC = 0x00,
+	RSS_HASH_IP = 0x01,
+	RSS_HASH_TCP = 0x02,
+	RSS_TCP_SYN_DIS = 0x03,
+	RSS_HASH_UDP = 0x04,
+	RSS_HASH_L4ETC = 0x05,
+	RSS_HASH_ROCE = 0x06,
+	RSS_L3_BIDI = 0x07,
+	RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+	CQE_TYPE_INVALID = 0x0,
+	CQE_TYPE_RX = 0x2,
+	CQE_TYPE_RX_SPLIT = 0x3,
+	CQE_TYPE_RX_TCP = 0x4,
+	CQE_TYPE_SEND = 0x8,
+	CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+	CQE_SEND_STATUS_GOOD = 0x00,
+	CQE_SEND_STATUS_DESC_FAULT = 0x01,
+	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+	CQE_SEND_STATUS_DATA_FAULT = 0x86,
+	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+	CQE_SEND_STATUS_MEM_FAULT = 0x89,
+	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+	CQE_RX_TCP_END_INVALID_FLAG = 1,
+	CQE_RX_TCP_END_TIMEOUT = 2,
+	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+	CQE_RX_TCP_END_PKT_ERR = 4,
+	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+	CQE_RX_ERRLVL_RE = 0x0,
+	CQE_RX_ERRLVL_L2 = 0x1,
+	CQE_RX_ERRLVL_L3 = 0x2,
+	CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+	CQE_RX_ERR_RE_NONE = 0x0,
+	CQE_RX_ERR_RE_PARTIAL = 0x1,
+	CQE_RX_ERR_RE_JABBER = 0x2,
+	CQE_RX_ERR_RE_FCS = 0x7,
+	CQE_RX_ERR_RE_TERMINATE = 0x9,
+	CQE_RX_ERR_RE_RX_CTL = 0xb,
+	CQE_RX_ERR_PREL2_ERR = 0x1f,
+	CQE_RX_ERR_L2_FRAGMENT = 0x20,
+	CQE_RX_ERR_L2_OVERRUN = 0x21,
+	CQE_RX_ERR_L2_PFCS = 0x22,
+	CQE_RX_ERR_L2_PUNY = 0x23,
+	CQE_RX_ERR_L2_MAL = 0x24,
+	CQE_RX_ERR_L2_OVERSIZE = 0x25,
+	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+	CQE_RX_ERR_L2_LENMISM = 0x27,
+	CQE_RX_ERR_L2_PCLP = 0x28,
+	CQE_RX_ERR_IP_NOT = 0x41,
+	CQE_RX_ERR_IP_CHK = 0x42,
+	CQE_RX_ERR_IP_MAL = 0x43,
+	CQE_RX_ERR_IP_MALD = 0x44,
+	CQE_RX_ERR_IP_HOP = 0x45,
+	CQE_RX_ERR_L3_ICRC = 0x46,
+	CQE_RX_ERR_L3_PCLP = 0x47,
+	CQE_RX_ERR_L4_MAL = 0x61,
+	CQE_RX_ERR_L4_CHK = 0x62,
+	CQE_RX_ERR_UDP_LEN = 0x63,
+	CQE_RX_ERR_L4_PORT = 0x64,
+	CQE_RX_ERR_TCP_FLAG = 0x65,
+	CQE_RX_ERR_TCP_OFFSET = 0x66,
+	CQE_RX_ERR_L4_PCLP = 0x67,
+	CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+/* CQE_RX: receive completion entry.  Words W0-W5 carry the parse
+ * results and the per-fragment sizes (rb0_sz..rb11_sz, read via
+ * frag_num()); they are followed by up to twelve 64-bit receive
+ * buffer pointers.  Bitfield order flips with host endianness.
+ */
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   cqe_type:4; /* W0 */
+	u64   stdn_fault:1;
+	u64   rsvd0:1;
+	u64   rq_qs:7;
+	u64   rq_idx:3;
+	u64   rsvd1:12;
+	u64   rss_alg:4;
+	u64   rsvd2:4;
+	u64   rb_cnt:4;
+	u64   vlan_found:1;
+	u64   vlan_stripped:1;
+	u64   vlan2_found:1;
+	u64   vlan2_stripped:1;
+	u64   l4_type:4;
+	u64   l3_type:4;
+	u64   l2_present:1;
+	u64   err_level:3;
+	u64   err_opcode:8;
+
+	u64   pkt_len:16; /* W1 */
+	u64   l2_ptr:8;
+	u64   l3_ptr:8;
+	u64   l4_ptr:8;
+	u64   cq_pkt_len:8;
+	u64   align_pad:3;
+	u64   rsvd3:1;
+	u64   chan:12;
+
+	u64   rss_tag:32; /* W2 */
+	u64   vlan_tci:16;
+	u64   vlan_ptr:8;
+	u64   vlan2_ptr:8;
+
+	u64   rb3_sz:16; /* W3 */
+	u64   rb2_sz:16;
+	u64   rb1_sz:16;
+	u64   rb0_sz:16;
+
+	u64   rb7_sz:16; /* W4 */
+	u64   rb6_sz:16;
+	u64   rb5_sz:16;
+	u64   rb4_sz:16;
+
+	u64   rb11_sz:16; /* W5 */
+	u64   rb10_sz:16;
+	u64   rb9_sz:16;
+	u64   rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   err_opcode:8;
+	u64   err_level:3;
+	u64   l2_present:1;
+	u64   l3_type:4;
+	u64   l4_type:4;
+	u64   vlan2_stripped:1;
+	u64   vlan2_found:1;
+	u64   vlan_stripped:1;
+	u64   vlan_found:1;
+	u64   rb_cnt:4;
+	u64   rsvd2:4;
+	u64   rss_alg:4;
+	u64   rsvd1:12;
+	u64   rq_idx:3;
+	u64   rq_qs:7;
+	u64   rsvd0:1;
+	u64   stdn_fault:1;
+	u64   cqe_type:4; /* W0 */
+	u64   chan:12;
+	u64   rsvd3:1;
+	u64   align_pad:3;
+	u64   cq_pkt_len:8;
+	u64   l4_ptr:8;
+	u64   l3_ptr:8;
+	u64   l2_ptr:8;
+	u64   pkt_len:16; /* W1 */
+	u64   vlan2_ptr:8;
+	u64   vlan_ptr:8;
+	u64   vlan_tci:16;
+	u64   rss_tag:32; /* W2 */
+	u64   rb0_sz:16;
+	u64   rb1_sz:16;
+	u64   rb2_sz:16;
+	u64   rb3_sz:16; /* W3 */
+	u64   rb4_sz:16;
+	u64   rb5_sz:16;
+	u64   rb6_sz:16;
+	u64   rb7_sz:16; /* W4 */
+	u64   rb8_sz:16;
+	u64   rb9_sz:16;
+	u64   rb10_sz:16;
+	u64   rb11_sz:16; /* W5 */
+#endif
+	u64   rb0_ptr:64;
+	u64   rb1_ptr:64;
+	u64   rb2_ptr:64;
+	u64   rb3_ptr:64;
+	u64   rb4_ptr:64;
+	u64   rb5_ptr:64;
+	u64   rb6_ptr:64;
+	u64   rb7_ptr:64;
+	u64   rb8_ptr:64;
+	u64   rb9_ptr:64;
+	u64   rb10_ptr:64;
+	u64   rb11_ptr:64;
+};
+
+/* CQE for a TCP reassembly error completion */
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   cqe_type:4; /* W0 */
+	u64   rsvd0:60;
+
+	u64   rsvd1:4; /* W1 */
+	u64   partial_first:1;
+	u64   rsvd2:27;
+	u64   rbdr_bytes:8;
+	u64   rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   rsvd0:60;
+	u64   cqe_type:4;
+
+	u64   rsvd3:24;
+	u64   rbdr_bytes:8;
+	u64   rsvd2:27;
+	u64   partial_first:1;
+	u64   rsvd1:4;
+#endif
+};
+
+/* CQE for a TCP reassembly completion */
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   cqe_type:4; /* W0 */
+	u64   rsvd0:52;
+	u64   cq_tcp_status:8;
+
+	u64   rsvd1:32; /* W1 */
+	u64   tcp_cntx_bytes:8;
+	u64   rsvd2:8;
+	u64   tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   cq_tcp_status:8;
+	u64   rsvd0:52;
+	u64   cqe_type:4; /* W0 */
+
+	u64   tcp_err_bytes:16;
+	u64   rsvd2:8;
+	u64   tcp_cntx_bytes:8;
+	u64   rsvd1:32; /* W1 */
+#endif
+};
+
+/* CQE_SEND: send completion; send_status carries a CQ_TX_ERROP_E code
+ * (see nicvf_check_cqe_tx_errs()).
+ */
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   cqe_type:4; /* W0 */
+	u64   rsvd0:4;
+	u64   sqe_ptr:16;
+	u64   rsvd1:4;
+	u64   rsvd2:10;
+	u64   sq_qs:7;
+	u64   sq_idx:3;
+	u64   rsvd3:8;
+	u64   send_status:8;
+
+	u64   ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   send_status:8;
+	u64   rsvd3:8;
+	u64   sq_idx:3;
+	u64   sq_qs:7;
+	u64   rsvd2:10;
+	u64   rsvd1:4;
+	u64   sqe_ptr:16;
+	u64   rsvd0:4;
+	u64   cqe_type:4; /* W0 */
+
+	u64   ptp_timestamp:64; /* W1 */
+#endif
+};
+
+/* One 512-byte completion queue descriptor viewed as any CQE layout */
+union cq_desc_t {
+	u64 u[64];
+	struct cqe_send_t snd_hdr;
+	struct cqe_rx_t rx_hdr;
+	struct cqe_rx_tcp_t rx_tcp_hdr;
+	struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
+
+/* RBDR ring entry: receive buffer address with the low 7 bits
+ * (128-byte cache-line alignment) split off into cache_align.
+ */
+struct rbdr_entry_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   rsvd0:15;
+	u64   buf_addr:42;
+	u64   cache_align:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   cache_align:7;
+	u64   buf_addr:42;
+	u64   rsvd0:15;
+#endif
+};
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64   tcp_pkt_cnt:12;
+	u64   rsvd1:4;
+	u64   align_hdr_bytes:4;
+	u64   align_ptr_bytes:4;
+	u64   ptr_bytes:16;
+	u64   rsvd2:24;
+	u64   cqe_type:4;
+	u64   rsvd0:54;
+	u64   tcp_end_reason:2;
+	u64   tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64   tcp_status:4;
+	u64   tcp_end_reason:2;
+	u64   rsvd0:54;
+	u64   cqe_type:4;
+	u64   rsvd2:24;
+	u64   ptr_bytes:16;
+	u64   align_ptr_bytes:4;
+	u64   align_hdr_bytes:4;
+	u64   rsvd1:4;
+	u64   tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+	u64   opaque:32;
+	u64   rss_flow:8;
+	u64   skip_length:6;
+	u64   disable_rss:1;
+	u64   disable_tcp_reassembly:1;
+	u64   nodrop:1;
+	u64   dest_alg:2;
+	u64   rsvd0:2;
+	u64   dest_rq:11;
+};
+
+enum send_l4_csum_type {
+	SEND_L4_CSUM_DISABLE = 0x00,
+	SEND_L4_CSUM_UDP = 0x01,
+	SEND_L4_CSUM_TCP = 0x02,
+	SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+	SEND_CRCALG_CRC32 = 0x00,
+	SEND_CRCALG_CRC32C = 0x01,
+	SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+	SEND_LD_TYPE_LDD = 0x00,
+	SEND_LD_TYPE_LDT = 0x01,
+	SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+	SEND_MEMALG_SET = 0x00,
+	SEND_MEMALG_ADD = 0x08,
+	SEND_MEMALG_SUB = 0x09,
+	SEND_MEMALG_ADDLEN = 0x0A,
+	SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+	SEND_MEMDSZ_B64 = 0x00,
+	SEND_MEMDSZ_B32 = 0x01,
+	SEND_MEMDSZ_B8 = 0x03,
+};
+
+/* SQ subdescriptor type field values (see GET_SQ_DESC users) */
+enum sq_subdesc_type {
+	SQ_DESC_TYPE_INVALID = 0x00,
+	SQ_DESC_TYPE_HEADER = 0x01,
+	SQ_DESC_TYPE_CRC = 0x02,
+	SQ_DESC_TYPE_IMMEDIATE = 0x03,
+	SQ_DESC_TYPE_GATHER = 0x04,
+	SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+/* SQ CRC subdescriptor */
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    rsvd1:32;
+	u64    crc_ival:32;
+	u64    subdesc_type:4;
+	u64    crc_alg:2;
+	u64    rsvd0:10;
+	u64    crc_insert_pos:16;
+	u64    hdr_start:16;
+	u64    crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    crc_len:16;
+	u64    hdr_start:16;
+	u64    crc_insert_pos:16;
+	u64    rsvd0:10;
+	u64    crc_alg:2;
+	u64    subdesc_type:4;
+	u64    crc_ival:32;
+	u64    rsvd1:32;
+#endif
+};
+
+/* SQ gather subdescriptor: points at one segment of packet data */
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    subdesc_type:4; /* W0 */
+	u64    ld_type:2;
+	u64    rsvd0:42;
+	u64    size:16;
+
+	u64    rsvd1:15; /* W1 */
+	u64    addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    size:16;
+	u64    rsvd0:42;
+	u64    ld_type:2;
+	u64    subdesc_type:4; /* W0 */
+
+	u64    addr:49;
+	u64    rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    subdesc_type:4; /* W0 */
+	u64    rsvd0:46;
+	u64    len:14;
+
+	u64    data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    len:14;
+	u64    rsvd0:46;
+	u64    subdesc_type:4; /* W0 */
+
+	u64    data:64; /* W1 */
+#endif
+};
+
+/* SQ memory subdescriptor */
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    subdesc_type:4; /* W0 */
+	u64    mem_alg:4;
+	u64    mem_dsz:2;
+	u64    wmem:1;
+	u64    rsvd0:21;
+	u64    offset:32;
+
+	u64    rsvd1:15; /* W1 */
+	u64    addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    offset:32;
+	u64    rsvd0:21;
+	u64    wmem:1;
+	u64    mem_dsz:2;
+	u64    mem_alg:4;
+	u64    subdesc_type:4; /* W0 */
+
+	u64    addr:49;
+	u64    rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ header subdescriptor: per-packet send controls — checksum offload,
+ * TSO parameters, subdescriptor count and total packet length.
+ */
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64    subdesc_type:4;
+	u64    tso:1;
+	u64    post_cqe:1; /* Post CQE on no error also */
+	u64    dont_send:1;
+	u64    tstmp:1;
+	u64    subdesc_cnt:8;
+	u64    csum_l4:2;
+	u64    csum_l3:1;
+	u64    rsvd0:5;
+	u64    l4_offset:8;
+	u64    l3_offset:8;
+	u64    rsvd1:4;
+	u64    tot_len:20; /* W0 */
+
+	u64    tso_sdc_cont:8;
+	u64    tso_sdc_first:8;
+	u64    tso_l4_offset:8;
+	u64    tso_flags_last:12;
+	u64    tso_flags_first:12;
+	u64    rsvd2:2;
+	u64    tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64    tot_len:20;
+	u64    rsvd1:4;
+	u64    l3_offset:8;
+	u64    l4_offset:8;
+	u64    rsvd0:5;
+	u64    csum_l3:1;
+	u64    csum_l4:2;
+	u64    subdesc_cnt:8;
+	u64    tstmp:1;
+	u64    dont_send:1;
+	u64    post_cqe:1; /* Post CQE on no error also */
+	u64    tso:1;
+	u64    subdesc_type:4; /* W0 */
+
+	u64    tso_max_paysize:14;
+	u64    rsvd2:2;
+	u64    tso_flags_first:12;
+	u64    tso_flags_last:12;
+	u64    tso_l4_offset:8;
+	u64    tso_sdc_first:8;
+	u64    tso_sdc_cont:8; /* W1 */
+#endif
+};
+
+/* Queue config register formats. Field names encode the register bit
+ * positions (e.g. reserved_2_63 covers bits 63:2).
+ */
+
+/* Receive queue configuration */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_2_63:62;
+	u64 ena:1;
+	u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 tcp_ena:1;
+	u64 ena:1;
+	u64 reserved_2_63:62;
+#endif
+};
+
+/* Completion queue configuration */
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_43_63:21;
+	u64 ena:1;
+	u64 reset:1;
+	u64 caching:1;
+	u64 reserved_35_39:5;
+	u64 qsize:3;
+	u64 reserved_25_31:7;
+	u64 avg_con:9;
+	u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 reserved_0_15:16;
+	u64 avg_con:9;
+	u64 reserved_25_31:7;
+	u64 qsize:3;
+	u64 reserved_35_39:5;
+	u64 caching:1;
+	u64 reset:1;
+	u64 ena:1;
+	u64 reserved_43_63:21;
+#endif
+};
+
+/* Send queue configuration */
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_20_63:44;
+	u64 ena:1;
+	u64 reserved_18_18:1;
+	u64 reset:1;
+	u64 ldwb:1;
+	u64 reserved_11_15:5;
+	u64 qsize:3;
+	u64 reserved_3_7:5;
+	u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 tstmp_bgx_intf:3;
+	u64 reserved_3_7:5;
+	u64 qsize:3;
+	u64 reserved_11_15:5;
+	u64 ldwb:1;
+	u64 reset:1;
+	u64 reserved_18_18:1;
+	u64 ena:1;
+	u64 reserved_20_63:44;
+#endif
+};
+
+/* Receive buffer descriptor ring configuration */
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_45_63:19;
+	u64 ena:1;
+	u64 reset:1;
+	u64 ldwb:1;
+	u64 reserved_36_41:6;
+	u64 qsize:4;
+	u64 reserved_25_31:7;
+	u64 avg_con:9;
+	u64 reserved_12_15:4;
+	u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 lines:12;
+	u64 reserved_12_15:4;
+	u64 avg_con:9;
+	u64 reserved_25_31:7;
+	u64 qsize:4;
+	u64 reserved_36_41:6;
+	u64 ldwb:1;
+	u64 reset:1;
+	u64 ena: 1;
+	u64 reserved_45_63:19;
+#endif
+};
+
+/* Queue-set configuration */
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_32_63:32;
+	u64 ena:1;
+	u64 reserved_27_30:4;
+	u64 sq_ins_ena:1;
+	u64 sq_ins_pos:6;
+	u64 lock_ena:1;
+	u64 lock_viol_cqe_ena:1;
+	u64 send_tstmp_ena:1;
+	u64 be:1;
+	u64 reserved_7_15:9;
+	u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+	u64 vnic:7;
+	u64 reserved_7_15:9;
+	u64 be:1;
+	u64 send_tstmp_ena:1;
+	u64 lock_viol_cqe_ena:1;
+	u64 lock_ena:1;
+	u64 sq_ins_pos:6;
+	u64 sq_ins_ena:1;
+	u64 reserved_27_30:4;
+	u64 ena:1;
+	u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
diff --git a/drivers/net/octeontx/smi.c b/drivers/net/octeontx/smi.c
new file mode 100644
index 0000000..8e2c3ca
--- /dev/null
+++ b/drivers/net/octeontx/smi.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <misc.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <phy.h>
+#include <asm/io.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+
+#define PCI_DEVICE_ID_OCTEONTX_SMI 0xA02B
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* MDIO framing mode selected in SMI(x)_CLK.mode */
+enum octeontx_smi_mode {
+	CLAUSE22 = 0,
+	CLAUSE45 = 1,
+};
+
+/* phy_op encodings for SMI(x)_CMD; meaning depends on the active mode */
+enum {
+	SMI_OP_C22_WRITE = 0,
+	SMI_OP_C22_READ = 1,
+
+	SMI_OP_C45_ADDR = 0,
+	SMI_OP_C45_WRITE = 1,
+	SMI_OP_C45_PRIA = 2,
+	SMI_OP_C45_READ = 3,
+};
+
+/* SMI(x)_CLK register layout.
+ * NOTE(review): bitfields are declared as plain (signed) int; values
+ * written here are small, but unsigned would be safer — confirm.
+ */
+union smi_x_clk {
+	u64 u;
+	struct smi_x_clk_s {
+		int phase:8;
+		int sample:4;
+		int preamble:1;
+		int clk_idle:1;
+		int reserved_14_14:1;
+		int sample_mode:1;
+		int sample_hi:5;
+		int reserved_21_23:3;
+		int mode:1;
+	} s;
+};
+
+/* SMI(x)_CMD register layout: target PHY/register and operation */
+union smi_x_cmd {
+	u64 u;
+	struct smi_x_cmd_s {
+		int reg_adr:5;
+		int reserved_5_7:3;
+		int phy_adr:5;
+		int reserved_13_15:3;
+		int phy_op:2;
+	} s;
+};
+
+/* SMI(x)_WR_DAT: write data plus valid/pending status bits */
+union smi_x_wr_dat {
+	u64 u;
+	struct smi_x_wr_dat_s {
+		unsigned int dat:16;
+		int val:1;
+		int pending:1;
+	} s;
+};
+
+/* SMI(x)_RD_DAT: read data plus valid/pending status bits */
+union smi_x_rd_dat {
+	u64 u;
+	struct smi_x_rd_dat_s {
+		unsigned int dat:16;
+		int val:1;
+		int pending:1;
+	} s;
+};
+
+/* SMI(x)_EN: interface enable bit */
+union smi_x_en {
+	u64 u;
+	struct smi_x_en_s {
+		int en:1;
+	} s;
+};
+
+/* Register offsets from the per-bus SMI base address */
+#define SMI_X_RD_DAT	0x10ull
+#define SMI_X_WR_DAT	0x08ull
+#define SMI_X_CMD	0x00ull
+#define SMI_X_CLK	0x18ull
+#define SMI_X_EN	0x20ull
+
+/* Per-MDIO-bus driver state */
+struct octeontx_smi_priv {
+	void __iomem *baseaddr;	/* MMIO base of this SMI instance */
+	enum octeontx_smi_mode mode;	/* currently programmed framing mode */
+};
+
+/* Pending-bit poll budget: loop iterations, not microseconds */
+#define MDIO_TIMEOUT 10000
+
+/* Select clause-22 or clause-45 framing on this SMI bus.
+ * Preamble generation is enabled only for clause-45.
+ */
+void octeontx_smi_setmode(struct mii_dev *bus, enum octeontx_smi_mode mode)
+{
+	struct octeontx_smi_priv *priv = bus->priv;
+	union smi_x_clk smix_clk;
+
+	/* Read-modify-write SMI(x)_CLK to preserve the clock settings */
+	smix_clk.u = readq(priv->baseaddr + SMI_X_CLK);
+	smix_clk.s.mode = mode;
+	smix_clk.s.preamble = mode == CLAUSE45;
+	writeq(smix_clk.u, priv->baseaddr + SMI_X_CLK);
+
+	priv->mode = mode;
+}
+
+/* Clause-45 address phase: latch @regnum into device @devad of the PHY
+ * at @addr, prior to a C45 read or write.
+ *
+ * Return: 0 on success, 1 if the operation never completed (timeout).
+ */
+int octeontx_c45_addr(struct mii_dev *bus, int addr, int devad, int regnum)
+{
+	struct octeontx_smi_priv *priv = bus->priv;
+
+	union smi_x_cmd smix_cmd;
+	union smi_x_wr_dat smix_wr_dat;
+	unsigned long timeout = MDIO_TIMEOUT;
+
+	/* The register number goes out through the write-data register */
+	smix_wr_dat.u = 0;
+	smix_wr_dat.s.dat = regnum;
+
+	writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);
+
+	smix_cmd.u = 0;
+	smix_cmd.s.phy_op = SMI_OP_C45_ADDR;
+	smix_cmd.s.phy_adr = addr;
+	smix_cmd.s.reg_adr = devad;
+
+	writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
+
+	/* Poll the pending bit until the address cycle finishes */
+	do {
+		smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
+		udelay(100);
+		timeout--;
+	} while (smix_wr_dat.s.pending && timeout);
+
+	return timeout == 0;
+}
+
+/* mii_dev->read hook: read a PHY register. devad < 0 selects clause 22,
+ * otherwise a clause-45 address phase is issued first.
+ *
+ * Return: the 16-bit register value; 0 if the C45 address phase failed.
+ * NOTE(review): on poll timeout the last value of dat is returned — it
+ * may be stale; callers cannot distinguish this from a valid read.
+ */
+int octeontx_phy_read(struct mii_dev *bus, int addr, int devad, int regnum)
+{
+	struct octeontx_smi_priv *priv = bus->priv;
+	union smi_x_cmd smix_cmd;
+	union smi_x_rd_dat smix_rd_dat;
+	unsigned long timeout = MDIO_TIMEOUT;
+	int ret;
+
+	enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;
+
+	debug("RD: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
+	      mode, priv->baseaddr, addr, devad, regnum);
+
+	octeontx_smi_setmode(bus, mode);
+
+	if (mode == CLAUSE45) {
+		ret = octeontx_c45_addr(bus, addr, devad, regnum);
+
+		debug("RD: ret: %u\n", ret);
+
+		if (ret)
+			return 0;
+	}
+
+	smix_cmd.u = 0;
+	smix_cmd.s.phy_adr = addr;
+
+	/* C45: reg_adr carries the device type; C22: the register number */
+	if (mode == CLAUSE45) {
+		smix_cmd.s.reg_adr = devad;
+		smix_cmd.s.phy_op = SMI_OP_C45_READ;
+	} else {
+		smix_cmd.s.reg_adr = regnum;
+		smix_cmd.s.phy_op = SMI_OP_C22_READ;
+	}
+
+	writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
+
+	/* Poll until the read completes */
+	do {
+		smix_rd_dat.u = readq(priv->baseaddr + SMI_X_RD_DAT);
+		udelay(10);
+		timeout--;
+	} while (smix_rd_dat.s.pending && timeout);
+
+	debug("SMIX_RD_DAT: %lx\n", (unsigned long)smix_rd_dat.u);
+
+	return smix_rd_dat.s.dat;
+}
+
+/* mii_dev->write hook: write a PHY register. devad < 0 selects clause 22,
+ * otherwise a clause-45 address phase is issued first.
+ *
+ * Return: 0 on success; nonzero on timeout or C45 address-phase failure.
+ * NOTE(review): unlike the read path, this does not call
+ * octeontx_smi_setmode() — it relies on the mode previously programmed;
+ * confirm that is intentional.
+ */
+int octeontx_phy_write(struct mii_dev *bus, int addr, int devad, int regnum,
+		       u16 value)
+{
+	struct octeontx_smi_priv *priv = bus->priv;
+	union smi_x_cmd smix_cmd;
+	union smi_x_wr_dat smix_wr_dat;
+	unsigned long timeout = MDIO_TIMEOUT;
+	int ret;
+
+	enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;
+
+	debug("WR: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
+	      mode, priv->baseaddr, addr, devad, regnum);
+
+	if (mode == CLAUSE45) {
+		ret = octeontx_c45_addr(bus, addr, devad, regnum);
+
+		debug("WR: ret: %u\n", ret);
+
+		if (ret)
+			return ret;
+	}
+
+	smix_wr_dat.u = 0;
+	smix_wr_dat.s.dat = value;
+
+	writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);
+
+	smix_cmd.u = 0;
+	smix_cmd.s.phy_adr = addr;
+
+	/* C45: reg_adr carries the device type; C22: the register number */
+	if (mode == CLAUSE45) {
+		smix_cmd.s.reg_adr = devad;
+		smix_cmd.s.phy_op = SMI_OP_C45_WRITE;
+	} else {
+		smix_cmd.s.reg_adr = regnum;
+		smix_cmd.s.phy_op = SMI_OP_C22_WRITE;
+	}
+
+	writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
+
+	/* Poll until the write drains */
+	do {
+		smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
+		udelay(10);
+		timeout--;
+	} while (smix_wr_dat.s.pending && timeout);
+
+	debug("SMIX_WR_DAT: %lx\n", (unsigned long)smix_wr_dat.u);
+
+	return timeout == 0;
+}
+
+/* mii_dev->reset hook: pulse SMI(x)_EN.EN and fall back to clause 22.
+ *
+ * Return: always 0.
+ */
+int octeontx_smi_reset(struct mii_dev *bus)
+{
+	struct octeontx_smi_priv *priv = bus->priv;
+
+	union smi_x_en smi_en;
+
+	/* Zero the whole register image first: previously only the 'en'
+	 * bitfield was assigned, so the remaining bits of 'u' written to
+	 * the hardware were indeterminate.
+	 */
+	smi_en.u = 0;
+	writeq(smi_en.u, priv->baseaddr + SMI_X_EN);
+
+	smi_en.s.en = 1;
+	writeq(smi_en.u, priv->baseaddr + SMI_X_EN);
+
+	octeontx_smi_setmode(bus, CLAUSE22);
+
+	return 0;
+}
+
+/* PHY XS initialization, primarily for RXAUI.
+ *
+ * For PHYs with OUI 0x5016, model 9 (Marvell 88X3120): perform a
+ * hardware reset via the XGXS control register, wait for the
+ * self-clearing reset bit, then program register 4.49155 with 0x5.
+ *
+ * Return: 0 on success or when no fixup applies, -1 on failure.
+ */
+int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr)
+{
+	int reg;
+	ulong start_time;
+	int phy_id1, phy_id2;
+	int oui, model_number;
+
+	phy_id1 = octeontx_phy_read(bus, phy_addr, 1, 0x2);
+	phy_id2 = octeontx_phy_read(bus, phy_addr, 1, 0x3);
+	model_number = (phy_id2 >> 4) & 0x3F;
+	debug("%s model %x\n", __func__, model_number);
+	oui = phy_id1;
+	oui <<= 6;
+	oui |= (phy_id2 >> 10) & 0x3F;
+	debug("%s oui %x\n", __func__, oui);
+	switch (oui) {
+	case 0x5016:
+		if (model_number == 9) {
+			debug("%s +\n", __func__);
+			/* Perform hardware reset in XGXS control */
+			reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
+			/* A negative return signals a read error; the old
+			 * test '(reg & 0xffff) < 0' could never be true.
+			 */
+			if (reg < 0)
+				goto read_error;
+			reg |= 0x8000;
+			octeontx_phy_write(bus, phy_addr, 4, 0x0, reg);
+
+			/* Wait up to 500ms for the reset bit to self-clear */
+			start_time = get_timer(0);
+			do {
+				reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
+				if (reg < 0)
+					goto read_error;
+			} while ((reg & 0x8000) && get_timer(start_time) < 500);
+			if (reg & 0x8000) {
+				printf("HW reset for M88X3120 PHY failed");
+				printf("MII_BMCR: 0x%x\n", reg);
+				return -1;
+			}
+			/* program 4.49155 with 0x5 */
+			octeontx_phy_write(bus, phy_addr, 4, 0xc003, 0x5);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+
+read_error:
+	debug("M88X3120 PHY config read failed\n");
+	return -1;
+}
+
+/* DM probe: map BAR0 of the SMI nexus and register one MDIO bus for
+ * each "cavium,thunder-8890-mdio" subnode in the device tree.
+ *
+ * Return: 0 on success, -1 or a negative mdio_register() error code.
+ */
+int octeontx_smi_probe(struct udevice *dev)
+{
+	int ret, subnode, cnt = 0, node = dev->node.of_offset;
+	struct mii_dev *bus;
+	struct octeontx_smi_priv *priv;
+	pci_dev_t bdf = dm_pci_get_bdf(dev);
+
+	debug("SMI PCI device: %x\n", bdf);
+	dev->req_seq = PCI_FUNC(bdf);
+	if (!dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM)) {
+		printf("Failed to map PCI region for bdf %x\n", bdf);
+		return -1;
+	}
+
+	fdt_for_each_subnode(subnode, gd->fdt_blob, node) {
+		ret = fdt_node_check_compatible(gd->fdt_blob, subnode,
+						"cavium,thunder-8890-mdio");
+		if (ret)
+			continue;
+
+		bus = mdio_alloc();
+		priv = malloc(sizeof(*priv));
+		if (!bus || !priv) {
+			printf("Failed to allocate OcteonTX MDIO bus # %u\n",
+			       dev->seq);
+			/* don't leak whichever allocation succeeded */
+			if (bus)
+				mdio_free(bus);
+			free(priv);
+			return -1;
+		}
+
+		bus->read = octeontx_phy_read;
+		bus->write = octeontx_phy_write;
+		bus->reset = octeontx_smi_reset;
+		bus->priv = priv;
+
+		priv->mode = CLAUSE22;
+		priv->baseaddr = (void __iomem *)fdtdec_get_addr(gd->fdt_blob,
+								 subnode,
+								 "reg");
+		debug("mdio base addr %p\n", priv->baseaddr);
+
+		/* use given name or generate its own unique name */
+		snprintf(bus->name, MDIO_NAME_LEN, "smi%d", cnt++);
+
+		ret = mdio_register(bus);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static const struct udevice_id octeontx_smi_ids[] = {
+	{ .compatible = "cavium,thunder-8890-mdio-nexus" },
+	{}
+};
+
+U_BOOT_DRIVER(octeontx_smi) = {
+	.name	= "octeontx_smi",
+	.id	= UCLASS_MISC,
+	.probe	= octeontx_smi_probe,
+	.of_match = octeontx_smi_ids,
+};
+
+/* NOTE(review): this table uses PCI_DEVICE_ID_CAVIUM_SMI (from pci_ids.h);
+ * the local PCI_DEVICE_ID_OCTEONTX_SMI define at the top of this file
+ * appears unused — confirm and drop one of the two.
+ */
+static struct pci_device_id octeontx_smi_supported[] = {
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_SMI) },
+	{}
+};
+
+U_BOOT_PCI_DEVICE(octeontx_smi, octeontx_smi_supported);
diff --git a/drivers/net/octeontx/xcv.c b/drivers/net/octeontx/xcv.c
new file mode 100644
index 0000000..8dd558b
--- /dev/null
+++ b/drivers/net/octeontx/xcv.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <config.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdt_support.h>
+#include <pci.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <misc.h>
+#include <net.h>
+#include <netdev.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/libfdt.h>
+
+#include <asm/arch/csrs/csrs-xcv.h>
+
+#define XCVX_BASE		0x87E0DB000000ULL
+
+/* Initialize XCV block: release DLL and clock-tree resets, configure
+ * DLL bypass, enable the compensation controller, then enable the block.
+ * The register write ordering below follows the hardware bring-up
+ * sequence and must not be reordered.
+ */
+void xcv_init_hw(void)
+{
+	union xcvx_reset reset;
+	union xcvx_dll_ctl xcv_dll_ctl;
+
+	/* Take the DLL out of reset */
+	reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+	reset.s.dllrst = 0;
+	writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+	/* Take the clock tree out of reset */
+	reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+	reset.s.clkrst = 0;
+	writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+	/* Once the 125MHz ref clock is stable, wait 10us for DLL to lock */
+	udelay(10);
+
+	/* Optionally, bypass the DLL setting */
+	xcv_dll_ctl.u = readq(XCVX_BASE + XCVX_DLL_CTL(0));
+	xcv_dll_ctl.s.clkrx_set = 0;
+	xcv_dll_ctl.s.clkrx_byp = 1;
+	xcv_dll_ctl.s.clktx_byp = 0;
+	writeq(xcv_dll_ctl.u, XCVX_BASE + XCVX_DLL_CTL(0));
+
+	/* Enable the compensation controller */
+	reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+	reset.s.comp = 1;
+	writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+	reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+
+	/* Wait for 1040 reference clock cycles for the compensation state
+	 * machine lock.
+	 */
+	udelay(100);
+
+	/* Enable the XCV block */
+	reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+	reset.s.enable = 1;
+	writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+	/* set XCV(0)_RESET[CLKRST] to 1 */
+	reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+	reset.s.clkrst = 1;
+	writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+}
+
+/*
+ * Configure XCV link based on the speed
+ * link_up   : Set to 1 when link is up otherwise 0
+ * link_speed: The speed of the link (10/100; anything else selects the
+ *             default speed code 2).
+ */
+void xcv_setup_link(bool link_up, int link_speed)
+{
+	union xcvx_ctl xcv_ctl;
+	union xcvx_reset reset;
+	union xcvx_batch_crd_ret xcv_crd_ret;
+	int speed = 2;	/* default speed code when not 10 or 100 */
+
+	/* Check RGMII link */
+	if (link_speed == 100)
+		speed = 1;
+	else if (link_speed == 10)
+		speed = 0;
+
+	if (link_up) {
+		/* Set operating speed */
+		xcv_ctl.u = readq(XCVX_BASE + XCVX_CTL(0));
+		xcv_ctl.s.speed = speed;
+		writeq(xcv_ctl.u, XCVX_BASE + XCVX_CTL(0));
+
+		/* Datapaths come out of reset
+		 * - The datapath resets will disengage BGX from the
+		 *   RGMII interface
+		 * - XCV will continue to return TX credits for each tick
+		 *   that is sent on the TX data path
+		 */
+		reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+		reset.s.tx_dat_rst_n = 1;
+		reset.s.rx_dat_rst_n = 1;
+		writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+		/* Enable packet flow */
+		reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+		reset.s.tx_pkt_rst_n = 1;
+		reset.s.rx_pkt_rst_n = 1;
+		writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+
+		/* Return batch credits */
+		xcv_crd_ret.u = readq(XCVX_BASE + XCVX_BATCH_CRD_RET(0));
+		xcv_crd_ret.s.crd_ret = 1;
+		writeq(xcv_crd_ret.u, XCVX_BASE + XCVX_BATCH_CRD_RET(0));
+	} else {
+		/* Disable packet flow (the old comment said "Enable") */
+		reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+		reset.s.tx_pkt_rst_n = 0;
+		reset.s.rx_pkt_rst_n = 0;
+		writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
+		reset.u = readq(XCVX_BASE + XCVX_RESET(0));
+	}
+}
diff --git a/drivers/net/octeontx2/Makefile b/drivers/net/octeontx2/Makefile
new file mode 100644
index 0000000..c930072
--- /dev/null
+++ b/drivers/net/octeontx2/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier:    GPL-2.0
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+
+obj-$(CONFIG_NET_OCTEONTX2) += cgx.o nix_af.o nix.o rvu_pf.o \
+				rvu_af.o rvu_common.o
+
diff --git a/drivers/net/octeontx2/cgx.c b/drivers/net/octeontx2/cgx.c
new file mode 100644
index 0000000..ff2ebc2
--- /dev/null
+++ b/drivers/net/octeontx2/cgx.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci_ids.h>
+#include <linux/list.h>
+#include <asm/arch/board.h>
+#include <asm/arch/csrs/csrs-cgx.h>
+#include <asm/io.h>
+
+#include "cgx.h"
+
+/* Printable names indexed by enum lmac_type (index 5 is RGMII, a value
+ * the enum does not define — it is kept to preserve the indexing).
+ */
+char lmac_type_to_str[][8] = {
+	"SGMII",
+	"XAUI",
+	"RXAUI",
+	"10G_R",
+	"40G_R",
+	"RGMII",
+	"QSGMII",
+	"25G_R",
+	"50G_R",
+	"100G_R",
+	"USXGMII",
+};
+
+/* Printable names indexed by the firmware link-speed code */
+char lmac_speed_to_str[][8] = {
+	"0",
+	"10M",
+	"100M",
+	"1G",
+	"2.5G",
+	"5G",
+	"10G",
+	"20G",
+	"25G",
+	"40G",
+	"50G",
+	"80G",
+	"100G",
+};
+
+/**
+ * Given an LMAC/PF instance number, return the lmac
+ * Per design, each PF has only one LMAC mapped.
+ *
+ * @param instance	instance to find
+ *
+ * @return	pointer to lmac data structure or NULL if not found
+ */
+struct lmac *nix_get_cgx_lmac(int lmac_instance)
+{
+	struct cgx *cgx;
+	struct udevice *dev;
+	int i, idx, err;
+
+	/* Walk every CGX PCI function on the node and search its LMACs */
+	for (i = 0; i < CGX_PER_NODE; i++) {
+		err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+					 PCI_DEVICE_ID_OCTEONTX2_CGX, i,
+					 &dev);
+		if (err)
+			continue;
+
+		cgx = dev_get_priv(dev);
+		debug("%s udev %p cgx %p instance %d\n", __func__, dev, cgx,
+		      lmac_instance);
+		for (idx = 0; idx < cgx->lmac_count; idx++) {
+			if (cgx->lmac[idx]->instance == lmac_instance)
+				return cgx->lmac[idx];
+		}
+	}
+	return NULL;
+}
+
+/* Remove the LMAC's DMAC CAM entry and fall back to accepting broadcast
+ * and all multicast with CAM matching disabled (inverse of
+ * cgx_lmac_mac_filter_setup()).
+ */
+void cgx_lmac_mac_filter_clear(struct lmac *lmac)
+{
+	union cgxx_cmrx_rx_dmac_ctl0 dmac_ctl0;
+	union cgxx_cmr_rx_dmacx_cam0 dmac_cam0;
+	void *reg_addr;
+
+	/* Invalidate the CAM entry for this LMAC */
+	dmac_cam0.u = 0x0;
+	reg_addr = lmac->cgx->reg_base +
+			CGXX_CMR_RX_DMACX_CAM0(lmac->lmac_id * 8);
+	writeq(dmac_cam0.u, reg_addr);
+	debug("%s: reg %p dmac_cam0 %llx\n", __func__, reg_addr, dmac_cam0.u);
+
+	dmac_ctl0.u = 0x0;
+	dmac_ctl0.s.bcst_accept = 1;
+	dmac_ctl0.s.mcst_mode = 1;
+	dmac_ctl0.s.cam_accept = 0;
+	reg_addr = lmac->cgx->reg_base +
+			CGXX_CMRX_RX_DMAC_CTL0(lmac->lmac_id);
+	writeq(dmac_ctl0.u, reg_addr);
+	debug("%s: reg %p dmac_ctl0 %llx\n", __func__, reg_addr, dmac_ctl0.u);
+}
+
+/* Install the LMAC's MAC address in the DMAC CAM and accept broadcast
+ * plus CAM-matched unicast traffic.
+ */
+void cgx_lmac_mac_filter_setup(struct lmac *lmac)
+{
+	union cgxx_cmrx_rx_dmac_ctl0 dmac_ctl0;
+	union cgxx_cmr_rx_dmacx_cam0 dmac_cam0;
+	u64 mac, tmp = 0;
+	void *reg_addr;
+
+	/* tmp is zero-initialized so the two bytes above the 6-byte MAC
+	 * copy are defined before the swab/shift below (previously they
+	 * were indeterminate).
+	 */
+	memcpy((void *)&tmp, lmac->mac_addr, 6);
+	debug("%s: tmp %llx\n", __func__, tmp);
+	debug("%s: swab tmp %llx\n", __func__, swab64(tmp));
+	/* Byte-reverse and drop the two pad bytes to form the CAM value */
+	mac = swab64(tmp) >> 16;
+	debug("%s: mac %llx\n", __func__, mac);
+	dmac_cam0.u = 0x0;
+	dmac_cam0.s.id = lmac->lmac_id;
+	dmac_cam0.s.adr = mac;
+	dmac_cam0.s.en = 1;
+	reg_addr = lmac->cgx->reg_base +
+			CGXX_CMR_RX_DMACX_CAM0(lmac->lmac_id * 8);
+	writeq(dmac_cam0.u, reg_addr);
+	debug("%s: reg %p dmac_cam0 %llx\n", __func__, reg_addr, dmac_cam0.u);
+	dmac_ctl0.u = 0x0;
+	dmac_ctl0.s.bcst_accept = 1;
+	dmac_ctl0.s.mcst_mode = 0;
+	dmac_ctl0.s.cam_accept = 1;
+	reg_addr = lmac->cgx->reg_base +
+			CGXX_CMRX_RX_DMAC_CTL0(lmac->lmac_id);
+	writeq(dmac_ctl0.u, reg_addr);
+	debug("%s: reg %p dmac_ctl0 %llx\n", __func__, reg_addr, dmac_ctl0.u);
+}
+
+/* Program the 6-bit RX port-kind for the given LMAC.
+ *
+ * Return: always 0.
+ */
+int cgx_lmac_set_pkind(struct lmac *lmac, u8 lmac_id, int pkind)
+{
+	u64 id_map = pkind & 0x3f;
+
+	cgx_write(lmac->cgx, lmac_id, CGXX_CMRX_RX_ID_MAP(0), id_map);
+	return 0;
+}
+
+/* Query the firmware for the current link status of this LMAC.
+ *
+ * Return: 0 on success (*status filled in), -1 on failure.
+ */
+int cgx_lmac_link_status(struct lmac *lmac, int lmac_id, u64 *status)
+{
+	if (!cgx_intf_get_link_sts(lmac->cgx->cgx_id, lmac_id, status))
+		return 0;
+
+	debug("%s request failed for cgx%d lmac%d\n",
+	      __func__, lmac->cgx->cgx_id, lmac->lmac_id);
+	return -1;
+}
+
+/* Enable or disable both RX and TX data paths for one LMAC.
+ *
+ * Return: 0 on success, -ENODEV for a bad cgx pointer or LMAC index.
+ */
+int cgx_lmac_rx_tx_enable(struct lmac *lmac, int lmac_id, bool enable)
+{
+	struct cgx *cgx = lmac->cgx;
+	union cgxx_cmrx_config cmrx_config;
+
+	if (!cgx || lmac_id >= cgx->lmac_count)
+		return -ENODEV;
+
+	/* Set both enables to the same value in one read-modify-write */
+	cmrx_config.u = cgx_read(cgx, lmac_id, CGXX_CMRX_CONFIG(0));
+	cmrx_config.s.data_pkt_rx_en =
+	cmrx_config.s.data_pkt_tx_en = enable ? 1 : 0;
+	cgx_write(cgx, lmac_id, CGXX_CMRX_CONFIG(0), cmrx_config.u);
+	return 0;
+}
+
+/* Ask the firmware to bring the LMAC link up or down; the resulting
+ * link state is written to *status.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int cgx_lmac_link_enable(struct lmac *lmac, int lmac_id, bool enable,
+			 u64 *status)
+{
+	if (!cgx_intf_link_up_dwn(lmac->cgx->cgx_id, lmac_id, enable,
+				  status))
+		return 0;
+
+	debug("%s request failed for cgx%d lmac%d\n",
+	      __func__, lmac->cgx->cgx_id, lmac->lmac_id);
+	return -1;
+}
+
+/* Enable/disable internal loopback on one LMAC. SGMII/QSGMII LMACs use
+ * the GMP PCS loopback bit; all other types use the SPU control bit.
+ *
+ * Return: 0 on success, -ENODEV for a bad cgx pointer or LMAC index.
+ */
+int cgx_lmac_internal_loopback(struct lmac *lmac, int lmac_id, bool enable)
+{
+	struct cgx *cgx = lmac->cgx;
+	union cgxx_cmrx_config cmrx_cfg;
+	union cgxx_gmp_pcs_mrx_control mrx_control;
+	union cgxx_spux_control1 spux_control1;
+	enum lmac_type lmac_type;
+
+	if (!cgx || lmac_id >= cgx->lmac_count)
+		return -ENODEV;
+
+	/* The loopback control register depends on the LMAC's mode */
+	cmrx_cfg.u = cgx_read(cgx, lmac_id, CGXX_CMRX_CONFIG(0));
+	lmac_type = cmrx_cfg.s.lmac_type;
+	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
+		mrx_control.u = cgx_read(cgx, lmac_id,
+					 CGXX_GMP_PCS_MRX_CONTROL(0));
+		mrx_control.s.loopbck1 = enable ? 1 : 0;
+		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CONTROL(0),
+			  mrx_control.u);
+	} else {
+		spux_control1.u = cgx_read(cgx, lmac_id,
+					   CGXX_SPUX_CONTROL1(0));
+		spux_control1.s.loopbck = enable ? 1 : 0;
+		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1(0),
+			  spux_control1.u);
+	}
+	return 0;
+}
+
+/* Discover and initialize all LMACs of one CGX: allocate per-LMAC state,
+ * read the LMAC type from hardware, program the MAC filter.
+ *
+ * Return: 0 on success, -ENOMEM if an allocation fails.
+ */
+static int cgx_lmac_init(struct cgx *cgx)
+{
+	struct lmac *lmac;
+	union cgxx_cmrx_config cmrx_cfg;
+	/* instance numbers are global across all CGX devices, from 1 */
+	static int instance = 1;
+	int i;
+
+	cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMR_RX_LMACS());
+	debug("%s: Found %d lmacs for cgx %d@%p\n", __func__, cgx->lmac_count,
+	      cgx->cgx_id, cgx->reg_base);
+
+	for (i = 0; i < cgx->lmac_count; i++) {
+		lmac = calloc(1, sizeof(*lmac));
+		if (!lmac)
+			return -ENOMEM;
+		lmac->instance = instance++;
+		snprintf(lmac->name, sizeof(lmac->name), "cgx_fwi_%d_%d",
+			 cgx->cgx_id, i);
+		/* Get LMAC type */
+		cmrx_cfg.u = cgx_read(cgx, i, CGXX_CMRX_CONFIG(0));
+		lmac->lmac_type = cmrx_cfg.s.lmac_type;
+
+		lmac->lmac_id = i;
+		lmac->cgx = cgx;
+		cgx->lmac[i] = lmac;
+		debug("%s: map id %d to lmac %p (%s), type:%d instance %d\n",
+		      __func__, i, lmac, lmac->name, lmac->lmac_type,
+		      lmac->instance);
+		lmac->init_pend = 1;
+		printf("CGX%d LMAC%d [%s]\n", lmac->cgx->cgx_id,
+		       lmac->lmac_id, lmac_type_to_str[lmac->lmac_type]);
+		/* board code hands out MAC addresses by instance index */
+		octeontx2_board_get_mac_addr((lmac->instance - 1),
+					     lmac->mac_addr);
+		debug("%s: MAC %pM\n", __func__, lmac->mac_addr);
+		cgx_lmac_mac_filter_setup(lmac);
+	}
+	return 0;
+}
+
+/* DM probe: map BAR0, derive the CGX instance id from the BAR address,
+ * then discover the LMACs.
+ *
+ * Return: 0 on success, -EINVAL if the BAR cannot be mapped, or the
+ * cgx_lmac_init() error code.
+ */
+int cgx_probe(struct udevice *dev)
+{
+	struct cgx *cgx = dev_get_priv(dev);
+
+	cgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+				       PCI_REGION_MEM);
+	/* Previously unchecked: a NULL mapping would be dereferenced in
+	 * cgx_lmac_init() via cgx_read().
+	 */
+	if (!cgx->reg_base) {
+		debug("%s: failed to map PCI BAR0\n", __func__);
+		return -EINVAL;
+	}
+
+	cgx->dev = dev;
+	/* instance id lives in bits 26:24 of the BAR address */
+	cgx->cgx_id = ((u64)(cgx->reg_base) >> 24) & 0x7;
+
+	debug("%s CGX BAR %p, id: %d\n", __func__, cgx->reg_base,
+	      cgx->cgx_id);
+	debug("%s CGX %p, udev: %p\n", __func__, cgx, dev);
+
+	return cgx_lmac_init(cgx);
+}
+
+/* DM remove: drop the DMAC filters that were programmed at probe time.
+ *
+ * Return: always 0.
+ */
+int cgx_remove(struct udevice *dev)
+{
+	struct cgx *cgx = dev_get_priv(dev);
+	int lmac_id;
+
+	debug("%s: cgx remove reg_base %p cgx_id %d",
+	      __func__, cgx->reg_base, cgx->cgx_id);
+
+	for (lmac_id = 0; lmac_id < cgx->lmac_count; lmac_id++)
+		cgx_lmac_mac_filter_clear(cgx->lmac[lmac_id]);
+
+	return 0;
+}
+
+/* Bound via the PCI device table below (no device-tree compatible) */
+U_BOOT_DRIVER(cgx) = {
+	.name	= "cgx",
+	.id	= UCLASS_MISC,
+	.probe	= cgx_probe,
+	.remove	= cgx_remove,
+	.priv_auto_alloc_size = sizeof(struct cgx),
+};
+
+static struct pci_device_id cgx_supported[] = {
+	{PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_CGX) },
+	{}
+};
+
+U_BOOT_PCI_DEVICE(cgx, cgx_supported);
diff --git a/drivers/net/octeontx2/cgx.h b/drivers/net/octeontx2/cgx.h
new file mode 100644
index 0000000..f287692
--- /dev/null
+++ b/drivers/net/octeontx2/cgx.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __CGX_H__
+#define __CGX_H__
+
+#include "cgx_intf.h"
+
+#define PCI_DEVICE_ID_OCTEONTX2_CGX	0xA059
+
+#define MAX_LMAC_PER_CGX		4
+#define CGX_PER_NODE			3
+
+/* LMAC operating modes as read from CGXX_CMRX_CONFIG.lmac_type; these
+ * values index lmac_type_to_str[]. Value 5 is intentionally absent
+ * (the string table keeps "RGMII" at index 5 to preserve indexing).
+ */
+enum lmac_type {
+	LMAC_MODE_SGMII		= 0,
+	LMAC_MODE_XAUI		= 1,
+	LMAC_MODE_RXAUI		= 2,
+	LMAC_MODE_10G_R		= 3,
+	LMAC_MODE_40G_R		= 4,
+	LMAC_MODE_QSGMII	= 6,
+	LMAC_MODE_25G_R		= 7,
+	LMAC_MODE_50G_R		= 8,
+	LMAC_MODE_100G_R	= 9,
+	LMAC_MODE_USXGMII	= 10,
+};
+
+extern char lmac_type_to_str[][8];
+
+extern char lmac_speed_to_str[][8];
+
+/* Compact per-LMAC status bits plus MAC address */
+struct lmac_priv {
+	u8 enable:1;
+	u8 full_duplex:1;
+	u8 speed:4;
+	u8 mode:1;
+	u8 rsvd:1;
+	u8 mac_addr[6];
+};
+
+struct cgx;
+struct nix;
+struct nix_af;
+
+/* One logical MAC within a CGX block */
+struct lmac {
+	struct cgx	*cgx;		/* parent CGX */
+	struct nix	*nix;		/* attached NIX, if any */
+	char		name[16];
+	enum lmac_type	lmac_type;	/* from CGXX_CMRX_CONFIG */
+	bool		init_pend;
+	u8		instance;	/* global instance number, from 1 */
+	u8		lmac_id;	/* index within the parent CGX */
+	u8		pknd;
+	u8		link_num;
+	u32		chan_num;
+	u8		mac_addr[6];
+};
+
+/* Per-device state for one CGX PCI function */
+struct cgx {
+	struct nix_af		*nix_af;
+	void __iomem		*reg_base;
+	struct udevice		*dev;
+	struct lmac		*lmac[MAX_LMAC_PER_CGX];
+	u8			cgx_id;
+	u8			lmac_count;
+};
+
+/* MMIO write to a per-LMAC CGX register */
+static inline void cgx_write(struct cgx *cgx, u8 lmac, u64 offset, u64 val)
+{
+	writeq(val, cgx->reg_base + CMR_SHIFT(lmac) + offset);
+}
+
+/* MMIO read from a per-LMAC CGX register */
+static inline u64 cgx_read(struct cgx *cgx, u8 lmac, u64 offset)
+{
+	return readq(cgx->reg_base + CMR_SHIFT(lmac) + offset);
+}
+
+/**
+ * Given an LMAC/PF instance number, return the lmac
+ * Per design, each PF has only one LMAC mapped.
+ *
+ * @param instance	instance to find
+ *
+ * @return	pointer to lmac data structure or NULL if not found
+ */
+struct lmac *nix_get_cgx_lmac(int lmac_instance);
+
+int cgx_lmac_set_pkind(struct lmac *lmac, u8 lmac_id, int pkind);
+int cgx_lmac_internal_loopback(struct lmac *lmac, int lmac_id, bool enable);
+int cgx_lmac_rx_tx_enable(struct lmac *lmac, int lmac_id, bool enable);
+int cgx_lmac_link_enable(struct lmac *lmac, int lmac_id, bool enable,
+			 u64 *status);
+int cgx_lmac_link_status(struct lmac *lmac, int lmac_id, u64 *status);
+void cgx_lmac_mac_filter_setup(struct lmac *lmac);
+
+int cgx_intf_get_link_sts(u8 cgx, u8 lmac, u64 *lnk_sts);
+int cgx_intf_link_up_dwn(u8 cgx, u8 lmac, u8 up_dwn, u64 *lnk_sts);
+int cgx_intf_get_mac_addr(u8 cgx, u8 lmac, u8 *mac);
+int cgx_intf_set_macaddr(struct udevice *dev);
+int cgx_intf_prbs(u8 qlm, u8 mode, u32 time, u8 lane);
+int cgx_intf_display_eye(u8 qlm, u8 lane);
+int cgx_intf_display_serdes(u8 qlm, u8 lane);
+
+#endif /* __CGX_H__ */
diff --git a/drivers/net/octeontx2/cgx_intf.c b/drivers/net/octeontx2/cgx_intf.c
new file mode 100644
index 0000000..37d9a2b
--- /dev/null
+++ b/drivers/net/octeontx2/cgx_intf.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+
+#include <asm/arch/board.h>
+#include <asm/io.h>
+
+#include "cgx_intf.h"
+#include "cgx.h"
+#include "nix.h"
+
+/* Read scratch CSR 'index' (0 or 1) for the given CGX/LMAC pair */
+static u64 cgx_rd_scrx(u8 cgx, u8 lmac, u8 index)
+{
+	u64 reg = (index == 1) ? CGX_CMR_SCRATCH1 : CGX_CMR_SCRATCH0;
+
+	return readq(reg + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+}
+
+/* Write 'val' to scratch CSR 'index' (0 or 1) for the given CGX/LMAC pair */
+static void cgx_wr_scrx(u8 cgx, u8 lmac, u8 index, u64 val)
+{
+	u64 reg = (index == 1) ? CGX_CMR_SCRATCH1 : CGX_CMR_SCRATCH0;
+
+	writeq(val, reg + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+}
+
+/* Read SCRATCHX(0) - the response/status CSR - for the given CGX/LMAC */
+static u64 cgx_rd_scr0(u8 cgx, u8 lmac)
+{
+	return cgx_rd_scrx(cgx, lmac, 0);
+}
+
+/* Read SCRATCHX(1) - the command CSR - for the given CGX/LMAC */
+static u64 cgx_rd_scr1(u8 cgx, u8 lmac)
+{
+	return cgx_rd_scrx(cgx, lmac, 1);
+}
+
+/* Write SCRATCHX(0) - the response/status CSR - for the given CGX/LMAC.
+ * Note: no 'return' here; returning a void expression from a void
+ * function is a C constraint violation (C11 6.8.6.4).
+ */
+static void cgx_wr_scr0(u8 cgx, u8 lmac, u64 val)
+{
+	cgx_wr_scrx(cgx, lmac, 0, val);
+}
+
+/* Write SCRATCHX(1) - the command CSR - for the given CGX/LMAC.
+ * Note: no 'return' here; returning a void expression from a void
+ * function is a C constraint violation (C11 6.8.6.4).
+ */
+static void cgx_wr_scr1(u8 cgx, u8 lmac, u64 val)
+{
+	cgx_wr_scrx(cgx, lmac, 1, val);
+}
+
+/* Set the mailbox owner field (SCRATCHX(1).own_status) to 'val',
+ * preserving the rest of the command CSR (read-modify-write)
+ */
+static inline void set_ownership(u8 cgx, u8 lmac, u8 val)
+{
+	union cgx_scratchx1 scr1;
+
+	scr1.u = cgx_rd_scr1(cgx, lmac);
+	scr1.s.own_status = val;
+	cgx_wr_scr1(cgx, lmac, scr1.u);
+}
+
+/*
+ * Wait until non-secure software owns the scratch mailbox (i.e. firmware
+ * has finished any in-flight request), draining stale async events along
+ * the way.
+ *
+ * @return 0 on success, -ETIMEDOUT after ~5 s (5000 * 1 ms polls)
+ */
+static int wait_for_ownership(u8 cgx, u8 lmac)
+{
+	union cgx_scratchx1 scr1;
+	union cgx_scratchx0 scr0;
+	u64 cmrx_int;
+	int timeout = 5000;
+
+	do {
+		scr1.u = cgx_rd_scr1(cgx, lmac);
+		scr0.u = cgx_rd_scr0(cgx, lmac);
+		/* clear async events if any */
+		if (scr0.s.evt_sts.evt_type == CGX_EVT_ASYNC &&
+		    scr0.s.evt_sts.ack) {
+			/* clear interrupt */
+			cmrx_int = readq(CGX_CMR_INT +
+					 CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+			cmrx_int |= 0x2; // Overflw bit
+			writeq(cmrx_int, CGX_CMR_INT +
+					 CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+
+			/* clear ack */
+			scr0.s.evt_sts.ack = 0;
+			cgx_wr_scr0(cgx, lmac, scr0.u);
+		}
+
+		if (timeout-- < 0) {
+			debug("timeout waiting for ownership\n");
+			return -ETIMEDOUT;
+		}
+		mdelay(1);
+	} while ((scr1.s.own_status == CGX_OWN_FIRMWARE) &&
+		  scr0.s.evt_sts.ack);
+
+	return 0;
+}
+
+/**
+ * Post a request to firmware through the CGX scratch-CSR mailbox and wait
+ * for the matching response.
+ *
+ * @param cgx	CGX block number
+ * @param lmac	LMAC number within the CGX block
+ * @param cmd_args	command id plus any command-specific arguments
+ * @param rsp	updated with the raw SCRATCHX(0) response, 0 on error
+ * @param use_cmd_id_only	when set, only the command id is written and
+ *				the arguments already in SCRATCHX(1) are kept
+ *
+ * @return 0 on success, -ETIMEDOUT on handshake timeout, -1 on a
+ *	   firmware-reported failure or unexpected event
+ */
+int cgx_intf_req(u8 cgx, u8 lmac, union cgx_cmd_s cmd_args, u64 *rsp,
+		 int use_cmd_id_only)
+{
+	union cgx_scratchx1 scr1;
+	/* Zero-initialized: the error path below writes scr0 back to the
+	 * hardware even when we bail out before the first read (ownership
+	 * timeout), which would otherwise use an uninitialized value (UB).
+	 */
+	union cgx_scratchx0 scr0 = { .u = 0 };
+	u64 cmrx_int;
+	int timeout = 500;
+	int err = 0;
+	u8 cmd = cmd_args.cmd.id;
+
+	if (wait_for_ownership(cgx, lmac)) {
+		err = -ETIMEDOUT;
+		goto error;
+	}
+
+	/* send command */
+	scr1.u = cgx_rd_scr1(cgx, lmac);
+
+	if (use_cmd_id_only) {
+		scr1.s.cmd.id = cmd;
+	} else {
+		cmd_args.own_status = scr1.s.own_status;
+		scr1.s = cmd_args;
+	}
+	cgx_wr_scr1(cgx, lmac, scr1.u);
+
+	set_ownership(cgx, lmac, CGX_OWN_FIRMWARE);
+
+	/* wait for response and ownership (up to ~5 s: 500 * 10 ms polls) */
+	do {
+		scr0.u = cgx_rd_scr0(cgx, lmac);
+		scr1.u = cgx_rd_scr1(cgx, lmac);
+		mdelay(10);
+	} while (timeout-- && (!scr0.s.evt_sts.ack) &&
+		 (scr1.s.own_status == CGX_OWN_FIRMWARE));
+	if (timeout < 0) {
+		debug("%s timeout waiting for ack\n", __func__);
+		err = -ETIMEDOUT;
+		goto error;
+	}
+
+	/* shutdown gets no response; fall through to the cleanup path */
+	if (cmd == CGX_CMD_INTF_SHUTDOWN)
+		goto error;
+
+	if (scr0.s.evt_sts.evt_type != CGX_EVT_CMD_RESP) {
+		debug("%s received async event instead of cmd resp event\n",
+		      __func__);
+		err = -1;
+		goto error;
+	}
+	if (scr0.s.evt_sts.id != cmd) {
+		debug("%s received resp for cmd %d expected cmd %d\n",
+		      __func__, scr0.s.evt_sts.id, cmd);
+		err = -1;
+		goto error;
+	}
+	if (scr0.s.evt_sts.stat != CGX_STAT_SUCCESS) {
+		debug("%s cmd%d failed on cgx%u lmac%u with errcode %d\n",
+		      __func__, cmd, cgx, lmac, scr0.s.link_sts.err_type);
+		err = -1;
+	}
+
+error:
+	/* clear interrupt */
+	cmrx_int = readq(CGX_CMR_INT + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+	cmrx_int |= 0x2; // Overflw bit
+	writeq(cmrx_int, CGX_CMR_INT + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
+
+	/* clear ownership and ack */
+	scr0.s.evt_sts.ack = 0;
+	cgx_wr_scr0(cgx, lmac, scr0.u);
+
+	*rsp = err ? 0 : scr0.u;
+
+	return err;
+}
+
+/* Fetch the firmware-provided MAC address for cgx/lmac into mac[6] */
+int cgx_intf_get_mac_addr(u8 cgx, u8 lmac, u8 *mac)
+{
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_GET_MAC_ADDR;
+
+	ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
+	if (ret)
+		return -1;
+
+	/* payload starts at bit 9; drop the 9-bit event status field */
+	scr0.u >>= 9;
+	memcpy(mac, &scr0.u, 6);
+
+	return 0;
+}
+
+/* Read the firmware version (major/minor nibbles packed into *ver) */
+int cgx_intf_get_ver(u8 cgx, u8 lmac, u8 *ver)
+{
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_GET_FW_VER;
+
+	ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
+	if (ret)
+		return -1;
+
+	/* payload starts at bit 9; drop the 9-bit event status field */
+	scr0.u >>= 9;
+	*ver = scr0.u & 0xFFFF;
+
+	return 0;
+}
+
+/* Query current link status; *lnk_sts follows the cgx_lnk_sts_s layout
+ * shifted down to bit 0 (see comment below)
+ */
+int cgx_intf_get_link_sts(u8 cgx, u8 lmac, u64 *lnk_sts)
+{
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_GET_LINK_STS;
+
+	ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
+	if (ret)
+		return -1;
+
+	scr0.u >>= 9;
+	/* pass the same format as cgx_lnk_sts_s
+	 * err_type:10, speed:4, full_duplex:1, link_up:1
+	 */
+	*lnk_sts = scr0.u & 0xFFFF;
+	return 0;
+}
+
+/* Bring the link up (up_dwn != 0) or down (up_dwn == 0) and return the
+ * resulting link status in *lnk_sts
+ */
+int cgx_intf_link_up_dwn(u8 cgx, u8 lmac, u8 up_dwn, u64 *lnk_sts)
+{
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = up_dwn ? CGX_CMD_LINK_BRING_UP : CGX_CMD_LINK_BRING_DOWN;
+
+	ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
+	if (ret)
+		return -1;
+
+	scr0.u >>= 9;
+	/* pass the same format as cgx_lnk_sts_s
+	 * err_type:10, speed:4, full_duplex:1, link_up:1
+	 */
+	*lnk_sts = scr0.u & 0xFFFF;
+	return 0;
+}
+
+/* Tell firmware to shut down the CGX interface; no response is expected
+ * (cgx_intf_req treats CGX_CMD_INTF_SHUTDOWN specially)
+ */
+void cgx_intf_shutdown(void)
+{
+	union cgx_scratchx0 scr0;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_INTF_SHUTDOWN;
+
+	cgx_intf_req(0, 0, cmd, &scr0.u, 1);
+}
+
+/* Ask firmware to run PRBS pattern 'mode' on the given QLM lane for
+ * 'time'; returns 0 on success, -1 on any mailbox failure
+ */
+int cgx_intf_prbs(u8 qlm, u8 mode, u32 time, u8 lane)
+{
+	union cgx_cmd_s cmd;
+	union cgx_scratchx0 scr0;
+
+	cmd.cmd.id = CGX_CMD_PRBS;
+	cmd.prbs_args.qlm = qlm;
+	cmd.prbs_args.mode = mode;
+	cmd.prbs_args.time = time;
+	cmd.prbs_args.lane = lane;
+
+	return cgx_intf_req(0, 0, cmd, &scr0.u, 0) ? -1 : 0;
+}
+
+enum cgx_mode {
+	MODE_10G_C2C,
+	MODE_10G_C2M,
+	MODE_10G_KR,
+	MODE_25G_C2C,
+	MODE_25G_2_C2C,
+	MODE_50G_C2C,
+	MODE_50G_4_C2C
+};
+
+/* Link-speed names, indexed by enum cgx_link_speed; read-only, so const */
+static const char intf_speed_to_str[][8] = {
+	"10M",
+	"100M",
+	"1G",
+	"2.5G",
+	"5G",
+	"10G",
+	"20G",
+	"25G",
+	"40G",
+	"50G",
+	"80G",
+	"100G",
+};
+
+/* Translate an enum cgx_mode index into the firmware's speed/duplex/an/
+ * mode-mask arguments for CGX_CMD_MODE_CHANGE
+ */
+static void mode_to_args(int mode, struct cgx_mode_change_args *args)
+{
+	args->an = 0;
+	args->duplex = 0;
+	args->port = 0;
+
+	switch (mode) {
+	case MODE_10G_C2C:
+		args->speed = CGX_LINK_10G;
+		args->mode = BIT_ULL(CGX_MODE_10G_C2C_BIT);
+		break;
+	case MODE_10G_C2M:
+		args->speed = CGX_LINK_10G;
+		args->mode = BIT_ULL(CGX_MODE_10G_C2M_BIT);
+		break;
+	case MODE_10G_KR:
+		args->speed = CGX_LINK_10G;
+		args->mode = BIT_ULL(CGX_MODE_10G_KR_BIT);
+		args->an = 1;
+		break;
+	case MODE_25G_C2C:
+		args->speed = CGX_LINK_25G;
+		args->mode = BIT_ULL(CGX_MODE_25G_C2C_BIT);
+		break;
+	case MODE_25G_2_C2C:
+		args->speed = CGX_LINK_25G;
+		args->mode = BIT_ULL(CGX_MODE_25G_2_C2C_BIT);
+		break;
+	case MODE_50G_C2C:
+		args->speed = CGX_LINK_50G;
+		args->mode = BIT_ULL(CGX_MODE_50G_C2C_BIT);
+		break;
+	case MODE_50G_4_C2C:
+		args->speed = CGX_LINK_50G;
+		args->mode = BIT_ULL(CGX_MODE_50G_4_C2C_BIT);
+		break;
+	default:
+		/* don't leave speed/mode uninitialized for unknown modes */
+		args->speed = CGX_LINK_NONE;
+		args->mode = 0;
+		break;
+	}
+}
+
+/* Switch the interface to 'mode' (enum cgx_mode), then query and print
+ * the resulting link state (speed, FEC, AN, duplex)
+ */
+int cgx_intf_set_mode(struct udevice *ethdev, int mode)
+{
+	struct rvu_pf *rvu = dev_get_priv(ethdev);
+	struct nix *nix = rvu->nix;
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_MODE_CHANGE;
+
+	mode_to_args(mode, &cmd.mode_change_args);
+
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 0);
+	if (ret) {
+		printf("Mode change command failed for %s\n", ethdev->name);
+		return -1;
+	}
+
+	cmd.cmd.id = CGX_CMD_GET_LINK_STS;
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 1);
+	if (ret) {
+		printf("Get Link Status failed for %s\n", ethdev->name);
+		return -1;
+	}
+
+	printf("Current Link Status: ");
+	if (scr0.s.link_sts.speed) {
+		printf("%s\n", intf_speed_to_str[scr0.s.link_sts.speed]);
+		switch (scr0.s.link_sts.fec) {
+		case 0:
+			printf("FEC_NONE\n");
+			break;
+		case 1:
+			printf("FEC_BASE_R\n");
+			break;
+		case 2:
+			printf("FEC_RS\n");
+			break;
+		}
+		printf("Auto Negotiation %sabled\n",
+		       scr0.s.link_sts.an ? "En" : "Dis");
+		printf("%s Duplex\n",
+		       scr0.s.link_sts.full_duplex ? "Full" : "Half");
+	} else {
+		printf("Down\n");
+	}
+	return 0;
+}
+
+/* Print the current interface mode (cgx_mode_t) reported by firmware */
+int cgx_intf_get_mode(struct udevice *ethdev)
+{
+	struct rvu_pf *rvu = dev_get_priv(ethdev);
+	struct nix *nix = rvu->nix;
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_GET_LINK_STS;
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 1);
+	if (ret) {
+		printf("Get link status failed for %s\n", ethdev->name);
+		return -1;
+	}
+	printf("Current Interface Mode: ");
+	switch (scr0.s.link_sts.mode) {
+	case CGX_MODE_10G_C2C_BIT:
+		printf("10G_C2C\n");
+		break;
+	case CGX_MODE_10G_C2M_BIT:
+		printf("10G_C2M\n");
+		break;
+	case CGX_MODE_10G_KR_BIT:
+		printf("10G_KR\n");
+		break;
+	case CGX_MODE_25G_C2C_BIT:
+		printf("25G_C2C\n");
+		break;
+	case CGX_MODE_25G_2_C2C_BIT:
+		printf("25G_2_C2C\n");
+		break;
+	case CGX_MODE_50G_C2C_BIT:
+		printf("50G_C2C\n");
+		break;
+	case CGX_MODE_50G_4_C2C_BIT:
+		printf("50G_4_C2C\n");
+		break;
+	default:
+		printf("Unknown\n");
+		break;
+	}
+	return 0;
+}
+
+/* Print both the supported FEC types and the currently active FEC type */
+int cgx_intf_get_fec(struct udevice *ethdev)
+{
+	struct rvu_pf *rvu = dev_get_priv(ethdev);
+	struct nix *nix = rvu->nix;
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_GET_SUPPORTED_FEC;
+
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 1);
+	if (ret) {
+		printf("Get supported FEC failed for %s\n", ethdev->name);
+		return -1;
+	}
+
+	printf("Supported FEC type: ");
+	switch (scr0.s.supported_fec.fec) {
+	case 0:
+		printf("FEC_NONE\n");
+		break;
+	case 1:
+		printf("FEC_BASE_R\n");
+		break;
+	case 2:
+		printf("FEC_RS\n");
+		break;
+	case 3:
+		printf("FEC_BASE_R FEC_RS\n");
+		break;
+	}
+
+	cmd.cmd.id = CGX_CMD_GET_LINK_STS;
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 1);
+	if (ret) {
+		printf("Get active fec failed for %s\n", ethdev->name);
+		return -1;
+	}
+	printf("Active FEC type: ");
+	switch (scr0.s.link_sts.fec) {
+	case 0:
+		printf("FEC_NONE\n");
+		break;
+	case 1:
+		printf("FEC_BASE_R\n");
+		break;
+	case 2:
+		printf("FEC_RS\n");
+		break;
+	}
+	return 0;
+}
+
+/* Set the FEC type (see cgx_set_fec_args; 0=none, 1=BASE_R, 2=RS) */
+int cgx_intf_set_fec(struct udevice *ethdev, int type)
+{
+	struct rvu_pf *rvu = dev_get_priv(ethdev);
+	struct nix *nix = rvu->nix;
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_SET_FEC;
+	cmd.fec_args.fec = type;
+
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 0);
+	if (ret) {
+		printf("Set FEC type %d failed for %s\n", type, ethdev->name);
+		return -1;
+	}
+	return 0;
+}
+
+/* Print the line-side PHY modulation type (0=NRZ, 1=PAM4) */
+int cgx_intf_get_phy_mod_type(struct udevice *ethdev)
+{
+	struct rvu_pf *rvu = dev_get_priv(ethdev);
+	struct nix *nix = rvu->nix;
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_GET_PHY_MOD_TYPE;
+
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 1);
+	if (ret) {
+		printf("Get PHYMOD type failed for %s\n", ethdev->name);
+		return -1;
+	}
+	printf("Current phy mod type %s\n",
+	       scr0.s.phy_mod_type.mod ? "PAM4" : "NRZ");
+	return 0;
+}
+
+/* Set the line-side PHY modulation type (0=NRZ, 1=PAM4) */
+int cgx_intf_set_phy_mod_type(struct udevice *ethdev, int type)
+{
+	struct rvu_pf *rvu = dev_get_priv(ethdev);
+	struct nix *nix = rvu->nix;
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_SET_PHY_MOD_TYPE;
+	cmd.phy_mod_args.mod = type;
+
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 0);
+	if (ret) {
+		printf("Set PHYMOD type %d failed for %s\n", type,
+		       ethdev->name);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Enable/disable auto-negotiation loopback for this ethdev's LMAC */
+int cgx_intf_set_an_lbk(struct udevice *ethdev, int enable)
+{
+	struct rvu_pf *rvu = dev_get_priv(ethdev);
+	struct nix *nix = rvu->nix;
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+
+	cmd.cmd.id = CGX_CMD_AN_LOOPBACK;
+	cmd.cmd_args.enable = enable;
+
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 0);
+	if (ret) {
+		printf("Set AN loopback command failed on %s\n", ethdev->name);
+		return -1;
+	}
+	printf("AN loopback %s for %s\n", enable ? "set" : "clear",
+	       ethdev->name);
+
+	return 0;
+}
+
+/* Query the persist-settings-ignore flag. When ethdev is NULL, the
+ * explicit cgx/lmac indices are used instead of the device's own.
+ */
+int cgx_intf_get_ignore(struct udevice *ethdev, int cgx, int lmac)
+{
+	struct rvu_pf *rvu;
+	struct nix *nix;
+	union cgx_scratchx0 scr0;
+	int ret, cgx_id = cgx, lmac_id = lmac;
+	union cgx_cmd_s cmd;
+
+	if (ethdev) {
+		rvu = dev_get_priv(ethdev);
+		nix = rvu->nix;
+		cgx_id = nix->lmac->cgx->cgx_id;
+		lmac_id = nix->lmac->lmac_id;
+	}
+	cmd.cmd.id = CGX_CMD_GET_PERSIST_IGNORE;
+
+	ret = cgx_intf_req(cgx_id, lmac_id, cmd, &scr0.u, 1);
+	if (ret) {
+		if (ethdev)
+			printf("Get ignore command failed for %s\n",
+			       ethdev->name);
+		else
+			printf("Get ignore command failed for CGX%d LMAC%d\n",
+			       cgx_id, lmac_id);
+		return -1;
+	}
+	if (ethdev)
+		printf("Persist settings %signored for %s\n",
+		       scr0.s.persist.ignore ? "" : "not ", ethdev->name);
+	else
+		printf("Persist settings %signored for CGX%d LMAC%d\n",
+		       scr0.s.persist.ignore ? "" : "not ", cgx_id, lmac_id);
+
+	return 0;
+}
+
+/* Set/clear the persist-settings-ignore flag. When ethdev is NULL, the
+ * explicit cgx/lmac indices are used instead of the device's own.
+ */
+int cgx_intf_set_ignore(struct udevice *ethdev, int cgx, int lmac, int ignore)
+{
+	struct rvu_pf *rvu;
+	struct nix *nix;
+	union cgx_scratchx0 scr0;
+	int ret, cgx_id = cgx, lmac_id = lmac;
+	union cgx_cmd_s cmd;
+
+	if (ethdev) {
+		rvu = dev_get_priv(ethdev);
+		nix = rvu->nix;
+		cgx_id = nix->lmac->cgx->cgx_id;
+		lmac_id = nix->lmac->lmac_id;
+	}
+	cmd.cmd.id = CGX_CMD_SET_PERSIST_IGNORE;
+	cmd.persist_args.ignore = ignore;
+
+	ret = cgx_intf_req(cgx_id, lmac_id, cmd, &scr0.u, 0);
+	if (ret) {
+		if (ethdev)
+			printf("Set ignore command failed for %s\n",
+			       ethdev->name);
+		else
+			printf("Set ignore command failed for CGX%d LMAC%d\n",
+			       cgx_id, lmac_id);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Push this device's MAC address to firmware (CGX_CMD_SET_MAC_ADDR) */
+int cgx_intf_set_macaddr(struct udevice *ethdev)
+{
+	struct rvu_pf *rvu = dev_get_priv(ethdev);
+	struct nix *nix = rvu->nix;
+	union cgx_scratchx0 scr0;
+	int ret;
+	union cgx_cmd_s cmd;
+	u64 mac, tmp;
+
+	/* byte-swap the 6-byte address into a 48-bit integer argument */
+	memcpy((void *)&tmp, nix->lmac->mac_addr, 6);
+	mac = swab64(tmp) >> 16;
+	cmd.cmd.id = CGX_CMD_SET_MAC_ADDR;
+	cmd.mac_args.addr = mac;
+	cmd.mac_args.pf_id = rvu->pfid;
+
+	ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
+			   cmd, &scr0.u, 0);
+	if (ret) {
+		printf("Set user mac addr failed for %s\n", ethdev->name);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Ask firmware to display the serdes eye diagram for qlm/lane */
+int cgx_intf_display_eye(u8 qlm, u8 lane)
+{
+	union cgx_cmd_s cmd;
+	union cgx_scratchx0 scr0;
+
+	cmd.cmd.id = CGX_CMD_DISPLAY_EYE;
+	cmd.dsp_eye_args.qlm = qlm;
+	cmd.dsp_eye_args.lane = lane;
+
+	return cgx_intf_req(0, 0, cmd, &scr0.u, 0) ? -1 : 0;
+}
+
+/* Ask firmware to display serdes state for qlm/lane */
+int cgx_intf_display_serdes(u8 qlm, u8 lane)
+{
+	union cgx_cmd_s cmd;
+	union cgx_scratchx0 scr0;
+
+	cmd.cmd.id = CGX_CMD_DISPLAY_SERDES;
+	cmd.dsp_eye_args.qlm = qlm;
+	cmd.dsp_eye_args.lane = lane;
+
+	return cgx_intf_req(0, 0, cmd, &scr0.u, 0) ? -1 : 0;
+}
diff --git a/drivers/net/octeontx2/cgx_intf.h b/drivers/net/octeontx2/cgx_intf.h
new file mode 100644
index 0000000..62a7203
--- /dev/null
+++ b/drivers/net/octeontx2/cgx_intf.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __CGX_INTF_H__
+#define __CGX_INTF_H__
+
+#define CGX_FIRMWARE_MAJOR_VER		1
+#define CGX_FIRMWARE_MINOR_VER		0
+
+/* Register offsets */
+#define CGX_CMR_INT		0x87e0e0000040
+#define CGX_CMR_SCRATCH0	0x87e0e0001050
+#define CGX_CMR_SCRATCH1	0x87e0e0001058
+
+#define CGX_SHIFT(x)		(0x1000000 * ((x) & 0x3))
+#define CMR_SHIFT(x)		(0x40000 * ((x) & 0x3))
+
+/* CGX error types. set for cmd response status as CGX_STAT_FAIL */
+enum cgx_error_type {
+	CGX_ERR_NONE,
+	CGX_ERR_LMAC_NOT_ENABLED,
+	CGX_ERR_LMAC_MODE_INVALID,
+	CGX_ERR_REQUEST_ID_INVALID,
+	CGX_ERR_PREV_ACK_NOT_CLEAR,
+	CGX_ERR_PHY_LINK_DOWN,
+	CGX_ERR_PCS_RESET_FAIL,
+	CGX_ERR_AN_CPT_FAIL,
+	CGX_ERR_TX_NOT_IDLE,
+	CGX_ERR_RX_NOT_IDLE,
+	CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
+	CGX_ERR_SPUX_RX_ALIGN_FAIL,
+	CGX_ERR_SPUX_TX_FAULT,
+	CGX_ERR_SPUX_RX_FAULT,
+	CGX_ERR_SPUX_RESET_FAIL,
+	CGX_ERR_SPUX_AN_RESET_FAIL,
+	CGX_ERR_SPUX_USX_AN_RESET_FAIL,
+	CGX_ERR_SMUX_RX_LINK_NOT_OK,
+	CGX_ERR_PCS_LINK_FAIL,
+	CGX_ERR_TRAINING_FAIL,
+	CGX_ERR_RX_EQU_FAIL,
+	CGX_ERR_SPUX_BER_FAIL,
+	CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
+	CGX_ERR_SPUX_MARKER_LOCK_FAIL,
+	CGX_ERR_SET_FEC_INVALID,
+	CGX_ERR_SET_FEC_FAIL,
+	CGX_ERR_MODULE_INVALID,
+	CGX_ERR_MODULE_NOT_PRESENT,
+	CGX_ERR_SPEED_CHANGE_INVALID,	/* = 28 */
+	/* FIXME : add more error types when adding support for new modes */
+};
+
+/* LINK speed types */
+enum cgx_link_speed {
+	CGX_LINK_NONE,
+	CGX_LINK_10M,
+	CGX_LINK_100M,
+	CGX_LINK_1G,
+	CGX_LINK_2HG,	/* 2.5 Gbps */
+	CGX_LINK_5G,
+	CGX_LINK_10G,
+	CGX_LINK_20G,
+	CGX_LINK_25G,
+	CGX_LINK_40G,
+	CGX_LINK_50G,
+	CGX_LINK_80G,
+	CGX_LINK_100G,
+	CGX_LINK_MAX,
+};
+
+/* REQUEST ID types. Input to firmware */
+enum cgx_cmd_id {
+	CGX_CMD_NONE = 0,
+	CGX_CMD_GET_FW_VER,
+	CGX_CMD_GET_MAC_ADDR,
+	CGX_CMD_SET_MTU,
+	CGX_CMD_GET_LINK_STS,		/* optional to user */
+	CGX_CMD_LINK_BRING_UP,		/* = 5 */
+	CGX_CMD_LINK_BRING_DOWN,
+	CGX_CMD_INTERNAL_LBK,
+	CGX_CMD_EXTERNAL_LBK,
+	CGX_CMD_HIGIG,
+	CGX_CMD_LINK_STAT_CHANGE,	/* = 10 */
+	CGX_CMD_MODE_CHANGE,		/* hot plug support */
+	CGX_CMD_INTF_SHUTDOWN,
+	CGX_CMD_GET_MKEX_SIZE,
+	CGX_CMD_GET_MKEX_PROFILE,
+	CGX_CMD_GET_FWD_BASE,		/* get base address of shared FW data */
+	CGX_CMD_GET_LINK_MODES,		/* Supported Link Modes */
+	CGX_CMD_SET_LINK_MODE,
+	CGX_CMD_GET_SUPPORTED_FEC,
+	CGX_CMD_SET_FEC,
+	CGX_CMD_GET_AN,			/* = 20 */
+	CGX_CMD_SET_AN,
+	CGX_CMD_GET_ADV_LINK_MODES,
+	CGX_CMD_GET_ADV_FEC,
+	CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
+	CGX_CMD_SET_PHY_MOD_TYPE,	/* = 25 */
+	CGX_CMD_PRBS,
+	CGX_CMD_DISPLAY_EYE,
+	CGX_CMD_GET_PHY_FEC_STATS,
+	CGX_CMD_DISPLAY_SERDES,
+	CGX_CMD_AN_LOOPBACK,	/* = 30 */
+	CGX_CMD_GET_PERSIST_IGNORE,
+	CGX_CMD_SET_PERSIST_IGNORE,
+	CGX_CMD_SET_MAC_ADDR,
+};
+
+/* async event ids */
+enum cgx_evt_id {
+	CGX_EVT_NONE,
+	CGX_EVT_LINK_CHANGE,
+};
+
+/* event types - cause of interrupt */
+enum cgx_evt_type {
+	CGX_EVT_ASYNC,
+	CGX_EVT_CMD_RESP
+};
+
+enum cgx_stat {
+	CGX_STAT_SUCCESS,
+	CGX_STAT_FAIL
+};
+
+enum cgx_cmd_own {
+	/* default ownership with kernel/uefi/u-boot */
+	CGX_OWN_NON_SECURE_SW,
+	/* set by kernel/uefi/u-boot after posting a new request to ATF */
+	CGX_OWN_FIRMWARE,
+};
+
+/* Supported LINK MODE enums
+ * Each link mode is a bit mask of these
+ * enums which are represented as bits
+ */
+enum cgx_mode_t {
+	CGX_MODE_SGMII_BIT = 0,
+	CGX_MODE_1000_BASEX_BIT,
+	CGX_MODE_QSGMII_BIT,
+	CGX_MODE_10G_C2C_BIT,
+	CGX_MODE_10G_C2M_BIT,
+	CGX_MODE_10G_KR_BIT,
+	CGX_MODE_20G_C2C_BIT,
+	CGX_MODE_25G_C2C_BIT,
+	CGX_MODE_25G_C2M_BIT,
+	CGX_MODE_25G_2_C2C_BIT,
+	CGX_MODE_25G_CR_BIT,
+	CGX_MODE_25G_KR_BIT,
+	CGX_MODE_40G_C2C_BIT,
+	CGX_MODE_40G_C2M_BIT,
+	CGX_MODE_40G_CR4_BIT,
+	CGX_MODE_40G_KR4_BIT,
+	CGX_MODE_40GAUI_C2C_BIT,
+	CGX_MODE_50G_C2C_BIT,
+	CGX_MODE_50G_C2M_BIT,
+	CGX_MODE_50G_4_C2C_BIT,
+	CGX_MODE_50G_CR_BIT,
+	CGX_MODE_50G_KR_BIT,
+	CGX_MODE_80GAUI_C2C_BIT,
+	CGX_MODE_100G_C2C_BIT,
+	CGX_MODE_100G_C2M_BIT,
+	CGX_MODE_100G_CR4_BIT,
+	CGX_MODE_100G_KR4_BIT,
+	CGX_MODE_MAX_BIT /* = 29 */
+};
+
+/* scratchx(0) CSR used for ATF->non-secure SW communication.
+ * This acts as the status register
+ * Provides details on command ack/status, link status, error details
+ */
+
+/* CAUTION : below structures are placed in order based on the bit positions
+ * For any updates/new bitfields, corresponding structures needs to be updated
+ */
+/* Fixed event-status field present in every SCRATCHX(0) response */
+struct cgx_evt_sts_s {			/* start from bit 0 */
+	u64 ack:1;		/* response/event posted, pending consumer ack */
+	u64 evt_type:1;		/* cgx_evt_type */
+	u64 stat:1;		/* cgx_stat */
+	u64 id:6;			/* cgx_evt_id/cgx_cmd_id */
+	u64 reserved:55;
+};
+
+/* all the below structures are in the same memory location of SCRATCHX(0)
+ * value can be read/written based on command ID
+ */
+
+/* Resp to command IDs with command status as CGX_STAT_FAIL
+ * Not applicable for commands :
+ *	CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
+ *	check struct cgx_lnk_sts_s comments
+ */
+struct cgx_err_sts_s {			/* start from bit 9 */
+	u64 reserved1:9;
+	u64 type:10;		/* cgx_error_type */
+	u64 reserved2:35;
+};
+
+/* Resp to cmd ID as CGX_CMD_GET_FW_VER with cmd status as CGX_STAT_SUCCESS */
+struct cgx_ver_s {			/* start from bit 9 */
+	u64 reserved1:9;
+	u64 major_ver:4;
+	u64 minor_ver:4;
+	u64 reserved2:47;
+};
+
+/* Resp to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as CGX_STAT_SUCCESS
+ * Returns each byte of MAC address in a separate bit field
+ */
+struct cgx_mac_addr_s {			/* start from bit 9 */
+	u64 reserved1:9;
+	u64 addr_0:8;
+	u64 addr_1:8;
+	u64 addr_2:8;
+	u64 addr_3:8;
+	u64 addr_4:8;
+	u64 addr_5:8;
+	u64 reserved2:7;
+};
+
+/* Resp to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
+ * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+ * In case of CGX_STAT_FAIL, it indicates CGX configuration failed when
+ * processing link up/down/change command. Both err_type and current link status
+ * will be updated
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts_s {
+	u64 reserved1:9;	/* overlays cgx_evt_sts_s; payload at bit 9 */
+	u64 link_up:1;
+	u64 full_duplex:1;
+	u64 speed:4;	/* cgx_link_speed */
+	u64 err_type:10;
+	u64 an:1;		/* Current AN state : enabled/disabled */
+	u64 fec:2;		/* Current FEC type if enabled, if not 0 */
+	u64 port:8;	/* Share the current port info if required */
+	u64 mode:8;	/* cgx_mode_t enum integer value */
+	u64 reserved2:20;
+};
+
+struct sh_fwd_base_s {
+	u64 reserved1:9;
+	u64 addr:55;
+};
+
+struct cgx_link_modes_s {
+	u64 reserved1:9;
+	u64 modes:55;
+};
+
+/* Resp to cmd ID - CGX_CMD_GET_ADV_FEC/CGX_CMD_GET_SUPPORTED_FEC
+ * fec : 2 bits
+ * typedef enum cgx_fec_type {
+ *     CGX_FEC_NONE,
+ *     CGX_FEC_BASE_R,
+ *     CGX_FEC_RS
+ * } fec_type_t;
+ */
+struct cgx_fec_types_s {
+	u64 reserved1:9;
+	u64 fec:2;
+	u64 reserved2:53;
+};
+
+/* Resp to cmd ID - CGX_CMD_GET_AN */
+struct cgx_get_an_s {
+	u64 reserved1:9;
+	u64 an:1;
+	u64 reserved2:54;
+};
+
+/* Resp to cmd ID - CGX_CMD_GET_PHY_MOD_TYPE */
+struct cgx_get_phy_mod_type_s {
+	u64 reserved1:9;
+	u64 mod:1;		/* 0=NRZ, 1=PAM4 */
+	u64 reserved2:54;
+};
+
+/* Resp to cmd ID - CGX_CMD_GET_PERSIST_IGNORE */
+struct cgx_get_flash_ignore_s {
+	uint64_t reserved1:9;
+	uint64_t ignore:1;
+	uint64_t reserved2:54;
+};
+
+union cgx_rsp_sts {
+	/* Fixed, applicable for all commands/events */
+	struct cgx_evt_sts_s evt_sts;
+	/* response to CGX_CMD_LINK_BRINGUP/DOWN/LINK_CHANGE */
+	struct cgx_lnk_sts_s link_sts;
+	/* response to CGX_CMD_GET_FW_VER */
+	struct cgx_ver_s ver;
+	/* response to CGX_CMD_GET_MAC_ADDR */
+	struct cgx_mac_addr_s mac_s;
+	/* response to CGX_CMD_GET_FWD_BASE */
+	struct sh_fwd_base_s fwd_base_s;
+	/* response if evt_status = CMD_FAIL */
+	struct cgx_err_sts_s err;
+	/* response to CGX_CMD_GET_SUPPORTED_FEC */
+	struct cgx_fec_types_s supported_fec;
+	/* response to CGX_CMD_GET_LINK_MODES */
+	struct cgx_link_modes_s supported_modes;
+	/* response to CGX_CMD_GET_ADV_LINK_MODES */
+	struct cgx_link_modes_s adv_modes;
+	/* response to CGX_CMD_GET_ADV_FEC */
+	struct cgx_fec_types_s adv_fec;
+	/* response to CGX_CMD_GET_AN */
+	struct cgx_get_an_s an;
+	/* response to CGX_CMD_GET_PHY_MOD_TYPE */
+	struct cgx_get_phy_mod_type_s phy_mod_type;
+	/* response to CGX_CMD_GET_PERSIST_IGNORE */
+	struct cgx_get_flash_ignore_s persist;
+#ifdef NT_FW_CONFIG
+	/* response to CGX_CMD_GET_MKEX_SIZE */
+	struct cgx_mcam_profile_sz_s prfl_sz;
+	/* response to CGX_CMD_GET_MKEX_PROFILE */
+	struct cgx_mcam_profile_addr_s prfl_addr;
+#endif
+};
+
+union cgx_scratchx0 {
+	u64 u;
+	union cgx_rsp_sts s;
+};
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+struct cgx_cmd {			/* start from bit 2 */
+	u64 reserved1:2;
+	u64 id:6;			/* cgx_request_id */
+	u64 reserved2:56;
+};
+
+/* all the below structures are in the same memory location of SCRATCHX(1)
+ * corresponding arguments for command Id needs to be updated
+ */
+
+/* Any command using enable/disable as an argument need
+ * to pass the option via this structure.
+ * Ex: Loopback, HiGig...
+ */
+struct cgx_ctl_args {			/* start from bit 8 */
+	u64 reserved1:8;
+	u64 enable:1;
+	u64 reserved2:55;
+};
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+struct cgx_mtu_args {
+	u64 reserved1:8;
+	u64 size:16;
+	u64 reserved2:40;
+};
+
+/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
+struct cgx_mode_change_args {
+	uint64_t reserved1:8;
+	uint64_t speed:4; /* cgx_link_speed enum */
+	uint64_t duplex:1; /* 0 - full duplex, 1 - half duplex */
+	uint64_t an:1;	/* 0 - disable AN, 1 - enable AN */
+	uint64_t port:8; /* device port */
+	uint64_t mode:42;
+};
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
+struct cgx_link_change_args {		/* start from bit 8 */
+	u64 reserved1:8;
+	u64 link_up:1;
+	u64 full_duplex:1;
+	u64 speed:4;		/* cgx_link_speed */
+	u64 reserved2:50;
+};
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_LINK_MODE */
+struct cgx_set_mode_args {
+	u64 reserved1:8;
+	u64 mode:56;
+};
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_FEC */
+struct cgx_set_fec_args {
+	u64 reserved1:8;
+	u64 fec:2;
+	u64 reserved2:54;
+};
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_PHY_MOD_TYPE */
+struct cgx_set_phy_mod_args {
+	u64 reserved1:8;
+	u64 mod:1;		/* 0=NRZ, 1=PAM4 */
+	u64 reserved2:55;
+};
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_PERSIST_IGNORE */
+struct cgx_set_flash_ignore_args {
+	uint64_t reserved1:8;
+	uint64_t ignore:1;
+	uint64_t reserved2:55;
+};
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MAC_ADDR */
+struct cgx_mac_addr_args {
+	uint64_t reserved1:8;
+	uint64_t addr:48;
+	uint64_t pf_id:8;
+};
+
+struct cgx_prbs_args {
+	u64 reserved1:8; /* start from bit 8 */
+	u64 lane:8;
+	u64 qlm:8;
+	u64 stop_on_error:1;
+	u64 mode:8;
+	u64 time:31;
+};
+
+struct cgx_display_eye_args {
+	u64 reserved1:8; /* start from bit 8 */
+	u64 qlm:8;
+	u64 lane:47;
+};
+
+union cgx_cmd_s {
+	u64 own_status:2;			/* cgx_cmd_own */
+	struct cgx_cmd cmd;
+	struct cgx_ctl_args cmd_args;
+	struct cgx_mtu_args mtu_size;
+	struct cgx_link_change_args lnk_args; /* Input to CGX_CMD_LINK_CHANGE */
+	struct cgx_set_mode_args mode_args;
+	struct cgx_mode_change_args mode_change_args;
+	struct cgx_set_fec_args fec_args;
+	struct cgx_set_phy_mod_args phy_mod_args;
+	struct cgx_set_flash_ignore_args persist_args;
+	struct cgx_mac_addr_args mac_args;
+	/* any other arg for command id * like : mtu, dmac filtering control */
+	struct cgx_prbs_args prbs_args;
+	struct cgx_display_eye_args dsp_eye_args;
+};
+
+union cgx_scratchx1 {
+	u64 u;
+	union cgx_cmd_s s;
+};
+
+#endif /* __CGX_INTF_H__ */
diff --git a/drivers/net/octeontx2/lmt.h b/drivers/net/octeontx2/lmt.h
new file mode 100644
index 0000000..84a7eab
--- /dev/null
+++ b/drivers/net/octeontx2/lmt.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations.  This should only be used when there are no memory operation
+ * ordering constraints.  (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr    address in memory to add incr to
+ * @param incr   amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline s64 atomic_fetch_and_add64_nosync(s64 *ptr, s64 incr)
+{
+	s64 result;
+	/* Atomic add with no ordering (ARMv8.1 LSE LDADD, no acq/rel) */
+	asm volatile("ldadd %x[i], %x[r], [%[b]]"
+		     : [r] "=r" (result), "+m" (*ptr)
+		     : [i] "r" (incr), [b] "r" (ptr)
+		     : "memory");
+	return result;
+}
+
+/* Cancel any pending LMT store by writing the LF's LMTCANCEL CSR */
+static inline void lmt_cancel(const struct nix *nix)
+{
+	writeq(0, nix->lmt_base + LMT_LF_LMTCANCEL());
+}
+
+/* Return a CPU pointer to LMT line 0 of this NIX LF's LMT region */
+static inline u64 *lmt_store_ptr(struct nix *nix)
+{
+	u8 *line = (u8 *)(nix->lmt_base) + LMT_LF_LMTLINEX(0);
+
+	return (u64 *)line;
+}
+
+/* Submit the prepared LMT line to 'io_address' via LDEOR (ARMv8.1 LSE
+ * atomic); the returned value reflects the hardware's submit result
+ */
+static inline s64 lmt_submit(u64 io_address)
+{
+	s64 result = 0;
+
+	asm volatile("ldeor xzr, %x[rf],[%[rs]]"
+			: [rf] "=r"(result) : [rs] "r"(io_address));
+	return result;
+}
diff --git a/drivers/net/octeontx2/nix.c b/drivers/net/octeontx2/nix.c
new file mode 100644
index 0000000..0a3e8e4
--- /dev/null
+++ b/drivers/net/octeontx2/nix.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <misc.h>
+#include <net.h>
+#include <pci.h>
+#include <watchdog.h>
+
+#include <asm/arch/board.h>
+#include <asm/arch/csrs/csrs-lmt.h>
+#include <asm/io.h>
+#include <asm/types.h>
+
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+
+#include "nix.h"
+#include "lmt.h"
+#include "cgx.h"
+
+/**
+ * Cache-line aligned, zero-filled allocation helper for the many NIX
+ * memory areas.  On failure it prints a diagnostic (tagged with @msg)
+ * so callers do not have to repeat the error reporting.
+ *
+ * @param num_elements Number of elements to allocate
+ * @param elem_size    Size of each element in bytes
+ * @param msg          Label used in diagnostics (NULL -> function name)
+ *
+ * @return zeroed, aligned memory on success, NULL on failure
+ */
+static void *nix_memalloc(int num_elements, size_t elem_size, const char *msg)
+{
+	const char *what = msg ? msg : __func__;
+	size_t total = num_elements * elem_size;
+	void *mem = memalign(CONFIG_SYS_CACHELINE_SIZE, total);
+
+	if (mem)
+		memset(mem, 0, total);
+	else
+		printf("NIX: Mem alloc failed for %s (%d * %zu = %zu bytes)\n",
+		       what, num_elements, elem_size, total);
+
+	debug("NIX: Memory alloc for %s (%d * %zu = %zu bytes) at %p\n",
+	      what, num_elements, elem_size, total, mem);
+	return mem;
+}
+
+/* Bring up the NPC (parser/classifier) side of this LF via the AF.
+ * Returns 0 on success or the admin-setup error code.
+ */
+int npc_lf_setup(struct nix *nix)
+{
+	int ret = npc_lf_admin_setup(nix);
+
+	if (ret)
+		printf("%s: Error setting up npc lf admin\n", __func__);
+
+	return ret;
+}
+
+/**
+ * Populate one NPA aura with freshly allocated, cache-line aligned
+ * buffers.
+ *
+ * @param npa          NPA LF state (register base used for the free op)
+ * @param pool_id      aura/pool index the buffers are freed into
+ * @param buffer_size  size of each buffer in bytes
+ * @param queue_length number of buffers to allocate
+ * @param buffers      caller-owned array recording each allocation so it
+ *                     can be released in npa_lf_shutdown()
+ *
+ * @return 0 on success, -ENOMEM if an allocation fails (buffers already
+ *         handed to the aura stay there)
+ */
+static int npa_setup_pool(struct npa *npa, u32 pool_id,
+			  size_t buffer_size, u32 queue_length, void *buffers[])
+{
+	struct {
+		union npa_lf_aura_op_free0 f0;
+		union npa_lf_aura_op_free1 f1;
+	} aura_descr;
+	int index;
+
+	for (index = 0; index < queue_length; index++) {
+		buffers[index] = memalign(CONFIG_SYS_CACHELINE_SIZE,
+					  buffer_size);
+		if (!buffers[index]) {
+			printf("%s: Out of memory %d, size: %zu\n",
+			       __func__, index, buffer_size);
+			return -ENOMEM;
+		}
+		debug("%s: allocating buffer %d, addr %p size: %zu\n",
+		      __func__, index, buffers[index], buffer_size);
+
+		/* Add the newly obtained pointer to the pool.  128 bit
+		 * writes only.
+		 */
+		aura_descr.f0.s.addr = (u64)buffers[index];
+		aura_descr.f1.u = 0;
+		aura_descr.f1.s.aura = pool_id;
+		st128(npa->npa_base + NPA_LF_AURA_OP_FREE0(),
+		      aura_descr.f0.u, aura_descr.f1.u);
+	}
+
+	return 0;
+}
+
+/**
+ * Allocate and program this LF's NPA state: aura and pool contexts,
+ * pool stacks, and the buffers backing the RX, TX and SQB pools.
+ *
+ * NOTE(review): on the error paths below, earlier allocations (npa,
+ * aura_ctx, pool contexts/stacks) are not released.  Presumably
+ * acceptable for one-shot boot-time init — confirm.
+ *
+ * @return 0 on success, -ENOMEM or an admin-queue error code otherwise
+ */
+int npa_lf_setup(struct nix *nix)
+{
+	struct rvu_pf *rvu = dev_get_priv(nix->dev);
+	struct nix_af *nix_af = nix->nix_af;
+	struct npa *npa;
+	union npa_af_const npa_af_const;
+	union npa_aura_s *aura;
+	union npa_pool_s *pool;
+	union rvu_func_addr_s block_addr;
+	int idx;
+	int stack_page_pointers;
+	int stack_page_bytes;
+	int err;
+
+	npa = (struct npa *)calloc(1, sizeof(struct npa));
+	if (!npa) {
+		printf("%s: out of memory for npa instance\n", __func__);
+		return -ENOMEM;
+	}
+	/* The NPA block's registers live at a fixed offset in the PF BAR */
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
+	npa->npa_base = rvu->pf_base + block_addr.u;
+	npa->npa_af = nix_af->npa_af;
+	nix->npa = npa;
+
+	/* Stack-page geometry is fixed by hardware (NPA_AF_CONST) */
+	npa_af_const.u = npa_af_reg_read(npa->npa_af, NPA_AF_CONST());
+	stack_page_pointers = npa_af_const.s.stack_page_ptrs;
+	stack_page_bytes = npa_af_const.s.stack_page_bytes;
+
+	/* ceil(queue length / pointers-per-page) stack pages per pool */
+	npa->stack_pages[NPA_POOL_RX] = (RQ_QLEN + stack_page_pointers - 1) /
+							stack_page_pointers;
+	npa->stack_pages[NPA_POOL_TX] = (SQ_QLEN + stack_page_pointers - 1) /
+							stack_page_pointers;
+	npa->stack_pages[NPA_POOL_SQB] = (SQB_QLEN + stack_page_pointers - 1) /
+							stack_page_pointers;
+	npa->pool_stack_pointers = stack_page_pointers;
+
+	npa->q_len[NPA_POOL_RX] = RQ_QLEN;
+	npa->q_len[NPA_POOL_TX] = SQ_QLEN;
+	npa->q_len[NPA_POOL_SQB] = SQB_QLEN;
+
+	/* Packet pools get MTU plus one cache line of headroom */
+	npa->buf_size[NPA_POOL_RX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
+	npa->buf_size[NPA_POOL_TX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
+	npa->buf_size[NPA_POOL_SQB] = nix_af->sqb_size;
+
+	npa->aura_ctx = nix_memalloc(NPA_POOL_COUNT,
+				     sizeof(union npa_aura_s),
+				     "aura context");
+	if (!npa->aura_ctx) {
+		printf("%s: Out of memory for aura context\n", __func__);
+		return -ENOMEM;
+	}
+
+	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
+		npa->pool_ctx[idx] = nix_memalloc(1,
+						  sizeof(union npa_pool_s),
+						  "pool context");
+		if (!npa->pool_ctx[idx]) {
+			printf("%s: Out of memory for pool context\n",
+			       __func__);
+			return -ENOMEM;
+		}
+		npa->pool_stack[idx] = nix_memalloc(npa->stack_pages[idx],
+						    stack_page_bytes,
+						    "pool stack");
+		if (!npa->pool_stack[idx]) {
+			printf("%s: Out of memory for pool stack\n", __func__);
+			return -ENOMEM;
+		}
+	}
+
+	err = npa_lf_admin_setup(npa, nix->lf, (dma_addr_t)npa->aura_ctx);
+	if (err) {
+		printf("%s: Error setting up NPA LF admin for lf %d\n",
+		       __func__, nix->lf);
+		return err;
+	}
+
+	/* Set up the auras */
+	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
+		aura = npa->aura_ctx + (idx * sizeof(union npa_aura_s));
+		pool = npa->pool_ctx[idx];
+		debug("%s aura %p pool %p\n", __func__, aura, pool);
+		memset(aura, 0, sizeof(union npa_aura_s));
+		aura->s.fc_ena = 0;
+		/* Aura points at its backing pool context */
+		aura->s.pool_addr = (u64)npa->pool_ctx[idx];
+		debug("%s aura.s.pool_addr %llx pool_addr %p\n", __func__,
+		      aura->s.pool_addr, npa->pool_ctx[idx]);
+		aura->s.shift = 64 - __builtin_clzll(npa->q_len[idx]) - 8;
+		aura->s.count = npa->q_len[idx];
+		aura->s.limit = npa->q_len[idx];
+		aura->s.ena = 1;
+		err = npa_attach_aura(nix_af, nix->lf, aura, idx);
+		if (err)
+			return err;
+
+		memset(pool, 0, sizeof(*pool));
+		pool->s.fc_ena = 0;
+		pool->s.nat_align = 1;
+		pool->s.stack_base = (u64)(npa->pool_stack[idx]);
+		debug("%s pool.s.stack_base %llx stack_base %p\n", __func__,
+		      pool->s.stack_base, npa->pool_stack[idx]);
+		/* buf_size is encoded in cache-line units */
+		pool->s.buf_size =
+			npa->buf_size[idx] / CONFIG_SYS_CACHELINE_SIZE;
+		pool->s.stack_max_pages = npa->stack_pages[idx];
+		pool->s.shift =
+			64 - __builtin_clzll(npa->pool_stack_pointers) - 8;
+		pool->s.ptr_start = 0;
+		/* Accept any pointer within the 40-bit DMA address range */
+		pool->s.ptr_end = (1ULL << 40) -  1;
+		pool->s.ena = 1;
+		err = npa_attach_pool(nix_af, nix->lf, pool, idx);
+		if (err)
+			return err;
+	}
+
+	/* Host-side arrays tracking every buffer freed into each pool */
+	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
+		npa->buffers[idx] = nix_memalloc(npa->q_len[idx],
+						 sizeof(void *),
+						 "buffers");
+		if (!npa->buffers[idx]) {
+			printf("%s: Out of memory\n", __func__);
+			return -ENOMEM;
+		}
+	}
+
+	/* Finally fill each pool with its buffers */
+	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
+		err = npa_setup_pool(npa, idx, npa->buf_size[idx],
+				     npa->q_len[idx], npa->buffers[idx]);
+		if (err) {
+			printf("%s: Error setting up pool %d\n",
+			       __func__, idx);
+			return err;
+		}
+	}
+	return 0;
+}
+
+/* Tear down this LF's NPA state: disable the pools via the AF, then
+ * release every host allocation made in npa_lf_setup().
+ * Returns 0 on success or the admin-shutdown error code.
+ */
+int npa_lf_shutdown(struct nix *nix)
+{
+	struct npa *npa = nix->npa;
+	int rc, idx;
+
+	rc = npa_lf_admin_shutdown(nix->nix_af, nix->lf, NPA_POOL_COUNT);
+	if (rc) {
+		printf("%s: Error %d shutting down NPA LF admin\n",
+		       __func__, rc);
+		return rc;
+	}
+
+	free(npa->aura_ctx);
+	npa->aura_ctx = NULL;
+
+	for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
+		free(npa->pool_ctx[idx]);
+		npa->pool_ctx[idx] = NULL;
+		free(npa->pool_stack[idx]);
+		npa->pool_stack[idx] = NULL;
+		free(npa->buffers[idx]);
+		npa->buffers[idx] = NULL;
+	}
+
+	return 0;
+}
+
+/**
+ * Allocate every HW context area this NIX LF needs (RQ/SQ/CQ contexts,
+ * CQ rings, Qint/Cint contexts, RSS table) and hand them to the AF.
+ *
+ * All areas come from nix_memalloc(), which already zero-fills the
+ * whole allocation, so no extra memset is needed (the previous per-area
+ * memsets also used wrong sizes: one element instead of count * size).
+ *
+ * @return 0 on success, a negative error code otherwise; on failure
+ *         every partial allocation is released (free(NULL) is a no-op)
+ */
+int nix_lf_setup(struct nix *nix)
+{
+	struct nix_af *nix_af = nix->nix_af;
+	int idx;
+	int err = -1;
+
+	/* Alloc NIX RQ HW context memory */
+	nix->rq_ctx_base = nix_memalloc(nix->rq_cnt, nix_af->rq_ctx_sz,
+					"RQ CTX");
+	if (!nix->rq_ctx_base)
+		goto error;
+
+	/* Alloc NIX SQ HW context memory */
+	nix->sq_ctx_base = nix_memalloc(nix->sq_cnt, nix_af->sq_ctx_sz,
+					"SQ CTX");
+	if (!nix->sq_ctx_base)
+		goto error;
+
+	/* Alloc NIX CQ HW context memory */
+	nix->cq_ctx_base = nix_memalloc(nix->cq_cnt, nix_af->cq_ctx_sz,
+					"CQ CTX");
+	if (!nix->cq_ctx_base)
+		goto error;
+
+	/* Alloc NIX CQ Ring memory */
+	for (idx = 0; idx < NIX_CQ_COUNT; idx++) {
+		err = qmem_alloc(&nix->cq[idx], CQ_ENTRIES, CQ_ENTRY_SIZE);
+		if (err)
+			goto error;
+	}
+
+	/* Alloc memory for Qints HW contexts */
+	nix->qint_base = nix_memalloc(nix_af->qints, nix_af->qint_ctx_sz,
+				      "Qint CTX");
+	if (!nix->qint_base)
+		goto error;
+	/* Alloc memory for CQints HW contexts */
+	nix->cint_base = nix_memalloc(nix_af->cints, nix_af->cint_ctx_sz,
+				      "Cint CTX");
+	if (!nix->cint_base)
+		goto error;
+	/* Alloc NIX RSS HW context memory and config the base */
+	nix->rss_base = nix_memalloc(nix->rss_grps, nix_af->rsse_ctx_sz,
+				     "RSS CTX");
+	if (!nix->rss_base)
+		goto error;
+
+	err = nix_lf_admin_setup(nix);
+	if (err) {
+		printf("%s: Error setting up LF\n", __func__);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	/* Each area is freed exactly once (the old path freed rq_ctx_base
+	 * twice and leaked the qint/cint/RSS areas).
+	 */
+	free(nix->rq_ctx_base);
+	nix->rq_ctx_base = NULL;
+	free(nix->sq_ctx_base);
+	nix->sq_ctx_base = NULL;
+	free(nix->cq_ctx_base);
+	nix->cq_ctx_base = NULL;
+	free(nix->qint_base);
+	nix->qint_base = NULL;
+	free(nix->cint_base);
+	nix->cint_base = NULL;
+	free(nix->rss_base);
+	nix->rss_base = NULL;
+
+	for (idx = 0; idx < NIX_CQ_COUNT; idx++)
+		qmem_free(&nix->cq[idx]);
+
+	return err;
+}
+
+/**
+ * Tear down this NIX LF: shut the queues down via the AF, then release
+ * every HW context area allocated in nix_lf_setup().
+ *
+ * @return 0 on success, the admin-shutdown error code otherwise (in
+ *         which case no memory is released)
+ */
+int nix_lf_shutdown(struct nix *nix)
+{
+	struct nix_af *nix_af = nix->nix_af;
+	int index;
+	int err;
+
+	err = nix_lf_admin_shutdown(nix_af, nix->lf, nix->cq_cnt,
+				    nix->rq_cnt, nix->sq_cnt);
+	if (err) {
+		printf("%s: Error shutting down LF admin\n", __func__);
+		return err;
+	}
+
+	/* free(NULL) is a no-op, so no guards are needed.  The old code
+	 * freed rq_ctx_base twice and leaked the qint/cint/RSS areas.
+	 */
+	free(nix->rq_ctx_base);
+	nix->rq_ctx_base = NULL;
+	free(nix->sq_ctx_base);
+	nix->sq_ctx_base = NULL;
+	free(nix->cq_ctx_base);
+	nix->cq_ctx_base = NULL;
+	free(nix->qint_base);
+	nix->qint_base = NULL;
+	free(nix->cint_base);
+	nix->cint_base = NULL;
+	free(nix->rss_base);
+	nix->rss_base = NULL;
+
+	for (index = 0; index < NIX_CQ_COUNT; index++)
+		qmem_free(&nix->cq[index]);
+
+	debug("%s: nix lf %d reset --\n", __func__, nix->lf);
+	return 0;
+}
+
+/**
+ * Allocate and initialize a NIX LF instance for this PF: map the NIX,
+ * NPC and LMT register blocks, bind the CGX LMAC, then bring up the
+ * NPA, NPC and NIX LF state.
+ *
+ * @return the new instance, or NULL on failure.  The instance itself is
+ *         now freed on every failure path (the old code leaked it when
+ *         any of the setup steps failed); resources allocated by a
+ *         partially completed setup step are not unwound — boot-time
+ *         init only.
+ */
+struct nix *nix_lf_alloc(struct udevice *dev)
+{
+	union rvu_func_addr_s block_addr;
+	struct nix *nix;
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct rvu_af *rvu_af = dev_get_priv(rvu->afdev);
+	union rvu_pf_func_s pf_func;
+	int err;
+
+	debug("%s(%s )\n", __func__, dev->name);
+
+	nix = (struct nix *)calloc(1, sizeof(*nix));
+	if (!nix) {
+		printf("%s: Out of memory for nix instance\n", __func__);
+		return NULL;
+	}
+	nix->nix_af = rvu_af->nix_af;
+
+	/* Each functional block sits at a fixed offset in the PF BAR */
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
+	nix->nix_base = rvu->pf_base + block_addr.u;
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
+	nix->npc_base = rvu->pf_base + block_addr.u;
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_LMT;
+	nix->lmt_base = rvu->pf_base + block_addr.u;
+
+	pf_func.u = 0;
+	pf_func.s.pf = rvu->pfid;
+	nix->pf_func = pf_func.u;
+	nix->lf = rvu->nix_lfid;
+	nix->pf = rvu->pfid;
+	nix->dev = dev;
+	nix->sq_cnt = 1;
+	nix->rq_cnt = 1;
+	nix->rss_grps = 1;
+	nix->cq_cnt = 2;
+	nix->xqe_sz = NIX_CQE_SIZE_W16;
+
+	nix->lmac = nix_get_cgx_lmac(nix->pf);
+	if (!nix->lmac) {
+		printf("%s: Error: could not find lmac for pf %d\n",
+		       __func__, nix->pf);
+		goto fail;
+	}
+	nix->lmac->link_num =
+		NIX_LINK_E_CGXX_LMACX(nix->lmac->cgx->cgx_id,
+				      nix->lmac->lmac_id);
+	nix->lmac->chan_num =
+		NIX_CHAN_E_CGXX_LMACX_CHX(nix->lmac->cgx->cgx_id,
+					  nix->lmac->lmac_id, 0);
+	/* This is rx pkind in 1:1 mapping to NIX_LINK_E */
+	nix->lmac->pknd = nix->lmac->link_num;
+
+	cgx_lmac_set_pkind(nix->lmac, nix->lmac->lmac_id, nix->lmac->pknd);
+	debug("%s(%s CGX%x LMAC%x)\n", __func__, dev->name,
+	      nix->lmac->cgx->cgx_id, nix->lmac->lmac_id);
+	debug("%s(%s Link %x Chan %x Pknd %x)\n", __func__, dev->name,
+	      nix->lmac->link_num, nix->lmac->chan_num,	nix->lmac->pknd);
+
+	err = npa_lf_setup(nix);
+	if (err)
+		goto fail;
+
+	err = npc_lf_setup(nix);
+	if (err)
+		goto fail;
+
+	err = nix_lf_setup(nix);
+	if (err)
+		goto fail;
+
+	return nix;
+
+fail:
+	free(nix);
+	return NULL;
+}
+
+/**
+ * Pop one buffer pointer from an aura.  The hardware implements the
+ * alloc request as an atomic 64-bit add to NPA_LF_AURA_OP_ALLOCX(0):
+ * the addend selects the aura and the LDADD's old value carries the
+ * allocated address (callers treat 0 as "aura empty", see nix_lf_xmit).
+ */
+u64 npa_aura_op_alloc(struct npa *npa, u64 aura_id)
+{
+	union npa_lf_aura_op_allocx op_allocx;
+
+	op_allocx.u = atomic_fetch_and_add64_nosync(npa->npa_base +
+			NPA_LF_AURA_OP_ALLOCX(0), aura_id);
+	return op_allocx.s.addr;
+}
+
+/**
+ * Read a CQ's head/tail status.  Like the aura alloc, this is an atomic
+ * operation register: the CQ index is encoded in bits 63:32 of the
+ * addend and the returned old value is the nixx_lf_cq_op_status word.
+ */
+u64 nix_cq_op_status(struct nix *nix, u64 cq_id)
+{
+	union nixx_lf_cq_op_status op_status;
+	s64 *reg = nix->nix_base + NIXX_LF_CQ_OP_STATUS();
+
+	op_status.u = atomic_fetch_and_add64_nosync(reg, cq_id << 32);
+	return op_status.u;
+}
+
+/* TX */
+/* Copy @num_words 64-bit words of a descriptor into the LF's LMT line,
+ * staging it for a subsequent lmt_submit().
+ */
+static inline void nix_write_lmt(struct nix *nix, void *buffer,
+				 int num_words)
+{
+	u64 *dst = lmt_store_ptr(nix);
+	const u64 *src = buffer;
+	int w;
+
+	debug("%s lmt_ptr %p %p\n", __func__, nix->lmt_base, dst);
+	for (w = 0; w < num_words; w++) {
+		debug("%s data %llx lmt_ptr %p\n", __func__, src[w],
+		      dst + w);
+		dst[w] = src[w];
+	}
+}
+
+/* Validate one TX completion entry and acknowledge it by ringing the
+ * CQ doorbell for a single entry.
+ */
+void nix_cqe_tx_pkt_handler(struct nix *nix, void *cqe)
+{
+	const union nix_cqe_hdr_s *hdr = cqe;
+
+	debug("%s: txcqe: %p\n", __func__, hdr);
+
+	if (hdr->s.cqe_type != NIX_XQE_TYPE_E_SEND) {
+		printf("%s: Error: Unsupported CQ header type %d\n",
+		       __func__, hdr->s.cqe_type);
+		return;
+	}
+
+	/* Ack exactly one entry on the TX completion queue */
+	nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
+			 (NIX_CQ_TX << 32) | 1);
+}
+
+/* Reap the TX completion queue: acknowledge every pending entry until
+ * head catches up with tail.
+ */
+void nix_lf_flush_tx(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct nix *nix = rvu->nix;
+	void *cq_tx_base = nix->cq[NIX_CQ_TX].base;
+	u32 qmask = nix->cq[NIX_CQ_TX].qsize - 1;
+	union nixx_lf_cq_op_status op_status;
+	union nix_cqe_hdr_s *cqe;
+	u32 head, tail;
+
+	/* ack tx cqe entries */
+	for (;;) {
+		op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
+		head = op_status.s.head & qmask;
+		tail = op_status.s.tail & qmask;
+		debug("%s cq tx head %d tail %d\n", __func__, head, tail);
+		if (head == tail)
+			break;
+
+		cqe = cq_tx_base + head * nix->cq[NIX_CQ_TX].entry_sz;
+		nix_cqe_tx_pkt_handler(nix, cqe);
+	}
+}
+
+/**
+ * Transmit one packet: copy it into a buffer from the TX pool, build a
+ * send descriptor (header + scatter entry) in the LMT line and submit
+ * it to the hardware, retrying until the LMTST is accepted.
+ *
+ * @param pkt     packet bytes to send (copied; caller keeps ownership)
+ * @param pkt_len packet length in bytes
+ * @return 0 on success, -1 if no TX buffer is available
+ */
+int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct nix *nix = rvu->nix;
+	struct nix_tx_dr tx_dr;
+	/* Descriptor size in 16-byte units, minus one (HW encoding) */
+	int dr_sz = (sizeof(struct nix_tx_dr) + 15) / 16 - 1;
+	s64 result;
+	void *packet;
+
+	/* Reap completed TX descriptors before queueing a new one */
+	nix_lf_flush_tx(dev);
+	memset((void *)&tx_dr, 0, sizeof(struct nix_tx_dr));
+	/* Dump TX packet in to NPA buffer */
+	packet = (void *)npa_aura_op_alloc(nix->npa, NPA_POOL_TX);
+	if (!packet) {
+		printf("%s TX buffers unavailable\n", __func__);
+		return -1;
+	}
+	memcpy(packet, pkt, pkt_len);
+	debug("%s TX buffer %p\n", __func__, packet);
+
+	tx_dr.hdr.s.aura = NPA_POOL_TX;
+	tx_dr.hdr.s.df = 0;
+	tx_dr.hdr.s.pnc = 1;
+	tx_dr.hdr.s.sq = 0;
+	tx_dr.hdr.s.total = pkt_len;
+	/* FIXME - for now only hdr+sg+sg1addr are sent, hence - 2 */
+	tx_dr.hdr.s.sizem1 = dr_sz - 2;
+	debug("%s dr_sz %d\n", __func__, dr_sz);
+
+	tx_dr.tx_sg.s.segs = 1;
+	tx_dr.tx_sg.s.subdc = NIX_SUBDC_E_SG;
+	tx_dr.tx_sg.s.seg1_size = pkt_len;
+	tx_dr.tx_sg.s.ld_type = NIX_SENDLDTYPE_E_LDT;
+	tx_dr.sg1_addr = (dma_addr_t)packet;
+
+#define DEBUG_PKT
+#ifdef DEBUG_PKT
+	debug("TX PKT Data\n");
+	for (int i = 0; i < pkt_len; i++) {
+		if (i && (i % 8 == 0))
+			debug("\n");
+		debug("%02x ", *((u8 *)pkt + i));
+	}
+	debug("\n");
+#endif
+	/* Stage the descriptor in the LMT line and kick the doorbell;
+	 * a zero result means the LMTST was not accepted and the whole
+	 * store must be replayed.
+	 */
+	do {
+		nix_write_lmt(nix, &tx_dr, (dr_sz - 1) * 2);
+		__iowmb();
+		result = lmt_submit((u64)(nix->nix_base +
+					       NIXX_LF_OP_SENDX(0)));
+		WATCHDOG_RESET();
+	} while (result == 0);
+
+	return 0;
+}
+
+/* RX */
+/**
+ * Drain the RX completion queue: return each pending buffer to the RX
+ * aura and acknowledge its CQ entry, until head catches up with tail.
+ *
+ * Fix: the CQ entry address is computed with byte arithmetic before the
+ * cast (as nix_lf_recv() does).  The previous cast-then-add scaled the
+ * offset by sizeof(struct nix_rx_dr), so every entry with head > 0 was
+ * read from the wrong address.
+ */
+void nix_lf_flush_rx(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct nix *nix = rvu->nix;
+	union nixx_lf_cq_op_status op_status;
+	void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
+	struct nix_rx_dr *rx_dr;
+	union nix_rx_parse_s *rxparse;
+	u32 head, tail;
+	u32 rx_cqe_sz = nix->cq[NIX_CQ_RX].entry_sz;
+	u64 *seg;
+
+	/* flush rx cqe entries */
+	op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
+	head = op_status.s.head;
+	tail = op_status.s.tail;
+	head &= (nix->cq[NIX_CQ_RX].qsize - 1);
+	tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
+
+	debug("%s cq rx head %d tail %d\n", __func__, head, tail);
+	while (head != tail) {
+		/* entry_sz is in bytes: index before casting */
+		rx_dr = (struct nix_rx_dr *)(cq_rx_base + head * rx_cqe_sz);
+		rxparse = &rx_dr->rx_parse;
+
+		debug("%s: rx parse: %p\n", __func__, rxparse);
+		debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
+		      __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);
+
+		/* Segment addresses follow the scatter header */
+		seg = (dma_addr_t *)(&rx_dr->rx_sg + 1);
+
+		/* Hand the buffer back to the RX aura (128-bit write) */
+		st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(),
+		      seg[0], (1ULL << 63) | NPA_POOL_RX);
+
+		debug("%s return %llx to NPA\n", __func__, seg[0]);
+		/* Ack one RX completion entry */
+		nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
+				 (NIX_CQ_RX << 32) | 1);
+
+		op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
+		head = op_status.s.head;
+		tail = op_status.s.tail;
+		head &= (nix->cq[NIX_CQ_RX].qsize - 1);
+		tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
+		debug("%s cq rx head %d tail %d\n", __func__, head, tail);
+	}
+}
+
+/**
+ * Release a packet previously handed out by nix_lf_recv(): return the
+ * buffer to the RX aura and acknowledge its completion entry.  TX
+ * completion reaping is piggy-backed on this path.
+ *
+ * @return 0 always
+ */
+int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct nix *nix = rvu->nix;
+
+	/* Return rx packet to NPA */
+	debug("%s return %p to NPA\n", __func__, pkt);
+	st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(), (u64)pkt,
+	      (1ULL << 63) | NPA_POOL_RX);
+	/* Ack one RX completion entry */
+	nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
+			 (NIX_CQ_RX << 32) | 1);
+
+	nix_lf_flush_tx(dev);
+	return 0;
+}
+
+/**
+ * Poll the RX completion queue for one packet.
+ *
+ * The buffer is NOT returned to the aura here; the caller hands it back
+ * via nix_lf_free_pkt() once it has consumed the data.
+ *
+ * @param packetp out: pointer to the received packet data
+ * @return packet length in bytes, -EAGAIN if the queue is empty, or -1
+ *         on a malformed completion entry
+ */
+int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct nix *nix = rvu->nix;
+	union nixx_lf_cq_op_status op_status;
+	void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
+	struct nix_rx_dr *rx_dr;
+	union nix_rx_parse_s *rxparse;
+	void *pkt, *cqe;
+	int pkt_len = 0;
+	u64 *addr;
+	u32 head, tail;
+
+	/* fetch rx cqe entries */
+	op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
+	head = op_status.s.head;
+	tail = op_status.s.tail;
+	head &= (nix->cq[NIX_CQ_RX].qsize - 1);
+	tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
+	debug("%s cq rx head %d tail %d\n", __func__, head, tail);
+	if (head == tail)
+		return -EAGAIN;
+
+	debug("%s: rx_base %p head %d sz %d\n", __func__, cq_rx_base, head,
+	      nix->cq[NIX_CQ_RX].entry_sz);
+	/* entry_sz is in bytes: index with void-pointer arithmetic */
+	cqe = cq_rx_base + head * nix->cq[NIX_CQ_RX].entry_sz;
+	rx_dr = (struct nix_rx_dr *)cqe;
+	rxparse = &rx_dr->rx_parse;
+
+	debug("%s: rx completion: %p\n", __func__, cqe);
+	debug("%s: rx dr: %p\n", __func__, rx_dr);
+	debug("%s: rx parse: %p\n", __func__, rxparse);
+	debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
+	      __func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);
+	debug("%s: rx parse: pkind %x chan %x\n",
+	      __func__, rxparse->s.pkind, rxparse->s.chan);
+
+	if (rx_dr->hdr.s.cqe_type != NIX_XQE_TYPE_E_RX) {
+		printf("%s: Error: Unsupported CQ header type in Rx %d\n",
+		       __func__, rx_dr->hdr.s.cqe_type);
+		return -1;
+	}
+
+	pkt_len = rxparse->s.pkt_lenm1 + 1;
+	/* Segment addresses follow the scatter header */
+	addr = (dma_addr_t *)(&rx_dr->rx_sg + 1);
+	pkt = (void *)addr[0];
+
+	debug("%s: segs: %d (%d@0x%llx, %d@0x%llx, %d@0x%llx)\n", __func__,
+	      rx_dr->rx_sg.s.segs, rx_dr->rx_sg.s.seg1_size, addr[0],
+	      rx_dr->rx_sg.s.seg2_size, addr[1],
+	      rx_dr->rx_sg.s.seg3_size, addr[2]);
+	/* NOTE(review): only the first segment is returned; multi-segment
+	 * frames are rejected here — presumably RX buffers are sized so
+	 * this never triggers for MTU-sized frames.
+	 */
+	if (pkt_len < rx_dr->rx_sg.s.seg1_size + rx_dr->rx_sg.s.seg2_size +
+			rx_dr->rx_sg.s.seg3_size) {
+		debug("%s: Error: rx buffer size too small\n", __func__);
+		return -1;
+	}
+
+	__iowmb();
+#define DEBUG_PKT
+#ifdef DEBUG_PKT
+	debug("RX PKT Data\n");
+	for (int i = 0; i < pkt_len; i++) {
+		if (i && (i % 8 == 0))
+			debug("\n");
+		debug("%02x ", *((u8 *)pkt + i));
+	}
+	debug("\n");
+#endif
+
+	*packetp = (uchar *)pkt;
+
+	return pkt_len;
+}
+
+/**
+ * Propagate the environment/platform MAC address into the CGX LMAC and
+ * firmware if it differs from what the lower-level firmware programmed.
+ *
+ * Uses ARP_HLEN for the copy as well as the compare (the old code mixed
+ * ARP_HLEN with a magic 6 — same value, inconsistent spelling).
+ *
+ * @return 0 always
+ */
+int nix_lf_setup_mac(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct nix *nix = rvu->nix;
+	struct eth_pdata *pdata = dev_get_platdata(dev);
+
+	/* If lower level firmware fails to set proper MAC
+	 * u-boot framework updates MAC to random address.
+	 * Use this hook to update mac address in cgx lmac
+	 * and call mac filter setup to update new address.
+	 */
+	if (memcmp(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN)) {
+		memcpy(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN);
+		eth_env_set_enetaddr_by_index("eth", rvu->dev->seq,
+					      pdata->enetaddr);
+		cgx_lmac_mac_filter_setup(nix->lmac);
+		/* Update user given MAC address to ATF for update
+		 * in sh_fwdata to use in Linux.
+		 */
+		cgx_intf_set_macaddr(dev);
+		debug("%s: lMAC %pM\n", __func__, nix->lmac->mac_addr);
+		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
+	}
+	debug("%s: setupMAC %pM\n", __func__, pdata->enetaddr);
+	return 0;
+}
+
+/**
+ * Quiesce the interface: stop the LMAC from sending/receiving, give
+ * in-flight traffic a moment to land, then drain both completion queues.
+ */
+void nix_lf_halt(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct nix *nix = rvu->nix;
+
+	cgx_lmac_rx_tx_enable(nix->lmac, nix->lmac->lmac_id, false);
+
+	/* Let packets already in the pipe complete */
+	mdelay(1);
+
+	/* Flush tx and rx descriptors */
+	nix_lf_flush_rx(dev);
+	nix_lf_flush_tx(dev);
+}
+
+/**
+ * Bring the interface up: enable the LMAC link on first use (or just
+ * query it afterwards), report the link state, and enable RX/TX.
+ *
+ * @return 0 when the link is up, -1 otherwise
+ */
+int nix_lf_init(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct nix *nix = rvu->nix;
+	struct lmac *lmac = nix->lmac;
+	int ret;
+	u64 link_sts;
+	u8 link, speed;
+	u16 errcode;
+
+	printf("Waiting for CGX%d LMAC%d [%s] link status...",
+	       lmac->cgx->cgx_id, lmac->lmac_id,
+	       lmac_type_to_str[lmac->lmac_type]);
+
+	if (lmac->init_pend) {
+		/* Bring up LMAC */
+		ret = cgx_lmac_link_enable(lmac, lmac->lmac_id,
+					   true, &link_sts);
+		lmac->init_pend = 0;
+	} else {
+		ret = cgx_lmac_link_status(lmac, lmac->lmac_id, &link_sts);
+	}
+
+	if (ret) {
+		printf(" [Down]\n");
+		return -1;
+	}
+
+	/* link_sts word layout: bit 0 link, bits 5:2 speed, 15:6 errcode */
+	link = link_sts & 0x1;
+	speed = (link_sts >> 2) & 0xf;
+	errcode = (link_sts >> 6) & 0x2ff;
+	debug("%s: link %x speed %x errcode %x\n",
+	      __func__, link, speed, errcode);
+
+	/* Print link status */
+	printf(" [%s]\n", link ? lmac_speed_to_str[speed] : "Down");
+	if (!link)
+		return -1;
+
+	/* NOTE(review): init_pend was cleared above on first bring-up, so
+	 * this enables RX/TX on every successful call — presumably the
+	 * enable-link path already did it on first use; confirm intent.
+	 */
+	if (!lmac->init_pend)
+		cgx_lmac_rx_tx_enable(lmac, lmac->lmac_id, true);
+
+	return 0;
+}
+
+/* Report which CGX and LMAC indices back this network device */
+void nix_get_cgx_lmac_id(struct udevice *dev, int *cgxid, int *lmacid)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct lmac *lmac = rvu->nix->lmac;
+
+	*cgxid = lmac->cgx->cgx_id;
+	*lmacid = lmac->lmac_id;
+}
+
+/* Print the CGX/LMAC identity and type for this device (no newline) */
+void nix_print_mac_info(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	struct lmac *lmac = rvu->nix->lmac;
+
+	printf(" CGX%d LMAC%d [%s]", lmac->cgx->cgx_id, lmac->lmac_id,
+	       lmac_type_to_str[lmac->lmac_type]);
+}
+
diff --git a/drivers/net/octeontx2/nix.h b/drivers/net/octeontx2/nix.h
new file mode 100644
index 0000000..03260dd
--- /dev/null
+++ b/drivers/net/octeontx2/nix.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __NIX_H__
+#define	__NIX_H__
+
+#include <asm/arch/csrs/csrs-npa.h>
+#include <asm/arch/csrs/csrs-nix.h>
+#include "rvu.h"
+
+/** Maximum number of LMACs supported */
+#define MAX_LMAC			12
+
+/* NIX RX action operation*/
+#define NIX_RX_ACTIONOP_DROP		(0x0ull)
+#define NIX_RX_ACTIONOP_UCAST		(0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC	(0x2ull)
+#define NIX_RX_ACTIONOP_MCAST		(0x3ull)
+#define NIX_RX_ACTIONOP_RSS		(0x4ull)
+
+/* NIX TX action operation*/
+#define NIX_TX_ACTIONOP_DROP		(0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT	(0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN	(0x2ull)
+#define NIX_TX_ACTIONOP_MCAST		(0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL	(0x5ull)
+
+#define NIX_INTF_RX			0
+#define NIX_INTF_TX			1
+
+#define NIX_INTF_TYPE_CGX		0
+#define NIX_INTF_TYPE_LBK		1
+#define NIX_MAX_HW_MTU			9212
+#define NIX_MIN_HW_MTU			40
+#define MAX_MTU				1536
+
+#define NPA_POOL_COUNT			3
+#define NPA_AURA_COUNT(x)		(1ULL << ((x) + 6))
+#define NPA_POOL_RX			0ULL
+#define NPA_POOL_TX			1ULL
+#define NPA_POOL_SQB			2ULL
+#define RQ_QLEN				Q_COUNT(Q_SIZE_1K)
+#define SQ_QLEN				Q_COUNT(Q_SIZE_1K)
+#define SQB_QLEN			Q_COUNT(Q_SIZE_16)
+
+#define NIX_CQ_RX			0ULL
+#define NIX_CQ_TX			1ULL
+#define NIX_CQ_COUNT			2ULL
+#define NIX_CQE_SIZE_W16		(16 * sizeof(u64))
+#define NIX_CQE_SIZE_W64		(64 * sizeof(u64))
+
+/** Size of aura hardware context */
+#define NPA_AURA_HW_CTX_SIZE		48
+/** Size of pool hardware context */
+#define NPA_POOL_HW_CTX_SIZE		64
+
+#define NPA_DEFAULT_PF_FUNC		0xffff
+
+#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a)			(12 + (a))
+#define NIX_CHAN_LBK_CHX(a, b)		(0 + 0x100 * (a) + (b))
+#define MAX_LMAC_PKIND			12
+
+/** Number of Admin queue entries */
+#define AQ_RING_SIZE	Q_COUNT(Q_SIZE_16)
+
+/** Each completion queue contains 256 entries, see NIC_CQ_CTX_S[qsize] */
+#define CQS_QSIZE			Q_SIZE_256
+#define CQ_ENTRIES			Q_COUNT(CQS_QSIZE)
+/**
+ * Each completion queue entry contains 128 bytes, see
+ * NIXX_AF_LFX_CFG[xqe_size]
+ */
+#define CQ_ENTRY_SIZE			NIX_CQE_SIZE_W16
+
+/* Encoded aura depths: index x maps to 1 << (x + 6) entries, see
+ * NPA_AURA_COUNT() (so NPA_AURA_SZ_128 == 128 pointers).
+ */
+enum npa_aura_size {
+	NPA_AURA_SZ_0,
+	NPA_AURA_SZ_128,
+	NPA_AURA_SZ_256,
+	NPA_AURA_SZ_512,
+	NPA_AURA_SZ_1K,
+	NPA_AURA_SZ_2K,
+	NPA_AURA_SZ_4K,
+	NPA_AURA_SZ_8K,
+	NPA_AURA_SZ_16K,
+	NPA_AURA_SZ_32K,
+	NPA_AURA_SZ_64K,
+	NPA_AURA_SZ_128K,
+	NPA_AURA_SZ_256K,
+	NPA_AURA_SZ_512K,
+	NPA_AURA_SZ_1M,
+	NPA_AURA_SZ_MAX,
+};
+
+#define NPA_AURA_SIZE_DEFAULT		NPA_AURA_SZ_128
+
+/* NIX Transmit schedulers */
+/* Levels bottom-up; SMQ and MDQ share level 0 (two names for the same
+ * hardware level).
+ */
+enum nix_scheduler {
+	NIX_TXSCH_LVL_SMQ = 0x0,
+	NIX_TXSCH_LVL_MDQ = 0x0,
+	NIX_TXSCH_LVL_TL4 = 0x1,
+	NIX_TXSCH_LVL_TL3 = 0x2,
+	NIX_TXSCH_LVL_TL2 = 0x3,
+	NIX_TXSCH_LVL_TL1 = 0x4,
+	NIX_TXSCH_LVL_CNT = 0x5,
+};
+
+struct cgx;
+
+/* Simple per-direction traffic counters */
+struct nix_stats {
+	u64	num_packets;
+	u64	num_bytes;
+};
+
+struct nix;
+struct lmac;
+
+/* NPA admin-function state: AF register window plus its admin queue */
+struct npa_af {
+	void __iomem		*npa_af_base;
+	struct admin_queue	aq;
+	u32			aura;
+};
+
+/* Per-LF NPA state: register windows, HW contexts and host-side
+ * bookkeeping for the RX/TX/SQB buffer pools (see npa_lf_setup()).
+ */
+struct npa {
+	struct npa_af		*npa_af;
+	void __iomem		*npa_base;
+	void __iomem		*npc_base;
+	void __iomem		*lmt_base;
+	/** Hardware aura context */
+	void			*aura_ctx;
+	/** Hardware pool context */
+	void			*pool_ctx[NPA_POOL_COUNT];
+	void			*pool_stack[NPA_POOL_COUNT];
+	/** Host record of every buffer freed into each pool */
+	void                    **buffers[NPA_POOL_COUNT];
+	u32                     pool_stack_pages[NPA_POOL_COUNT];
+	/** Pointers per stack page (from NPA_AF_CONST[stack_page_ptrs]) */
+	u32			pool_stack_pointers;
+	/** Buffer count per pool */
+	u32			q_len[NPA_POOL_COUNT];
+	/** Buffer size per pool, in bytes */
+	u32			buf_size[NPA_POOL_COUNT];
+	u32			stack_pages[NPA_POOL_COUNT];
+};
+
+/* NIX admin-function state shared by all LFs it serves.  The per-entry
+ * HW context sizes below are presumably read from the AF CONST
+ * registers during AF setup (not visible here) — confirm.
+ */
+struct nix_af {
+	struct udevice			*dev;
+	struct nix			*lmacs[MAX_LMAC];
+	struct npa_af			*npa_af;
+	void __iomem			*nix_af_base;
+	void __iomem			*npc_af_base;
+	struct admin_queue		aq;
+	u8				num_lmacs;
+	s8				index;
+	u8				xqe_size;
+	u32				sqb_size;
+	u32				qints;
+	u32				cints;
+	u32				sq_ctx_sz;
+	u32				rq_ctx_sz;
+	u32				cq_ctx_sz;
+	u32				rsse_ctx_sz;
+	u32				cint_ctx_sz;
+	u32				qint_ctx_sz;
+};
+
+/* TX descriptor as staged into the LMT line: send header, scatter
+ * header, then up to three segment addresses (only sg1 is used here).
+ */
+struct nix_tx_dr {
+	union nix_send_hdr_s	hdr;
+	union nix_send_sg_s	tx_sg;
+	dma_addr_t			sg1_addr;
+	dma_addr_t			sg2_addr;
+	dma_addr_t			sg3_addr;
+	u64				in_use;
+};
+
+/* RX completion layout: CQE header, parse result, scatter header
+ * (segment addresses follow immediately after rx_sg in memory).
+ */
+struct nix_rx_dr {
+	union nix_cqe_hdr_s hdr;
+	union nix_rx_parse_s rx_parse;
+	union nix_rx_sg_s rx_sg;
+};
+
+/* Per-PF NIX LF state: register windows, HW context areas allocated in
+ * nix_lf_setup(), queue memories, and the bound CGX LMAC.
+ */
+struct nix {
+	struct udevice			*dev;
+	struct eth_device		*netdev;
+	struct nix_af			*nix_af;
+	struct npa			*npa;
+	struct lmac			*lmac;
+	union nix_cint_hw_s	*cint_base;
+	union nix_cq_ctx_s		*cq_ctx_base;
+	union nix_qint_hw_s	*qint_base;
+	union nix_rq_ctx_s		*rq_ctx_base;
+	union nix_rsse_s		*rss_base;
+	union nix_sq_ctx_s		*sq_ctx_base;
+	void				*cqe_base;
+	struct qmem			sq;
+	/** CQ rings, indexed by NIX_CQ_RX / NIX_CQ_TX */
+	struct qmem			cq[NIX_CQ_COUNT];
+	struct qmem			rq;
+	struct qmem			rss;
+	struct qmem			cq_ints;
+	struct qmem			qints;
+	char				name[16];
+	void __iomem			*nix_base;	/** PF reg base */
+	void __iomem			*npc_base;
+	void __iomem			*lmt_base;
+	struct nix_stats		tx_stats;
+	struct nix_stats		rx_stats;
+	u32				aura;
+	int				pknd;
+	int				lf;
+	int				pf;
+	u16				pf_func;
+	u32				rq_cnt;	/** receive queues count */
+	u32				sq_cnt;	/** send queues count */
+	u32				cq_cnt;	/** completion queues count */
+	u16				rss_sz;
+	u16				sqb_size;
+	u8				rss_grps;
+	u8				xqe_sz;
+};
+
+/* Admin-queue shapes: result area followed by the context image.  The
+ * *_dis variants carry a second context copy — presumably the
+ * write-enable mask for an AQ WRITE op; confirm against the AQ usage
+ * in nix_af.c.
+ */
+struct nix_aq_cq_dis {
+	union nix_aq_res_s	resp ALIGNED;
+	union nix_cq_ctx_s	cq ALIGNED;
+	union nix_cq_ctx_s	mcq ALIGNED;
+};
+
+struct nix_aq_rq_dis {
+	union nix_aq_res_s	resp ALIGNED;
+	union nix_rq_ctx_s	rq ALIGNED;
+	union nix_rq_ctx_s	mrq ALIGNED;
+};
+
+struct nix_aq_sq_dis {
+	union nix_aq_res_s	resp ALIGNED;
+	union nix_sq_ctx_s	sq ALIGNED;
+	union nix_sq_ctx_s	msq ALIGNED;
+};
+
+struct nix_aq_cq_request {
+	union nix_aq_res_s	resp ALIGNED;
+	union nix_cq_ctx_s	cq ALIGNED;
+};
+
+struct nix_aq_rq_request {
+	union nix_aq_res_s	resp ALIGNED;
+	union nix_rq_ctx_s	rq ALIGNED;
+};
+
+struct nix_aq_sq_request {
+	union nix_aq_res_s	resp ALIGNED;
+	union nix_sq_ctx_s	sq ALIGNED;
+};
+
+/* Read a NIX AF CSR at @offset, with a debug trace of address/value */
+static inline u64 nix_af_reg_read(struct nix_af *nix_af, u64 offset)
+{
+	u64 val = readq(nix_af->nix_af_base + offset);
+
+	debug("%s reg %p val %llx\n", __func__, nix_af->nix_af_base + offset,
+	      val);
+	return val;
+}
+
+/* Write a NIX AF CSR at @offset, with a debug trace of address/value */
+static inline void nix_af_reg_write(struct nix_af *nix_af, u64 offset,
+				    u64 val)
+{
+	debug("%s reg %p val %llx\n", __func__, nix_af->nix_af_base + offset,
+	      val);
+	writeq(val, nix_af->nix_af_base + offset);
+}
+
+/* Read a NIX LF (PF-mapped) CSR at @offset, with a debug trace */
+static inline u64 nix_pf_reg_read(struct nix *nix, u64 offset)
+{
+	u64 val = readq(nix->nix_base + offset);
+
+	debug("%s reg %p val %llx\n", __func__, nix->nix_base + offset,
+	      val);
+	return val;
+}
+
+/* Write a NIX LF (PF-mapped) CSR at @offset, with a debug trace */
+static inline void nix_pf_reg_write(struct nix *nix, u64 offset,
+				    u64 val)
+{
+	debug("%s reg %p val %llx\n", __func__, nix->nix_base + offset,
+	      val);
+	writeq(val, nix->nix_base + offset);
+}
+
+/* Read an NPA AF CSR at @offset, with a debug trace */
+static inline u64 npa_af_reg_read(struct npa_af *npa_af, u64 offset)
+{
+	u64 val = readq(npa_af->npa_af_base + offset);
+
+	debug("%s reg %p val %llx\n", __func__, npa_af->npa_af_base + offset,
+	      val);
+	return val;
+}
+
+/* Write an NPA AF CSR at @offset, with a debug trace */
+static inline void npa_af_reg_write(struct npa_af *npa_af, u64 offset,
+				    u64 val)
+{
+	debug("%s reg %p val %llx\n", __func__, npa_af->npa_af_base + offset,
+	      val);
+	writeq(val, npa_af->npa_af_base + offset);
+}
+
+/* Read an NPC AF CSR at @offset, with a debug trace */
+static inline u64 npc_af_reg_read(struct nix_af *nix_af, u64 offset)
+{
+	u64 val = readq(nix_af->npc_af_base + offset);
+
+	debug("%s reg %p val %llx\n", __func__, nix_af->npc_af_base + offset,
+	      val);
+	return val;
+}
+
+/* Write an NPC AF CSR at @offset, with a debug trace */
+static inline void npc_af_reg_write(struct nix_af *nix_af, u64 offset,
+				    u64 val)
+{
+	debug("%s reg %p val %llx\n", __func__, nix_af->npc_af_base + offset,
+	      val);
+	writeq(val, nix_af->npc_af_base + offset);
+}
+
+int npa_attach_aura(struct nix_af *nix_af, int lf,
+		    const union npa_aura_s *desc, u32 aura_id);
+int npa_attach_pool(struct nix_af *nix_af, int lf,
+		    const union npa_pool_s *desc, u32 pool_id);
+int npa_af_setup(struct npa_af *npa_af);
+int npa_af_shutdown(struct npa_af *npa_af);
+int npa_lf_setup(struct nix *nix);
+int npa_lf_shutdown(struct nix *nix);
+int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base);
+int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count);
+
+int npc_lf_admin_setup(struct nix *nix);
+int npc_af_shutdown(struct nix_af *nix_af);
+
+int nix_af_setup(struct nix_af *nix_af);
+int nix_af_shutdown(struct nix_af *nix_af);
+int nix_lf_setup(struct nix *nix);
+int nix_lf_shutdown(struct nix *nix);
+struct nix *nix_lf_alloc(struct udevice *dev);
+int nix_lf_admin_setup(struct nix *nix);
+int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf,
+			  u32 cq_count, u32 rq_count, u32 sq_count);
+struct rvu_af *get_af(void);
+
+int nix_lf_setup_mac(struct udevice *dev);
+int nix_lf_read_rom_mac(struct udevice *dev);
+void nix_lf_halt(struct udevice *dev);
+int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len);
+int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp);
+int nix_lf_init(struct udevice *dev);
+int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len);
+
+#endif /* __NIX_H__ */
diff --git a/drivers/net/octeontx2/nix_af.c b/drivers/net/octeontx2/nix_af.c
new file mode 100644
index 0000000..d513917
--- /dev/null
+++ b/drivers/net/octeontx2/nix_af.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <misc.h>
+#include <net.h>
+#include <pci.h>
+#include <watchdog.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <asm/arch/board.h>
+#include <asm/arch/csrs/csrs-npc.h>
+#include <asm/arch/csrs/csrs-lmt.h>
+#include <asm/io.h>
+
+#include "nix.h"
+#include "lmt.h"
+#include "cgx.h"
+
+/*
+ * Scratch admin-queue request buffers used by nix_lf_admin_shutdown()
+ * to disable CQ/RQ/SQ contexts; ALIGNED keeps each one cache-line
+ * aligned for the AQ result/context layout.
+ */
+static struct nix_aq_cq_dis cq_dis ALIGNED;
+static struct nix_aq_rq_dis rq_dis ALIGNED;
+static struct nix_aq_sq_dis sq_dis ALIGNED;
+
+/***************
+ * NPA API
+ ***************/
+/**
+ * Install an aura context for an NPA LF through the NPA admin queue.
+ *
+ * @param nix_af  NIX AF handle (provides the NPA AF state)
+ * @param lf      logical function the aura belongs to
+ * @param desc    aura context to install
+ * @param aura_id index of the aura to initialise
+ *
+ * @return 0 on success, -1 if the AQ result is not GOOD (or times out)
+ */
+int npa_attach_aura(struct nix_af *nix_af, int lf,
+		    const union npa_aura_s *desc, u32 aura_id)
+{
+	struct npa_af *npa = nix_af->npa_af;
+	union npa_aq_inst_s *inst;
+	union npa_aq_res_s *res;
+	union npa_af_aq_status aq_stat;
+	union npa_aura_s *context;
+	u64 head;
+	ulong start;
+
+	debug("%s(%p, %d, %p, %u)\n", __func__, nix_af, lf, desc, aura_id);
+	/* Build an INIT instruction in the slot at the current AQ head */
+	aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
+	head = aq_stat.s.head_ptr;
+	inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
+	res = (union npa_aq_res_s *)(npa->aq.res.base);
+
+	memset(inst, 0, sizeof(*inst));
+	inst->s.lf = lf;
+	inst->s.doneint = 0;
+	inst->s.ctype = NPA_AQ_CTYPE_E_AURA;
+	inst->s.op = NPA_AQ_INSTOP_E_INIT;
+	inst->s.res_addr = npa->aq.res.iova;
+	inst->s.cindex = aura_id;
+
+	/* The context image sits one cache line past the result slot */
+	context = (union npa_aura_s *)(npa->aq.res.base +
+						CONFIG_SYS_CACHELINE_SIZE);
+	memset(npa->aq.res.base, 0, npa->aq.res.entry_sz);
+	memcpy(context, desc, sizeof(union npa_aura_s));
+	__iowmb();	/* context must be visible before the doorbell */
+	npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);
+
+	/* Poll the completion code for up to 1000 ms */
+	start = get_timer(0);
+	while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
+	       (get_timer(start) < 1000))
+		WATCHDOG_RESET();
+	if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
+		printf("%s: Error: result 0x%x not good\n",
+		       __func__, res->s.compcode);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Install a pool context for an NPA LF through the NPA admin queue.
+ *
+ * Mirrors npa_attach_aura() but issues an NPA_AQ_CTYPE_E_POOL INIT.
+ *
+ * @param nix_af  NIX AF handle (provides the NPA AF state)
+ * @param lf      logical function the pool belongs to
+ * @param desc    pool context to install
+ * @param pool_id index of the pool to initialise
+ *
+ * @return 0 on success, -1 if the AQ result is not GOOD (or times out)
+ */
+int npa_attach_pool(struct nix_af *nix_af, int lf,
+		    const union npa_pool_s *desc, u32 pool_id)
+{
+	union npa_aq_inst_s *inst;
+	union npa_aq_res_s *res;
+	union npa_af_aq_status aq_stat;
+	struct npa_af *npa = nix_af->npa_af;
+	union npa_pool_s *context;
+	u64 head;
+	ulong start;
+
+	debug("%s(%p, %d, %p, %u)\n", __func__, nix_af, lf, desc, pool_id);
+	/* Build an INIT instruction in the slot at the current AQ head */
+	aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
+	head = aq_stat.s.head_ptr;
+
+	inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
+	res = (union npa_aq_res_s *)(npa->aq.res.base);
+
+	memset(inst, 0, sizeof(*inst));
+	inst->s.cindex = pool_id;
+	inst->s.lf = lf;
+	inst->s.doneint = 0;
+	inst->s.ctype = NPA_AQ_CTYPE_E_POOL;
+	inst->s.op = NPA_AQ_INSTOP_E_INIT;
+	inst->s.res_addr = npa->aq.res.iova;
+
+	/*
+	 * The context image sits one cache line past the result slot.
+	 * This is a POOL operation, so copy the full pool descriptor:
+	 * the previous code used union npa_aura_s here, which mistyped
+	 * the context pointer and copied only sizeof(union npa_aura_s)
+	 * bytes of the pool context.
+	 */
+	context = (union npa_pool_s *)(npa->aq.res.base +
+						CONFIG_SYS_CACHELINE_SIZE);
+	memset(npa->aq.res.base, 0, npa->aq.res.entry_sz);
+	memcpy(context, desc, sizeof(union npa_pool_s));
+	__iowmb();	/* context must be visible before the doorbell */
+	npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);
+
+	/* Poll the completion code for up to 1000 ms */
+	start = get_timer(0);
+	while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
+	       (get_timer(start) < 1000))
+		WATCHDOG_RESET();
+
+	if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
+		printf("%s: Error: result 0x%x not good\n",
+		       __func__, res->s.compcode);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Reset an NPA LF and configure its aura context storage.
+ *
+ * @param npa       NPA handle owning the LF
+ * @param lf        logical function index
+ * @param aura_base DMA address of the aura HW context array
+ *
+ * @return 0 (polls NPA_AF_LF_RST until the reset completes)
+ */
+int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base)
+{
+	union npa_af_lf_rst lf_rst;
+	union npa_af_lfx_auras_cfg auras_cfg;
+	struct npa_af *npa_af = npa->npa_af;
+
+	debug("%s(%p, %d, 0x%llx)\n", __func__, npa_af, lf, aura_base);
+	/* Kick off an LF reset and wait for EXEC to clear */
+	lf_rst.u = 0;
+	lf_rst.s.exec = 1;
+	lf_rst.s.lf = lf;
+	npa_af_reg_write(npa_af, NPA_AF_LF_RST(), lf_rst.u);
+
+	do {
+		lf_rst.u = npa_af_reg_read(npa_af, NPA_AF_LF_RST());
+		WATCHDOG_RESET();
+	} while (lf_rst.s.exec);
+
+	/* Set Aura size and enable caching of contexts */
+	auras_cfg.u = npa_af_reg_read(npa_af, NPA_AF_LFX_AURAS_CFG(lf));
+	auras_cfg.s.loc_aura_size = NPA_AURA_SIZE_DEFAULT; //FIXME aura_size;
+	auras_cfg.s.caching = 1;
+	auras_cfg.s.rmt_aura_size = 0;
+	auras_cfg.s.rmt_aura_offset = 0;
+	auras_cfg.s.rmt_lf = 0;
+	npa_af_reg_write(npa_af, NPA_AF_LFX_AURAS_CFG(lf), auras_cfg.u);
+	/* Configure aura HW context base */
+	npa_af_reg_write(npa_af, NPA_AF_LFX_LOC_AURAS_BASE(lf),
+			 aura_base);
+
+	return 0;
+}
+
+/**
+ * Disable all pool and aura contexts of an NPA LF, then reset the LF.
+ *
+ * For every id in [0, pool_count) a WRITE AQ command clears the ena
+ * bit of the pool context and then of the aura context; the second
+ * copy of each context in the request acts as the write mask.
+ *
+ * @param nix_af     NIX AF handle (provides the NPA AF state)
+ * @param lf         logical function to shut down
+ * @param pool_count number of pools/auras to disable
+ *
+ * @return 0 on success, -1 if any AQ command fails or times out
+ */
+int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count)
+{
+	int pool_id;
+	u32 head;
+	union npa_aq_inst_s *inst;
+	union npa_aq_res_s *res;
+	struct npa_aq_pool_request {
+		union npa_aq_res_s	resp ALIGNED;
+		union npa_pool_s p0 ALIGNED;	/* context */
+		union npa_pool_s p1 ALIGNED;	/* write mask */
+	} pool_req ALIGNED;
+	struct npa_aq_aura_request {
+		union npa_aq_res_s	resp ALIGNED;
+		union npa_aura_s a0 ALIGNED;	/* context */
+		union npa_aura_s a1 ALIGNED;	/* write mask */
+	} aura_req ALIGNED;
+	union npa_af_aq_status aq_stat;
+	union npa_af_lf_rst lf_rst;
+	struct npa_af *npa = nix_af->npa_af;
+	ulong start;
+
+	/* Pass 1: disable every pool context */
+	for (pool_id = 0; pool_id < pool_count; pool_id++) {
+		aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
+		head = aq_stat.s.head_ptr;
+		inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
+		res = &pool_req.resp;
+
+		memset(inst, 0, sizeof(*inst));
+		inst->s.cindex = pool_id;
+		inst->s.lf = lf;
+		inst->s.doneint = 0;
+		inst->s.ctype = NPA_AQ_CTYPE_E_POOL;
+		inst->s.op = NPA_AQ_INSTOP_E_WRITE;
+		inst->s.res_addr = (u64)&pool_req.resp;
+
+		memset((void *)&pool_req, 0, sizeof(pool_req));
+		pool_req.p0.s.ena = 0;
+		pool_req.p1.s.ena = 1;	/* Write mask */
+		__iowmb();
+
+		npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);
+
+		start = get_timer(0);
+		while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
+		       (get_timer(start) < 1000))
+			WATCHDOG_RESET();
+
+		if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
+			/* This loop disables pools, so report the pool id */
+			printf("%s: Error: result 0x%x not good for lf %d"
+			       " pool id %d\n", __func__, res->s.compcode,
+			       lf, pool_id);
+			return -1;
+		}
+		debug("%s(LF %d, pool id %d) disabled\n", __func__, lf,
+		      pool_id);
+	}
+
+	/* Pass 2: disable every aura context */
+	for (pool_id = 0; pool_id < pool_count; pool_id++) {
+		aq_stat.u = npa_af_reg_read(npa, NPA_AF_AQ_STATUS());
+		head = aq_stat.s.head_ptr;
+		inst = (union npa_aq_inst_s *)(npa->aq.inst.base) + head;
+		res = &aura_req.resp;
+
+		memset(inst, 0, sizeof(*inst));
+		inst->s.cindex = pool_id;
+		inst->s.lf = lf;
+		inst->s.doneint = 0;
+		inst->s.ctype = NPA_AQ_CTYPE_E_AURA;
+		inst->s.op = NPA_AQ_INSTOP_E_WRITE;
+		inst->s.res_addr = (u64)&aura_req.resp;
+
+		memset((void *)&aura_req, 0, sizeof(aura_req));
+		aura_req.a0.s.ena = 0;
+		aura_req.a1.s.ena = 1;	/* Write mask */
+		__iowmb();
+
+		npa_af_reg_write(npa, NPA_AF_AQ_DOOR(), 1);
+
+		start = get_timer(0);
+		while ((res->s.compcode == NPA_AQ_COMP_E_NOTDONE) &&
+		       (get_timer(start) < 1000))
+			WATCHDOG_RESET();
+
+		if (res->s.compcode != NPA_AQ_COMP_E_GOOD) {
+			printf("%s: Error: result 0x%x not good for lf %d"
+			       " aura id %d\n", __func__, res->s.compcode,
+			       lf, pool_id);
+			return -1;
+		}
+		debug("%s(LF %d, aura id %d) disabled\n", __func__, lf,
+		      pool_id);
+	}
+
+	/* Reset the LF */
+	lf_rst.u = 0;
+	lf_rst.s.exec = 1;
+	lf_rst.s.lf = lf;
+	npa_af_reg_write(npa, NPA_AF_LF_RST(), lf_rst.u);
+
+	do {
+		lf_rst.u = npa_af_reg_read(npa, NPA_AF_LF_RST());
+		WATCHDOG_RESET();
+	} while (lf_rst.s.exec);
+
+	return 0;
+}
+
+/**
+ * One-time NPA AF block initialisation.
+ *
+ * Allocates the NPA admin queue, soft-resets the block, selects little
+ * endian mode, enables the NDC cache and programs the AQ size/base.
+ *
+ * @param npa_af NPA AF state to initialise
+ *
+ * @return 0 on success, or the rvu_aq_alloc() error code
+ */
+int npa_af_setup(struct npa_af *npa_af)
+{
+	int err;
+	union npa_af_gen_cfg npa_cfg;
+	union npa_af_ndc_cfg ndc_cfg;
+	union npa_af_aq_cfg aq_cfg;
+	union npa_af_blk_rst blk_rst;
+
+	err = rvu_aq_alloc(&npa_af->aq, Q_COUNT(AQ_SIZE),
+			   sizeof(union npa_aq_inst_s),
+			   sizeof(union npa_aq_res_s));
+	if (err) {
+		printf("%s: Error %d allocating admin queue\n", __func__, err);
+		return err;
+	}
+	debug("%s: NPA admin queue allocated at %p %llx\n", __func__,
+	      npa_af->aq.inst.base, npa_af->aq.inst.iova);
+
+	/* Soft-reset the NPA block before configuring it */
+	blk_rst.u = 0;
+	blk_rst.s.rst = 1;
+	npa_af_reg_write(npa_af, NPA_AF_BLK_RST(), blk_rst.u);
+
+	/* Wait for reset to complete */
+	do {
+		blk_rst.u = npa_af_reg_read(npa_af, NPA_AF_BLK_RST());
+		WATCHDOG_RESET();
+	} while (blk_rst.s.busy);
+
+	/* Set little Endian */
+	npa_cfg.u = npa_af_reg_read(npa_af, NPA_AF_GEN_CFG());
+	npa_cfg.s.af_be = 0;
+	npa_af_reg_write(npa_af, NPA_AF_GEN_CFG(), npa_cfg.u);
+	/* Enable NDC cache */
+	ndc_cfg.u = npa_af_reg_read(npa_af, NPA_AF_NDC_CFG());
+	ndc_cfg.s.ndc_bypass = 0;
+	npa_af_reg_write(npa_af, NPA_AF_NDC_CFG(), ndc_cfg.u);
+	/* Set up queue size */
+	aq_cfg.u = npa_af_reg_read(npa_af, NPA_AF_AQ_CFG());
+	aq_cfg.s.qsize = AQ_SIZE;
+	npa_af_reg_write(npa_af, NPA_AF_AQ_CFG(), aq_cfg.u);
+	/* Set up queue base address */
+	npa_af_reg_write(npa_af, NPA_AF_AQ_BASE(), npa_af->aq.inst.iova);
+
+	return 0;
+}
+
+/**
+ * Soft-reset the NPA block and release its admin queue memory.
+ *
+ * @param npa_af NPA AF state to tear down
+ *
+ * @return 0 (polls NPA_AF_BLK_RST until the reset completes)
+ */
+int npa_af_shutdown(struct npa_af *npa_af)
+{
+	union npa_af_blk_rst rst = { .u = 0 };
+
+	rst.s.rst = 1;
+	npa_af_reg_write(npa_af, NPA_AF_BLK_RST(), rst.u);
+
+	/* Poll until the block reports the reset has finished */
+	do {
+		WATCHDOG_RESET();
+		rst.u = npa_af_reg_read(npa_af, NPA_AF_BLK_RST());
+	} while (rst.s.busy);
+
+	rvu_aq_free(&npa_af->aq);
+
+	debug("%s: npa af reset --\n", __func__);
+
+	return 0;
+}
+
+/***************
+ * NIX API
+ ***************/
+/**
+ * Setup SMQ -> TL4 -> TL3 -> TL2 -> TL1 -> MAC mapping
+ *
+ * Every scheduling level is indexed by the LMAC link number, so each
+ * link gets its own 1:1 chain of scheduler queues down to the MAC.
+ * Also programs the RX link min/max frame lengths.
+ *
+ * @param nix     Handle to setup
+ *
+ * @return 0, or negative on failure
+ */
+static int nix_af_setup_sq(struct nix *nix)
+{
+	union nixx_af_tl1x_schedule tl1_sched;
+	union nixx_af_tl2x_parent tl2_parent;
+	union nixx_af_tl3x_parent tl3_parent;
+	union nixx_af_tl3_tl2x_cfg tl3_tl2_cfg;
+	union nixx_af_tl3_tl2x_linkx_cfg tl3_tl2_link_cfg;
+	union nixx_af_tl4x_parent tl4_parent;
+	union nixx_af_tl4x_sdp_link_cfg tl4_sdp_link_cfg;
+	union nixx_af_smqx_cfg smq_cfg;
+	union nixx_af_mdqx_schedule mdq_sched;
+	union nixx_af_mdqx_parent mdq_parent;
+	union nixx_af_rx_linkx_cfg link_cfg;
+	int tl1_index = nix->lmac->link_num; /* NIX_LINK_E enum */
+	int tl2_index = tl1_index;
+	int tl3_index = tl2_index;
+	int tl4_index = tl3_index;
+	int smq_index = tl4_index;
+	struct nix_af *nix_af = nix->nix_af;
+	u64 offset = 0;
+
+	/* TL1: round-robin quantum at the root of the hierarchy */
+	tl1_sched.u = nix_af_reg_read(nix_af,
+				      NIXX_AF_TL1X_SCHEDULE(tl1_index));
+	tl1_sched.s.rr_quantum = MAX_MTU;
+	nix_af_reg_write(nix_af, NIXX_AF_TL1X_SCHEDULE(tl1_index),
+			 tl1_sched.u);
+
+	/* Chain TL2 -> TL1 and TL3 -> TL2 */
+	tl2_parent.u = nix_af_reg_read(nix_af,
+				       NIXX_AF_TL2X_PARENT(tl2_index));
+	tl2_parent.s.parent = tl1_index;
+	nix_af_reg_write(nix_af, NIXX_AF_TL2X_PARENT(tl2_index),
+			 tl2_parent.u);
+
+	tl3_parent.u = nix_af_reg_read(nix_af,
+				       NIXX_AF_TL3X_PARENT(tl3_index));
+	tl3_parent.s.parent = tl2_index;
+	nix_af_reg_write(nix_af, NIXX_AF_TL3X_PARENT(tl3_index),
+			 tl3_parent.u);
+	tl3_tl2_cfg.u = nix_af_reg_read(nix_af,
+					NIXX_AF_TL3_TL2X_CFG(tl3_index));
+	tl3_tl2_cfg.s.express = 0;
+	nix_af_reg_write(nix_af, NIXX_AF_TL3_TL2X_CFG(tl3_index),
+			 tl3_tl2_cfg.u);
+
+	/* Enable the TL3/TL2 -> link mapping with backpressure */
+	offset = NIXX_AF_TL3_TL2X_LINKX_CFG(tl3_index,
+					    nix->lmac->link_num);
+	tl3_tl2_link_cfg.u = nix_af_reg_read(nix_af, offset);
+	tl3_tl2_link_cfg.s.bp_ena = 1;
+	tl3_tl2_link_cfg.s.ena = 1;
+	tl3_tl2_link_cfg.s.relchan = 0;
+	offset = NIXX_AF_TL3_TL2X_LINKX_CFG(tl3_index,
+					    nix->lmac->link_num);
+	nix_af_reg_write(nix_af, offset, tl3_tl2_link_cfg.u);
+
+	/* Chain TL4 -> TL3; the SDP link is left disabled */
+	tl4_parent.u = nix_af_reg_read(nix_af,
+				       NIXX_AF_TL4X_PARENT(tl4_index));
+	tl4_parent.s.parent = tl3_index;
+	nix_af_reg_write(nix_af, NIXX_AF_TL4X_PARENT(tl4_index),
+			 tl4_parent.u);
+
+	offset = NIXX_AF_TL4X_SDP_LINK_CFG(tl4_index);
+	tl4_sdp_link_cfg.u = nix_af_reg_read(nix_af, offset);
+	tl4_sdp_link_cfg.s.bp_ena = 0;
+	tl4_sdp_link_cfg.s.ena = 0;
+	tl4_sdp_link_cfg.s.relchan = 0;
+	offset = NIXX_AF_TL4X_SDP_LINK_CFG(tl4_index);
+	nix_af_reg_write(nix_af, offset, tl4_sdp_link_cfg.u);
+
+	/* SMQ: bind to this LF and set frame length limits */
+	smq_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_SMQX_CFG(smq_index));
+	smq_cfg.s.express = 0;
+	smq_cfg.s.lf = nix->lf;
+	smq_cfg.s.desc_shp_ctl_dis = 1;
+	smq_cfg.s.maxlen = MAX_MTU;
+	smq_cfg.s.minlen = NIX_MIN_HW_MTU;
+	nix_af_reg_write(nix_af, NIXX_AF_SMQX_CFG(smq_index), smq_cfg.u);
+
+	/* MDQ: schedule quantum and parent TL4 */
+	mdq_sched.u = nix_af_reg_read(nix_af,
+				      NIXX_AF_MDQX_SCHEDULE(smq_index));
+	mdq_sched.s.rr_quantum = MAX_MTU;
+	offset = NIXX_AF_MDQX_SCHEDULE(smq_index);
+	nix_af_reg_write(nix_af, offset, mdq_sched.u);
+	mdq_parent.u = nix_af_reg_read(nix_af,
+				       NIXX_AF_MDQX_PARENT(smq_index));
+	mdq_parent.s.parent = tl4_index;
+	nix_af_reg_write(nix_af, NIXX_AF_MDQX_PARENT(smq_index),
+			 mdq_parent.u);
+
+	/* RX link: accepted frame length range */
+	link_cfg.u = 0;
+	link_cfg.s.maxlen = NIX_MAX_HW_MTU;
+	link_cfg.s.minlen = NIX_MIN_HW_MTU;
+	nix_af_reg_write(nix->nix_af,
+			 NIXX_AF_RX_LINKX_CFG(nix->lmac->link_num),
+			 link_cfg.u);
+
+	return 0;
+}
+
+/**
+ * Issue a command to the NIX AF Admin Queue
+ *
+ * @param nix_af nix AF handle
+ * @param lf     Logical function number for command
+ * @param op     Operation
+ * @param ctype  Context type
+ * @param cindex Context index
+ * @param resp   Result pointer
+ *
+ * @return	0 for success, -EBUSY on failure
+ */
+static int nix_aq_issue_command(struct nix_af *nix_af,
+				int lf,
+				int op,
+				int ctype,
+				int cindex, union nix_aq_res_s *resp)
+{
+	union nixx_af_aq_status aq_status;
+	union nix_aq_inst_s *aq_inst;
+	union nix_aq_res_s *result = resp;
+	ulong start;
+
+	debug("%s(%p, 0x%x, 0x%x, 0x%x, 0x%x, %p)\n", __func__, nix_af, lf,
+	      op, ctype, cindex, resp);
+	/* Build the instruction in the slot at the current AQ head */
+	aq_status.u = nix_af_reg_read(nix_af, NIXX_AF_AQ_STATUS());
+	aq_inst = (union nix_aq_inst_s *)(nix_af->aq.inst.base) +
+						aq_status.s.head_ptr;
+	aq_inst->u[0] = 0;
+	aq_inst->u[1] = 0;
+	aq_inst->s.op = op;
+	aq_inst->s.ctype = ctype;
+	aq_inst->s.lf = lf;
+	aq_inst->s.cindex = cindex;
+	aq_inst->s.doneint = 0;
+	aq_inst->s.res_addr = (u64)resp;	/* HW writes the result here */
+	debug("%s: inst@%p: 0x%llx 0x%llx\n", __func__, aq_inst,
+	      aq_inst->u[0], aq_inst->u[1]);
+	__iowmb();	/* instruction must be visible before the doorbell */
+
+	/* Ring doorbell and wait for result */
+	nix_af_reg_write(nix_af, NIXX_AF_AQ_DOOR(), 1);
+
+	start = get_timer(0);
+	/* Wait for completion (compcode != 0), bounded by a short timeout */
+	do {
+		WATCHDOG_RESET();
+		dsb();
+	} while (result->s.compcode == 0 && get_timer(start) < 2);
+
+	if (result->s.compcode != NIX_AQ_COMP_E_GOOD) {
+		printf("NIX:AQ fail or time out with code %d after %ld ms\n",
+		       result->s.compcode, get_timer(start));
+		return -EBUSY;
+	}
+	return 0;
+}
+
+/**
+ * Initialise the NIX receive queue context for an LF via the admin queue.
+ *
+ * The RQ is bound to the RX completion queue and to the NPA RX pool for
+ * both small and large buffers; RED/drop thresholds are disabled.
+ *
+ * @param nix_af NIX AF handle
+ * @param lf     logical function owning the RQ
+ *
+ * @return 0 on success, or the nix_aq_issue_command() error code
+ */
+static int nix_attach_receive_queue(struct nix_af *nix_af, int lf)
+{
+	struct nix_aq_rq_request rq_req ALIGNED;
+	int err;
+
+	debug("%s(%p, %d)\n", __func__, nix_af, lf);
+
+	memset(&rq_req, 0, sizeof(struct nix_aq_rq_request));
+
+	rq_req.rq.s.ena = 1;
+	rq_req.rq.s.spb_ena = 1;
+	rq_req.rq.s.ipsech_ena = 0;
+	rq_req.rq.s.ena_wqwd = 0;
+	rq_req.rq.s.cq = NIX_CQ_RX;
+	rq_req.rq.s.substream = 0;	/* FIXME: Substream IDs? */
+	rq_req.rq.s.wqe_aura = -1;	/* No WQE aura */
+	rq_req.rq.s.spb_aura = NPA_POOL_RX;
+	rq_req.rq.s.lpb_aura = NPA_POOL_RX;
+	/* U-Boot doesn't use WQE group for anything */
+	rq_req.rq.s.pb_caching = 1;
+	rq_req.rq.s.xqe_drop_ena = 0;	/* Disable RED dropping */
+	rq_req.rq.s.spb_drop_ena = 0;
+	rq_req.rq.s.lpb_drop_ena = 0;
+	rq_req.rq.s.spb_sizem1 = (MAX_MTU / (3 * 8)) - 1; /* 512 bytes */
+	rq_req.rq.s.lpb_sizem1 = (MAX_MTU / 8) - 1;
+	rq_req.rq.s.first_skip = 0;
+	rq_req.rq.s.later_skip = 0;
+	rq_req.rq.s.xqe_imm_copy = 0;
+	rq_req.rq.s.xqe_hdr_split = 0;
+	rq_req.rq.s.xqe_drop = 0;
+	rq_req.rq.s.xqe_pass = 0;
+	rq_req.rq.s.wqe_pool_drop = 0;	/* No WQE pool */
+	rq_req.rq.s.wqe_pool_pass = 0;	/* No WQE pool */
+	rq_req.rq.s.spb_aura_drop = 255;
+	rq_req.rq.s.spb_aura_pass = 255;
+	rq_req.rq.s.spb_pool_drop = 0;
+	rq_req.rq.s.spb_pool_pass = 0;
+	rq_req.rq.s.lpb_aura_drop = 255;
+	rq_req.rq.s.lpb_aura_pass = 255;
+	rq_req.rq.s.lpb_pool_drop = 0;
+	rq_req.rq.s.lpb_pool_pass = 0;
+	rq_req.rq.s.qint_idx = 0;
+
+	err = nix_aq_issue_command(nix_af, lf,
+				   NIX_AQ_INSTOP_E_INIT,
+				   NIX_AQ_CTYPE_E_RQ,
+				   0, &rq_req.resp);
+	if (err) {
+		/* Was "send queue" (copy/paste); this path sets up the RQ */
+		printf("%s: Error requesting receive queue\n", __func__);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * Initialise the NIX send queue context for an LF via the admin queue.
+ *
+ * First programs the TX scheduler chain (nix_af_setup_sq()), then
+ * issues an SQ INIT bound to the TX completion queue and the SQB pool.
+ *
+ * @param nix NIX handle owning the SQ
+ *
+ * @return 0 on success, or a negative error code
+ */
+static int nix_attach_send_queue(struct nix *nix)
+{
+	struct nix_af *nix_af = nix->nix_af;
+	struct nix_aq_sq_request sq_req ALIGNED;
+	int err;
+
+	debug("%s(%p)\n", __func__, nix_af);
+	err = nix_af_setup_sq(nix);
+	/* Previously this result was silently overwritten below */
+	if (err) {
+		printf("%s: Error setting up send queue scheduling\n",
+		       __func__);
+		return err;
+	}
+
+	memset(&sq_req, 0, sizeof(sq_req));
+
+	sq_req.sq.s.ena = 1;
+	sq_req.sq.s.cq_ena = 1;
+	sq_req.sq.s.max_sqe_size = NIX_MAXSQESZ_E_W16;
+	sq_req.sq.s.substream = 0; // FIXME: Substream IDs?
+	sq_req.sq.s.sdp_mcast = 0;
+	sq_req.sq.s.cq = NIX_CQ_TX;
+	sq_req.sq.s.cq_limit = 0;
+	sq_req.sq.s.smq = nix->lmac->link_num; // scheduling index
+	sq_req.sq.s.sso_ena = 0;
+	sq_req.sq.s.smq_rr_quantum = MAX_MTU / 4;
+	sq_req.sq.s.default_chan = nix->lmac->chan_num;
+	sq_req.sq.s.sqe_stype = NIX_STYPE_E_STP;
+	sq_req.sq.s.qint_idx = 0;
+	sq_req.sq.s.sqb_aura = NPA_POOL_SQB;
+
+	err = nix_aq_issue_command(nix_af, nix->lf,
+				   NIX_AQ_INSTOP_E_INIT,
+				   NIX_AQ_CTYPE_E_SQ,
+				   0, &sq_req.resp);
+	if (err) {
+		printf("%s: Error requesting send queue\n", __func__);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * Initialise one NIX completion queue context via the admin queue.
+ *
+ * @param nix    NIX handle owning the CQ
+ * @param cq_idx index into nix->cq[] of the queue to initialise
+ *
+ * @return 0 on success, or the nix_aq_issue_command() error code
+ */
+static int nix_attach_completion_queue(struct nix *nix, int cq_idx)
+{
+	struct nix_af *nix_af = nix->nix_af;
+	struct nix_aq_cq_request cq_req ALIGNED;
+	int err;
+
+	debug("%s(%p)\n", __func__, nix_af);
+	memset(&cq_req, 0, sizeof(cq_req));
+	cq_req.cq.s.ena = 1;
+	cq_req.cq.s.bpid = nix->lmac->pknd;
+	cq_req.cq.s.substream = 0;	/* FIXME: Substream IDs? */
+	cq_req.cq.s.drop_ena = 0;
+	cq_req.cq.s.caching = 1;
+	cq_req.cq.s.qsize = CQS_QSIZE;
+	cq_req.cq.s.drop = 255 * 7 / 8;	/* drop threshold at 7/8 full */
+	cq_req.cq.s.qint_idx = 0;
+	cq_req.cq.s.cint_idx = 0;
+	cq_req.cq.s.base = nix->cq[cq_idx].iova;
+	debug("%s: CQ(%d)  base %p\n", __func__, cq_idx,
+	      nix->cq[cq_idx].base);
+
+	err = nix_aq_issue_command(nix_af, nix->lf,
+				   NIX_AQ_INSTOP_E_INIT,
+				   NIX_AQ_CTYPE_E_CQ,
+				   cq_idx, &cq_req.resp);
+	if (err) {
+		printf("%s: Error requesting completion queue\n", __func__);
+		return err;
+	}
+	debug("%s: CQ(%d) allocated, base %p\n", __func__, cq_idx,
+	      nix->cq[cq_idx].base);
+
+	return 0;
+}
+
+/**
+ * Bring up one NIX LF: reset it, program all HW context bases
+ * (RQ/SQ/CQ/RSS/CINT/QINT), then attach the CQs, RQs and SQs.
+ *
+ * @param nix NIX handle describing the LF and its context memory
+ *
+ * @return 0 on success, or a queue-attach error code
+ */
+int nix_lf_admin_setup(struct nix *nix)
+{
+	union nixx_af_lfx_rqs_cfg rqs_cfg;
+	union nixx_af_lfx_sqs_cfg sqs_cfg;
+	union nixx_af_lfx_cqs_cfg cqs_cfg;
+	union nixx_af_lfx_rss_cfg rss_cfg;
+	union nixx_af_lfx_cints_cfg cints_cfg;
+	union nixx_af_lfx_qints_cfg qints_cfg;
+	union nixx_af_lfx_rss_grpx rss_grp;
+	union nixx_af_lfx_tx_cfg2 tx_cfg2;
+	union nixx_af_lfx_cfg lfx_cfg;
+	union nixx_af_lf_rst lf_rst;
+	u32 index;
+	struct nix_af *nix_af = nix->nix_af;
+	int err;
+
+	/* Reset the LF */
+	lf_rst.u = 0;
+	lf_rst.s.lf = nix->lf;
+	lf_rst.s.exec = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_LF_RST(), lf_rst.u);
+
+	do {
+		lf_rst.u = nix_af_reg_read(nix_af, NIXX_AF_LF_RST());
+		WATCHDOG_RESET();
+	} while (lf_rst.s.exec);
+
+	/* Config NIX RQ HW context and base*/
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_RQS_BASE(nix->lf),
+			 (u64)nix->rq_ctx_base);
+	/* Set caching and queue count in HW */
+	rqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_RQS_CFG(nix->lf));
+	rqs_cfg.s.caching = 1;
+	rqs_cfg.s.max_queuesm1 = nix->rq_cnt - 1;
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_RQS_CFG(nix->lf), rqs_cfg.u);
+
+	/* Config NIX SQ HW context and base*/
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_SQS_BASE(nix->lf),
+			 (u64)nix->sq_ctx_base);
+	sqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_SQS_CFG(nix->lf));
+	sqs_cfg.s.caching = 1;
+	sqs_cfg.s.max_queuesm1 = nix->sq_cnt - 1;
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_SQS_CFG(nix->lf), sqs_cfg.u);
+
+	/* Config NIX CQ HW context and base*/
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_CQS_BASE(nix->lf),
+			 (u64)nix->cq_ctx_base);
+	cqs_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_CQS_CFG(nix->lf));
+	cqs_cfg.s.caching = 1;
+	cqs_cfg.s.max_queuesm1 = nix->cq_cnt - 1;
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_CQS_CFG(nix->lf), cqs_cfg.u);
+
+	/* Config NIX RSS HW context and base */
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_RSS_BASE(nix->lf),
+			 (u64)nix->rss_base);
+	rss_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_RSS_CFG(nix->lf));
+	rss_cfg.s.ena = 1;
+	rss_cfg.s.size = ilog2(nix->rss_sz) / 256;
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_RSS_CFG(nix->lf), rss_cfg.u);
+
+	/* One RSS group per configured group, spaced rss_sz apart */
+	for (index = 0; index < nix->rss_grps; index++) {
+		rss_grp.u = 0;
+		rss_grp.s.sizem1 = 0x7;
+		rss_grp.s.offset = nix->rss_sz * index;
+		nix_af_reg_write(nix_af,
+				 NIXX_AF_LFX_RSS_GRPX(nix->lf, index),
+				 rss_grp.u);
+	}
+
+	/* Config CQints HW contexts and base */
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_CINTS_BASE(nix->lf),
+			 (u64)nix->cint_base);
+	cints_cfg.u = nix_af_reg_read(nix_af,
+				      NIXX_AF_LFX_CINTS_CFG(nix->lf));
+	cints_cfg.s.caching = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_CINTS_CFG(nix->lf),
+			 cints_cfg.u);
+
+	/* Config Qints HW context and base */
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_QINTS_BASE(nix->lf),
+			 (u64)nix->qint_base);
+	qints_cfg.u = nix_af_reg_read(nix_af,
+				      NIXX_AF_LFX_QINTS_CFG(nix->lf));
+	qints_cfg.s.caching = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_QINTS_CFG(nix->lf),
+			 qints_cfg.u);
+
+	debug("%s(%p, %d, %d)\n", __func__, nix_af, nix->lf, nix->pf);
+
+	/* Enable LMTST for this NIX LF */
+	tx_cfg2.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_TX_CFG2(nix->lf));
+	tx_cfg2.s.lmt_ena = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_TX_CFG2(nix->lf), tx_cfg2.u);
+
+	/* Use 16-word XQEs, write the npa pf_func number only */
+	lfx_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_LFX_CFG(nix->lf));
+	lfx_cfg.s.xqe_size = NIX_XQESZ_E_W16;
+	lfx_cfg.s.npa_pf_func = nix->pf_func;
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_CFG(nix->lf), lfx_cfg.u);
+
+	nix_af_reg_write(nix_af, NIXX_AF_LFX_RX_CFG(nix->lf), 0);
+
+	/* Finally attach all queues through the admin queue */
+	for (index = 0; index < nix->cq_cnt; index++) {
+		err = nix_attach_completion_queue(nix, index);
+		if (err) {
+			printf("%s: Error attaching completion queue %d\n",
+			       __func__, index);
+			return err;
+		}
+	}
+
+	for (index = 0; index < nix->rq_cnt; index++) {
+		err = nix_attach_receive_queue(nix_af, nix->lf);
+		if (err) {
+			printf("%s: Error attaching receive queue %d\n",
+			       __func__, index);
+			return err;
+		}
+	}
+
+	for (index = 0; index < nix->sq_cnt; index++) {
+		err = nix_attach_send_queue(nix);
+		if (err) {
+			printf("%s: Error attaching send queue %d\n",
+			       __func__, index);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Shut down one NIX LF: flush TX, disable every RQ/SQ/CQ context via
+ * masked AQ writes, then reset the LF.
+ *
+ * @param nix_af   NIX AF handle
+ * @param lf       logical function to shut down
+ * @param cq_count number of completion queues to disable
+ * @param rq_count number of receive queues to disable
+ * @param sq_count number of send queues to disable
+ *
+ * @return 0 on success, or a nix_aq_issue_command() error code
+ */
+int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf,
+			  u32 cq_count, u32 rq_count, u32 sq_count)
+{
+	union nixx_af_rx_sw_sync sw_sync;
+	union nixx_af_lf_rst lf_rst;
+	int index, err;
+
+	/* Flush all tx packets */
+	sw_sync.u = 0;
+	sw_sync.s.ena = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_RX_SW_SYNC(), sw_sync.u);
+
+	do {
+		sw_sync.u = nix_af_reg_read(nix_af, NIXX_AF_RX_SW_SYNC());
+		WATCHDOG_RESET();
+	} while (sw_sync.s.ena);
+
+	/* Disable every RQ (second context copy is the write mask) */
+	for (index = 0; index < rq_count; index++) {
+		memset((void *)&rq_dis, 0, sizeof(rq_dis));
+		rq_dis.rq.s.ena = 0;	/* Context */
+		rq_dis.mrq.s.ena = 1;	/* Mask */
+		__iowmb();
+
+		err = nix_aq_issue_command(nix_af, lf,
+					   NIX_AQ_INSTOP_E_WRITE,
+					   NIX_AQ_CTYPE_E_RQ,
+					   index, &rq_dis.resp);
+		if (err) {
+			printf("%s: Error disabling LF %d RQ(%d)\n",
+			       __func__, lf, index);
+			return err;
+		}
+		debug("%s: LF %d RQ(%d) disabled\n", __func__, lf, index);
+	}
+
+	/* Disable every SQ */
+	for (index = 0; index < sq_count; index++) {
+		memset((void *)&sq_dis, 0, sizeof(sq_dis));
+		sq_dis.sq.s.ena = 0;	/* Context */
+		sq_dis.msq.s.ena = 1;	/* Mask */
+		__iowmb();
+
+		err = nix_aq_issue_command(nix_af, lf,
+					   NIX_AQ_INSTOP_E_WRITE,
+					   NIX_AQ_CTYPE_E_SQ,
+					   index, &sq_dis.resp);
+		if (err) {
+			printf("%s: Error disabling LF %d SQ(%d)\n",
+			       __func__, lf, index);
+			return err;
+		}
+		debug("%s: LF %d SQ(%d) disabled\n", __func__, lf, index);
+	}
+
+	/* Disable every CQ */
+	for (index = 0; index < cq_count; index++) {
+		memset((void *)&cq_dis, 0, sizeof(cq_dis));
+		cq_dis.cq.s.ena = 0;	/* Context */
+		cq_dis.mcq.s.ena = 1;	/* Mask */
+		__iowmb();
+
+		err = nix_aq_issue_command(nix_af, lf,
+					   NIX_AQ_INSTOP_E_WRITE,
+					   NIX_AQ_CTYPE_E_CQ,
+					   index, &cq_dis.resp);
+		if (err) {
+			printf("%s: Error disabling LF %d CQ(%d)\n",
+			       __func__, lf, index);
+			return err;
+		}
+		debug("%s: LF %d CQ(%d) disabled\n", __func__, lf, index);
+	}
+
+	/* Reset the LF */
+	lf_rst.u = 0;
+	lf_rst.s.lf = lf;
+	lf_rst.s.exec = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_LF_RST(), lf_rst.u);
+
+	do {
+		lf_rst.u = nix_af_reg_read(nix_af, NIXX_AF_LF_RST());
+		WATCHDOG_RESET();
+	} while (lf_rst.s.exec);
+
+	return 0;
+}
+
+/**
+ * Program the NPC parser/MCAM so RX traffic on this LF's channel is
+ * steered to the LF and everything else is dropped.
+ *
+ * Uses the LMAC link number as both the pkind and the MCAM entry/bank
+ * index.  KPUs are disabled (parse_done straight away), a single X1
+ * MCAM entry matches the LF's channel, and miss actions are DROP (RX)
+ * and UCAST_DEFAULT (TX).
+ *
+ * @param nix NIX handle owning the LF
+ *
+ * @return 0
+ */
+int npc_lf_admin_setup(struct nix *nix)
+{
+	union npc_af_const af_const;
+	union npc_af_pkindx_action0 action0;
+	union npc_af_pkindx_action1 action1;
+	union npc_af_intfx_kex_cfg kex_cfg;
+	union npc_af_intfx_miss_stat_act intfx_stat_act;
+	union npc_af_mcamex_bankx_camx_intf camx_intf;
+	union npc_af_mcamex_bankx_camx_w0 camx_w0;
+	union npc_af_mcamex_bankx_cfg bankx_cfg;
+	union npc_af_mcamex_bankx_stat_act mcamex_stat_act;
+
+	union nix_rx_action_s rx_action;
+	union nix_tx_action_s tx_action;
+
+	struct nix_af *nix_af = nix->nix_af;
+	u32 kpus;
+	int pkind = nix->lmac->link_num;
+	int index;
+	u64 offset;
+
+	debug("%s(%p, pkind 0x%x)\n", __func__, nix_af, pkind);
+	af_const.u = npc_af_reg_read(nix_af, NPC_AF_CONST());
+	kpus = af_const.s.kpus;
+
+	/* Mark parsing complete immediately for this pkind */
+	action0.u = 0;
+	action0.s.parse_done = 1;
+	npc_af_reg_write(nix_af, NPC_AF_PKINDX_ACTION0(pkind), action0.u);
+
+	action1.u = 0;
+	npc_af_reg_write(nix_af, NPC_AF_PKINDX_ACTION1(pkind), action1.u);
+
+	/* X1 key with channel nibbles enabled on the RX interface */
+	kex_cfg.u = 0;
+	kex_cfg.s.keyw = NPC_MCAMKEYW_E_X1;
+	kex_cfg.s.parse_nibble_ena = 0x7;
+	npc_af_reg_write(nix_af,
+			 NPC_AF_INTFX_KEX_CFG(NPC_INTF_E_NIXX_RX(0)),
+			 kex_cfg.u);
+
+	/* HW Issue */
+	kex_cfg.u = 0;
+	kex_cfg.s.parse_nibble_ena = 0x7;
+	npc_af_reg_write(nix_af,
+			 NPC_AF_INTFX_KEX_CFG(NPC_INTF_E_NIXX_TX(0)),
+			 kex_cfg.u);
+
+	/* CAM0/CAM1 encode the interface in complementary form */
+	camx_intf.u = 0;
+	camx_intf.s.intf = ~NPC_INTF_E_NIXX_RX(0);
+	npc_af_reg_write(nix_af,
+			 NPC_AF_MCAMEX_BANKX_CAMX_INTF(pkind, 0, 0),
+			 camx_intf.u);
+
+	camx_intf.u = 0;
+	camx_intf.s.intf = NPC_INTF_E_NIXX_RX(0);
+	npc_af_reg_write(nix_af,
+			 NPC_AF_MCAMEX_BANKX_CAMX_INTF(pkind, 0, 1),
+			 camx_intf.u);
+
+	/* Match the LF's 12-bit channel number in W0 */
+	camx_w0.u = 0;
+	camx_w0.s.md = ~(nix->lmac->chan_num) & (~((~0x0ull) << 12));
+	debug("NPC LF ADMIN camx_w0.u %llx\n", camx_w0.u);
+	npc_af_reg_write(nix_af,
+			 NPC_AF_MCAMEX_BANKX_CAMX_W0(pkind, 0, 0),
+			 camx_w0.u);
+
+	camx_w0.u = 0;
+	camx_w0.s.md = nix->lmac->chan_num;
+	npc_af_reg_write(nix_af,
+			 NPC_AF_MCAMEX_BANKX_CAMX_W0(pkind, 0, 1),
+			 camx_w0.u);
+
+	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CAMX_W1(pkind, 0, 0),
+			 0);
+
+	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CAMX_W1(pkind, 0, 1),
+			 0);
+
+	/* Enable stats for NPC INTF RX */
+	mcamex_stat_act.u = 0;
+	mcamex_stat_act.s.ena = 1;
+	mcamex_stat_act.s.stat_sel = pkind;
+	npc_af_reg_write(nix_af,
+			 NPC_AF_MCAMEX_BANKX_STAT_ACT(pkind, 0),
+			 mcamex_stat_act.u);
+	intfx_stat_act.u = 0;
+	intfx_stat_act.s.ena = 1;
+	intfx_stat_act.s.stat_sel = 16;
+	offset = NPC_AF_INTFX_MISS_STAT_ACT(NPC_INTF_E_NIXX_RX(0));
+	npc_af_reg_write(nix_af, offset, intfx_stat_act.u);
+	/* MCAM hit: unicast the packet to this LF's pf_func */
+	rx_action.u = 0;
+	rx_action.s.pf_func = nix->pf_func;
+	rx_action.s.op = NIX_RX_ACTIONOP_E_UCAST;
+	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_ACTION(pkind, 0),
+			 rx_action.u);
+
+	/* Disable all KPUs - parsing was already marked done above */
+	for (index = 0; index < kpus; index++)
+		npc_af_reg_write(nix_af, NPC_AF_KPUX_CFG(index), 0);
+
+	/* MCAM miss on RX: drop */
+	rx_action.u = 0;
+	rx_action.s.pf_func = nix->pf_func;
+	rx_action.s.op = NIX_RX_ACTIONOP_E_DROP;
+	npc_af_reg_write(nix_af,
+			 NPC_AF_INTFX_MISS_ACT(NPC_INTF_E_NIXX_RX(0)),
+			 rx_action.u);
+	bankx_cfg.u = 0;
+	bankx_cfg.s.ena = 1;
+	npc_af_reg_write(nix_af, NPC_AF_MCAMEX_BANKX_CFG(pkind, 0),
+			 bankx_cfg.u);
+
+	/* MCAM miss on TX: default unicast */
+	tx_action.u = 0;
+	tx_action.s.op = NIX_TX_ACTIONOP_E_UCAST_DEFAULT;
+	npc_af_reg_write(nix_af,
+			 NPC_AF_INTFX_MISS_ACT(NPC_INTF_E_NIXX_TX(0)),
+			 tx_action.u);
+
+#ifdef DEBUG
+	/* Enable debug capture on RX intf */
+	npc_af_reg_write(nix_af, NPC_AF_DBG_CTL(), 0x4);
+#endif
+
+	return 0;
+}
+
+/**
+ * Soft-reset the NPC block.
+ *
+ * @param nix_af NIX AF handle carrying the NPC base
+ *
+ * @return 0 (polls NPC_AF_BLK_RST until the reset completes)
+ */
+int npc_af_shutdown(struct nix_af *nix_af)
+{
+	union npc_af_blk_rst rst = { .u = 0 };
+
+	rst.s.rst = 1;
+	npc_af_reg_write(nix_af, NPC_AF_BLK_RST(), rst.u);
+
+	/* Poll until the block reports the reset has finished */
+	do {
+		WATCHDOG_RESET();
+		rst.u = npc_af_reg_read(nix_af, NPC_AF_BLK_RST());
+	} while (rst.s.busy);
+
+	debug("%s: npc af reset --\n", __func__);
+
+	return 0;
+}
+
+/**
+ * One-time NIX AF block initialisation.
+ *
+ * Allocates the NIX admin queue, soft-resets the block, runs X2P
+ * calibration, enables all NDC caches, programs the AQ size/base and
+ * caches the HW context sizes reported by the CONST registers.
+ *
+ * @param nix_af NIX AF state to initialise
+ *
+ * @return 0 on success, negative on allocation or config failure
+ */
+int nix_af_setup(struct nix_af *nix_af)
+{
+	int err;
+	union nixx_af_const2 af_const2;
+	union nixx_af_const3 af_const3;
+	union nixx_af_sq_const sq_const;
+	union nixx_af_cfg af_cfg;
+	union nixx_af_status af_status;
+	union nixx_af_ndc_cfg ndc_cfg;
+	union nixx_af_aq_cfg aq_cfg;
+	union nixx_af_blk_rst blk_rst;
+
+	debug("%s(%p)\n", __func__, nix_af);
+	err = rvu_aq_alloc(&nix_af->aq, Q_COUNT(AQ_SIZE),
+			   sizeof(union nix_aq_inst_s),
+			   sizeof(union nix_aq_res_s));
+	if (err) {
+		printf("%s: Error allocating nix admin queue\n", __func__);
+		return err;
+	}
+
+	/* Soft-reset the NIX block before configuring it */
+	blk_rst.u = 0;
+	blk_rst.s.rst = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_BLK_RST(), blk_rst.u);
+
+	/* Wait for reset to complete */
+	do {
+		blk_rst.u = nix_af_reg_read(nix_af, NIXX_AF_BLK_RST());
+		WATCHDOG_RESET();
+	} while (blk_rst.s.busy);
+
+	/* Put in LE mode */
+	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
+	if (af_cfg.s.force_cond_clk_en || af_cfg.s.calibrate_x2p ||
+	    af_cfg.s.force_intf_clk_en) {
+		printf("%s: Error: Invalid NIX_AF_CFG value 0x%llx\n",
+		       __func__, af_cfg.u);
+		return -1;
+	}
+	af_cfg.s.af_be = 0;
+	af_cfg.u |= 0x5E;	/* HW Issue */
+	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);
+
+	/* Perform Calibration */
+	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
+	af_cfg.s.calibrate_x2p = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);
+
+	/* Wait for calibration to complete */
+	do {
+		af_status.u = nix_af_reg_read(nix_af, NIXX_AF_STATUS());
+		WATCHDOG_RESET();
+	} while (af_status.s.calibrate_done == 0);
+
+	af_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_CFG());
+	af_cfg.s.calibrate_x2p = 0;
+	nix_af_reg_write(nix_af, NIXX_AF_CFG(), af_cfg.u);
+
+	/* Enable NDC cache (clear every bypass bit) */
+	ndc_cfg.u = nix_af_reg_read(nix_af, NIXX_AF_NDC_CFG());
+	ndc_cfg.s.ndc_ign_pois = 0;
+	ndc_cfg.s.byp_sq = 0;
+	ndc_cfg.s.byp_sqb = 0;
+	ndc_cfg.s.byp_cqs = 0;
+	ndc_cfg.s.byp_cints = 0;
+	ndc_cfg.s.byp_dyno = 0;
+	ndc_cfg.s.byp_mce = 0;
+	ndc_cfg.s.byp_rqc = 0;
+	ndc_cfg.s.byp_rsse = 0;
+	ndc_cfg.s.byp_mc_data = 0;
+	ndc_cfg.s.byp_mc_wqe = 0;
+	ndc_cfg.s.byp_mr_data = 0;
+	ndc_cfg.s.byp_mr_wqe = 0;
+	ndc_cfg.s.byp_qints = 0;
+	nix_af_reg_write(nix_af, NIXX_AF_NDC_CFG(), ndc_cfg.u);
+
+	/* Set up queue size */
+	aq_cfg.u = 0;
+	aq_cfg.s.qsize = AQ_SIZE;
+	nix_af_reg_write(nix_af, NIXX_AF_AQ_CFG(), aq_cfg.u);
+
+	/* Set up queue base address */
+	nix_af_reg_write(nix_af, NIXX_AF_AQ_BASE(), nix_af->aq.inst.iova);
+
+	/* Cache HW-reported context sizes and interrupt counts */
+	af_const3.u = nix_af_reg_read(nix_af, NIXX_AF_CONST3());
+	af_const2.u = nix_af_reg_read(nix_af, NIXX_AF_CONST2());
+	sq_const.u = nix_af_reg_read(nix_af, NIXX_AF_SQ_CONST());
+	nix_af->rq_ctx_sz = 1ULL << af_const3.s.rq_ctx_log2bytes;
+	nix_af->sq_ctx_sz = 1ULL << af_const3.s.sq_ctx_log2bytes;
+	nix_af->cq_ctx_sz = 1ULL << af_const3.s.cq_ctx_log2bytes;
+	nix_af->rsse_ctx_sz = 1ULL << af_const3.s.rsse_log2bytes;
+	nix_af->qints = af_const2.s.qints;
+	nix_af->cints = af_const2.s.cints;
+	nix_af->cint_ctx_sz = 1ULL << af_const3.s.cint_log2bytes;
+	nix_af->qint_ctx_sz = 1ULL << af_const3.s.qint_log2bytes;
+	nix_af->sqb_size = sq_const.s.sqb_size;
+
+	return 0;
+}
+
+/**
+ * Soft-reset the NIX block and release its admin queue memory.
+ *
+ * @param nix_af NIX AF state to tear down
+ *
+ * @return 0 (polls NIXX_AF_BLK_RST until the reset completes)
+ */
+int nix_af_shutdown(struct nix_af *nix_af)
+{
+	union nixx_af_blk_rst rst = { .u = 0 };
+
+	rst.s.rst = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_BLK_RST(), rst.u);
+
+	/* Poll until the block reports the reset has finished */
+	do {
+		WATCHDOG_RESET();
+		rst.u = nix_af_reg_read(nix_af, NIXX_AF_BLK_RST());
+	} while (rst.s.busy);
+
+	rvu_aq_free(&nix_af->aq);
+
+	debug("%s: nix af reset --\n", __func__);
+
+	return 0;
+}
diff --git a/drivers/net/octeontx2/npc.h b/drivers/net/octeontx2/npc.h
new file mode 100644
index 0000000..6e645cd
--- /dev/null
+++ b/drivers/net/octeontx2/npc.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __NPC_H__
+#define __NPC_H__
+
+#define RSVD_MCAM_ENTRIES_PER_PF	2	/** Ucast and Bcast */
+#define RSVD_MCAM_ENTRIES_PER_NIXLF	1	/** Ucast for VFs */
+
+struct npc_kpu_profile_cam {
+	u8 state;
+	u8 state_mask;
+	u16 dp0;
+	u16 dp0_mask;
+	u16 dp1;
+	u16 dp1_mask;
+	u16 dp2;
+	u16 dp2_mask;
+};
+
+struct npc_kpu_profile_action {
+	u8 errlev;
+	u8 errcode;
+	u8 dp0_offset;
+	u8 dp1_offset;
+	u8 dp2_offset;
+	u8 bypass_count;
+	u8 parse_done;
+	u8 next_state;
+	u8 ptr_advance;
+	u8 cap_ena;
+	u8 lid;
+	u8 ltype;
+	u8 flags;
+	u8 offset;
+	u8 mask;
+	u8 right;
+	u8 shift;
+};
+
+struct npc_kpu_profile {
+	int cam_entries;
+	int action_entries;
+	struct npc_kpu_profile_cam *cam;
+	struct npc_kpu_profile_action *action;
+};
+
+struct npc_pkind {
+	struct rsrc_bmap rsrc;
+	u32	*pfchan_map;
+};
+
+struct npc_mcam {
+	struct rsrc_bmap rsrc;
+	u16	*pfvf_map;
+	u16	total_entries; /* Total number of MCAM entries */
+	u16	entries;  /* Total - reserved for NIX LFs */
+	u8	banks_per_entry;  /* Number of keywords in key */
+	u8	keysize;
+	u8	banks;    /* Number of MCAM banks */
+	u16	banksize; /* Number of MCAM entries in each bank */
+	u16	counters; /* Number of match counters */
+	u16	nixlf_offset;
+	u16	pf_offset;
+};
+
+struct nix_af_handle;
+struct nix_handle;
+struct rvu_hwinfo;
+
+struct npc_af {
+	struct nix_af_handle	*nix_af;
+	struct npc_pkind	pkind;
+	void __iomem		*npc_af_base;
+	u8			npc_kpus;	/** Number of parser units */
+	struct npc_mcam		mcam;
+	struct rvu_block	block;
+	struct rvu_hwinfo	*hw;
+};
+
+struct npc {
+	struct npc_af		*npc_af;
+	void __iomem		*npc_base;
+	struct nix_handle	*nix;
+};
+
+#endif /* __NPC_H__ */
+
diff --git a/drivers/net/octeontx2/rvu.h b/drivers/net/octeontx2/rvu.h
new file mode 100644
index 0000000..f455260
--- /dev/null
+++ b/drivers/net/octeontx2/rvu.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __RVU_H__
+#define __RVU_H__
+
+#include <asm/arch/csrs/csrs-rvu.h>
+
+#define ALIGNED		__aligned(CONFIG_SYS_CACHELINE_SIZE)
+
+#define Q_SIZE_16		0ULL /* 16 entries */
+#define Q_SIZE_64		1ULL /* 64 entries */
+#define Q_SIZE_256		2ULL
+#define Q_SIZE_1K		3ULL
+#define Q_SIZE_4K		4ULL
+#define Q_SIZE_16K		5ULL
+#define Q_SIZE_64K		6ULL
+#define Q_SIZE_256K		7ULL
+#define Q_SIZE_1M		8ULL /* Million entries */
+#define Q_SIZE_MIN		Q_SIZE_16
+#define Q_SIZE_MAX		Q_SIZE_1M
+
+#define Q_COUNT(x)		(16ULL << (2 * (x)))
+#define Q_SIZE(x, n)		((ilog2(x) - (n)) / 2)
+
+/* Admin queue info */
+
+/* Since we intend to add only one instruction at a time,
+ * keep queue size to it's minimum.
+ */
+#define AQ_SIZE			Q_SIZE_16
+/* HW head & tail pointer mask */
+#define AQ_PTR_MASK		0xFFFFF
+
+struct qmem {
+	void		*base;
+	dma_addr_t	iova;
+	size_t		alloc_sz;
+	u32		qsize;
+	u8		entry_sz;
+};
+
+struct admin_queue {
+	struct qmem inst;
+	struct qmem res;
+};
+
+struct rvu_af {
+	struct udevice *dev;
+	void __iomem *af_base;
+	struct nix_af *nix_af;
+};
+
+struct rvu_pf {
+	struct udevice *dev;
+	struct udevice *afdev;
+	void __iomem *pf_base;
+	struct nix *nix;
+	u8 pfid;
+	int nix_lfid;
+	int npa_lfid;
+};
+
+/**
+ * Store 128 bit value
+ *
+ * @param[out]	dest	pointer to destination address
+ * @param	val0	first 64 bits to write
+ * @param	val1	second 64 bits to write
+ */
+static inline void st128(void *dest, u64 val0, u64 val1)
+{
+	__asm__ __volatile__("stp %x[x0], %x[x1], [%[pm]]" :
+		: [x0]"r"(val0), [x1]"r"(val1), [pm]"r"(dest)
+		: "memory");
+}
+
+/**
+ * Load 128 bit value
+ *
+ * @param[in]	source		pointer to 128 bits of data to load
+ * @param[out]	val0		first 64 bits of data
+ * @param[out]	val1		second 64 bits of data
+ */
+static inline void ld128(const u64 *src, u64 *val0, u64 *val1)
+{
+	__asm__ __volatile__ ("ldp %x[x0], %x[x1], [%[pm]]"
+		 : [x0]"=r"(*val0), [x1]"=r"(*val1) : [pm]"r"(src));
+}
+
+void qmem_free(struct qmem *q);
+int qmem_alloc(struct qmem *q, u32 qsize, size_t entry_sz);
+
+/**
+ * Allocates an admin queue for instructions and results
+ *
+ * @param	aq	admin queue to allocate for
+ * @param	qsize	Number of entries in the queue
+ * @param	inst_size	Size of each instruction
+ * @param	res_size	Size of each result
+ *
+ * @return	-ENOMEM on error, 0 on success
+ */
+int rvu_aq_alloc(struct admin_queue *aq, unsigned int qsize,
+		 size_t inst_size, size_t res_size);
+
+/**
+ * Frees an admin queue
+ *
+ * @param	aq	Admin queue to free
+ */
+void rvu_aq_free(struct admin_queue *aq);
+
+void rvu_get_lfid_for_pf(int pf, int *nixid, int *npaid);
+
+#endif /* __RVU_H__ */
+
diff --git a/drivers/net/octeontx2/rvu_af.c b/drivers/net/octeontx2/rvu_af.c
new file mode 100644
index 0000000..7750089
--- /dev/null
+++ b/drivers/net/octeontx2/rvu_af.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci_ids.h>
+#include <linux/list.h>
+#include <asm/io.h>
+#include <asm/arch/board.h>
+#include <asm/arch/csrs/csrs-npa.h>
+
+#include "nix.h"
+
+struct udevice *rvu_af_dev;
+
+inline struct rvu_af *get_af(void)
+{
+	return rvu_af_dev ? dev_get_priv(rvu_af_dev) : NULL;
+}
+
+void rvu_get_lfid_for_pf(int pf, int *nixid, int *npaid)
+{
+	union nixx_af_rvu_lf_cfg_debug nix_lf_dbg;
+	union npa_af_rvu_lf_cfg_debug npa_lf_dbg;
+	union rvu_pf_func_s pf_func;
+	struct rvu_af *af = dev_get_priv(rvu_af_dev);
+	struct nix_af *nix_af = af->nix_af;
+
+	pf_func.u = 0;
+	pf_func.s.pf = pf;
+
+	nix_lf_dbg.u = 0;
+	nix_lf_dbg.s.pf_func = pf_func.u & 0xFFFF;
+	nix_lf_dbg.s.exec = 1;
+	nix_af_reg_write(nix_af, NIXX_AF_RVU_LF_CFG_DEBUG(),
+			 nix_lf_dbg.u);
+	do {
+		nix_lf_dbg.u = nix_af_reg_read(nix_af,
+					       NIXX_AF_RVU_LF_CFG_DEBUG());
+	} while (nix_lf_dbg.s.exec);
+
+	if (nix_lf_dbg.s.lf_valid)
+		*nixid = nix_lf_dbg.s.lf;
+
+	debug("%s: nix lf_valid %d lf %d nixid %d\n", __func__,
+	      nix_lf_dbg.s.lf_valid, nix_lf_dbg.s.lf, *nixid);
+
+	npa_lf_dbg.u = 0;
+	npa_lf_dbg.s.pf_func = pf_func.u & 0xFFFF;
+	npa_lf_dbg.s.exec = 1;
+	npa_af_reg_write(nix_af->npa_af, NPA_AF_RVU_LF_CFG_DEBUG(),
+			 npa_lf_dbg.u);
+	do {
+		npa_lf_dbg.u = npa_af_reg_read(nix_af->npa_af,
+					       NPA_AF_RVU_LF_CFG_DEBUG());
+	} while (npa_lf_dbg.s.exec);
+
+	if (npa_lf_dbg.s.lf_valid)
+		*npaid = npa_lf_dbg.s.lf;
+	debug("%s: npa lf_valid %d lf %d npaid %d\n", __func__,
+	      npa_lf_dbg.s.lf_valid, npa_lf_dbg.s.lf, *npaid);
+}
+
+struct nix_af *rvu_af_init(struct rvu_af *rvu_af)
+{
+	struct nix_af *nix_af;
+	union rvu_af_addr_s block_addr;
+	int err;
+
+	nix_af = (struct nix_af *)calloc(1, sizeof(struct nix_af));
+	if (!nix_af) {
+		printf("%s: out of memory\n", __func__);
+		goto error;
+	}
+
+	nix_af->dev = rvu_af->dev;
+
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
+	nix_af->nix_af_base = rvu_af->af_base + block_addr.u;
+
+	nix_af->npa_af = (struct npa_af *)calloc(1, sizeof(struct npa_af));
+	if (!nix_af->npa_af) {
+		printf("%s: out of memory\n", __func__);
+		goto error;
+	}
+
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
+	nix_af->npa_af->npa_af_base = rvu_af->af_base + block_addr.u;
+
+	block_addr.u = 0;
+	block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
+	nix_af->npc_af_base = rvu_af->af_base + block_addr.u;
+
+	debug("%s: Setting up npa admin\n", __func__);
+	err = npa_af_setup(nix_af->npa_af);
+	if (err) {
+		printf("%s: Error %d setting up NPA admin\n", __func__, err);
+		goto error;
+	}
+	debug("%s: Setting up nix af\n", __func__);
+	err = nix_af_setup(nix_af);
+	if (err) {
+		printf("%s: Error %d setting up NIX admin\n", __func__, err);
+		goto error;
+	}
+	debug("%s: nix_af: %p\n", __func__, nix_af);
+	return nix_af;
+
+error:
+	if (nix_af) {
+		if (nix_af->npa_af)
+			free(nix_af->npa_af);
+		free(nix_af);
+		nix_af = NULL;
+	}
+	return NULL;
+}
+
+int rvu_af_probe(struct udevice *dev)
+{
+	struct rvu_af *af_ptr = dev_get_priv(dev);
+
+	af_ptr->af_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+					 PCI_REGION_MEM);
+	debug("%s RVU AF BAR %p\n", __func__, af_ptr->af_base);
+	af_ptr->dev = dev;
+	rvu_af_dev = dev;
+
+	af_ptr->nix_af = rvu_af_init(af_ptr);
+	if (!af_ptr->nix_af) {
+		printf("%s: Error: could not initialize NIX AF\n", __func__);
+		return -1;
+	}
+	debug("%s: Done\n", __func__);
+
+	return 0;
+}
+
+int rvu_af_remove(struct udevice *dev)
+{
+	struct rvu_af *rvu_af = dev_get_priv(dev);
+
+	nix_af_shutdown(rvu_af->nix_af);
+	npa_af_shutdown(rvu_af->nix_af->npa_af);
+	npc_af_shutdown(rvu_af->nix_af);
+
+	debug("%s: rvu af down --\n", __func__);
+	return 0;
+}
+
+U_BOOT_DRIVER(rvu_af) = {
+	.name   = "rvu_af",
+	.id     = UCLASS_MISC,
+	.probe  = rvu_af_probe,
+	.remove = rvu_af_remove,
+	.priv_auto_alloc_size = sizeof(struct rvu_af),
+};
+
+static struct pci_device_id rvu_af_supported[] = {
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RVU_AF) },
+	{}
+};
+
+U_BOOT_PCI_DEVICE(rvu_af, rvu_af_supported);
diff --git a/drivers/net/octeontx2/rvu_common.c b/drivers/net/octeontx2/rvu_common.c
new file mode 100644
index 0000000..173b28b
--- /dev/null
+++ b/drivers/net/octeontx2/rvu_common.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <asm/io.h>
+
+#include "rvu.h"
+
+int qmem_alloc(struct qmem *q, u32 qsize, size_t entry_sz)
+{
+	q->base = memalign(CONFIG_SYS_CACHELINE_SIZE, qsize * entry_sz);
+	if (!q->base)
+		return -ENOMEM;
+	q->entry_sz = entry_sz;
+	q->qsize = qsize;
+	q->alloc_sz = (size_t)qsize * entry_sz;
+	q->iova = (dma_addr_t)(q->base);
+	debug("NIX: qmem alloc for (%d * %d = %ld bytes) at %p\n",
+	      q->qsize, q->entry_sz, q->alloc_sz, q->base);
+	return 0;
+}
+
+void qmem_free(struct qmem *q)
+{
+	if (q->base)
+		free(q->base);
+	memset(q, 0, sizeof(*q));
+}
+
+/**
+ * Allocates an admin queue for instructions and results
+ *
+ * @param	aq	admin queue to allocate for
+ * @param	qsize	Number of entries in the queue
+ * @param	inst_size	Size of each instruction
+ * @param	res_size	Size of each result
+ *
+ * @return	-ENOMEM on error, 0 on success
+ */
+int rvu_aq_alloc(struct admin_queue *aq, unsigned int qsize,
+		 size_t inst_size, size_t res_size)
+{
+	int err;
+
+	err = qmem_alloc(&aq->inst, qsize, inst_size);
+	if (err)
+		return err;
+	err = qmem_alloc(&aq->res, qsize, res_size);
+	if (err)
+		qmem_free(&aq->inst);
+
+	return err;
+}
+
+/**
+ * Frees an admin queue
+ *
+ * @param	aq	Admin queue to free
+ */
+void rvu_aq_free(struct admin_queue *aq)
+{
+	qmem_free(&aq->inst);
+	qmem_free(&aq->res);
+	memset(aq, 0, sizeof(*aq));
+}
diff --git a/drivers/net/octeontx2/rvu_pf.c b/drivers/net/octeontx2/rvu_pf.c
new file mode 100644
index 0000000..201ecf2
--- /dev/null
+++ b/drivers/net/octeontx2/rvu_pf.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <misc.h>
+#include <net.h>
+#include <pci_ids.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/arch/board.h>
+#include "cgx.h"
+#include "nix.h"
+
+extern struct udevice *rvu_af_dev;
+
+int rvu_pf_init(struct rvu_pf *rvu)
+{
+	struct nix *nix;
+	struct eth_pdata *pdata = dev_get_platdata(rvu->dev);
+
+	debug("%s: Allocating nix lf\n", __func__);
+	nix = nix_lf_alloc(rvu->dev);
+	if (!nix) {
+		printf("%s: Error allocating lf for pf %d\n",
+		       __func__, rvu->pfid);
+		return -1;
+	}
+	rvu->nix = nix;
+
+	/* to make post_probe happy */
+	if (is_valid_ethaddr(nix->lmac->mac_addr)) {
+		memcpy(pdata->enetaddr, nix->lmac->mac_addr, 6);
+		eth_env_set_enetaddr_by_index("eth", rvu->dev->seq,
+					      pdata->enetaddr);
+	}
+
+	return 0;
+}
+
+static const struct eth_ops nix_eth_ops = {
+	.start			= nix_lf_init,
+	.send			= nix_lf_xmit,
+	.recv			= nix_lf_recv,
+	.free_pkt		= nix_lf_free_pkt,
+	.stop			= nix_lf_halt,
+	.write_hwaddr		= nix_lf_setup_mac,
+};
+
+int rvu_pf_probe(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+	int err;
+	char name[16];
+
+	debug("%s: name: %s\n", __func__, dev->name);
+
+	rvu->pf_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_2, PCI_REGION_MEM);
+	rvu->pfid = dev->seq + 1; // RVU PF's start from 1;
+	rvu->dev = dev;
+	if (!rvu_af_dev) {
+		printf("%s: Error: Could not find RVU AF device\n",
+		       __func__);
+		return -1;
+	}
+	rvu->afdev = rvu_af_dev;
+
+	debug("RVU PF %u BAR2 %p\n", rvu->pfid, rvu->pf_base);
+
+	rvu_get_lfid_for_pf(rvu->pfid, &rvu->nix_lfid, &rvu->npa_lfid);
+
+	err = rvu_pf_init(rvu);
+	if (err)
+		printf("%s: Error %d adding nix\n", __func__, err);
+
+	/*
+	 * modify device name to include index/sequence number,
+	 * for better readability, this is 1:1 mapping with eth0/1/2.. names.
+	 */
+	snprintf(name, sizeof(name), "rvu_pf#%d", dev->seq);
+	device_set_name(dev, name);
+	debug("%s: name: %s\n", __func__, dev->name);
+	return err;
+}
+
+int rvu_pf_remove(struct udevice *dev)
+{
+	struct rvu_pf *rvu = dev_get_priv(dev);
+
+	nix_lf_shutdown(rvu->nix);
+	npa_lf_shutdown(rvu->nix);
+
+	debug("%s: rvu pf%d down --\n", __func__,  rvu->pfid);
+
+	return 0;
+}
+
+U_BOOT_DRIVER(rvu_pf) = {
+	.name   = "rvu_pf",
+	.id     = UCLASS_ETH,
+	.probe	= rvu_pf_probe,
+	.remove = rvu_pf_remove,
+	.ops    = &nix_eth_ops,
+	.priv_auto_alloc_size = sizeof(struct rvu_pf),
+	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
+};
+
+static struct pci_device_id rvu_pf_supported[] = {
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RVU_PF) },
+	{}
+};
+
+U_BOOT_PCI_DEVICE(rvu_pf, rvu_pf_supported);
diff --git a/drivers/phy/omap-usb2-phy.c b/drivers/phy/omap-usb2-phy.c
index adc454d..a981cb2 100644
--- a/drivers/phy/omap-usb2-phy.c
+++ b/drivers/phy/omap-usb2-phy.c
@@ -12,6 +12,7 @@
 #include <errno.h>
 #include <generic-phy.h>
 #include <regmap.h>
+#include <soc.h>
 #include <syscon.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
@@ -196,6 +197,11 @@
 	.exit = omap_usb2_phy_exit,
 };
 
+static const struct soc_attr am65x_sr10_soc_devices[] = {
+	{ .family = "AM65X", .revision = "SR1.0" },
+	{ /* sentinel */ }
+};
+
 int omap_usb2_phy_probe(struct udevice *dev)
 {
 	int rc;
@@ -222,10 +228,9 @@
 	 * Disabling the USB2_PHY Charger Detect function will put D+
 	 * into the normal state.
 	 *
-	 * Using property "ti,dis-chg-det-quirk" in the DT usb2-phy node
-	 * to enable this workaround for AM654x PG1.0.
+	 * Enable this workaround for AM654x PG1.0.
 	 */
-	if (dev_read_bool(dev, "ti,dis-chg-det-quirk"))
+	if (soc_device_match(am65x_sr10_soc_devices))
 		priv->flags |= OMAP_USB2_DISABLE_CHG_DET;
 
 	regmap = syscon_regmap_lookup_by_phandle(dev, "syscon-phy-power");
diff --git a/drivers/spi/mvebu_a3700_spi.c b/drivers/spi/mvebu_a3700_spi.c
index e860b9e..eb13cf3 100644
--- a/drivers/spi/mvebu_a3700_spi.c
+++ b/drivers/spi/mvebu_a3700_spi.c
@@ -15,6 +15,7 @@
 #include <asm/io.h>
 #include <dm/device_compat.h>
 #include <linux/bitops.h>
+#include <asm/gpio.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -27,6 +28,7 @@
 #define MVEBU_SPI_A3700_SPI_EN_0		BIT(16)
 #define MVEBU_SPI_A3700_CLK_PRESCALE_MASK	0x1f
 
+#define MAX_CS_COUNT	4
 
 /* SPI registers */
 struct spi_reg {
@@ -39,16 +41,23 @@
 struct mvebu_spi_platdata {
 	struct spi_reg *spireg;
 	struct clk clk;
+	struct gpio_desc cs_gpios[MAX_CS_COUNT];
 };
 
-static void spi_cs_activate(struct spi_reg *reg, int cs)
+static void spi_cs_activate(struct mvebu_spi_platdata *plat, int cs)
 {
-	setbits_le32(&reg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
+	if (CONFIG_IS_ENABLED(DM_GPIO) && dm_gpio_is_valid(&plat->cs_gpios[cs]))
+		dm_gpio_set_value(&plat->cs_gpios[cs], 1);
+	else
+		setbits_le32(&plat->spireg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
 }
 
-static void spi_cs_deactivate(struct spi_reg *reg, int cs)
+static void spi_cs_deactivate(struct mvebu_spi_platdata *plat, int cs)
 {
-	clrbits_le32(&reg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
+	if (CONFIG_IS_ENABLED(DM_GPIO) && dm_gpio_is_valid(&plat->cs_gpios[cs]))
+		dm_gpio_set_value(&plat->cs_gpios[cs], 0);
+	else
+		clrbits_le32(&plat->spireg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
 }
 
 /**
@@ -150,7 +159,7 @@
 	/* Activate CS */
 	if (flags & SPI_XFER_BEGIN) {
 		debug("SPI: activate cs.\n");
-		spi_cs_activate(reg, spi_chip_select(dev));
+		spi_cs_activate(plat, spi_chip_select(dev));
 	}
 
 	/* Send and/or receive */
@@ -169,7 +178,7 @@
 			return ret;
 
 		debug("SPI: deactivate cs.\n");
-		spi_cs_deactivate(reg, spi_chip_select(dev));
+		spi_cs_deactivate(plat, spi_chip_select(dev));
 	}
 
 	return 0;
@@ -247,6 +256,26 @@
 
 	writel(data, &reg->cfg);
 
+	/* Set up CS GPIOs in device tree, if any */
+	if (CONFIG_IS_ENABLED(DM_GPIO) && gpio_get_list_count(bus, "cs-gpios") > 0) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(plat->cs_gpios); i++) {
+			ret = gpio_request_by_name(bus, "cs-gpios", i, &plat->cs_gpios[i], 0);
+			if (ret < 0 || !dm_gpio_is_valid(&plat->cs_gpios[i])) {
+				/* Use the native CS function for this line */
+				continue;
+			}
+
+			ret = dm_gpio_set_dir_flags(&plat->cs_gpios[i],
+						    GPIOD_IS_OUT | GPIOD_ACTIVE_LOW);
+			if (ret) {
+				dev_err(bus, "Setting cs %d error\n", i);
+				return ret;
+			}
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/spi/nxp_fspi.c b/drivers/spi/nxp_fspi.c
index c507437..ebd9492 100644
--- a/drivers/spi/nxp_fspi.c
+++ b/drivers/spi/nxp_fspi.c
@@ -520,7 +520,7 @@
 	fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR);
 }
 
-#if CONFIG_IS_ENABLED(CONFIG_CLK)
+#if CONFIG_IS_ENABLED(CLK)
 static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
 {
 	int ret;
@@ -808,7 +808,7 @@
 	int ret, i;
 	u32 reg;
 
-#if CONFIG_IS_ENABLED(CONFIG_CLK)
+#if CONFIG_IS_ENABLED(CLK)
 	/* disable and unprepare clock to avoid glitch pass to controller */
 	nxp_fspi_clk_disable_unprep(f);
 
@@ -898,7 +898,7 @@
 
 static int nxp_fspi_set_speed(struct udevice *bus, uint speed)
 {
-#if CONFIG_IS_ENABLED(CONFIG_CLK)
+#if CONFIG_IS_ENABLED(CLK)
 	struct nxp_fspi *f = dev_get_priv(bus);
 	int ret;
 
@@ -924,7 +924,7 @@
 static int nxp_fspi_ofdata_to_platdata(struct udevice *bus)
 {
 	struct nxp_fspi *f = dev_get_priv(bus);
-#if CONFIG_IS_ENABLED(CONFIG_CLK)
+#if CONFIG_IS_ENABLED(CLK)
 	int ret;
 #endif
 
@@ -950,7 +950,7 @@
 	f->ahb_addr = map_physmem(ahb_addr, ahb_size, MAP_NOCACHE);
 	f->memmap_phy_size = ahb_size;
 
-#if CONFIG_IS_ENABLED(CONFIG_CLK)
+#if CONFIG_IS_ENABLED(CLK)
 	ret = clk_get_by_name(bus, "fspi_en", &f->clk_en);
 	if (ret) {
 		dev_err(bus, "failed to get fspi_en clock\n");
diff --git a/drivers/sysreset/Kconfig b/drivers/sysreset/Kconfig
index 6ebc90e..70692f0 100644
--- a/drivers/sysreset/Kconfig
+++ b/drivers/sysreset/Kconfig
@@ -79,12 +79,12 @@
 	  This enables the system reset driver support for Intel SOCFPGA SoCs
 	  (Cyclone 5, Arria 5 and Arria 10).
 
-config SYSRESET_SOCFPGA_S10
-	bool "Enable support for Intel SOCFPGA Stratix 10"
-	depends on ARCH_SOCFPGA && TARGET_SOCFPGA_STRATIX10
+config SYSRESET_SOCFPGA_SOC64
+	bool "Enable support for Intel SOCFPGA SoC64 family (Stratix10/Agilex)"
+	depends on ARCH_SOCFPGA && (TARGET_SOCFPGA_STRATIX10 || TARGET_SOCFPGA_AGILEX)
 	help
 	  This enables the system reset driver support for Intel SOCFPGA
-	  Stratix SoCs.
+	  SoC64 SoCs.
 
 config SYSRESET_TI_SCI
 	bool "TI System Control Interface (TI SCI) system reset driver"
diff --git a/drivers/sysreset/Makefile b/drivers/sysreset/Makefile
index df2293b..920c692 100644
--- a/drivers/sysreset/Makefile
+++ b/drivers/sysreset/Makefile
@@ -13,7 +13,7 @@
 obj-$(CONFIG_SYSRESET_OCTEON) += sysreset_octeon.o
 obj-$(CONFIG_SYSRESET_PSCI) += sysreset_psci.o
 obj-$(CONFIG_SYSRESET_SOCFPGA) += sysreset_socfpga.o
-obj-$(CONFIG_SYSRESET_SOCFPGA_S10) += sysreset_socfpga_s10.o
+obj-$(CONFIG_SYSRESET_SOCFPGA_SOC64) += sysreset_socfpga_soc64.o
 obj-$(CONFIG_SYSRESET_TI_SCI) += sysreset-ti-sci.o
 obj-$(CONFIG_SYSRESET_SYSCON) += sysreset_syscon.o
 obj-$(CONFIG_SYSRESET_WATCHDOG) += sysreset_watchdog.o
diff --git a/drivers/sysreset/sysreset_mpc83xx.c b/drivers/sysreset/sysreset_mpc83xx.c
index 456f006..4e89971 100644
--- a/drivers/sysreset/sysreset_mpc83xx.c
+++ b/drivers/sysreset/sysreset_mpc83xx.c
@@ -106,7 +106,7 @@
 	if (!force && !gd->arch.arbiter_event_address)
 		return 0;
 
-	if (CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_FULL)) {
+	if (CONFIG_IS_ENABLED(DISPLAY_AER_FULL)) {
 		res = snprintf(buf, size,
 			       "Arbiter Event Status:\n"
 			       "    %s: 0x%08lX\n"
@@ -119,7 +119,7 @@
 			       "Master ID", mstr_id, master[mstr_id],
 			       "Transfer Size", tsize_val, tsize_bytes,
 			       "Transfer Type", ttype, transfer[ttype]);
-	} else if (CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_BRIEF)) {
+	} else if (CONFIG_IS_ENABLED(DISPLAY_AER_BRIEF)) {
 		res = snprintf(buf, size,
 			       "Arbiter Event Status: AEATR=0x%08lX, AEADR=0x%08lX\n",
 			       gd->arch.arbiter_event_attributes,
@@ -183,8 +183,8 @@
 	 * TODO(mario.six@gdsys.cc): Move this into a dedicated
 	 *			     arbiter driver
 	 */
-	if (CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_FULL) ||
-	    CONFIG_IS_ENABLED(CONFIG_DISPLAY_AER_BRIEF)) {
+	if (CONFIG_IS_ENABLED(DISPLAY_AER_FULL) ||
+	    CONFIG_IS_ENABLED(DISPLAY_AER_BRIEF)) {
 		/*
 		 * If there was a bus monitor reset event, we force the arbiter
 		 * event to be printed
diff --git a/drivers/sysreset/sysreset_socfpga_s10.c b/drivers/sysreset/sysreset_socfpga_soc64.c
similarity index 100%
rename from drivers/sysreset/sysreset_socfpga_s10.c
rename to drivers/sysreset/sysreset_socfpga_soc64.c
diff --git a/env/Kconfig b/env/Kconfig
index c6ba088..aa63fae 100644
--- a/env/Kconfig
+++ b/env/Kconfig
@@ -477,6 +477,10 @@
 	                   If none, first valid partition in device D. If no
 	                   partition table then means device D.
 
+	  If ENV_EXT4_INTERFACE is set to "mmc" then device 'D' can be omitted,
+	  leaving the string starting with a colon, and the boot device will
+	  be used.
+
 config ENV_EXT4_FILE
 	string "Name of the EXT4 file to use for the environment"
 	depends on ENV_IS_IN_EXT4
diff --git a/env/ext4.c b/env/ext4.c
index f823b69..e666f7b 100644
--- a/env/ext4.c
+++ b/env/ext4.c
@@ -41,7 +41,26 @@
 
 __weak const char *env_ext4_get_dev_part(void)
 {
+#ifdef CONFIG_MMC
+	static char dev_part_str[32];
+	static char *part_str;
+
+	if (!part_str) {
+		part_str = CONFIG_ENV_EXT4_DEVICE_AND_PART;
+		if (!strcmp(CONFIG_ENV_EXT4_INTERFACE, "mmc") && part_str[0] == ':') {
+			/* The default is a string literal; writing to it is
+			 * undefined behaviour, so build the device number
+			 * into a writable buffer instead.
+			 */
+			snprintf(dev_part_str, sizeof(dev_part_str), "%d%s",
+				 mmc_get_env_dev(), CONFIG_ENV_EXT4_DEVICE_AND_PART);
+			part_str = dev_part_str;
+		}
+	}
+
+	return part_str;
+#else
 	return (const char *)CONFIG_ENV_EXT4_DEVICE_AND_PART;
+#endif
 }
 
 static int env_ext4_save_buffer(env_t *env_new)
diff --git a/env/fat.c b/env/fat.c
index 71bf8bf..653a38f 100644
--- a/env/fat.c
+++ b/env/fat.c
@@ -29,15 +29,6 @@
 # define LOADENV
 #endif
 
-__weak int mmc_get_env_dev(void)
-{
-#ifdef CONFIG_SYS_MMC_ENV_DEV
-	return CONFIG_SYS_MMC_ENV_DEV;
-#else
-	return 0;
-#endif
-}
-
 static char *env_fat_device_and_part(void)
 {
 #ifdef CONFIG_MMC
diff --git a/env/mmc.c b/env/mmc.c
index ba87270..4e67180 100644
--- a/env/mmc.c
+++ b/env/mmc.c
@@ -24,11 +24,6 @@
 
 DECLARE_GLOBAL_DATA_PTR;
 
-__weak int mmc_get_env_dev(void)
-{
-	return CONFIG_SYS_MMC_ENV_DEV;
-}
-
 #if CONFIG_IS_ENABLED(OF_CONTROL)
 static inline int mmc_offset_try_partition(const char *str, int copy, s64 *val)
 {
diff --git a/include/altera.h b/include/altera.h
index 22d55cf..946413c 100644
--- a/include/altera.h
+++ b/include/altera.h
@@ -56,10 +56,10 @@
 	Altera_StratixII,
 	/* StratixV Family */
 	Altera_StratixV,
-	/* Stratix10 Family */
-	Intel_FPGA_Stratix10,
 	/* SoCFPGA Family */
 	Altera_SoCFPGA,
+	/* Intel FPGA Family with SDM (Secure Device Manager) Mailbox */
+	Intel_FPGA_SDM_Mailbox,
 
 	/* Add new models here */
 
@@ -120,8 +120,9 @@
 int stratixv_load(Altera_desc *desc, const void *rbf_data, size_t rbf_size);
 #endif
 
-#ifdef CONFIG_FPGA_STRATIX10
-int stratix10_load(Altera_desc *desc, const void *rbf_data, size_t rbf_size);
+#ifdef CONFIG_FPGA_INTEL_SDM_MAILBOX
+int intel_sdm_mb_load(Altera_desc *desc, const void *rbf_data,
+		      size_t rbf_size);
 #endif
 
 #endif /* _ALTERA_H_ */
diff --git a/include/asm-generic/global_data.h b/include/asm-generic/global_data.h
index d4a4e22..ebb740d 100644
--- a/include/asm-generic/global_data.h
+++ b/include/asm-generic/global_data.h
@@ -24,149 +24,473 @@
 #include <membuff.h>
 #include <linux/list.h>
 
-typedef struct global_data {
+typedef struct global_data gd_t;
+
+/**
+ * struct global_data - global data structure
+ */
+struct global_data {
+	/**
+	 * @bd: board information
+	 */
 	struct bd_info *bd;
+	/**
+	 * @flags: global data flags
+	 *
+	 * See &enum gd_flags
+	 */
 	unsigned long flags;
+	/**
+	 * @baudrate: baud rate of the serial interface
+	 */
 	unsigned int baudrate;
-	unsigned long cpu_clk;		/* CPU clock in Hz!		*/
+	/**
+	 * @cpu_clk: CPU clock rate in Hz
+	 */
+	unsigned long cpu_clk;
+	/**
+	 * @bus_clk: platform clock rate in Hz
+	 */
 	unsigned long bus_clk;
+	/**
+	 * @pci_clk: PCI clock rate in Hz
+	 */
 	/* We cannot bracket this with CONFIG_PCI due to mpc5xxx */
 	unsigned long pci_clk;
+	/**
+	 * @mem_clk: memory clock rate in Hz
+	 */
 	unsigned long mem_clk;
 #if defined(CONFIG_LCD) || defined(CONFIG_VIDEO) || defined(CONFIG_DM_VIDEO)
-	unsigned long fb_base;		/* Base address of framebuffer mem */
+	/**
+	 * @fb_base: base address of frame buffer memory
+	 */
+	unsigned long fb_base;
 #endif
 #if defined(CONFIG_POST)
-	unsigned long post_log_word;	/* Record POST activities */
-	unsigned long post_log_res;	/* success of POST test */
-	unsigned long post_init_f_time;	/* When post_init_f started */
+	/**
+	 * @post_log_word: active POST tests
+	 *
+	 * @post_log_word is a bit mask defining which POST tests are recorded
+	 * (see constants POST_*).
+	 */
+	unsigned long post_log_word;
+	/**
+	 * @post_log_res: POST results
+	 *
+	 * @post_log_res is a bit mask with the POST results. A bit with value 1
+	 * indicates successful execution.
+	 */
+	unsigned long post_log_res;
+	/**
+	 * @post_init_f_time: time in ms when post_init_f() started
+	 */
+	unsigned long post_init_f_time;
 #endif
 #ifdef CONFIG_BOARD_TYPES
+	/**
+	 * @board_type: board type
+	 *
+	 * If a U-Boot configuration supports multiple board types, the actual
+	 * board type may be stored in this field.
+	 */
 	unsigned long board_type;
 #endif
-	unsigned long have_console;	/* serial_init() was called */
+	/**
+	 * @have_console: console is available
+	 *
+	 * A value of 1 indicates that serial_init() was called and a console
+	 * is available.
+	 * A value of 0 indicates that console input and output drivers shall
+	 * not be called.
+	 */
+	unsigned long have_console;
 #if CONFIG_IS_ENABLED(PRE_CONSOLE_BUFFER)
-	unsigned long precon_buf_idx;	/* Pre-Console buffer index */
+	/**
+	 * @precon_buf_idx: pre-console buffer index
+	 *
+	 * @precon_buf_idx indicates the current position of the buffer used to
+	 * collect output before the console becomes available
+	 */
+	unsigned long precon_buf_idx;
 #endif
-	unsigned long env_addr;		/* Address  of Environment struct */
-	unsigned long env_valid;	/* Environment valid? enum env_valid */
-	unsigned long env_has_init;	/* Bitmask of boolean of struct env_location offsets */
-	int env_load_prio;		/* Priority of the loaded environment */
-
-	unsigned long ram_base;		/* Base address of RAM used by U-Boot */
-	unsigned long ram_top;		/* Top address of RAM used by U-Boot */
-	unsigned long relocaddr;	/* Start address of U-Boot in RAM */
-	phys_size_t ram_size;		/* RAM size */
-	unsigned long mon_len;		/* monitor len */
-	unsigned long irq_sp;		/* irq stack pointer */
-	unsigned long start_addr_sp;	/* start_addr_stackpointer */
+	/**
+	 * @env_addr: address of environment structure
+	 *
+	 * @env_addr contains the address of the structure holding the
+	 * environment variables.
+	 */
+	unsigned long env_addr;
+	/**
+	 * @env_valid: environment is valid
+	 *
+	 * See &enum env_valid
+	 */
+	unsigned long env_valid;
+	/**
+	 * @env_has_init: bit mask indicating environment locations
+	 *
+	 * &enum env_location defines which bit relates to which location
+	 */
+	unsigned long env_has_init;
+	/**
+	 * @env_load_prio: priority of the loaded environment
+	 */
+	int env_load_prio;
+	/**
+	 * @ram_base: base address of RAM used by U-Boot
+	 */
+	unsigned long ram_base;
+	/**
+	 * @ram_top: top address of RAM used by U-Boot
+	 */
+	unsigned long ram_top;
+	/**
+	 * @relocaddr: start address of U-Boot in RAM
+	 *
+	 * After relocation this field indicates the address to which U-Boot
+	 * has been relocated. It can be displayed using the bdinfo command.
+	 * Its value is needed to display the source code when debugging with
+	 * GDB using the 'add-symbol-file u-boot <relocaddr>' command.
+	 */
+	unsigned long relocaddr;
+	/**
+	 * @ram_size: RAM size in bytes
+	 */
+	phys_size_t ram_size;
+	/**
+	 * @mon_len: monitor length in bytes
+	 */
+	unsigned long mon_len;
+	/**
+	 * @irq_sp: IRQ stack pointer
+	 */
+	unsigned long irq_sp;
+	/**
+	 * @start_addr_sp: initial stack pointer address
+	 */
+	unsigned long start_addr_sp;
+	/**
+	 * @reloc_off: relocation offset
+	 */
 	unsigned long reloc_off;
-	struct global_data *new_gd;	/* relocated global data */
+	/**
+	 * @new_gd: pointer to relocated global data
+	 */
+	struct global_data *new_gd;
 
 #ifdef CONFIG_DM
-	struct udevice	*dm_root;	/* Root instance for Driver Model */
-	struct udevice	*dm_root_f;	/* Pre-relocation root instance */
-	struct list_head uclass_root;	/* Head of core tree */
+	/**
+	 * @dm_root: root instance for Driver Model
+	 */
+	struct udevice *dm_root;
+	/**
+	 * @dm_root_f: pre-relocation root instance
+	 */
+	struct udevice *dm_root_f;
+	/**
+	 * @uclass_root: head of core tree
+	 */
+	struct list_head uclass_root;
 #endif
 #ifdef CONFIG_TIMER
-	struct udevice	*timer;		/* Timer instance for Driver Model */
+	/**
+	 * @timer: timer instance for Driver Model
+	 */
+	struct udevice *timer;
 #endif
-
-	const void *fdt_blob;		/* Our device tree, NULL if none */
-	void *new_fdt;			/* Relocated FDT */
-	unsigned long fdt_size;		/* Space reserved for relocated FDT */
+	/**
+	 * @fdt_blob: U-Boot's own device tree, NULL if none
+	 */
+	const void *fdt_blob;
+	/**
+	 * @new_fdt: relocated device tree
+	 */
+	void *new_fdt;
+	/**
+	 * @fdt_size: space reserved for relocated device space
+	 */
+	unsigned long fdt_size;
 #ifdef CONFIG_OF_LIVE
+	/**
+	 * @of_root: root node of the live tree
+	 */
 	struct device_node *of_root;
 #endif
 
 #if CONFIG_IS_ENABLED(MULTI_DTB_FIT)
-	const void *multi_dtb_fit;	/* uncompressed multi-dtb FIT image */
+	/**
+	 * @multi_dtb_fit: pointer to uncompressed multi-dtb FIT image
+	 */
+	const void *multi_dtb_fit;
 #endif
-	struct jt_funcs *jt;		/* jump table */
-	char env_buf[32];		/* buffer for env_get() before reloc. */
+	/**
+	 * @jt: jump table
+	 *
+	 * The jump table contains pointers to exported functions. A pointer to
+	 * the jump table is passed to standalone applications.
+	 */
+	struct jt_funcs *jt;
+	/**
+	 * @env_buf: buffer for env_get() before reloc
+	 */
+	char env_buf[32];
 #ifdef CONFIG_TRACE
-	void		*trace_buff;	/* The trace buffer */
+	/**
+	 * @trace_buff: trace buffer
+	 *
+	 * When tracing function in U-Boot this field points to the buffer
+	 * recording the function calls.
+	 */
+	void *trace_buff;
 #endif
 #if defined(CONFIG_SYS_I2C)
-	int		cur_i2c_bus;	/* current used i2c bus */
+	/**
+	 * @cur_i2c_bus: currently used I2C bus
+	 */
+	int cur_i2c_bus;
 #endif
+	/**
+	 * @timebase_h: high 32 bits of timer
+	 */
 	unsigned int timebase_h;
+	/**
+	 * @timebase_l: low 32 bits of timer
+	 */
 	unsigned int timebase_l;
 #if CONFIG_VAL(SYS_MALLOC_F_LEN)
-	unsigned long malloc_base;	/* base address of early malloc() */
-	unsigned long malloc_limit;	/* limit address */
-	unsigned long malloc_ptr;	/* current address */
+	/**
+	 * @malloc_base: base address of early malloc()
+	 */
+	unsigned long malloc_base;
+	/**
+	 * @malloc_limit: limit address of early malloc()
+	 */
+	unsigned long malloc_limit;
+	/**
+	 * @malloc_ptr: current address of early malloc()
+	 */
+	unsigned long malloc_ptr;
 #endif
 #ifdef CONFIG_PCI
-	struct pci_controller *hose;	/* PCI hose for early use */
-	phys_addr_t pci_ram_top;	/* top of region accessible to PCI */
+	/**
+	 * @hose: PCI hose for early use
+	 */
+	struct pci_controller *hose;
+	/**
+	 * @pci_ram_top: top of region accessible to PCI
+	 */
+	phys_addr_t pci_ram_top;
 #endif
 #ifdef CONFIG_PCI_BOOTDELAY
+	/**
+	 * @pcidelay_done: delay time before scanning of PCI hose expired
+	 *
+	 * If CONFIG_PCI_BOOTDELAY=y, pci_hose_scan() waits for the number of
+	 * milliseconds defined by environment variable pcidelay before
+	 * scanning. Once this delay has expired the flag @pcidelay_done
+	 * is set to 1.
+	 */
 	int pcidelay_done;
 #endif
-	struct udevice *cur_serial_dev;	/* current serial device */
-	struct arch_global_data arch;	/* architecture-specific data */
+	/**
+	 * @cur_serial_dev: current serial device
+	 */
+	struct udevice *cur_serial_dev;
+	/**
+	 * @arch: architecture-specific data
+	 */
+	struct arch_global_data arch;
 #ifdef CONFIG_CONSOLE_RECORD
-	struct membuff console_out;	/* console output */
-	struct membuff console_in;	/* console input */
+	/**
+	 * @console_out: output buffer for console recording
+	 *
+	 * This buffer is used to collect output during console recording.
+	 */
+	struct membuff console_out;
+	/**
+	 * @console_in: input buffer for console recording
+	 *
+	 * If console recording is activated, this buffer can be used to
+	 * emulate input.
+	 */
+	struct membuff console_in;
 #endif
 #ifdef CONFIG_DM_VIDEO
-	ulong video_top;		/* Top of video frame buffer area */
-	ulong video_bottom;		/* Bottom of video frame buffer area */
+	/**
+	 * @video_top: top of video frame buffer area
+	 */
+	ulong video_top;
+	/**
+	 * @video_bottom: bottom of video frame buffer area
+	 */
+	ulong video_bottom;
 #endif
 #ifdef CONFIG_BOOTSTAGE
-	struct bootstage_data *bootstage;	/* Bootstage information */
-	struct bootstage_data *new_bootstage;	/* Relocated bootstage info */
+	/**
+	 * @bootstage: boot stage information
+	 */
+	struct bootstage_data *bootstage;
+	/**
+	 * @new_bootstage: relocated boot stage information
+	 */
+	struct bootstage_data *new_bootstage;
 #endif
 #ifdef CONFIG_LOG
-	int log_drop_count;		/* Number of dropped log messages */
-	int default_log_level;		/* For devices with no filters */
-	struct list_head log_head;	/* List of struct log_device */
-	int log_fmt;			/* Mask containing log format info */
+	/**
+	 * @log_drop_count: number of dropped log messages
+	 *
+	 * This counter is incremented for each log message which can not
+	 * be processed because logging is not yet available as signaled by
+	 * flag %GD_FLG_LOG_READY in @flags.
+	 */
+	int log_drop_count;
+	/**
+	 * @default_log_level: default logging level
+	 *
+	 * For logging devices without filters @default_log_level defines the
+	 * logging level, cf. &enum log_level_t.
+	 */
+	int default_log_level;
+	/**
+	 * @log_head: list of logging devices
+	 */
+	struct list_head log_head;
+	/**
+	 * @log_fmt: bit mask for logging format
+	 *
+	 * The @log_fmt bit mask selects the fields to be shown in log messages.
+	 * &enum log_fmt defines the bits of the bit mask.
+	 */
+	int log_fmt;
 #endif
 #if CONFIG_IS_ENABLED(BLOBLIST)
-	struct bloblist_hdr *bloblist;	/* Bloblist information */
-	struct bloblist_hdr *new_bloblist;	/* Relocated blolist info */
+	/**
+	 * @bloblist: blob list information
+	 */
+	struct bloblist_hdr *bloblist;
+	/**
+	 * @new_bloblist: relocated blob list information
+	 */
+	struct bloblist_hdr *new_bloblist;
 # ifdef CONFIG_SPL
+	/**
+	 * @spl_handoff: SPL hand-off information
+	 */
 	struct spl_handoff *spl_handoff;
 # endif
 #endif
 #if defined(CONFIG_TRANSLATION_OFFSET)
-	fdt_addr_t translation_offset;	/* optional translation offset */
+	/**
+	 * @translation_offset: optional translation offset
+	 *
+	 * See CONFIG_TRANSLATION_OFFSET.
+	 */
+	fdt_addr_t translation_offset;
 #endif
 #if CONFIG_IS_ENABLED(WDT)
+	/**
+	 * @watchdog_dev: watchdog device
+	 */
 	struct udevice *watchdog_dev;
 #endif
-} gd_t;
-#endif
+};
 
+/**
+ * gd_board_type() - retrieve board type
+ *
+ * Return: global board type
+ */
 #ifdef CONFIG_BOARD_TYPES
 #define gd_board_type()		gd->board_type
 #else
 #define gd_board_type()		0
 #endif
 
-/*
- * Global Data Flags
+/**
+ * enum gd_flags - global data flags
+ *
+ * See field flags of &struct global_data.
  */
-#define GD_FLG_RELOC		0x00001	/* Code was relocated to RAM	   */
-#define GD_FLG_DEVINIT		0x00002	/* Devices have been initialized   */
-#define GD_FLG_SILENT		0x00004	/* Silent mode			   */
-#define GD_FLG_POSTFAIL		0x00008	/* Critical POST test failed	   */
-#define GD_FLG_POSTSTOP		0x00010	/* POST seqeunce aborted	   */
-#define GD_FLG_LOGINIT		0x00020	/* Log Buffer has been initialized */
-#define GD_FLG_DISABLE_CONSOLE	0x00040	/* Disable console (in & out)	   */
-#define GD_FLG_ENV_READY	0x00080	/* Env. imported into hash table   */
-#define GD_FLG_SERIAL_READY	0x00100	/* Pre-reloc serial console ready  */
-#define GD_FLG_FULL_MALLOC_INIT	0x00200	/* Full malloc() is ready	   */
-#define GD_FLG_SPL_INIT		0x00400	/* spl_init() has been called	   */
-#define GD_FLG_SKIP_RELOC	0x00800	/* Don't relocate		   */
-#define GD_FLG_RECORD		0x01000	/* Record console		   */
-#define GD_FLG_ENV_DEFAULT	0x02000 /* Default variable flag	   */
-#define GD_FLG_SPL_EARLY_INIT	0x04000 /* Early SPL init is done	   */
-#define GD_FLG_LOG_READY	0x08000 /* Log system is ready for use	   */
-#define GD_FLG_WDT_READY	0x10000 /* Watchdog is ready for use	   */
-#define GD_FLG_SKIP_LL_INIT	0x20000	/* Don't perform low-level init	   */
-#define GD_FLG_SMP_READY	0x40000	/* SMP init is complete		   */
+enum gd_flags {
+	/**
+	 * @GD_FLG_RELOC: code was relocated to RAM
+	 */
+	GD_FLG_RELOC = 0x00001,
+	/**
+	 * @GD_FLG_DEVINIT: devices have been initialized
+	 */
+	GD_FLG_DEVINIT = 0x00002,
+	/**
+	 * @GD_FLG_SILENT: silent mode
+	 */
+	GD_FLG_SILENT = 0x00004,
+	/**
+	 * @GD_FLG_POSTFAIL: critical POST test failed
+	 */
+	GD_FLG_POSTFAIL = 0x00008,
+	/**
+	 * @GD_FLG_POSTSTOP: POST sequence aborted
+	 */
+	GD_FLG_POSTSTOP = 0x00010,
+	/**
+	 * @GD_FLG_LOGINIT: log buffer has been initialized
+	 */
+	GD_FLG_LOGINIT = 0x00020,
+	/**
+	 * @GD_FLG_DISABLE_CONSOLE: disable console (in & out)
+	 */
+	GD_FLG_DISABLE_CONSOLE = 0x00040,
+	/**
+	 * @GD_FLG_ENV_READY: environment imported into hash table
+	 */
+	GD_FLG_ENV_READY = 0x00080,
+	/**
+	 * @GD_FLG_SERIAL_READY: pre-relocation serial console ready
+	 */
+	GD_FLG_SERIAL_READY = 0x00100,
+	/**
+	 * @GD_FLG_FULL_MALLOC_INIT: full malloc() is ready
+	 */
+	GD_FLG_FULL_MALLOC_INIT = 0x00200,
+	/**
+	 * @GD_FLG_SPL_INIT: spl_init() has been called
+	 */
+	GD_FLG_SPL_INIT = 0x00400,
+	/**
+	 * @GD_FLG_SKIP_RELOC: don't relocate
+	 */
+	GD_FLG_SKIP_RELOC = 0x00800,
+	/**
+	 * @GD_FLG_RECORD: record console
+	 */
+	GD_FLG_RECORD = 0x01000,
+	/**
+	 * @GD_FLG_ENV_DEFAULT: default variable flag
+	 */
+	GD_FLG_ENV_DEFAULT = 0x02000,
+	/**
+	 * @GD_FLG_SPL_EARLY_INIT: early SPL initialization is done
+	 */
+	GD_FLG_SPL_EARLY_INIT = 0x04000,
+	/**
+	 * @GD_FLG_LOG_READY: log system is ready for use
+	 */
+	GD_FLG_LOG_READY = 0x08000,
+	/**
+	 * @GD_FLG_WDT_READY: watchdog is ready for use
+	 */
+	GD_FLG_WDT_READY = 0x10000,
+	/**
+	 * @GD_FLG_SKIP_LL_INIT: don't perform low-level initialization
+	 */
+	GD_FLG_SKIP_LL_INIT = 0x20000,
+	/**
+	 * @GD_FLG_SMP_READY: SMP initialization is complete
+	 */
+	GD_FLG_SMP_READY = 0x40000,
+};
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_GENERIC_GBL_DATA_H */
diff --git a/include/bootm.h b/include/bootm.h
index 0350c34..a812a6b 100644
--- a/include/bootm.h
+++ b/include/bootm.h
@@ -75,4 +75,14 @@
  */
 void switch_to_non_secure_mode(void);
 
+/**
+ * arch_preboot_os() - arch specific configuration before booting
+ */
+void arch_preboot_os(void);
+
+/**
+ * board_preboot_os() - board specific configuration before booting
+ */
+void board_preboot_os(void);
+
 #endif
diff --git a/include/configs/am65x_evm.h b/include/configs/am65x_evm.h
index df06056..9eed0ea 100644
--- a/include/configs/am65x_evm.h
+++ b/include/configs/am65x_evm.h
@@ -69,9 +69,6 @@
 	"findfdt="							\
 		"setenv name_fdt k3-am654-base-board.dtb;"		\
 		"setenv fdtfile ${name_fdt}\0"				\
-	"loadaddr=0x80080000\0"						\
-	"fdtaddr=0x82000000\0"						\
-	"overlayaddr=0x83000000\0"					\
 	"name_kern=Image\0"						\
 	"console=ttyS2,115200n8\0"					\
 	"stdin=serial,usbkbd\0"						\
@@ -93,8 +90,8 @@
 		"fdt resize 0x100000;"					\
 		"for overlay in $name_overlays;"			\
 		"do;"							\
-		"load mmc ${bootpart} ${overlayaddr} ${bootdir}/${overlay};"	\
-		"fdt apply ${overlayaddr};"				\
+		"load mmc ${bootpart} ${dtboaddr} ${bootdir}/${overlay};"	\
+		"fdt apply ${dtboaddr};"				\
 		"done;\0"						\
 	"get_kern_mmc=load mmc ${bootpart} ${loadaddr} "		\
 		"${bootdir}/${name_kern}\0"				\
@@ -133,6 +130,7 @@
 
 /* Incorporate settings into the U-Boot environment */
 #define CONFIG_EXTRA_ENV_SETTINGS					\
+	DEFAULT_LINUX_BOOT_ENV						\
 	DEFAULT_MMC_TI_ARGS						\
 	DEFAULT_FIT_TI_ARGS						\
 	EXTRA_ENV_AM65X_BOARD_SETTINGS					\
diff --git a/include/configs/j721e_evm.h b/include/configs/j721e_evm.h
index 1b47e18..b707fc4 100644
--- a/include/configs/j721e_evm.h
+++ b/include/configs/j721e_evm.h
@@ -69,9 +69,6 @@
 	"findfdt="							\
 		"setenv name_fdt ${default_device_tree};"		\
 		"setenv fdtfile ${name_fdt}\0"				\
-	"loadaddr=0x80080000\0"						\
-	"fdtaddr=0x82000000\0"						\
-	"overlayaddr=0x83000000\0"					\
 	"name_kern=Image\0"						\
 	"console=ttyS2,115200n8\0"					\
 	"args_all=setenv optargs earlycon=ns16550a,mmio32,0x02800000 "	\
@@ -114,8 +111,8 @@
 		"fdt resize 0x100000;"					\
 		"for overlay in $name_overlays;"			\
 		"do;"							\
-		"load mmc ${bootpart} ${overlayaddr} ${bootdir}/${overlay} && "	\
-		"fdt apply ${overlayaddr};"				\
+		"load mmc ${bootpart} ${dtboaddr} ${bootdir}/${overlay} && "	\
+		"fdt apply ${dtboaddr};"				\
 		"done;\0"						\
 	"partitions=" PARTS_DEFAULT					\
 	"get_kern_mmc=load mmc ${bootpart} ${loadaddr} "		\
@@ -165,6 +162,7 @@
 
 /* Incorporate settings into the U-Boot environment */
 #define CONFIG_EXTRA_ENV_SETTINGS					\
+	DEFAULT_LINUX_BOOT_ENV						\
 	DEFAULT_MMC_TI_ARGS						\
 	DEFAULT_FIT_TI_ARGS						\
 	EXTRA_ENV_J721E_BOARD_SETTINGS					\
diff --git a/include/configs/mvebu_armada-37xx.h b/include/configs/mvebu_armada-37xx.h
index 27428d5..0d58560 100644
--- a/include/configs/mvebu_armada-37xx.h
+++ b/include/configs/mvebu_armada-37xx.h
@@ -17,8 +17,6 @@
 
 #define CONFIG_SYS_BOOTM_LEN	SZ_64M /* Increase max gunzip size */
 
-/* auto boot */
-
 #define CONFIG_SYS_BAUDRATE_TABLE	{ 9600, 19200, 38400, 57600, \
 					  115200, 230400, 460800, 921600 }
 
@@ -57,11 +55,8 @@
 /*
  * SPI Flash configuration
  */
-
 #define CONFIG_MTD_PARTITIONS		/* required for UBI partition support */
 
-/* Environment in SPI NOR flash */
-
 /*
  * Ethernet Driver configuration
  */
@@ -70,8 +65,6 @@
 
 #define CONFIG_USB_MAX_CONTROLLER_COUNT (3 + 3)
 
-/* USB ethernet */
-
 /*
  * SATA/SCSI/AHCI configuration
  */
diff --git a/include/environment/ti/ufs.h b/include/environment/ti/ufs.h
index d457e20..6619ec9 100644
--- a/include/environment/ti/ufs.h
+++ b/include/environment/ti/ufs.h
@@ -26,8 +26,8 @@
 		"fdt resize 0x100000;" \
 		"for overlay in $name_overlays;" \
 		"do;" \
-		"load scsi ${bootpart} ${overlayaddr} ${bootdir}/${overlay} && " \
-		"fdt apply ${overlayaddr};" \
+		"load scsi ${bootpart} ${dtboaddr} ${bootdir}/${overlay} && " \
+		"fdt apply ${dtboaddr};" \
 		"done;\0"
 
 #endif
diff --git a/include/fdt_support.h b/include/fdt_support.h
index 9684cff..dbbac0f 100644
--- a/include/fdt_support.h
+++ b/include/fdt_support.h
@@ -359,4 +359,7 @@
 #ifdef CONFIG_FSL_MC_ENET
 void fdt_fixup_board_enet(void *blob);
 #endif
+#ifdef CONFIG_CMD_PSTORE
+void fdt_fixup_pstore(void *blob);
+#endif
 #endif /* ifndef __FDT_SUPPORT_H */
diff --git a/include/image.h b/include/image.h
index 9a5a87d..10995b8 100644
--- a/include/image.h
+++ b/include/image.h
@@ -1463,7 +1463,7 @@
 		       unsigned char **cipher, int *cipher_len);
 
 	int (*add_cipher_data)(struct image_cipher_info *info,
-			       void *keydest);
+			       void *keydest, void *fit, int node_noffset);
 
 	int (*decrypt)(struct image_cipher_info *info,
 		       const void *cipher, size_t cipher_len,
diff --git a/include/u-boot/aes.h b/include/u-boot/aes.h
index 3228104..acbc50b 100644
--- a/include/u-boot/aes.h
+++ b/include/u-boot/aes.h
@@ -13,7 +13,8 @@
 int image_aes_encrypt(struct image_cipher_info *info,
 		      const unsigned char *data, int size,
 		      unsigned char **cipher, int *cipher_len);
-int image_aes_add_cipher_data(struct image_cipher_info *info, void *keydest);
+int image_aes_add_cipher_data(struct image_cipher_info *info, void *keydest,
+			      void *fit, int node_noffset);
 #else
 int image_aes_encrypt(struct image_cipher_info *info,
 		      const unsigned char *data, int size,
@@ -22,7 +23,8 @@
 	return -ENXIO;
 }
 
-int image_aes_add_cipher_data(struct image_cipher_info *info, void *keydest)
+int image_aes_add_cipher_data(struct image_cipher_info *info, void *keydest,
+			      void *fit, int node_noffset)
 {
 	return -ENXIO;
 }
diff --git a/lib/aes/aes-encrypt.c b/lib/aes/aes-encrypt.c
index de00a83..a6d1720 100644
--- a/lib/aes/aes-encrypt.c
+++ b/lib/aes/aes-encrypt.c
@@ -74,7 +74,8 @@
 	return ret;
 }
 
-int image_aes_add_cipher_data(struct image_cipher_info *info, void *keydest)
+int image_aes_add_cipher_data(struct image_cipher_info *info, void *keydest,
+			      void *fit, int node_noffset)
 {
 	int parent, node;
 	char name[128];
@@ -97,8 +98,13 @@
 		goto done;
 
 	/* Either create or overwrite the named key node */
-	snprintf(name, sizeof(name), "key-%s-%s-%s",
-		 info->name, info->keyname, info->ivname);
+	if (info->ivname)
+		snprintf(name, sizeof(name), "key-%s-%s-%s",
+			 info->name, info->keyname, info->ivname);
+	else
+		snprintf(name, sizeof(name), "key-%s-%s",
+			 info->name, info->keyname);
+
 	node = fdt_subnode_offset(keydest, parent, name);
 	if (node == -FDT_ERR_NOTFOUND) {
 		node = fdt_add_subnode(keydest, parent, name);
@@ -116,9 +122,17 @@
 		ret = node;
 	}
 
+	if (ret)
+		goto done;
+
-	if (!ret)
+	if (info->ivname)
+		/* Store the IV in the u-boot device tree */
 		ret = fdt_setprop(keydest, node, "iv",
 				  info->iv, info->cipher->iv_len);
+	else
+		/* Store the IV in the FIT image */
+		ret = fdt_setprop(fit, node_noffset, "iv",
+				  info->iv, info->cipher->iv_len);
 
 	if (!ret)
 		ret = fdt_setprop(keydest, node, "key",
diff --git a/lib/hashtable.c b/lib/hashtable.c
index 4a8c50b..7c08f5c 100644
--- a/lib/hashtable.c
+++ b/lib/hashtable.c
@@ -324,8 +324,7 @@
 		 */
 		unsigned hval2;
 
-		if (htab->table[idx].used == USED_DELETED
-		    && !first_deleted)
+		if (htab->table[idx].used == USED_DELETED)
 			first_deleted = idx;
 
 		ret = _compare_and_overwrite_entry(item, action, retval, htab,
diff --git a/lib/rsa/rsa-mod-exp.c b/lib/rsa/rsa-mod-exp.c
index a437cbe..78c688d 100644
--- a/lib/rsa/rsa-mod-exp.c
+++ b/lib/rsa/rsa-mod-exp.c
@@ -25,6 +25,14 @@
 #define get_unaligned_be32(a) fdt32_to_cpu(*(uint32_t *)a)
 #define put_unaligned_be32(a, b) (*(uint32_t *)(b) = cpu_to_fdt32(a))
 
+static inline uint64_t fdt64_to_cpup(const void *p)
+{
+	fdt64_t w;
+
+	memcpy(&w, p, sizeof(w));
+	return fdt64_to_cpu(w);
+}
+
 /* Default public exponent for backward compatibility */
 #define RSA_DEFAULT_PUBEXP	65537
 
@@ -263,8 +271,7 @@
 	if (!prop->public_exponent)
 		key.exponent = RSA_DEFAULT_PUBEXP;
 	else
-		rsa_convert_big_endian((uint32_t *)&key.exponent,
-				       prop->public_exponent, 2);
+		key.exponent = fdt64_to_cpup(prop->public_exponent);
 
 	if (!key.len || !prop->modulus || !prop->rr) {
 		debug("%s: Missing RSA key info", __func__);
diff --git a/lib/rsa/rsa-verify.c b/lib/rsa/rsa-verify.c
index 2057f68..0ab0f62 100644
--- a/lib/rsa/rsa-verify.c
+++ b/lib/rsa/rsa-verify.c
@@ -439,12 +439,17 @@
 	struct key_prop prop;
 	int length;
 	int ret = 0;
+	const char *algo;
 
 	if (node < 0) {
 		debug("%s: Skipping invalid node", __func__);
 		return -EBADF;
 	}
 
+	algo = fdt_getprop(blob, node, "algo", NULL);
+	if (strcmp(info->name, algo))
+		return -EFAULT;
+
 	prop.num_bits = fdtdec_get_int(blob, node, "rsa,num-bits", 0);
 
 	prop.n0inv = fdtdec_get_int(blob, node, "rsa,n0-inverse", 0);
@@ -540,7 +545,7 @@
 {
 	/* Reserve memory for maximum checksum-length */
 	uint8_t hash[info->crypto->key_len];
-	int ret = -EACCES;
+	int ret;
 
 	/*
 	 * Verify that the checksum-length does not exceed the
diff --git a/lib/time.c b/lib/time.c
index 47f8c84..88bc504 100644
--- a/lib/time.c
+++ b/lib/time.c
@@ -91,13 +91,13 @@
 
 		ret = dm_timer_init();
 		if (ret)
-			return ret;
+			panic("Could not initialize timer (err %d)\n", ret);
 #endif
 	}
 
 	ret = timer_get_count(gd->timer, &count);
 	if (ret)
-		return ret;
+		panic("Could not read count from timer (err %d)\n", ret);
 
 	return count;
 }
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4bed2b0..4ed7e12 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2365,6 +2365,12 @@
 		ERROR("DISABLE_FDT_OR_INITRD_RELOC",
 		     "fdt or initrd relocation disabled at boot time\n" . $herecurr);
 	}
+
+	# Do not use CONFIG_ prefix in CONFIG_IS_ENABLED() calls
+	if ($line =~ /^\+.*CONFIG_IS_ENABLED\(CONFIG_\w*\).*/) {
+		ERROR("CONFIG_IS_ENABLED_CONFIG",
+		      "CONFIG_IS_ENABLED() takes values without the CONFIG_ prefix\n" . $herecurr);
+	}
 }
 
 sub process {
diff --git a/test/dm/button.c b/test/dm/button.c
index 9117801..ecaa47c 100644
--- a/test/dm/button.c
+++ b/test/dm/button.c
@@ -57,17 +57,17 @@
 {
 	struct udevice *dev, *cmp;
 
-	ut_assertok(button_get_by_label("summer", &dev));
+	ut_assertok(button_get_by_label("button1", &dev));
 	ut_asserteq(1, device_active(dev));
 	ut_assertok(uclass_get_device(UCLASS_BUTTON, 1, &cmp));
 	ut_asserteq_ptr(dev, cmp);
 
-	ut_assertok(button_get_by_label("christmas", &dev));
+	ut_assertok(button_get_by_label("button2", &dev));
 	ut_asserteq(1, device_active(dev));
 	ut_assertok(uclass_get_device(UCLASS_BUTTON, 2, &cmp));
 	ut_asserteq_ptr(dev, cmp);
 
-	ut_asserteq(-ENODEV, button_get_by_label("spring", &dev));
+	ut_asserteq(-ENODEV, button_get_by_label("nobutton", &dev));
 
 	return 0;
 }
diff --git a/test/py/tests/test_button.py b/test/py/tests/test_button.py
index 98067a9..3b7f148 100644
--- a/test/py/tests/test_button.py
+++ b/test/py/tests/test_button.py
@@ -4,16 +4,34 @@
 
 @pytest.mark.boardspec('sandbox')
 @pytest.mark.buildconfigspec('cmd_button')
-def test_button_exit_statuses(u_boot_console):
-    """Test that non-input button commands correctly return the command
-    success/failure status."""
+def test_button_list(u_boot_console):
+    """Test listing buttons"""
 
-    expected_response = 'rc:0'
     response = u_boot_console.run_command('button list; echo rc:$?')
-    assert(expected_response in response)
-    response = u_boot_console.run_command('button summer; echo rc:$?')
-    assert(expected_response in response)
+    assert('button1' in response)
+    assert('button2' in response)
+    assert('rc:0' in response)
 
-    expected_response = 'rc:1'
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('cmd_button')
+@pytest.mark.buildconfigspec('cmd_gpio')
+def test_button_return_code(u_boot_console):
+    """Test correct reporting of the button status
+
+    The sandbox gpio driver reports the last output value as input value.
+    We can use this in our test to emulate different input statuses.
+    """
+
+    u_boot_console.run_command('gpio set a3; gpio input a3');
+    response = u_boot_console.run_command('button button1; echo rc:$?')
+    assert('on' in response)
+    assert('rc:0' in response)
+
+    u_boot_console.run_command('gpio clear a3; gpio input a3');
+    response = u_boot_console.run_command('button button1; echo rc:$?')
+    assert('off' in response)
+    assert('rc:1' in response)
+
     response = u_boot_console.run_command('button nonexistent-button; echo rc:$?')
-    assert(expected_response in response)
+    assert('not found' in response)
+    assert('rc:1' in response)
diff --git a/test/py/tests/test_pstore.py b/test/py/tests/test_pstore.py
new file mode 100644
index 0000000..5a35724
--- /dev/null
+++ b/test/py/tests/test_pstore.py
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2020, Collabora
+# Author: Frédéric Danis <frederic.danis@collabora.com>
+
+import pytest
+import u_boot_utils
+import os
+import tempfile
+import shutil
+
+PSTORE_ADDR=0x3000000
+PSTORE_LENGTH=0x100000
+PSTORE_PANIC1='test/py/tests/test_pstore_data_panic1.hex'
+PSTORE_PANIC2='test/py/tests/test_pstore_data_panic2.hex'
+PSTORE_CONSOLE='test/py/tests/test_pstore_data_console.hex'
+ADDR=0x01000008
+
+def load_pstore(u_boot_console):
+    """Load PStore records from sample files"""
+
+    output = u_boot_console.run_command_list([
+        'host load hostfs - 0x%x %s' % (PSTORE_ADDR,
+            os.path.join(u_boot_console.config.source_dir, PSTORE_PANIC1)),
+        'host load hostfs - 0x%x %s' % (PSTORE_ADDR + 4096,
+            os.path.join(u_boot_console.config.source_dir, PSTORE_PANIC2)),
+        'host load hostfs - 0x%x %s' % (PSTORE_ADDR + 253 * 4096,
+            os.path.join(u_boot_console.config.source_dir, PSTORE_CONSOLE)),
+        'pstore set 0x%x 0x%x' % (PSTORE_ADDR, PSTORE_LENGTH)])
+
+def checkfile(u_boot_console, path, filesize, checksum):
+    """Check file against MD5 checksum"""
+
+    output = u_boot_console.run_command_list([
+        'load hostfs - %x %s' % (ADDR, path),
+        'printenv filesize'])
+    assert('filesize=%x' % (filesize) in ''.join(output))
+
+    output = u_boot_console.run_command_list([
+        'md5sum %x $filesize' % ADDR,
+        'setenv filesize'])
+    assert(checksum in ''.join(output))
+
+@pytest.mark.buildconfigspec('cmd_pstore')
+def test_pstore_display_all_records(u_boot_console):
+    """Test that pstore displays all records."""
+
+    u_boot_console.run_command('')
+    load_pstore(u_boot_console)
+    response = u_boot_console.run_command('pstore display')
+    assert('**** Dump' in response)
+    assert('**** Console' in response)
+
+@pytest.mark.buildconfigspec('cmd_pstore')
+def test_pstore_display_one_record(u_boot_console):
+    """Test that pstore displays only one record."""
+
+    u_boot_console.run_command('')
+    load_pstore(u_boot_console)
+    response = u_boot_console.run_command('pstore display dump 1')
+    assert('Panic#2 Part1' in response)
+    assert('**** Console' not in response)
+
+@pytest.mark.buildconfigspec('cmd_pstore')
+def test_pstore_save_records(u_boot_console):
+    """Test that pstore saves all records."""
+
+    outdir = tempfile.mkdtemp()
+
+    u_boot_console.run_command('')
+    load_pstore(u_boot_console)
+    u_boot_console.run_command('pstore save hostfs - %s' % (outdir))
+
+    checkfile(u_boot_console, '%s/dmesg-ramoops-0' % (outdir), 3798, '8059335ab4cfa62c77324c491659c503')
+    checkfile(u_boot_console, '%s/dmesg-ramoops-1' % (outdir), 4035, '3ff30df3429d81939c75d0070b5187b9')
+    checkfile(u_boot_console, '%s/console-ramoops-0' % (outdir), 4084, 'bb44de4a9b8ebd9b17ae98003287325b')
+
+    shutil.rmtree(outdir)
diff --git a/test/py/tests/test_pstore_data_console.hex b/test/py/tests/test_pstore_data_console.hex
new file mode 100644
index 0000000..e7f426e
--- /dev/null
+++ b/test/py/tests/test_pstore_data_console.hex
Binary files differ
diff --git a/test/py/tests/test_pstore_data_panic1.hex b/test/py/tests/test_pstore_data_panic1.hex
new file mode 100644
index 0000000..988929d
--- /dev/null
+++ b/test/py/tests/test_pstore_data_panic1.hex
Binary files differ
diff --git a/test/py/tests/test_pstore_data_panic2.hex b/test/py/tests/test_pstore_data_panic2.hex
new file mode 100644
index 0000000..8f9d56c
--- /dev/null
+++ b/test/py/tests/test_pstore_data_panic2.hex
Binary files differ
diff --git a/test/py/tests/test_vboot.py b/test/py/tests/test_vboot.py
index 6b998cf..e45800d 100644
--- a/test/py/tests/test_vboot.py
+++ b/test/py/tests/test_vboot.py
@@ -126,6 +126,23 @@
         cons.log.action('%s: Sign images' % sha_algo)
         util.run_and_log(cons, args)
 
+    def sign_fit_norequire(sha_algo, options):
+        """Sign the FIT
+
+        Signs the FIT and writes the signature into it. It also writes the
+        public key into the dtb. It does not mark key as 'required' in dtb.
+
+        Args:
+            sha_algo: Either 'sha1' or 'sha256', to select the algorithm to
+                    use.
+            options: Options to provide to mkimage.
+        """
+        args = [mkimage, '-F', '-k', tmpdir, '-K', dtb, fit]
+        if options:
+            args += options.split(' ')
+        cons.log.action('%s: Sign images' % sha_algo)
+        util.run_and_log(cons, args)
+
     def replace_fit_totalsize(size):
         """Replace FIT header's totalsize with something greater.
 
@@ -279,15 +296,40 @@
         # Build the FIT with dev key (keys NOT required). This adds the
         # signature into sandbox-u-boot.dtb, NOT marked 'required'.
         make_fit('sign-configs-%s%s.its' % (sha_algo, padding))
-        sign_fit(sha_algo, sign_options)
+        sign_fit_norequire(sha_algo, sign_options)
 
         # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys.
         # Only the prod key is set as 'required'. But FIT we just built has
-        # a dev signature only (sign_fit() overwrites the FIT).
+        # a dev signature only (sign_fit_norequire() overwrites the FIT).
         # Try to boot the FIT with dev key. This FIT should not be accepted by
         # U-Boot because the prod key is required.
         run_bootm(sha_algo, 'required key', '', False)
 
+        # Build the FIT with dev key (keys required) and sign it. This puts the
+        # signature into sandbox-u-boot.dtb, marked 'required'.
+        make_fit('sign-configs-%s%s.its' % (sha_algo, padding))
+        sign_fit(sha_algo, sign_options)
+
+        # Set the required-mode policy to "any".
+        # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys.
+        # Both the dev and prod key are set as 'required'. But FIT we just built has
+        # a dev signature only (sign_fit() overwrites the FIT).
+        # Try to boot the FIT with dev key. This FIT should be accepted by
+        # U-Boot because the dev key is required and policy is "any" required key.
+        util.run_and_log(cons, 'fdtput -t s %s /signature required-mode any' %
+                         (dtb))
+        run_bootm(sha_algo, 'multi required key', 'dev+', True)
+
+        # Set the required-mode policy to "all".
+        # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys.
+        # Both the dev and prod key are set as 'required'. But FIT we just built has
+        # a dev signature only (sign_fit() overwrites the FIT).
+        # Try to boot the FIT with dev key. This FIT should not be accepted by
+        # U-Boot because the prod key is required and policy is "all" required key
+        util.run_and_log(cons, 'fdtput -t s %s /signature required-mode all' %
+                         (dtb))
+        run_bootm(sha_algo, 'multi required key', '', False)
+
     cons = u_boot_console
     tmpdir = cons.config.result_dir + '/'
     datadir = cons.config.source_dir + '/test/py/tests/vboot/'
diff --git a/tools/image-host.c b/tools/image-host.c
index 3d52593..8886bef 100644
--- a/tools/image-host.c
+++ b/tools/image-host.c
@@ -320,6 +320,36 @@
 	return ret;
 }
 
+static int get_random_data(void *data, int size)
+{
+	unsigned char *tmp = data;
+	struct timespec date;
+	int i, ret = 0;
+
+	if (!tmp) {
+		printf("%s: pointer data is NULL\n", __func__);
+		ret = -1;
+		goto out;
+	}
+
+	ret = clock_gettime(CLOCK_MONOTONIC, &date);
+	if (ret < 0) {
+		printf("%s: clock_gettime has failed (err=%d, str=%s)\n",
+		       __func__, ret, strerror(ret));
+		goto out;
+	}
+
+	srand(date.tv_nsec);
+
+	for (i = 0; i < size; i++) {
+		*tmp = rand() & 0xff;
+		tmp++;
+	}
+
+ out:
+	return ret;
+}
+
 static int fit_image_setup_cipher(struct image_cipher_info *info,
 				  const char *keydir, void *fit,
 				  const char *image_name, int image_noffset,
@@ -345,13 +375,13 @@
 		goto out;
 	}
 
-	/* Read the IV name */
+	/*
+	 * Read the IV name
+	 *
+	 * If this property is not provided then mkimage will generate
+	 * a random IV and store it in the FIT image
+	 */
 	info->ivname = fdt_getprop(fit, noffset, "iv-name-hint", NULL);
-	if (!info->ivname) {
-		printf("Can't get iv name for cipher in image '%s'\n",
-		       image_name);
-		goto out;
-	}
 
 	info->fit = fit;
 	info->node_noffset = noffset;
@@ -377,17 +407,23 @@
 	if (ret < 0)
 		goto out;
 
-	/* Read the IV in the file */
-	snprintf(filename, sizeof(filename), "%s/%s%s",
-		 info->keydir, info->ivname, ".bin");
 	info->iv = malloc(info->cipher->iv_len);
 	if (!info->iv) {
 		printf("Can't allocate memory for iv\n");
 		ret = -1;
 		goto out;
 	}
+
+	if (info->ivname) {
+		/* Read the IV in the file */
+		snprintf(filename, sizeof(filename), "%s/%s%s",
+			 info->keydir, info->ivname, ".bin");
+		ret = fit_image_read_data(filename, (unsigned char *)info->iv,
+					  info->cipher->iv_len);
+	} else {
+		/* Generate a random IV */
+		ret = get_random_data((void *)info->iv, info->cipher->iv_len);
+	}
-	ret = fit_image_read_data(filename, (unsigned char *)info->iv,
-				  info->cipher->iv_len);
 
  out:
 	return ret;
@@ -453,9 +489,10 @@
 	 * Write the public key into the supplied FDT file; this might fail
 	 * several times, since we try signing with successively increasing
 	 * size values
+	 * And, if needed, write the iv in the FIT file
 	 */
 	if (keydest) {
-		ret = info.cipher->add_cipher_data(&info, keydest);
+		ret = info.cipher->add_cipher_data(&info, keydest, fit, node_noffset);
 		if (ret) {
 			printf("Failed to add verification data for cipher '%s' in image '%s'\n",
 			       info.keyname, image_name);
diff --git a/tools/patman/test_checkpatch.py b/tools/patman/test_checkpatch.py
index 792196e..f71c70f 100644
--- a/tools/patman/test_checkpatch.py
+++ b/tools/patman/test_checkpatch.py
@@ -405,6 +405,12 @@
         pm.add_line('include/myfile.h', '#include <dm.h>')
         self.checkSingleMessage(pm, 'BARRED_INCLUDE_IN_HDR', 'error')
 
+    def testConfigIsEnabledConfig(self):
+        """Test for accidental CONFIG_IS_ENABLED(CONFIG_*) calls"""
+        pm = PatchMaker()
+        pm.add_line('common/main.c', 'if (CONFIG_IS_ENABLED(CONFIG_CLK))')
+        self.checkSingleMessage(pm, 'CONFIG_IS_ENABLED_CONFIG', 'error')
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/tools/socfpgaimage.c b/tools/socfpgaimage.c
index 6dfd64e..3ba3c93 100644
--- a/tools/socfpgaimage.c
+++ b/tools/socfpgaimage.c
@@ -62,6 +62,9 @@
 #define HEADER_OFFSET	0x40
 #define VALIDATION_WORD	0x31305341
 
+/* Minimum and default entry point offset */
+#define ENTRY_POINT_OFFSET	0x14
+
 static uint8_t buffer_v0[0x10000];
 static uint8_t buffer_v1[0x40000];
 
@@ -120,8 +123,10 @@
 }
 
 static void sfp_build_header(uint8_t *buf, uint8_t ver, uint8_t flags,
-			     uint32_t length_bytes)
+			     uint32_t length_bytes,
+			     struct image_tool_params *params)
 {
+	uint32_t entry_offset = params->eflag ? params->ep : ENTRY_POINT_OFFSET;
 	struct socfpga_header_v0 header_v0 = {
 		.validation	= cpu_to_le32(VALIDATION_WORD),
 		.version	= 0,
@@ -136,7 +141,8 @@
 		.flags		= flags,
 		.header_u8	= cpu_to_le16(sizeof(header_v1)),
 		.length_u8	= cpu_to_le32(length_bytes),
-		.entry_offset	= cpu_to_le32(0x14),	/* Trampoline offset */
+		/* Trampoline offset */
+		.entry_offset	= cpu_to_le32(entry_offset),
 		.zero		= 0,
 	};
 
@@ -198,7 +204,8 @@
 
 /* Sign the buffer and return the signed buffer size */
 static int sfp_sign_buffer(uint8_t *buf, uint8_t ver, uint8_t flags,
-			   int len, int pad_64k)
+			   int len, int pad_64k,
+			   struct image_tool_params *params)
 {
 	uint32_t calc_crc;
 
@@ -206,7 +213,7 @@
 	len = ALIGN(len, 4);
 
 	/* Build header, adding 4 bytes to length to hold the CRC32. */
-	sfp_build_header(buf + HEADER_OFFSET, ver, flags, len + 4);
+	sfp_build_header(buf + HEADER_OFFSET, ver, flags, len + 4, params);
 
 	/* Calculate and apply the CRC */
 	calc_crc = ~pbl_crc32(0, (char *)buf, len);
@@ -275,7 +282,7 @@
 		printf("Not a sane SOCFPGA preloader\n");
 }
 
-static int socfpgaimage_check_params(struct image_tool_params *params)
+static int socfpgaimage_check_params_v0(struct image_tool_params *params)
 {
 	/* Not sure if we should be accepting fflags */
 	return	(params->dflag && (params->fflag || params->lflag)) ||
@@ -283,6 +290,26 @@
 		(params->lflag && (params->dflag || params->fflag));
 }
 
+static int socfpgaimage_check_params_v1(struct image_tool_params *params)
+{
+	/*
+	 * If the entry point is specified, ensure it is >= ENTRY_POINT_OFFSET
+	 * and it is 4 bytes aligned.
+	 */
+	if (params->eflag && (params->ep < ENTRY_POINT_OFFSET ||
+			      params->ep % 4 != 0)) {
+		fprintf(stderr,
+			"Error: Entry point must be at least 0x%x and 4-byte aligned.\n",
+			ENTRY_POINT_OFFSET);
+		return -1;
+	}
+
+	/* Not sure if we should be accepting fflags */
+	return	(params->dflag && (params->fflag || params->lflag)) ||
+		(params->fflag && (params->dflag || params->lflag)) ||
+		(params->lflag && (params->dflag || params->fflag));
+}
+
 static int socfpgaimage_check_image_types_v0(uint8_t type)
 {
 	if (type == IH_TYPE_SOCFPGAIMAGE)
@@ -343,7 +370,8 @@
 	return sfp_vrec_header(params, tparams, 1);
 }
 
-static void sfp_set_header(void *ptr, unsigned char ver)
+static void sfp_set_header(void *ptr, unsigned char ver,
+			   struct image_tool_params *params)
 {
 	uint8_t *buf = (uint8_t *)ptr;
 
@@ -357,19 +385,19 @@
 	memmove(buf, buf + sfp_fake_header_size(data_size, ver), data_size);
 	memset(buf + data_size, 0, sfp_fake_header_size(data_size, ver));
 
-	sfp_sign_buffer(buf, ver, 0, data_size, 0);
+	sfp_sign_buffer(buf, ver, 0, data_size, 0, params);
 }
 
 static void socfpgaimage_set_header_v0(void *ptr, struct stat *sbuf, int ifd,
 				       struct image_tool_params *params)
 {
-	sfp_set_header(ptr, 0);
+	sfp_set_header(ptr, 0, params);
 }
 
 static void socfpgaimage_set_header_v1(void *ptr, struct stat *sbuf, int ifd,
 				       struct image_tool_params *params)
 {
-	sfp_set_header(ptr, 1);
+	sfp_set_header(ptr, 1, params);
 }
 
 U_BOOT_IMAGE_TYPE(
@@ -377,7 +405,7 @@
 	"Altera SoCFPGA Cyclone V / Arria V image support",
 	0, /* This will be modified by vrec_header() */
 	(void *)buffer_v0,
-	socfpgaimage_check_params,
+	socfpgaimage_check_params_v0,
 	socfpgaimage_verify_header,
 	socfpgaimage_print_header,
 	socfpgaimage_set_header_v0,
@@ -392,7 +420,7 @@
 	"Altera SoCFPGA Arria10 image support",
 	0, /* This will be modified by vrec_header() */
 	(void *)buffer_v1,
-	socfpgaimage_check_params,
+	socfpgaimage_check_params_v1,
 	socfpgaimage_verify_header,
 	socfpgaimage_print_header,
 	socfpgaimage_set_header_v1,