Merge tag 'efi-2022-07-rc2-2' of https://source.denx.de/u-boot/custodians/u-boot-efi

Pull request for efi-2022-07-rc2-2

* Test
  Unit test for 'bootmenu' command

* UEFI
  Preparatory patches for implementing a UEFI boot options based menu
diff --git a/arch/arm/dts/armada-3720-turris-mox-u-boot.dtsi b/arch/arm/dts/armada-3720-turris-mox-u-boot.dtsi
index 28a36a6..71d5e70 100644
--- a/arch/arm/dts/armada-3720-turris-mox-u-boot.dtsi
+++ b/arch/arm/dts/armada-3720-turris-mox-u-boot.dtsi
@@ -3,25 +3,6 @@
  * 2022 by Marek Behún <kabel@kernel.org>
  */
 
-/ {
-	mdio {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		old_binding_phy1: ethernet-phy@1 {
-			reg = <1>;
-		};
-	};
-};
-
-&eth0 {
-	pinctrl-0 = <&rgmii_pins>, <&smi_pins>;
-	/delete-property/ phy-handle;
-	phy = <&old_binding_phy1>;
-};
-
-/delete-node/ &mdio;
-
 &usb3 {
 	vbus-supply = <&exp_usb3_vbus>;
 };
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 06cae68..9b62764 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -101,6 +101,7 @@
 
 config ARCH_OCTEON
 	bool "Support Marvell Octeon CN7xxx platforms"
+	select ARCH_EARLY_INIT_R
 	select CPU_CAVIUM_OCTEON
 	select DISPLAY_CPUINFO
 	select DMA_ADDR_T_64BIT
diff --git a/arch/mips/dts/mrvl,cn73xx.dtsi b/arch/mips/dts/mrvl,cn73xx.dtsi
index 2a17f7a..77f3548 100644
--- a/arch/mips/dts/mrvl,cn73xx.dtsi
+++ b/arch/mips/dts/mrvl,cn73xx.dtsi
@@ -267,5 +267,40 @@
 				interrupts = <0x6c010 4>;
 			};
 		};
+
+		/* SMI1 */
+		smi1: mdio@1180000003880 {
+			compatible = "cavium,octeon-3860-mdio";
+			reg = <0x11800 0x00003880 0x0 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		/* BGX 0 */
+		bgx0: ethernet-mac-nexus@11800e0000000 {
+			compatible = "cavium,octeon-7890-bgx";
+			reg = <0x11800 0xe0000000 0x0 0x1000000>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		/* BGX 1 */
+		bgx1: ethernet-mac-nexus@11800e1000000 {
+			compatible = "cavium,octeon-7890-bgx";
+			reg = <0x11800 0xe1000000 0x0 0x1000000>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		/* BGX 2 */
+		bgx2: ethernet-mac-nexus@11800e2000000 {
+			compatible = "cavium,octeon-7890-bgx";
+			reg = <0x11800 0xe2000000 0x0 0x1000000>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/mips/dts/mrvl,octeon-ebb7304.dts b/arch/mips/dts/mrvl,octeon-ebb7304.dts
index fda559d..08247eb 100644
--- a/arch/mips/dts/mrvl,octeon-ebb7304.dts
+++ b/arch/mips/dts/mrvl,octeon-ebb7304.dts
@@ -201,3 +201,48 @@
 		cd-gpios = <&gpio 25 1>; /* active low */
 	};
 };
+
+/* SMI_1 -- Available on rev 2 and later boards */
+&smi1 {
+	/**
+	 * The phy names are broken down as follows:
+	 * (m)phyxxyzzs
+	 * where:
+	 *	xx = 01 for SGMII, 10 for DXAUI, 20 for RXAUI
+	 *	     and 40 for XFI/LXAUI
+	 *	y = QLM/DLM number
+	 *	zz = PHY address (decimal)
+	 *	s = sub-phy number in the case of the Cortina
+	 *	    PHY
+	 * a mphy is a nexus phy that contains one or more
+	 * sub-phys, for example the Cortina CS4223.
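+	 * For example, phy01208 decodes to SGMII (xx = 01),
+	 * QLM 2 (y = 2) and PHY address 8 (zz = 08).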
+	 */
+
+	/* QLM 2 */
+	phy01208: ethernet-phy@01208 {
+		reg = <8>;
+		compatible = "marvell,88e1240", "ethernet-phy-ieee802.3-c22";
+
+		marvell,reg-init = <3 0x10 0 0x8665>,
+				   <3 0x11 0 0x00aa>,
+				   <3 0x12 0 0x4105>,
+				   <3 0x13 0 0x8a08>;
+
+		interrupt-parent = <&gpio>;
+		interrupts = <12 8>; /* Pin 12, active low */
+	};
+};
+
+/* BGX 0 */
+&bgx0 {
+	status = "okay";
+	phy-handle = <&phy01208>; /* put phy-handle in BGX node and MAC node */
+
+	/* SerDes 0, may differ from PCS Lane/LMAC */
+	eth0: ethernet-mac@D {
+		compatible = "cavium,octeon-7890-bgx-port";
+		reg = <0>;
+		local-mac-address = [ 00 00 00 00 00 00 ];
+		phy-handle = <&phy01208>;
+	};
+};
diff --git a/arch/mips/dts/mrvl,octeon-nic23.dts b/arch/mips/dts/mrvl,octeon-nic23.dts
index 72ef56d..dfbd51c 100644
--- a/arch/mips/dts/mrvl,octeon-nic23.dts
+++ b/arch/mips/dts/mrvl,octeon-nic23.dts
@@ -118,11 +118,208 @@
 &i2c0 {
 	u-boot,dm-pre-reloc;	/* Needed early for DDR SPD EEPROM */
 	clock-frequency = <100000>;
+
+	sfp0eeprom: eeprom@50 {
+		compatible = "atmel,24c01";
+		reg = <0x50>;
+	};
+
+	sfp0alerts: eeprom@51 {
+		compatible = "atmel,24c01";
+		reg = <0x51>;
+	};
 };
 
 &i2c1 {
 	u-boot,dm-pre-reloc;	/* Needed early for DDR SPD EEPROM */
 	clock-frequency = <100000>;
+
+	vitesse@10 {
+		compatible = "vitesse,vsc7224";
+		reg = <0x10>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* Note that reset is active high with this device */
+		reset = <&gpio 7 0>;
+
+		/* LoS pin can be pulled low when there is a loss of signal */
+		los = <&gpio 6 0>;
+
+		vitesse,reg-init =
+			/* Clear all masks */
+			/* Page select FSYNC0 (0x30) */
+			<0x7f 0x0030>,
+			/* Set FSYNC0 for 10.3125Gbps */
+			<0x80 0x2841>,	/* See Table 3. */
+			<0x81 0x0008>,
+			<0x82 0xc000>,
+			<0x83 0x0010>,
+			<0x84 0x1d00>,
+
+			/* All channels Rx settings set equally */
+			<0x7f 0x0050>,
+			/* Shrink EQ_BUFF */
+			<0x82 0x0014>,
+			/* Set EQVGA_ADAP = 1 (enable EQVGA circuitry),
+			 * USE_UNIT_GAIN = 1 (EQVGA is in unity gain),
+			 * USE_LPF = 0 (VGA adapt not using LPF),
+			 * USE_EQVGA = 1
+			 */
+			<0x89 0x7f13>,
+			/* Select min DFE Delay (DFE_DELAY) */
+			<0x90 0x5785>,
+			/* Set DFE 1-3 limit (DXMAX) = 32dec,
+			 * AP Max limit = 127 decimal
+			 */
+			<0x92 0x207f>,
+			/* Set AP Min limit = 32 decimal */
+			<0x93 0x2000>,
+			/* Set DFE Averaging to the slowest (DFE_AVG) */
+			<0x94 0x0031>,
+			/* Set Inductor Bypass OD_IND_BYP = 0 & fastest Rise/Fall */
+			<0x9c 0x0000>,
+			/* Setting DFE Boost = none. Must set for
+			 * rev C (if DFE in adapt mode)
+			 */
+			<0xaa 0x0888>,
+			/* Setting EQ Min = 8 & Max limit = 72 dec.
+			 * Must set for rev C, otherwise EQ is 0
+			 * (if EQ is in adaptive mode)
+			 */
+			<0xa8 0x2408>,
+			/* Setting EQVGA = 96, when in EQVGA manual mode */
+			<0xa9 0x0060>,
+			/* Setting SW_BFOCM, bits 15:14 to 01 */
+			<0x87 0x4021>,
+			/* Turn off adaptive input equalization
+			 * and VGA adaptive algorithm control.
+			 */
+			<0x89 0x7313>,
+			/* Turn on adaptive input equalization
+			 * and VGA adaptive algorithm control.
+			 */
+			<0x89 0x7f13>;
+
+		vitesse-channel@0 {
+			compatible = "vitesse,vsc7224-channel";
+			reg = <0>;
+			direction-tx;
+			sfp-mac = <&eth0>;
+
+			/* TAP settings.  The format of this is as
+			 * follows:
+			 * - cable length in meters, 0 = active or
+			 *   optical module
+			 * - maintap value
+			 * - pretap value
+			 * - posttap value
+			 *
+			 * For the cable length, the value will apply
+			 * for that cable length and greater until the
+			 * next largest cable length specified.  These
+			 * values must be ordered first by channel mask
+			 * then by cable length.  These are typically
+			 * set for the transmit channels, not the
+			 * receive channels.
+			 */
+			taps = <0 0x0013 0x000f 0x0000>,
+			       <1 0x001f 0x000f 0x0004>,
+			       <3 0x0014 0x000b 0x0004>,
+			       <5 0x0014 0x0009 0x0006>,
+			       <7 0x0014 0x000f 0x0000>,
+			       <10 0x0012 0x000b 0x0013>;
+		};
+
+		vitesse-channel@1 {
+			compatible = "vitesse,vsc7224-channel";
+			reg = <1>;
+			/* Ignore mod_abs and module */
+			direction-rx;
+			sfp-mac = <&eth0>;
+
+			/* Disable pre-tap */
+			pretap-disable;
+
+			/* Disable post-tap */
+			posttap-disable;
+
+			/* The taps property has the following fields:
+			 * - cable length (ignored for rx)
+			 * - main tap value
+			 * - pre tap value
+			 * - post tap value
+			 *
+			 * NOTE: if taps are disabled then they
+			 *       are not programmed.
+			 */
+			taps = <0 0x0a 0x0b 0x10>;
+		};
+
+		vitesse-channel@2 {
+			compatible = "vitesse,vsc7224-channel";
+			reg = <2>;
+			direction-tx;
+			sfp-mac = <&eth1>;
+
+			/* TAP settings.  The format of this is as
+			 * follows:
+			 * - cable length in meters, 0 = active or
+			 *   optical module
+			 * - maintap value
+			 * - pretap value
+			 * - posttap value
+			 *
+			 * For the cable length, the value will apply
+			 * for that cable length and greater until the
+			 * next largest cable length specified.  These
+			 * values must be ordered first by channel mask
+			 * then by cable length.  These are typically
+			 * set for the transmit channels, not the
+			 * receive channels.
+			 */
+			taps = <0 0x0013 0x000f 0x0000>,
+			       <1 0x001f 0x000f 0x0004>,
+			       <3 0x0014 0x000b 0x0004>,
+			       <5 0x0014 0x0009 0x0006>,
+			       <7 0x0014 0x000f 0x0000>,
+			       <10 0x0012 0x000b 0x0013>;
+		};
+
+		vitesse-channel@3 {
+			compatible = "vitesse,vsc7224-channel";
+			reg = <3>;
+			/* Ignore mod_abs and module */
+			direction-rx;
+			sfp-mac = <&eth1>;
+
+			/* Disable pre-tap */
+			pretap-disable;
+
+			/* Disable post-tap */
+			posttap-disable;
+
+			/* The taps property has the following fields:
+			 * - cable length (ignored for rx)
+			 * - main tap value
+			 * - pre tap value
+			 * - post tap value
+			 *
+			 * NOTE: if taps are disabled then they
+			 *       are not programmed.
+			 */
+			taps = <0 0x0a 0x0b 0x10>;
+		};
+	};
+
+	sfp1eeprom: eeprom@50 {
+		compatible = "atmel,24c01";
+		reg = <0x50>;
+	};
+
+	sfp1alerts: eeprom@51 {
+		compatible = "atmel,24c01";
+		reg = <0x51>;
+	};
 };
 
 &mmc {
@@ -151,6 +348,26 @@
 		compatible = "marvell,pci-bootcmd";
 		status = "okay";
 	};
+
+	sfp0: sfp-slot@0 {
+		compatible = "ethernet,sfp-slot";
+		tx_disable = <&gpio 16 0>;
+		mod_abs = <&gpio 17 0>;
+		tx_error = <&gpio 19 0>;
+		rx_los = <&gpio 18 0>;
+		eeprom = <&sfp0eeprom>;
+		diag = <&sfp0alerts>;
+	};
+
+	sfp1: sfp-slot@1 {
+		compatible = "ethernet,sfp-slot";
+		tx_disable = <&gpio 21 0>;
+		mod_abs = <&gpio 22 0>;
+		tx_error = <&gpio 24 0>;
+		rx_los = <&gpio 23 0>;
+		eeprom = <&sfp1eeprom>;
+		diag = <&sfp1alerts>;
+	};
 };
 
 &spi {
@@ -160,3 +377,24 @@
 		reg = <0>;
 	};
 };
+
+/* BGX 2 */
+&bgx2 {
+	status = "okay";
+
+	/* SerDes 0, may differ from PCS Lane/LMAC */
+	eth0: ethernet-mac@0 {
+		compatible = "cavium,octeon-7890-bgx-port";
+		reg = <0>;
+		local-mac-address = [ 00 00 00 00 00 00 ];
+		sfp-slot = <&sfp0>;
+	};
+
+	/* SerDes 1, may differ from PCS Lane/LMAC */
+	eth1: ethernet-mac@1 {
+		compatible = "cavium,octeon-7890-bgx-port";
+		reg = <1>;
+		local-mac-address = [ 00 00 00 00 00 00 ];
+		sfp-slot = <&sfp1>;
+	};
+};
diff --git a/arch/mips/mach-octeon/Makefile b/arch/mips/mach-octeon/Makefile
index 40ddab2..6aa7b36 100644
--- a/arch/mips/mach-octeon/Makefile
+++ b/arch/mips/mach-octeon/Makefile
@@ -12,13 +12,46 @@
 obj-y += cvmx-bootmem.o
 obj-y += bootoctlinux.o
 
-# QLM related code
+# Misc Octeon C files, mostly for QLM & ethernet support
+obj-y += cvmx-agl.o
+obj-y += cvmx-fpa.o
+obj-y += cvmx-fpa-resource.o
+obj-y += cvmx-fau-compat.o
+obj-y += cvmx-global-resources.o
+obj-y += cvmx-cmd-queue.o
+obj-y += cvmx-helper-agl.o
+obj-y += cvmx-helper-bgx.o
+obj-y += cvmx-helper-board.o
 obj-y += cvmx-helper-cfg.o
 obj-y += cvmx-helper-fdt.o
+obj-y += cvmx-helper-fpa.o
+obj-y += cvmx-helper-ilk.o
+obj-y += cvmx-helper-ipd.o
 obj-y += cvmx-helper-jtag.o
+obj-y += cvmx-helper-loop.o
+obj-y += cvmx-helper-npi.o
+obj-y += cvmx-helper-pki.o
+obj-y += cvmx-helper-pko.o
+obj-y += cvmx-helper-pko3.o
+obj-y += cvmx-helper-rgmii.o
+obj-y += cvmx-helper-sfp.o
+obj-y += cvmx-helper-sgmii.o
 obj-y += cvmx-helper-util.o
+obj-y += cvmx-helper-xaui.o
 obj-y += cvmx-helper.o
+obj-y += cvmx-ilk.o
+obj-y += cvmx-ipd.o
 obj-y += cvmx-pcie.o
+obj-y += cvmx-pki.o
+obj-y += cvmx-pki-resources.o
+obj-y += cvmx-pko.o
+obj-y += cvmx-pko-internal-ports-range.o
+obj-y += cvmx-pko3.o
+obj-y += cvmx-pko3-compat.o
+obj-y += cvmx-pko3-resources.o
+obj-y += cvmx-pko3-queue.o
+obj-y += cvmx-range.o
 obj-y += cvmx-qlm.o
+obj-y += cvmx-qlm-tables.o
 obj-y += octeon_fdt.o
 obj-y += octeon_qlm.o
diff --git a/arch/mips/mach-octeon/cpu.c b/arch/mips/mach-octeon/cpu.c
index 6cfcc3e..1bdc6cd 100644
--- a/arch/mips/mach-octeon/cpu.c
+++ b/arch/mips/mach-octeon/cpu.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
  */
 
 #include <dm.h>
@@ -17,6 +17,8 @@
 #include <mach/cvmx-bootmem.h>
 #include <mach/cvmx-regs.h>
 #include <mach/cvmx-sata-defs.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon-feature.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -393,14 +395,55 @@
 	return ret;
 }
 
-int arch_misc_init(void)
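+/*
+ * Set up the LMTDMA window in the CVMSEG scratch area (needed for
+ * PKO3 command submission) and clear the LMT line
+ */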
+static void configure_lmtdma_window(void)
+{
+	u64 tmp;
+	u64 addr;
+	u64 end_addr;
+
+	CVMX_MF_CVM_MEM_CTL(tmp);
+	tmp &= ~0x1ffull;
+	tmp |= 0x104ull;
+
+	/* enable LMTDMA */
+	tmp |= (1ull << 51);
+	/* configure scratch line 2 for LMT */
+	/* TODO: reserve this scratch line, so that others will not use it */
+	/* TODO: store LMTLINE in global var */
+	tmp |= (CVMX_PKO_LMTLINE << 45);
+	/* clear LMTLINE in scratch */
+	addr = CVMX_PKO_LMTLINE * CVMX_CACHE_LINE_SIZE;
+	end_addr = addr + CVMX_CACHE_LINE_SIZE;
+
+	while (addr < end_addr) {
+		*CASTPTR(volatile u64, addr + CVMX_SCRATCH_BASE) = (u64)0;
+		addr += 8;
+	}
+	CVMX_MT_CVM_MEM_CTL(tmp);
+}
+
+int arch_early_init_r(void)
 {
 	int ret;
 
+	/*
+	 * This needs to be called early, so that e.g. networking can
+	 * access the bootmem infrastructure
+	 */
 	ret = octeon_bootmem_init();
 	if (ret)
 		return ret;
 
+	if (octeon_has_feature(OCTEON_FEATURE_PKO3))
+		configure_lmtdma_window();
+
+	return 0;
+}
+
+int arch_misc_init(void)
+{
+	int ret;
+
 	ret = octeon_configure_load_memory();
 	if (ret)
 		return ret;
diff --git a/arch/mips/mach-octeon/cvmx-agl.c b/arch/mips/mach-octeon/cvmx-agl.c
new file mode 100644
index 0000000..9eea857
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-agl.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for RGMII (MGMT) initialization, configuration,
+ * and monitoring.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-agl.h>
+#include <mach/cvmx-agl-defs.h>
+
+/*
+ * Enable an RGMII (AGL) port by programming its frame control settings.
+ *
+ * @param port   port to enable
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_agl_enable(int port)
+{
+	cvmx_agl_gmx_rxx_frm_ctl_t rxx_frm_ctl;
+
+	rxx_frm_ctl.u64 = 0;
+	rxx_frm_ctl.s.pre_align = 1;
+	/* When set, disables the length check for non-min sized pkts with
+	 * padding in the client data
+	 */
+	rxx_frm_ctl.s.pad_len = 1;
+	/* When set, disables the length check for VLAN pkts */
+	rxx_frm_ctl.s.vlan_len = 1;
+	/* When set, PREAMBLE checking is less strict */
+	rxx_frm_ctl.s.pre_free = 1;
+	/* Control Pause Frames can match station SMAC */
+	rxx_frm_ctl.s.ctl_smac = 0;
+	/* Control Pause Frames can match globally assigned Multicast address */
+	rxx_frm_ctl.s.ctl_mcst = 1;
+	rxx_frm_ctl.s.ctl_bck = 1;  /* Forward pause information to TX block */
+	rxx_frm_ctl.s.ctl_drp = 1;  /* Drop Control Pause Frames */
+	rxx_frm_ctl.s.pre_strp = 1; /* Strip off the preamble */
+	/* This port is configured to send PREAMBLE+SFD to begin every frame.
+	 * GMX checks that the PREAMBLE is sent correctly
+	 */
+	rxx_frm_ctl.s.pre_chk = 1;
+	csr_wr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
+
+	return 0;
+}
+
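+/*
+ * Return the link state of an RGMII/MII port, or a forced 1Gbps
+ * full-duplex link state if the port is configured as always up.
+ *
+ * @param port   IPD port number
+ *
+ * @return Link state
+ */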
+cvmx_helper_link_info_t cvmx_agl_link_get(int port)
+{
+	cvmx_helper_link_info_t result;
+	int interface, port_index;
+
+	/* Fake IPD port is used on some older models. */
+	if (port < 0)
+		return __cvmx_helper_board_link_get(port);
+
+	/* Simulator does not have PHY, use some defaults. */
+	interface = cvmx_helper_get_interface_num(port);
+	port_index = cvmx_helper_get_interface_index_num(port);
+	if (cvmx_helper_get_port_force_link_up(interface, port_index)) {
+		result.u64 = 0;
+		result.s.full_duplex = 1;
+		result.s.link_up = 1;
+		result.s.speed = 1000;
+		return result;
+	}
+
+	return __cvmx_helper_board_link_get(port);
+}
+
+/*
+ * Set MII/RGMII link based on mode.
+ *
+ * @param port   interface port to set the link.
+ * @param link_info  Link status
+ *
+ * @return       0 on success and -1 on failure
+ */
+int cvmx_agl_link_set(int port, cvmx_helper_link_info_t link_info)
+{
+	cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
+
+	/* Disable GMX before we make any changes. */
+	agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
+	agl_gmx_prtx.s.en = 0;
+	agl_gmx_prtx.s.tx_en = 0;
+	agl_gmx_prtx.s.rx_en = 0;
+	csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+		u64 one_second = 0x1000000; /* todo: this needs checking */
+
+		/* Wait for GMX to be idle */
+		if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
+					  cvmx_agl_gmx_prtx_cfg_t, rx_idle, ==,
+					  1, one_second) ||
+		    CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
+					  cvmx_agl_gmx_prtx_cfg_t, tx_idle, ==,
+					  1, one_second)) {
+			debug("AGL%d: Timeout waiting for GMX to be idle\n",
+			      port);
+			return -1;
+		}
+	}
+
+	agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
+
+	/* Set duplex mode */
+	if (!link_info.s.link_up)
+		agl_gmx_prtx.s.duplex = 1; /* Force full duplex on down links */
+	else
+		agl_gmx_prtx.s.duplex = link_info.s.full_duplex;
+
+	switch (link_info.s.speed) {
+	case 10:
+		agl_gmx_prtx.s.speed = 0;
+		agl_gmx_prtx.s.slottime = 0;
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+			agl_gmx_prtx.s.speed_msb = 1;
+			agl_gmx_prtx.s.burst = 1;
+		}
+		break;
+
+	case 100:
+		agl_gmx_prtx.s.speed = 0;
+		agl_gmx_prtx.s.slottime = 0;
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+			agl_gmx_prtx.s.speed_msb = 0;
+			agl_gmx_prtx.s.burst = 1;
+		}
+		break;
+
+	case 1000:
+		/* 1000 MBits is only supported on 6XXX chips */
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+			agl_gmx_prtx.s.speed_msb = 0;
+			agl_gmx_prtx.s.speed = 1;
+			/* Only matters for half-duplex */
+			agl_gmx_prtx.s.slottime = 1;
+			agl_gmx_prtx.s.burst = agl_gmx_prtx.s.duplex;
+		}
+		break;
+
+	/* No link */
+	case 0:
+	default:
+		break;
+	}
+
+	/* Write the new GMX setting with the port still disabled. */
+	csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+	/* Read GMX CFG again to make sure the config is completed. */
+	agl_gmx_prtx.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
+
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
+		cvmx_agl_gmx_txx_clk_t agl_clk;
+		cvmx_agl_prtx_ctl_t prt_ctl;
+
+		prt_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+		agl_clk.u64 = csr_rd(CVMX_AGL_GMX_TXX_CLK(port));
+		/* MII (both speeds) and RGMII 1000 setting */
+		agl_clk.s.clk_cnt = 1;
+		/* Check other speeds for RGMII mode */
+		if (prt_ctl.s.mode == 0 || OCTEON_IS_OCTEON3()) {
+			if (link_info.s.speed == 10)
+				agl_clk.s.clk_cnt = 50;
+			else if (link_info.s.speed == 100)
+				agl_clk.s.clk_cnt = 5;
+		}
+		csr_wr(CVMX_AGL_GMX_TXX_CLK(port), agl_clk.u64);
+	}
+
+	/* Enable transmit and receive ports */
+	agl_gmx_prtx.s.tx_en = 1;
+	agl_gmx_prtx.s.rx_en = 1;
+	csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+	/* Enable the link. */
+	agl_gmx_prtx.s.en = 1;
+	csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+	if (OCTEON_IS_OCTEON3()) {
+		union cvmx_agl_prtx_ctl agl_prtx_ctl;
+		/* Enable the interface, set clkrst */
+		agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+		agl_prtx_ctl.s.clkrst = 1;
+		csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+		csr_rd(CVMX_AGL_PRTX_CTL(port));
+		agl_prtx_ctl.s.enable = 1;
+		csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+		/* Read the value back to force the previous write */
+		csr_rd(CVMX_AGL_PRTX_CTL(port));
+	}
+
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-bootmem.c b/arch/mips/mach-octeon/cvmx-bootmem.c
index 9bd644d..52e58b4 100644
--- a/arch/mips/mach-octeon/cvmx-bootmem.c
+++ b/arch/mips/mach-octeon/cvmx-bootmem.c
@@ -1189,7 +1189,7 @@
 	if (mem_size > OCTEON_DDR1_SIZE) {
 		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
 		__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
-					mem_size - OCTEON_DDR1_SIZE, 0);
+					mem_size - OCTEON_DDR2_BASE, 0);
 	} else {
 		__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
 	}
@@ -1349,7 +1349,6 @@
 		addr += sizeof(struct cvmx_bootmem_named_block_desc);
 	}
 
-	// test-only: DEBUG ifdef???
 	cvmx_bootmem_phy_list_print();
 
 	return 1;
diff --git a/arch/mips/mach-octeon/cvmx-cmd-queue.c b/arch/mips/mach-octeon/cvmx-cmd-queue.c
new file mode 100644
index 0000000..c4b49f9
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-cmd-queue.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-fpa.h>
+#include <mach/cvmx-cmd-queue.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+#include <mach/cvmx-dpi-defs.h>
+#include <mach/cvmx-npei-defs.h>
+#include <mach/cvmx-pexp-defs.h>
+
+/**
+ * This application uses this pointer to access the global queue
+ * state. It points to a bootmem named block.
+ */
+__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptrs[CVMX_MAX_NODES];
+
+/**
+ * @INTERNAL
+ * Initialize the Global queue state pointer.
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(unsigned int node)
+{
+	char alloc_name[32] = "cvmx_cmd_queues";
+	char s[4] = "_0";
+	const struct cvmx_bootmem_named_block_desc *block_desc = NULL;
+	unsigned int size;
+	u64 paddr_min = 0, paddr_max = 0;
+	void *ptr;
+
+	if (cvmx_likely(__cvmx_cmd_queue_state_ptrs[node]))
+		return CVMX_CMD_QUEUE_SUCCESS;
+
+	/* Add node# to block name */
+	if (node > 0) {
+		s[1] += node;
+		strcat(alloc_name, s);
+	}
+
+	/* Find the named block in case it has been created already */
+	block_desc = cvmx_bootmem_find_named_block(alloc_name);
+	if (block_desc) {
+		__cvmx_cmd_queue_state_ptrs[node] =
+			(__cvmx_cmd_queue_all_state_t *)cvmx_phys_to_ptr(
+				block_desc->base_addr);
+		return CVMX_CMD_QUEUE_SUCCESS;
+	}
+
+	size = sizeof(*__cvmx_cmd_queue_state_ptrs[node]);
+
+	/* The rest of the code allocates a new named block */
+
+	/* Atomically allocate named block once, and zero it by default */
+	ptr = cvmx_bootmem_alloc_named_range_once(size, paddr_min, paddr_max,
+						  128, alloc_name, NULL);
+
+	if (ptr) {
+		__cvmx_cmd_queue_state_ptrs[node] =
+			(__cvmx_cmd_queue_all_state_t *)ptr;
+	} else {
+		debug("ERROR: %s: Unable to get named block %s.\n", __func__,
+		      alloc_name);
+		return CVMX_CMD_QUEUE_NO_MEMORY;
+	}
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @param queue_id  Hardware command queue to initialize.
+ * @param max_depth Maximum outstanding commands that can be queued.
+ * @param fpa_pool  FPA pool the command queues should come from.
+ * @param pool_size Size of each buffer in the FPA pool (bytes)
+ *
+ * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
+						  int max_depth, int fpa_pool,
+						  int pool_size)
+{
+	__cvmx_cmd_queue_state_t *qstate;
+	cvmx_cmd_queue_result_t result;
+	unsigned int node;
+	unsigned int index;
+	int fpa_pool_min, fpa_pool_max;
+	union cvmx_fpa_ctl_status status;
+	void *buffer;
+
+	node = __cvmx_cmd_queue_get_node(queue_id);
+
+	index = __cvmx_cmd_queue_get_index(queue_id);
+	if (index >= NUM_ELEMENTS(__cvmx_cmd_queue_state_ptrs[node]->state)) {
+		printf("ERROR: %s: queue %#x out of range\n", __func__,
+		       queue_id);
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+	}
+
+	result = __cvmx_cmd_queue_init_state_ptr(node);
+	if (result != CVMX_CMD_QUEUE_SUCCESS)
+		return result;
+
+	qstate = __cvmx_cmd_queue_get_state(queue_id);
+	if (!qstate)
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+	/*
+	 * We artificially limit max_depth to 1<<20 words. It is an
+	 * arbitrary limit.
+	 */
+	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
+		if (max_depth < 0 || max_depth > 1 << 20)
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+	} else if (max_depth != 0) {
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+	}
+
+	/* CVMX_FPA_NUM_POOLS maps to cvmx_fpa3_num_auras for FPA3 */
+	fpa_pool_min = node << 10;
+	fpa_pool_max = fpa_pool_min + CVMX_FPA_NUM_POOLS;
+
+	if (fpa_pool < fpa_pool_min || fpa_pool >= fpa_pool_max)
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+	if (pool_size < 128 || pool_size > (1 << 17))
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+	if (pool_size & 7)
+		debug("WARNING: %s: pool_size %d not multiple of 8\n", __func__,
+		      pool_size);
+
+	/* See if someone else has already initialized the queue */
+	if (qstate->base_paddr) {
+		int depth;
+		static const char emsg[] = /* Common error message part */
+			"Queue already initialized with different ";
+
+		depth = (max_depth + qstate->pool_size_m1 - 1) /
+			qstate->pool_size_m1;
+		if (depth != qstate->max_depth) {
+			depth = qstate->max_depth * qstate->pool_size_m1;
+			debug("ERROR: %s: %s max_depth (%d).\n", __func__, emsg,
+			      depth);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		if (fpa_pool != qstate->fpa_pool) {
+			debug("ERROR: %s: %s FPA pool (%d).\n", __func__, emsg,
+			      (int)qstate->fpa_pool);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
+			debug("ERROR: %s: %s FPA pool size (%u).\n", __func__,
+			      emsg, (qstate->pool_size_m1 + 1) << 3);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		return CVMX_CMD_QUEUE_ALREADY_SETUP;
+	}
+
+	if (!(octeon_has_feature(OCTEON_FEATURE_FPA3))) {
+		status.u64 = csr_rd(CVMX_FPA_CTL_STATUS);
+		if (!status.s.enb) {
+			debug("ERROR: %s: FPA is not enabled.\n",
+			      __func__);
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+	}
+	buffer = cvmx_fpa_alloc(fpa_pool);
+	if (!buffer) {
+		debug("ERROR: %s: failed to allocate first buffer.\n", __func__);
+		return CVMX_CMD_QUEUE_NO_MEMORY;
+	}
+
+	index = (pool_size >> 3) - 1;
+	qstate->pool_size_m1 = index;
+	qstate->max_depth = (max_depth + index - 1) / index;
+	qstate->index = 0;
+	qstate->fpa_pool = fpa_pool;
+	qstate->base_paddr = cvmx_ptr_to_phys(buffer);
+
+	/* Initialize lock */
+	__cvmx_cmd_queue_lock_init(queue_id);
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routine access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @param queue_id Command queue to query
+ *
+ * @return Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
+{
+	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+	if (qptr && qptr->base_paddr)
+		return cvmx_phys_to_ptr((u64)qptr->base_paddr);
+	else
+		return NULL;
+}
+
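+/**
+ * @INTERNAL
+ * Allocate a new command buffer block from the per-queue FPA pool,
+ * link it to the end of the current (full) block and make it the
+ * queue's current block.
+ *
+ * @return virtual base of the new block, or NULL on allocation failure
+ */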
+static u64 *__cvmx_cmd_queue_add_blk(__cvmx_cmd_queue_state_t *qptr)
+{
+	u64 *cmd_ptr;
+	u64 *new_buffer;
+	u64 new_paddr;
+
+	/* Get base vaddr of current (full) block */
+	cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
+
+	/* Allocate a new block from the per-queue pool */
+	new_buffer = (u64 *)cvmx_fpa_alloc(qptr->fpa_pool);
+
+	/* Check for allocation failure */
+	if (cvmx_unlikely(!new_buffer))
+		return NULL;
+
+	/* Zero out the new block link pointer,
+	 * in case this block will be filled to the rim
+	 */
+	new_buffer[qptr->pool_size_m1] = ~0ull;
+
+	/* Get physical address of the new buffer */
+	new_paddr = cvmx_ptr_to_phys(new_buffer);
+
+	/* Store the physical link address at the end of current full block */
+	cmd_ptr[qptr->pool_size_m1] = new_paddr;
+
+	/* Store the physical address in the queue state structure */
+	qptr->base_paddr = new_paddr;
+	qptr->index = 0;
+
+	/* Return the virtual base of the new block */
+	return new_buffer;
+}
+
+/**
+ * @INTERNAL
+ * Add command words into a queue, handling all the corner cases
+ * where only some of the words might fit into the current block,
+ * and a new block may need to be allocated.
+ * Locking and argument checks are done in the front-end in-line
+ * functions that call this one for the rare corner cases.
+ */
+cvmx_cmd_queue_result_t
+__cvmx_cmd_queue_write_raw(cvmx_cmd_queue_id_t queue_id,
+			   __cvmx_cmd_queue_state_t *qptr, int cmd_count,
+			   const u64 *cmds)
+{
+	u64 *cmd_ptr;
+	unsigned int index;
+
+	cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
+	index = qptr->index;
+
+	/* Enforce queue depth limit, if enabled, once per block */
+	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth)) {
+		unsigned int depth = cvmx_cmd_queue_length(queue_id);
+
+		depth /= qptr->pool_size_m1;
+
+		if (cvmx_unlikely(depth > qptr->max_depth))
+			return CVMX_CMD_QUEUE_FULL;
+	}
+
+	/*
+	 * If the block allocation fails, even the words that we wrote
+	 * to the current block will not count because the 'index' will
+	 * not be committed.
+	 * The loop is run 'count + 1' times to take care of the tail
+	 * case, where the buffer is full to the rim, so the link
+	 * pointer must be filled with a valid address.
+	 */
+	while (cmd_count >= 0) {
+		if (index >= qptr->pool_size_m1) {
+			/* Block is full, get another one and proceed */
+			cmd_ptr = __cvmx_cmd_queue_add_blk(qptr);
+
+			/* Bail on allocation error w/o committing anything */
+			if (cvmx_unlikely(!cmd_ptr))
+				return CVMX_CMD_QUEUE_NO_MEMORY;
+
+			/* Reset index for start of new block */
+			index = 0;
+		}
+		/* Exit Loop on 'count + 1' iterations */
+		if (cmd_count <= 0)
+			break;
+		/* Store commands into queue block while there is space */
+		cmd_ptr[index++] = *cmds++;
+		cmd_count--;
+	} /* while cmd_count */
+
+	/* Commit added words if all is well */
+	qptr->index = index;
+
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
diff --git a/arch/mips/mach-octeon/cvmx-fau-compat.c b/arch/mips/mach-octeon/cvmx-fau-compat.c
new file mode 100644
index 0000000..9c2ff76
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-fau-compat.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-hwfau.h>
+
+u8 *cvmx_fau_regs_ptr;
+
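+/* Zero the named block used to emulate FAU registers in software */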
+void cvmx_fau_bootmem_init(void *bootmem)
+{
+	memset(bootmem, 0, CVMX_FAU_MAX_REGISTERS_8);
+}
+
+/**
+ * Initializes FAU region for devices without FAU unit.
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_fau_init(void)
+{
+	cvmx_fau_regs_ptr = (u8 *)cvmx_bootmem_alloc_named_range_once(
+		CVMX_FAU_MAX_REGISTERS_8, 0, 1ull << 31, 128,
+		"cvmx_fau_registers", cvmx_fau_bootmem_init);
+
+	if (!cvmx_fau_regs_ptr) {
+		debug("ERROR: Failed to alloc named block for software FAU.\n");
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-fpa-resource.c b/arch/mips/mach-octeon/cvmx-fpa-resource.c
new file mode 100644
index 0000000..e502564
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-fpa-resource.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+static struct global_resource_tag get_fpa1_resource_tag(void)
+{
+	return CVMX_GR_TAG_FPA;
+}
+
+static struct global_resource_tag get_fpa3_aura_resource_tag(int node)
+{
+	return cvmx_get_gr_tag('c', 'v', 'm', '_', 'a', 'u', 'r', 'a', '_',
+			       node + '0', '.', '.', '.', '.', '.', '.');
+}
+
+static struct global_resource_tag get_fpa3_pool_resource_tag(int node)
+{
+	return cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'o', 'o', 'l', '_',
+			       node + '0', '.', '.', '.', '.', '.', '.');
+}
+
+int cvmx_fpa_get_max_pools(void)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FPA3))
+		return cvmx_fpa3_num_auras();
+	else if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		/* 68xx pool 8 is not available via API */
+		return CVMX_FPA1_NUM_POOLS;
+	else
+		return CVMX_FPA1_NUM_POOLS;
+}
+
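+/**
+ * Reserve an AURA, either a specific one or the next available
+ *
+ * @param node is the node number, or -1 for the local node
+ * @param desired_aura_num is the requested AURA, or -1 for any free one
+ * @return reserved AURA, or CVMX_FPA3_INVALID_GAURA on failure
+ */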
+cvmx_fpa3_gaura_t cvmx_fpa3_reserve_aura(int node, int desired_aura_num)
+{
+	u64 owner = cvmx_get_app_id();
+	int rv = 0;
+	struct global_resource_tag tag;
+	cvmx_fpa3_gaura_t aura;
+
+	if (node == -1)
+		node = cvmx_get_node_num();
+
+	tag = get_fpa3_aura_resource_tag(node);
+
+	if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_auras()) !=
+	    0) {
+		printf("ERROR: %s: global resource create node=%u\n", __func__,
+		       node);
+		return CVMX_FPA3_INVALID_GAURA;
+	}
+
+	if (desired_aura_num >= 0)
+		rv = cvmx_reserve_global_resource_range(tag, owner,
+							desired_aura_num, 1);
+	else
+		rv = cvmx_resource_alloc_reverse(tag, owner);
+
+	if (rv < 0) {
+		printf("ERROR: %s: node=%u desired aura=%d\n", __func__, node,
+		       desired_aura_num);
+		return CVMX_FPA3_INVALID_GAURA;
+	}
+
+	aura = __cvmx_fpa3_gaura(node, rv);
+
+	return aura;
+}
+
+int cvmx_fpa3_release_aura(cvmx_fpa3_gaura_t aura)
+{
+	struct global_resource_tag tag = get_fpa3_aura_resource_tag(aura.node);
+	int laura = aura.laura;
+
+	if (!__cvmx_fpa3_aura_valid(aura))
+		return -1;
+
+	return cvmx_free_global_resource_range_multiple(tag, &laura, 1);
+}
+
+/**
+ * Reserve a POOL, either a specific one or the next available
+ *
+ * @param node is the node number, or -1 for the local node
+ * @param desired_pool_num is the requested pool, or -1 for any free pool
+ * @return reserved POOL handle, or CVMX_FPA3_INVALID_POOL on failure
+ */
+cvmx_fpa3_pool_t cvmx_fpa3_reserve_pool(int node, int desired_pool_num)
+{
+	u64 owner = cvmx_get_app_id();
+	int rv = 0;
+	struct global_resource_tag tag;
+	cvmx_fpa3_pool_t pool;
+
+	if (node == -1)
+		node = cvmx_get_node_num();
+
+	tag = get_fpa3_pool_resource_tag(node);
+
+	if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_pools()) !=
+	    0) {
+		printf("ERROR: %s: global resource create node=%u\n", __func__,
+		       node);
+		return CVMX_FPA3_INVALID_POOL;
+	}
+
+	if (desired_pool_num >= 0)
+		rv = cvmx_reserve_global_resource_range(tag, owner,
+							desired_pool_num, 1);
+	else
+		rv = cvmx_resource_alloc_reverse(tag, owner);
+
+	if (rv < 0) {
+		/* Desired pool is already in use */
+		return CVMX_FPA3_INVALID_POOL;
+	}
+
+	pool = __cvmx_fpa3_pool(node, rv);
+
+	return pool;
+}
+
+int cvmx_fpa3_release_pool(cvmx_fpa3_pool_t pool)
+{
+	struct global_resource_tag tag = get_fpa3_pool_resource_tag(pool.node);
+	int lpool = pool.lpool;
+
+	if (!__cvmx_fpa3_pool_valid(pool))
+		return -1;
+
+	if (cvmx_create_global_resource_range(tag, cvmx_fpa3_num_pools()) !=
+	    0) {
+		printf("ERROR: %s: global resource create node=%u\n", __func__,
+		       pool.node);
+		return -1;
+	}
+
+	return cvmx_free_global_resource_range_multiple(tag, &lpool, 1);
+}
+
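+/**
+ * Reserve a legacy FPA pool, either a specific one or the next available
+ *
+ * @param desired_pool_num is the requested pool, or -1 for any free pool
+ * @return reserved pool number, or CVMX_RESOURCE_ALREADY_RESERVED on failure
+ */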
+cvmx_fpa1_pool_t cvmx_fpa1_reserve_pool(int desired_pool_num)
+{
+	u64 owner = cvmx_get_app_id();
+	struct global_resource_tag tag;
+	int rv;
+
+	tag = get_fpa1_resource_tag();
+
+	if (cvmx_create_global_resource_range(tag, CVMX_FPA1_NUM_POOLS) != 0) {
+		printf("ERROR: %s: global resource not created\n", __func__);
+		return -1;
+	}
+
+	if (desired_pool_num >= 0) {
+		rv = cvmx_reserve_global_resource_range(tag, owner,
+							desired_pool_num, 1);
+	} else {
+		rv = cvmx_resource_alloc_reverse(tag, owner);
+	}
+
+	if (rv < 0) {
+		printf("ERROR: %s: FPA_POOL %d unavailable\n", __func__,
+		       desired_pool_num);
+		return CVMX_RESOURCE_ALREADY_RESERVED;
+	}
+	return (cvmx_fpa1_pool_t)rv;
+}
+
+int cvmx_fpa1_release_pool(cvmx_fpa1_pool_t pool)
+{
+	struct global_resource_tag tag;
+
+	tag = get_fpa1_resource_tag();
+
+	return cvmx_free_global_resource_range_multiple(tag, &pool, 1);
+}
diff --git a/arch/mips/mach-octeon/cvmx-fpa.c b/arch/mips/mach-octeon/cvmx-fpa.c
new file mode 100644
index 0000000..14fe87c
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-fpa.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Support library for the hardware Free Pool Allocator.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+static const int debug;
+
+/* Due to suspected errata, we may not be able to let the FPA_AURAX_CNT
+ * get too close to 0, to avoid a spurious wrap-around error
+ */
+const unsigned int __cvmx_fpa3_cnt_offset = 32;
+
+/* For advanced checks, a guard-band is created around the internal
+ * stack, to make sure the stack is not overwritten.
+ */
+const u64 magic_pattern = 0xbab4faced095f00d;
+const unsigned int guard_band_size = 0 << 10; /* 0 = disabled; use 1 << 10 for 1 KiB */
+
+#define CVMX_CACHE_LINE_SHIFT (7)
+
+#define CVMX_FPA3_NAME_LEN (16)
+
+typedef struct {
+	char name[CVMX_FPA3_NAME_LEN];
+	u64 stack_paddr; /* Internal stack storage */
+	u64 bufs_paddr;	 /* Buffer pool base address */
+	u64 stack_psize; /* Internal stack storage size */
+	u64 bufs_psize;	 /* Buffer pool raw size */
+	u64 buf_count;	 /* Number of buffer filled */
+	u64 buf_size;	 /* Buffer size */
+} cvmx_fpa3_poolx_info_t;
+
+typedef struct {
+	char name[CVMX_FPA3_NAME_LEN];
+	unsigned int buf_size; /* Buffer size */
+} cvmx_fpa3_aurax_info_t;
+
+typedef struct {
+	char name[CVMX_FPA1_NAME_SIZE];
+	u64 size; /* Block size of pool buffers */
+	u64 buffer_count;
+	u64 base_paddr; /* Base physical addr */
+			/* if buffer is allocated at initialization */
+} cvmx_fpa1_pool_info_t;
+
+/**
+ * FPA1/FPA3 info structure is stored in a named block
+ * that is allocated once and shared among applications.
+ */
+static cvmx_fpa1_pool_info_t *cvmx_fpa1_pool_info;
+static cvmx_fpa3_poolx_info_t *cvmx_fpa3_pool_info[CVMX_MAX_NODES];
+static cvmx_fpa3_aurax_info_t *cvmx_fpa3_aura_info[CVMX_MAX_NODES];
+
+/**
+ * Return the size of buffers held in a POOL
+ *
+ * @param pool is the POOL handle
+ * @return buffer size in bytes
+ *
+ */
+int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool)
+{
+	cvmx_fpa_poolx_cfg_t pool_cfg;
+
+	if (!__cvmx_fpa3_pool_valid(pool))
+		return -1;
+
+	pool_cfg.u64 = csr_rd_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
+	return pool_cfg.cn78xx.buf_size << CVMX_CACHE_LINE_SHIFT;
+}
+
+/**
+ * Return the size of buffers held in a buffer pool
+ *
+ * @param pool is the pool number
+ *
+ * This function will work with CN78XX models in backward-compatible mode
+ */
+unsigned int cvmx_fpa_get_block_size(int pool)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
+		return cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_aura_to_pool(
+			cvmx_fpa1_pool_to_fpa3_aura(pool)));
+	} else {
+		if ((unsigned int)pool >= CVMX_FPA1_NUM_POOLS)
+			return 0;
+		if (!cvmx_fpa1_pool_info)
+			cvmx_fpa_global_init_node(0);
+		return cvmx_fpa1_pool_info[pool].size;
+	}
+}
+
+static void cvmx_fpa3_set_aura_name(cvmx_fpa3_gaura_t aura, const char *name)
+{
+	cvmx_fpa3_aurax_info_t *pinfo;
+
+	pinfo = cvmx_fpa3_aura_info[aura.node];
+	if (!pinfo)
+		return;
+	pinfo += aura.laura;
+	memset(pinfo->name, 0, sizeof(pinfo->name));
+	if (name)
+		strlcpy(pinfo->name, name, sizeof(pinfo->name));
+}
+
+static void cvmx_fpa3_set_pool_name(cvmx_fpa3_pool_t pool, const char *name)
+{
+	cvmx_fpa3_poolx_info_t *pinfo;
+
+	pinfo = cvmx_fpa3_pool_info[pool.node];
+	if (!pinfo)
+		return;
+	pinfo += pool.lpool;
+	memset(pinfo->name, 0, sizeof(pinfo->name));
+	if (name)
+		strlcpy(pinfo->name, name, sizeof(pinfo->name));
+}
+
+static void cvmx_fpa_set_name(int pool_num, const char *name)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
+		cvmx_fpa3_set_aura_name(cvmx_fpa1_pool_to_fpa3_aura(pool_num),
+					name);
+	} else {
+		cvmx_fpa1_pool_info_t *pinfo;
+
+		if ((unsigned int)pool_num >= CVMX_FPA1_NUM_POOLS)
+			return;
+		if (!cvmx_fpa1_pool_info)
+			cvmx_fpa_global_init_node(0);
+		pinfo = &cvmx_fpa1_pool_info[pool_num];
+		memset(pinfo->name, 0, sizeof(pinfo->name));
+		if (name)
+			strlcpy(pinfo->name, name, sizeof(pinfo->name));
+	}
+}
+
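+/**
+ * @INTERNAL
+ * Attach an AURA to a POOL and program the AURA count, limit and
+ * threshold registers, applying the count offset described above
+ *
+ * @return 0 on success, -1 on failure
+ */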
+static int cvmx_fpa3_aura_cfg(cvmx_fpa3_gaura_t aura, cvmx_fpa3_pool_t pool,
+			      u64 limit, u64 threshold, int ptr_dis)
+{
+	cvmx_fpa3_aurax_info_t *pinfo;
+	cvmx_fpa_aurax_cfg_t aura_cfg;
+	cvmx_fpa_poolx_cfg_t pool_cfg;
+	cvmx_fpa_aurax_cnt_t cnt_reg;
+	cvmx_fpa_aurax_cnt_limit_t limit_reg;
+	cvmx_fpa_aurax_cnt_threshold_t thresh_reg;
+	cvmx_fpa_aurax_int_t int_reg;
+	unsigned int block_size;
+
+	if (debug)
+		debug("%s: AURA %u:%u POOL %u:%u\n", __func__, aura.node,
+		      aura.laura, pool.node, pool.lpool);
+
+	if (aura.node != pool.node) {
+		printf("ERROR: %s: AURA/POOL node mismatch\n", __func__);
+		return -1;
+	}
+
+	if (!__cvmx_fpa3_aura_valid(aura)) {
+		printf("ERROR: %s: AURA invalid\n", __func__);
+		return -1;
+	}
+
+	if (!__cvmx_fpa3_pool_valid(pool)) {
+		printf("ERROR: %s: POOL invalid\n", __func__);
+		return -1;
+	}
+
+	/* Record POOL block size in AURA info entry */
+	pool_cfg.u64 = csr_rd_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
+
+	block_size = pool_cfg.cn78xx.buf_size << 7;
+	pinfo = cvmx_fpa3_aura_info[aura.node];
+	if (!pinfo)
+		return -1;
+	pinfo += aura.laura;
+
+	pinfo->buf_size = block_size;
+
+	/* block_size should be >0 except for POOL=0 which is never enabled */
+	if (pool_cfg.cn78xx.ena && block_size == 0) {
+		printf("ERROR: %s: POOL buf_size invalid\n", __func__);
+		return -1;
+	}
+
+	/* Initialize AURA count, limit and threshold registers */
+	cnt_reg.u64 = 0;
+	cnt_reg.cn78xx.cnt = 0 + __cvmx_fpa3_cnt_offset;
+
+	limit_reg.u64 = 0;
+	limit_reg.cn78xx.limit = limit;
+	/* Apply count offset, unless it causes a wrap-around */
+	if ((limit + __cvmx_fpa3_cnt_offset) < CVMX_FPA3_AURAX_LIMIT_MAX)
+		limit_reg.cn78xx.limit += __cvmx_fpa3_cnt_offset;
+
+	thresh_reg.u64 = 0;
+	thresh_reg.cn78xx.thresh = threshold;
+	/* Apply count offset, unless it causes a wrap-around */
+	if ((threshold + __cvmx_fpa3_cnt_offset) < CVMX_FPA3_AURAX_LIMIT_MAX)
+		thresh_reg.cn78xx.thresh += __cvmx_fpa3_cnt_offset;
+
+	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura), cnt_reg.u64);
+	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura),
+		    limit_reg.u64);
+	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT_THRESHOLD(aura.laura),
+		    thresh_reg.u64);
+
+	/* Clear any pending error interrupts */
+	int_reg.u64 = 0;
+	int_reg.cn78xx.thresh = 1;
+
+	/* Follow a write to clear FPA_AURAX_INT[THRESH] with a read as
+	 * a workaround to Errata FPA-23410. If FPA_AURAX_INT[THRESH]
+	 * isn't clear, try again.
+	 */
+	do {
+		csr_wr_node(aura.node, CVMX_FPA_AURAX_INT(aura.laura),
+			    int_reg.u64);
+		int_reg.u64 =
+			csr_rd_node(aura.node, CVMX_FPA_AURAX_INT(aura.laura));
+	} while (int_reg.s.thresh);
+
+	/* Disable backpressure etc.*/
+	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura), 0);
+	csr_wr_node(aura.node, CVMX_FPA_AURAX_POOL_LEVELS(aura.laura), 0);
+
+	aura_cfg.u64 = 0;
+	aura_cfg.s.ptr_dis = ptr_dis;
+	csr_wr_node(aura.node, CVMX_FPA_AURAX_CFG(aura.laura), aura_cfg.u64);
+	csr_wr_node(aura.node, CVMX_FPA_AURAX_POOL(aura.laura), pool.lpool);
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Fill a newly created FPA3 POOL with buffers
+ * using a temporary AURA.
+ */
+static int cvmx_fpa3_pool_populate(cvmx_fpa3_pool_t pool, unsigned int buf_cnt,
+				   unsigned int buf_sz, void *mem_ptr,
+				   unsigned int mem_node)
+{
+	cvmx_fpa3_poolx_info_t *pinfo;
+	cvmx_fpa3_gaura_t aura;
+	cvmx_fpa3_pool_t zero_pool;
+	cvmx_fpa_poolx_cfg_t pool_cfg;
+	cvmx_fpa_poolx_start_addr_t pool_start_reg;
+	cvmx_fpa_poolx_end_addr_t pool_end_reg;
+	cvmx_fpa_poolx_available_t avail_reg;
+	cvmx_fpa_poolx_threshold_t thresh_reg;
+	cvmx_fpa_poolx_int_t int_reg;
+	unsigned int block_size, align;
+	unsigned long long mem_size;
+	u64 paddr;
+	unsigned int i;
+
+	if (debug)
+		debug("%s: POOL %u:%u buf_sz=%u count=%d\n", __func__,
+		      pool.node, pool.lpool, buf_sz, buf_cnt);
+
+	if (!__cvmx_fpa3_pool_valid(pool))
+		return -1;
+
+	zero_pool = __cvmx_fpa3_pool(pool.node, 0);
+
+	pool_cfg.u64 = csr_rd_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
+
+	block_size = pool_cfg.cn78xx.buf_size << 7;
+
+	if (pool_cfg.cn78xx.nat_align) {
+		/* Assure block_size is legit */
+		if (block_size > (1 << 17)) {
+			printf("ERROR: %s: POOL %u:%u block size %u is not valid\n",
+			       __func__, pool.node, pool.lpool, block_size);
+			return -1;
+		}
+	}
+	align = CVMX_CACHE_LINE_SIZE;
+
+	pinfo = cvmx_fpa3_pool_info[pool.node];
+	if (!pinfo)
+		return -1;
+	pinfo += pool.lpool;
+
+	if (pinfo->buf_size != block_size || block_size != buf_sz) {
+		printf("ERROR: %s: POOL %u:%u buffer size mismatch\n", __func__,
+		       pool.node, pool.lpool);
+		return -1;
+	}
+
+	if (!mem_ptr) {
+		/* When allocating our own memory
+		 * make sure at least 'buf_cnt' blocks
+		 * will fit into it.
+		 */
+		mem_size = (long long)buf_cnt * block_size + (block_size - 128);
+
+		mem_ptr = cvmx_helper_mem_alloc(mem_node, mem_size, align);
+
+		if (!mem_ptr) {
+			printf("ERROR: %s: POOL %u:%u out of memory, could not allocate %llu bytes\n",
+			       __func__, pool.node, pool.lpool, mem_size);
+			return -1;
+		}
+
+		/* Record memory base for use in shutdown */
+		pinfo->bufs_paddr = cvmx_ptr_to_phys(mem_ptr);
+	} else {
+		/* caller-allocated memory is sized simply, may reduce count */
+		mem_size = (long long)buf_cnt * block_size;
+		/* caller is responsible for freeing this memory too */
+	}
+
+	/* Recalculate buf_cnt after possible alignment adjustment */
+	buf_cnt = mem_size / block_size;
+
+	/* Get temporary AURA */
+	aura = cvmx_fpa3_reserve_aura(pool.node, -1);
+	if (!__cvmx_fpa3_aura_valid(aura))
+		return -1;
+
+	/* Attach the temporary AURA to the POOL */
+	(void)cvmx_fpa3_aura_cfg(aura, pool, buf_cnt, buf_cnt + 1, 0);
+
+	/* Set AURA count to buffer count to avoid wrap-around */
+	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura), buf_cnt);
+
+	/* Set POOL threshold just above buf count so it does not misfire */
+	thresh_reg.u64 = 0;
+	thresh_reg.cn78xx.thresh = buf_cnt + 1;
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_THRESHOLD(pool.lpool),
+		    thresh_reg.u64);
+
+	/* Set buffer memory region bounds checking */
+	paddr = (cvmx_ptr_to_phys(mem_ptr) >> 7) << 7;
+	pool_start_reg.u64 = 0;
+	pool_end_reg.u64 = 0;
+	pool_start_reg.cn78xx.addr = paddr >> 7;
+	pool_end_reg.cn78xx.addr = (paddr + mem_size + 127) >> 7;
+
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_START_ADDR(pool.lpool),
+		    pool_start_reg.u64);
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_END_ADDR(pool.lpool),
+		    pool_end_reg.u64);
+
+	/* Make sure 'paddr' is divisible by 'block_size' */
+	i = (paddr % block_size);
+	if (i > 0) {
+		i = block_size - i;
+		paddr += i;
+		mem_size -= i;
+	}
+
+	/* The above alignment mimics how the FPA3 hardware
+	 * aligns pointers to the buffer size, which only
+	 * needs to be a multiple of the cache line size
+	 */
+
+	if (debug && paddr != cvmx_ptr_to_phys(mem_ptr))
+		debug("%s: pool mem paddr %#llx adjusted to %#llx for block size %#x\n",
+		      __func__, CAST_ULL(cvmx_ptr_to_phys(mem_ptr)),
+		      CAST_ULL(paddr), block_size);
+
+	for (i = 0; i < buf_cnt; i++) {
+		void *ptr = cvmx_phys_to_ptr(paddr);
+
+		cvmx_fpa3_free_nosync(ptr, aura, 0);
+
+		paddr += block_size;
+
+		if ((paddr + block_size - 1) >= (paddr + mem_size))
+			break;
+	}
+
+	if (i < buf_cnt) {
+		debug("%s: buffer count reduced from %u to %u\n", __func__,
+		      buf_cnt, i);
+		buf_cnt = i;
+	}
+
+	/* Wait for all buffers to reach the POOL before removing temp AURA */
+	do {
+		CVMX_SYNC;
+		avail_reg.u64 = csr_rd_node(
+			pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));
+	} while (avail_reg.cn78xx.count < buf_cnt);
+
+	/* Detach the temporary AURA */
+	(void)cvmx_fpa3_aura_cfg(aura, zero_pool, 0, 0, 0);
+
+	/* Release temporary AURA */
+	(void)cvmx_fpa3_release_aura(aura);
+
+	/* Clear all POOL interrupts */
+	int_reg.u64 = 0;
+	int_reg.cn78xx.ovfls = 1;
+	int_reg.cn78xx.crcerr = 1;
+	int_reg.cn78xx.range = 1;
+	int_reg.cn78xx.thresh = 1;
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_INT(pool.lpool), int_reg.u64);
+
+	/* Record buffer count for shutdown */
+	pinfo->buf_count = buf_cnt;
+
+	return buf_cnt;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Fill a legacy FPA pool with buffers
+ */
+static int cvmx_fpa1_fill_pool(cvmx_fpa1_pool_t pool, int num_blocks,
+			       void *buffer)
+{
+	cvmx_fpa_poolx_start_addr_t pool_start_reg;
+	cvmx_fpa_poolx_end_addr_t pool_end_reg;
+	unsigned int block_size = cvmx_fpa_get_block_size(pool);
+	unsigned int mem_size;
+	char *bufp;
+
+	if ((unsigned int)pool >= CVMX_FPA1_NUM_POOLS)
+		return -1;
+
+	mem_size = block_size * num_blocks;
+
+	if (!buffer) {
+		buffer = cvmx_helper_mem_alloc(0, mem_size,
+					       CVMX_CACHE_LINE_SIZE);
+
+		cvmx_fpa1_pool_info[pool].base_paddr = cvmx_ptr_to_phys(buffer);
+	} else {
+		/* Align user-supplied buffer to cache line size */
+		unsigned int off =
+			(CVMX_CACHE_LINE_SIZE - 1) & cvmx_ptr_to_phys(buffer);
+		if (off > 0) {
+			buffer = (char *)buffer + CVMX_CACHE_LINE_SIZE - off;
+			mem_size -= CVMX_CACHE_LINE_SIZE - off;
+			num_blocks = mem_size / block_size;
+		}
+	}
+
+	if (debug)
+		debug("%s: memory at %p size %#x\n", __func__, buffer,
+		      mem_size);
+
+	pool_start_reg.u64 = 0;
+	pool_end_reg.u64 = 0;
+
+	/* buffer pointer range checks are highly recommended, but optional */
+	pool_start_reg.cn61xx.addr = 1; /* catch NULL pointers */
+	pool_end_reg.cn61xx.addr = (1ull << (40 - 7)) - 1; /* max paddr */
+	if (!OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+		csr_wr(CVMX_FPA_POOLX_START_ADDR(pool), pool_start_reg.u64);
+		csr_wr(CVMX_FPA_POOLX_END_ADDR(pool), pool_end_reg.u64);
+	}
+
+	bufp = (char *)buffer;
+	while (num_blocks--) {
+		cvmx_fpa1_free(bufp, pool, 0);
+		cvmx_fpa1_pool_info[pool].buffer_count++;
+		bufp += block_size;
+	}
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Setup a legacy FPA pool
+ */
+static int cvmx_fpa1_pool_init(cvmx_fpa1_pool_t pool_id, int num_blocks,
+			       int block_size, void *buffer)
+{
+	int max_pool = cvmx_fpa_get_max_pools();
+
+	if (pool_id < 0 || pool_id >= max_pool) {
+		printf("ERROR: %s pool %d invalid\n", __func__, pool_id);
+		return -1;
+	}
+
+	if (!cvmx_fpa1_pool_info)
+		cvmx_fpa_global_init_node(0);
+
+	if (debug)
+		debug("%s: initializing info pool %d\n", __func__, pool_id);
+
+	cvmx_fpa1_pool_info[pool_id].size = block_size;
+	cvmx_fpa1_pool_info[pool_id].buffer_count = 0;
+
+	if (debug)
+		debug("%s: enabling unit for pool %d\n", __func__, pool_id);
+
+	return 0;
+}
+
+/**
+ * Initialize global configuration for FPA block for specified node.
+ *
+ * @param node is the node number
+ *
+ * @note this function sets the initial QoS averaging timing parameters,
+ * for the entire FPA unit (per node), which may be overridden on a
+ * per AURA basis.
+ */
+int cvmx_fpa_global_init_node(int node)
+{
+	/* These are just the initial parameter values */
+#define FPA_RED_AVG_DLY 1
+#define FPA_RED_LVL_DLY 3
+#define FPA_QOS_AVRG	0
+	/* Setting up avg_dly and prb_dly, enable bits */
+	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
+		char pool_info_name[32] = "cvmx_fpa3_pools_";
+		char aura_info_name[32] = "cvmx_fpa3_auras_";
+		char ns[2] = "0";
+
+		ns[0] += node;
+		strcat(pool_info_name, ns);
+		strcat(aura_info_name, ns);
+
+		cvmx_fpa3_config_red_params(node, FPA_QOS_AVRG, FPA_RED_LVL_DLY,
+					    FPA_RED_AVG_DLY);
+
+		/* Allocate the pinfo named block */
+		cvmx_fpa3_pool_info[node] = (cvmx_fpa3_poolx_info_t *)
+			cvmx_bootmem_alloc_named_range_once(
+				sizeof(cvmx_fpa3_pool_info[0][0]) *
+					cvmx_fpa3_num_pools(),
+				0, 0, 0, pool_info_name, NULL);
+
+		cvmx_fpa3_aura_info[node] = (cvmx_fpa3_aurax_info_t *)
+			cvmx_bootmem_alloc_named_range_once(
+				sizeof(cvmx_fpa3_aura_info[0][0]) *
+					cvmx_fpa3_num_auras(),
+				0, 0, 0, aura_info_name, NULL);
+
+		//XXX add allocation error check
+
+		/* Setup zero_pool on this node */
+		cvmx_fpa3_reserve_pool(node, 0);
+		cvmx_fpa3_pool_info[node][0].buf_count = 0;
+	} else {
+		char pool_info_name[32] = "cvmx_fpa_pool";
+
+		/* Allocate the pinfo named block */
+		cvmx_fpa1_pool_info = (cvmx_fpa1_pool_info_t *)
+			cvmx_bootmem_alloc_named_range_once(
+				sizeof(cvmx_fpa1_pool_info[0]) *
+					CVMX_FPA1_NUM_POOLS,
+				0, 0, 0, pool_info_name, NULL);
+
+		cvmx_fpa1_enable();
+	}
+
+	return 0;
+}
+
+static void __memset_u64(u64 *ptr, u64 pattern, unsigned int words)
+{
+	while (words--)
+		*ptr++ = pattern;
+}
+
+/**
+ * @INTERNAL
+ * Initialize pool pointer-storage memory
+ *
+ * Unlike legacy FPA, which used free buffers to store pointers that
+ * exceed on-chip memory, FPA3 requires a dedicated memory buffer for
+ * free pointer stack back-store.
+ *
+ * @param pool - pool to initialize
+ * @param mem_node - if memory should be allocated from a different node
+ * @param max_buffer_cnt - maximum block capacity of pool
+ * @param align - buffer alignment mode;
+ *   currently only FPA_NATURAL_ALIGNMENT is supported
+ * @param buffer_sz - size of buffers in pool
+ */
+static int cvmx_fpa3_pool_stack_init(cvmx_fpa3_pool_t pool,
+				     unsigned int mem_node,
+				     unsigned int max_buffer_cnt,
+				     enum cvmx_fpa3_pool_alignment_e align,
+				     unsigned int buffer_sz)
+{
+	cvmx_fpa3_poolx_info_t *pinfo;
+	u64 stack_paddr;
+	void *mem_ptr;
+	unsigned int stack_memory_size;
+	cvmx_fpa_poolx_cfg_t pool_cfg;
+	cvmx_fpa_poolx_fpf_marks_t pool_fpf_marks;
+
+	if (debug)
+		debug("%s: POOL %u:%u bufsz=%u maxbuf=%u\n", __func__,
+		      pool.node, pool.lpool, buffer_sz, max_buffer_cnt);
+
+	if (!__cvmx_fpa3_pool_valid(pool)) {
+		printf("ERROR: %s: POOL invalid\n", __func__);
+		return -1;
+	}
+
+	pinfo = cvmx_fpa3_pool_info[pool.node];
+	if (!pinfo) {
+		printf("ERROR: %s: FPA on node#%u is not initialized\n",
+		       __func__, pool.node);
+		return -1;
+	}
+	pinfo += pool.lpool;
+
+	/* Calculate stack size based on buffer count with one line to spare */
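+	/*
+	 * Each 128-byte cache line of the stack holds up to 29 buffer
+	 * pointers, hence the divide by 29; the +128 adds the spare
+	 * line and the +127 makes the truncation below a round-up.
+	 */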
+	stack_memory_size = (max_buffer_cnt * 128) / 29 + 128 + 127;
+
+	/* Increase stack size by the guard bands (one at each end) */
+	stack_memory_size += guard_band_size << 1;
+
+	/* Align size to cache line */
+	stack_memory_size = (stack_memory_size >> 7) << 7;
+
+	/* Allocate internal stack */
+	mem_ptr = cvmx_helper_mem_alloc(mem_node, stack_memory_size,
+					CVMX_CACHE_LINE_SIZE);
+
+	if (debug)
+		debug("%s: stack_mem=%u ptr=%p\n", __func__, stack_memory_size,
+		      mem_ptr);
+
+	if (!mem_ptr) {
+		debug("ERROR: %sFailed to allocate stack for POOL %u:%u\n",
+		      __func__, pool.node, pool.lpool);
+		return -1;
+	}
+
+	/* Initialize guard bands */
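+	/*
+	 * Both bands are filled with the magic pattern so that an
+	 * overrun of the pointer stack in either direction can be
+	 * detected later.
+	 */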
+	if (guard_band_size > 0) {
+		__memset_u64((u64 *)mem_ptr, magic_pattern,
+			     guard_band_size >> 3);
+		__memset_u64((u64 *)((char *)mem_ptr + stack_memory_size -
+				     guard_band_size),
+			     magic_pattern, guard_band_size >> 3);
+	}
+
+	pinfo->stack_paddr = cvmx_ptr_to_phys(mem_ptr);
+	pinfo->stack_psize = stack_memory_size;
+
+	/* Calculate usable stack start */
+	stack_paddr = cvmx_ptr_to_phys((char *)mem_ptr + guard_band_size);
+
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_STACK_BASE(pool.lpool),
+		    stack_paddr);
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_STACK_ADDR(pool.lpool),
+		    stack_paddr);
+
+	/* Calculate usable stack end - start of last cache line */
+	stack_paddr = stack_paddr + stack_memory_size - (guard_band_size << 1);
+
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_STACK_END(pool.lpool),
+		    stack_paddr);
+
+	if (debug)
+		debug("%s: Stack paddr %#llx - %#llx\n", __func__,
+		      CAST_ULL(csr_rd_node(pool.node, CVMX_FPA_POOLX_STACK_BASE(
+							      pool.lpool))),
+		      CAST_ULL(csr_rd_node(pool.node, CVMX_FPA_POOLX_STACK_END(
+							      pool.lpool))));
+
+	/* Setup buffer size for this pool until it is shutdown */
+	pinfo->buf_size = buffer_sz;
+
+	pool_cfg.u64 = 0;
+	pool_cfg.cn78xx.buf_size = buffer_sz >> 7;
+	pool_cfg.cn78xx.l_type = 0x2;
+	pool_cfg.cn78xx.ena = 0;
+	if (align == FPA_NATURAL_ALIGNMENT)
+		pool_cfg.cn78xx.nat_align = 1;
+
+	/* FPA-26117, FPA-22443 */
+	pool_fpf_marks.u64 =
+		csr_rd_node(pool.node, CVMX_FPA_POOLX_FPF_MARKS(pool.lpool));
+	pool_fpf_marks.s.fpf_rd = 0x80;
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_FPF_MARKS(pool.lpool),
+		    pool_fpf_marks.u64);
+
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool), pool_cfg.u64);
+	pool_cfg.cn78xx.ena = 1;
+	csr_wr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool), pool_cfg.u64);
+
+	/* Pool is now ready to be filled up */
+	return 0;
+}
+
+/**
+ * Create an FPA POOL and fill it up with buffers
+ *
+ * @param node is the node number for the pool and memory location
+ * @param desired_pool is the local pool number desired
+ *	or -1 for first available
+ * @param name is the symbolic name to assign the POOL
+ * @param block_size is the size of all buffers held in this POOL
+ * @param num_blocks is the number of free buffers to fill into the POOL
+ * @param buffer is an optionally caller-supplied memory for the buffers
+ *	or NULL to cause the buffer memory to be allocated automatically.
+ * @return the POOL handle
+ *
+ * Note: if the buffer memory is supplied by the caller, the application
+ * is responsible for freeing this memory.
+ *
+ * Only supported on CN78XX.
+ */
+cvmx_fpa3_pool_t cvmx_fpa3_setup_fill_pool(int node, int desired_pool,
+					   const char *name,
+					   unsigned int block_size,
+					   unsigned int num_blocks,
+					   void *buffer)
+{
+	cvmx_fpa3_pool_t pool;
+	unsigned int mem_node;
+	int rc;
+
+	if (node < 0)
+		node = cvmx_get_node_num();
+
+	if (debug)
+		debug("%s: desired pool=%d bufsize=%u cnt=%u '%s'\n", __func__,
+		      desired_pool, block_size, num_blocks, name);
+
+	/* Use memory from the node local to the AURA/POOL */
+	mem_node = node;
+
+	if (num_blocks == 0 || num_blocks > 1 << 30) {
+		printf("ERROR: %s: invalid block count %u\n", __func__,
+		       num_blocks);
+		return CVMX_FPA3_INVALID_POOL;
+	}
+
+	/*
+	 * Check the block size for validity:
+	 * with a user-supplied buffer the block size cannot be increased,
+	 * so make sure it is at least 128 and aligned to 128.
+	 * In all cases make sure it is not too big.
+	 */
+	if ((buffer && (block_size < CVMX_CACHE_LINE_SIZE ||
+			(block_size & (CVMX_CACHE_LINE_SIZE - 1)))) ||
+	    (block_size > (1 << 17))) {
+		printf("ERROR: %s: invalid block size %u\n", __func__,
+		       block_size);
+		return CVMX_FPA3_INVALID_POOL;
+	}
+
+	if (block_size < CVMX_CACHE_LINE_SIZE)
+		block_size = CVMX_CACHE_LINE_SIZE;
+
+	/* Reserve POOL */
+	pool = cvmx_fpa3_reserve_pool(node, desired_pool);
+
+	if (!__cvmx_fpa3_pool_valid(pool)) {
+		printf("ERROR: %s: POOL %u:%d not available\n", __func__, node,
+		       desired_pool);
+		return CVMX_FPA3_INVALID_POOL;
+	}
+
+	/* Initialize POOL with stack storage */
+	rc = cvmx_fpa3_pool_stack_init(pool, mem_node, num_blocks,
+				       FPA_NATURAL_ALIGNMENT, block_size);
+	if (rc < 0) {
+		printf("ERROR: %s: POOL %u:%u stack setup failed\n", __func__,
+		       pool.node, pool.lpool);
+		cvmx_fpa3_release_pool(pool);
+		return CVMX_FPA3_INVALID_POOL;
+	}
+
+	/* Populate the POOL with buffers */
+	rc = cvmx_fpa3_pool_populate(pool, num_blocks, block_size, buffer,
+				     mem_node);
+	if (rc < 0) {
+		printf("ERROR: %s: POOL %u:%u memory fill failed\n", __func__,
+		       pool.node, pool.lpool);
+		cvmx_fpa3_release_pool(pool);
+		return CVMX_FPA3_INVALID_POOL;
+	}
+
+	cvmx_fpa3_set_pool_name(pool, name);
+
+	return pool;
+}
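+
+/*
+ * Example usage (illustrative only; the name and sizes are hypothetical):
+ *
+ *	cvmx_fpa3_pool_t pool;
+ *
+ *	pool = cvmx_fpa3_setup_fill_pool(0, -1, "pkt-pool", 2048, 1024, NULL);
+ *	if (!__cvmx_fpa3_pool_valid(pool))
+ *		printf("pool setup failed\n");
+ */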
+
+/**
+ * Attach an AURA to an existing POOL
+ *
+ * @param pool is the handle of the POOL to be attached
+ * @param desired_aura is the number of the AURA desired
+ *	or -1 for the AURA to be automatically assigned
+ * @param name is a symbolic name for the new AURA
+ * @param block_size is the size of all buffers that will be handed
+ *	out by this AURA
+ * @param num_blocks is the maximum number of buffers that can be
+ *	handed out by this AURA, and can not exceed the number
+ *	of buffers filled into the attached POOL
+ * @return the AURA handle
+ *
+ * Only supported on CN78XX.
+ */
+cvmx_fpa3_gaura_t cvmx_fpa3_set_aura_for_pool(cvmx_fpa3_pool_t pool,
+					      int desired_aura,
+					      const char *name,
+					      unsigned int block_size,
+					      unsigned int num_blocks)
+{
+	cvmx_fpa3_gaura_t aura;
+	cvmx_fpa_poolx_available_t avail_reg;
+	const char *emsg;
+	int rc;
+
+	if (debug)
+		debug("%s: aura=%d bufsize=%u cnt=%u '%s'\n", __func__,
+		      desired_aura, block_size, num_blocks, name);
+
+	if (!__cvmx_fpa3_pool_valid(pool)) {
+		printf("ERROR: %s: POOL argument invalid\n", __func__);
+		return CVMX_FPA3_INVALID_GAURA;
+	}
+
+	/* Verify the AURA buffer count limit is not above POOL buffer count */
+	avail_reg.u64 =
+		csr_rd_node(pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));
+	if (avail_reg.cn78xx.count < num_blocks) {
+		printf("WARNING: %s: AURA %u:%u buffer count limit %u reduced to POOL available count %u\n",
+		       __func__, aura.node, aura.laura, num_blocks,
+		       (unsigned int)avail_reg.cn78xx.count);
+		num_blocks = avail_reg.cn78xx.count;
+	}
+
+	/* Reserve an AURA number, follow desired number */
+	aura = cvmx_fpa3_reserve_aura(pool.node, desired_aura);
+
+	if (!__cvmx_fpa3_aura_valid(aura)) {
+		printf("ERROR: %s: AURA %u:%d not available\n", __func__,
+		       pool.node, desired_aura);
+		return CVMX_FPA3_INVALID_GAURA;
+	}
+
+	/* Initialize AURA attached to the above POOL */
+	rc = cvmx_fpa3_aura_cfg(aura, pool, num_blocks, num_blocks + 1, 0);
+	if (rc < 0) {
+		emsg = "AURA configuration";
+		goto _fail;
+	}
+
+	cvmx_fpa3_set_aura_name(aura, name);
+
+	return aura;
+
+_fail:
+	printf("ERROR: %s: %s\n", __func__, emsg);
+	cvmx_fpa3_release_aura(aura);
+	return CVMX_FPA3_INVALID_GAURA;
+}
+
+/**
+ * Create a combination of an AURA and a POOL
+ *
+ * @param node is the node number for the pool and memory location
+ * @param desired_aura is the number of the AURA desired
+ *	or -1 for the AURA to be automatically assigned
+ * @param name is a symbolic name for the new AURA
+ * @param block_size is the size of all buffers that will be handed
+ *	out by this AURA
+ * @param num_blocks is the maximum number of buffers that can be
+ *	handed out by this AURA, and can not exceed the number
+ *	of buffers filled into the attached POOL
+ * @param buffer is an optionally caller-supplied memory for the buffers
+ *	or NULL to cause the buffer memory to be allocated automatically.
+ *
+ * @return the AURA handle
+ *
+ * Note: if the buffer memory is supplied by the caller, the application
+ * is responsible for freeing this memory.
+ * The POOL number is always automatically assigned.
+ *
+ * Only supported on CN78XX.
+ */
+cvmx_fpa3_gaura_t cvmx_fpa3_setup_aura_and_pool(int node, int desired_aura,
+						const char *name, void *buffer,
+						unsigned int block_size,
+						unsigned int num_blocks)
+{
+	cvmx_fpa3_gaura_t aura = CVMX_FPA3_INVALID_GAURA;
+	cvmx_fpa3_pool_t pool = CVMX_FPA3_INVALID_POOL;
+	const char *emsg = "";
+	unsigned int mem_node;
+	int rc;
+
+	if (debug)
+		debug("%s: aura=%d size=%u cnt=%u '%s'\n", __func__,
+		      desired_aura, block_size, num_blocks, name);
+
+	if (node < 0)
+		node = cvmx_get_node_num();
+
+	if (num_blocks == 0 || num_blocks > 1 << 30) {
+		printf("ERROR: %s: invalid block count %u\n", __func__,
+		       num_blocks);
+		return CVMX_FPA3_INVALID_GAURA;
+	}
+
+	/* Use memory from the node local to the AURA/POOL */
+	mem_node = node;
+
+	/* Reserve an AURA number, follow desired number */
+	aura = cvmx_fpa3_reserve_aura(node, desired_aura);
+
+	if (!__cvmx_fpa3_aura_valid(aura)) {
+		emsg = "AURA not available";
+		goto _fail;
+	}
+
+	/* Reserve POOL dynamically to underpin this AURA */
+	pool = cvmx_fpa3_reserve_pool(node, -1);
+
+	if (!__cvmx_fpa3_pool_valid(pool)) {
+		emsg = "POOL not available";
+		goto _fail;
+	}
+
+	/*
+	 * Check the block size for validity:
+	 * with a user-supplied buffer the block size cannot be increased,
+	 * so make sure it is at least 128 and aligned to 128.
+	 * In all cases make sure it is not too big.
+	 */
+	if ((buffer && (block_size < CVMX_CACHE_LINE_SIZE ||
+			(block_size & (CVMX_CACHE_LINE_SIZE - 1)))) ||
+	    block_size > (1 << 17)) {
+		printf("ERROR: %s: invalid block size %u\n", __func__,
+		       block_size);
+		emsg = "invalid block size";
+		goto _fail;
+	}
+
+	if (block_size < CVMX_CACHE_LINE_SIZE)
+		block_size = CVMX_CACHE_LINE_SIZE;
+
+	/* Initialize POOL with stack storage */
+	rc = cvmx_fpa3_pool_stack_init(pool, mem_node, num_blocks,
+				       FPA_NATURAL_ALIGNMENT, block_size);
+	if (rc < 0) {
+		emsg = "POOL Stack setup";
+		goto _fail;
+	}
+
+	/* Populate the AURA/POOL with buffers */
+	rc = cvmx_fpa3_pool_populate(pool, num_blocks, block_size, buffer,
+				     mem_node);
+	if (rc < 0) {
+		emsg = "POOL buffer memory";
+		goto _fail;
+	}
+
+	/* Initialize AURA attached to the above POOL */
+	rc = cvmx_fpa3_aura_cfg(aura, pool, num_blocks, num_blocks + 1, 0);
+	if (rc < 0) {
+		emsg = "AURA configuration";
+		goto _fail;
+	}
+
+	cvmx_fpa3_set_aura_name(aura, name);
+	cvmx_fpa3_set_pool_name(pool, name);
+
+	if (debug)
+		debug("%s: AURA %u:%u ready, avail=%lld\n", __func__, aura.node,
+		      aura.laura, cvmx_fpa3_get_available(aura));
+
+	return aura;
+
+_fail:
+	printf("ERROR: %s: Failed in %s\n", __func__, emsg);
+	/* These will silently fail if POOL/AURA is not valid */
+	cvmx_fpa3_release_aura(aura);
+	cvmx_fpa3_release_pool(pool);
+	return CVMX_FPA3_INVALID_GAURA;
+}
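+
+/*
+ * Example usage (illustrative only; the name and sizes are hypothetical):
+ *
+ *	cvmx_fpa3_gaura_t aura;
+ *
+ *	aura = cvmx_fpa3_setup_aura_and_pool(-1, -1, "wqe-aura", NULL,
+ *					     1024, 512);
+ *	if (!__cvmx_fpa3_aura_valid(aura))
+ *		printf("aura setup failed\n");
+ */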
+
+/**
+ * Setup a legacy FPA pool
+ *
+ * @param desired_pool is the POOL number desired or -1 for automatic
+ *	assignment
+ * @param name is the symbolic POOL name
+ * @param block_size is the size of all buffers held in this POOL
+ * @param num_blocks is the number of free buffers to fill into the POOL
+ * @param buffer is an optionally caller-supplied memory for the buffers
+ *	or NULL to cause the buffer memory to be allocated automatically.
+ * @return pool number or -1 on error.
+ *
+ * Note: if the buffer memory is supplied by the caller, the application
+ * is responsible for freeing this memory.
+ */
+int cvmx_fpa1_setup_pool(int desired_pool, const char *name, void *buffer,
+			 unsigned int block_size, unsigned int num_blocks)
+{
+	cvmx_fpa1_pool_t pool = CVMX_FPA1_INVALID_POOL;
+	int rc;
+
+	if (debug)
+		debug("%s: desired pool %d, name '%s', mem %p size %u count %u\n",
+		      __func__, desired_pool, name, buffer, block_size,
+		      num_blocks);
+
+	/* Reserve desired pool or get one dynamically */
+	pool = cvmx_fpa1_reserve_pool(desired_pool);
+
+	/* Validate reserved pool, if successful */
+	if (pool < 0 || pool >= cvmx_fpa_get_max_pools()) {
+		/* global resources layer already printed an error message */
+		return CVMX_FPA1_INVALID_POOL;
+	}
+
+	/* Initialize the pool */
+	rc = cvmx_fpa1_pool_init(pool, num_blocks, block_size, buffer);
+	if (rc < 0) {
+		printf("ERROR: %s: failed pool %u init\n", __func__, pool);
+		cvmx_fpa1_release_pool(pool);
+		return CVMX_FPA1_INVALID_POOL;
+	}
+
+	rc = cvmx_fpa1_fill_pool(pool, num_blocks, buffer);
+	if (rc < 0) {
+		printf("ERROR: %s: failed pool %u memory\n", __func__, pool);
+		cvmx_fpa1_release_pool(pool);
+		return CVMX_FPA1_INVALID_POOL;
+	}
+
+	if (debug)
+		debug("%s: pool %d filled up\b", __func__, pool);
+
+	cvmx_fpa_set_name(pool, name);
+	return pool;
+}
+
+/**
+ * Setup an FPA pool with buffers
+ *
+ * @param pool is the POOL number desired or -1 for automatic assignment
+ * @param name is the symbolic POOL name
+ * @param buffer is an optionally caller-supplied memory for the buffers
+ *	or NULL to cause the buffer memory to be allocated automatically.
+ * @param block_size is the size of all buffers held in this POOL
+ * @param num_blocks is the number of free buffers to fill into the POOL
+ *
+ * @return pool number or -1 on error.
+ *
+ * Note: if the buffer memory is supplied by the caller, the application
+ * is responsible for freeing this memory.
+ * This function will work with CN78XX models in backward-compatible mode
+ */
+int cvmx_fpa_setup_pool(int pool, const char *name, void *buffer,
+			u64 block_size, u64 num_blocks)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
+		cvmx_fpa3_gaura_t aura;
+
+		aura = cvmx_fpa3_setup_aura_and_pool(-1, pool, name, buffer,
+						     block_size, num_blocks);
+		if (!__cvmx_fpa3_aura_valid(aura))
+			return -1;
+		if (aura.laura >= CVMX_FPA1_NUM_POOLS && pool >= 0)
+			printf("WARNING: %s: AURA %u is out of range for backward-compatible operation\n",
+			       __func__, aura.laura);
+		return aura.laura;
+	} else {
+		return cvmx_fpa1_setup_pool(pool, name, buffer, block_size,
+					    num_blocks);
+	}
+}
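+
+/*
+ * Example usage (illustrative; name and sizes are hypothetical): on FPA3
+ * hardware this transparently creates an AURA/POOL pair, on older chips
+ * a legacy pool:
+ *
+ *	int pool = cvmx_fpa_setup_pool(-1, "packet-buffers", NULL, 2048, 512);
+ *
+ *	if (pool < 0)
+ *		printf("FPA pool setup failed\n");
+ */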
diff --git a/arch/mips/mach-octeon/cvmx-global-resources.c b/arch/mips/mach-octeon/cvmx-global-resources.c
new file mode 100644
index 0000000..a3da17c
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-global-resources.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+
+#include <mach/cvmx-global-resources.h>
+#include <mach/cvmx-bootmem.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#define CVMX_MAX_GLOBAL_RESOURCES 128
+#define CVMX_RESOURCES_ENTRIES_SIZE                                            \
+	(sizeof(struct cvmx_global_resource_entry) * CVMX_MAX_GLOBAL_RESOURCES)
+
+/**
+ * This macro returns a member of the struct cvmx_global_resources
+ * structure. The argument "field" is the member name of the
+ * structure to read. The return type is a u64.
+ */
+#define CVMX_GLOBAL_RESOURCES_GET_FIELD(field)                                 \
+	__cvmx_struct_get_unsigned_field(                                      \
+		__cvmx_global_resources_addr,                                  \
+		offsetof(struct cvmx_global_resources, field),                 \
+		SIZEOF_FIELD(struct cvmx_global_resources, field))
+
+/**
+ * This macro writes a member of the struct cvmx_global_resources
+ * structure. The argument "field" is the member name of the
+ * struct cvmx_global_resources to write.
+ */
+#define CVMX_GLOBAL_RESOURCES_SET_FIELD(field, value)                          \
+	__cvmx_struct_set_unsigned_field(                                      \
+		__cvmx_global_resources_addr,                                  \
+		offsetof(struct cvmx_global_resources, field),                 \
+		SIZEOF_FIELD(struct cvmx_global_resources, field), value)
+
+/**
+ * This macro returns a member of the struct cvmx_global_resource_entry.
+ * The argument "field" is the member name of this structure.
+ * the return type is a u64. The "addr" parameter is the physical
+ * address of the structure.
+ */
+#define CVMX_RESOURCE_ENTRY_GET_FIELD(addr, field)                             \
+	__cvmx_struct_get_unsigned_field(                                      \
+		addr, offsetof(struct cvmx_global_resource_entry, field),      \
+		SIZEOF_FIELD(struct cvmx_global_resource_entry, field))
+
+/**
+ * This macro writes a member of the struct cvmx_global_resource_entry
+ * structure. The argument "field" is the member name of the
+ * struct cvmx_global_resource_entry to write. The "addr" parameter
+ * is the physical address of the structure.
+ */
+#define CVMX_RESOURCE_ENTRY_SET_FIELD(addr, field, value)                      \
+	__cvmx_struct_set_unsigned_field(                                      \
+		addr, offsetof(struct cvmx_global_resource_entry, field),      \
+		SIZEOF_FIELD(struct cvmx_global_resource_entry, field), value)
+
+#define CVMX_GET_RESOURCE_ENTRY(count)                                         \
+	(__cvmx_global_resources_addr +                                        \
+	 offsetof(struct cvmx_global_resources, resource_entry) +              \
+	 (count * sizeof(struct cvmx_global_resource_entry)))
+
+#define CVMX_RESOURCE_TAG_SET_FIELD(addr, field, value)                        \
+	__cvmx_struct_set_unsigned_field(                                      \
+		addr, offsetof(struct global_resource_tag, field),             \
+		SIZEOF_FIELD(struct global_resource_tag, field), value)
+
+#define CVMX_RESOURCE_TAG_GET_FIELD(addr, field)                               \
+	__cvmx_struct_get_unsigned_field(                                      \
+		addr, offsetof(struct global_resource_tag, field),             \
+		SIZEOF_FIELD(struct global_resource_tag, field))
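+
+/*
+ * Illustrative example: a field accessor such as the following expands
+ * to a sized physical-memory read of just that structure member:
+ *
+ *	u64 cnt = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
+ */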
+
+#define MAX_RESOURCE_TAG_LEN		16
+#define CVMX_GLOBAL_RESOURCE_NO_LOCKING (1)
+
+struct cvmx_global_resource_entry {
+	struct global_resource_tag tag;
+	u64 phys_addr;
+	u64 size;
+};
+
+struct cvmx_global_resources {
+	u32 pad;
+	u32 rlock;
+	u64 entry_cnt;
+	struct cvmx_global_resource_entry resource_entry[];
+};
+
+/* Not the right place, putting it here for now */
+u64 cvmx_app_id;
+
+/*
+ * Global named memory can be accessed anywhere even in 32-bit mode
+ */
+static u64 __cvmx_global_resources_addr;
+
+/**
+ * This macro returns the size of a member of a structure.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
+ *
+ * @param base   64bit physical address of the complete structure
+ * @param offset from the beginning of the structure to the member being
+ *               accessed.
+ * @param size   Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a u64.
+ */
+static inline u64 __cvmx_struct_get_unsigned_field(u64 base, int offset,
+						   int size)
+{
+	base = (1ull << 63) | (base + offset);
+	switch (size) {
+	case 4:
+		return cvmx_read64_uint32(base);
+	case 8:
+		return cvmx_read64_uint64(base);
+	default:
+		return 0;
+	}
+}
+
+/**
+ * This function is the implementation of the set macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to write only the needed memory.
+ *
+ * @param base   64bit physical address of the complete structure
+ * @param offset from the beginning of the structure to the member being
+ *               accessed.
+ * @param size   Size of the structure member.
+ * @param value  Value to write into the structure
+ */
+static inline void __cvmx_struct_set_unsigned_field(u64 base, int offset,
+						    int size, u64 value)
+{
+	base = (1ull << 63) | (base + offset);
+	switch (size) {
+	case 4:
+		cvmx_write64_uint32(base, value);
+		break;
+	case 8:
+		cvmx_write64_uint64(base, value);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Get the global resource lock. */
+static inline void __cvmx_global_resource_lock(void)
+{
+	u64 lock_addr =
+		(1ull << 63) | (__cvmx_global_resources_addr +
+				offsetof(struct cvmx_global_resources, rlock));
+	unsigned int tmp;
+
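+	/*
+	 * Classic MIPS LL/SC spin: load-linked the lock word, spin
+	 * while it is non-zero, then attempt to store 1; if the
+	 * store-conditional fails, retry from the load-linked.
+	 */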
+	__asm__ __volatile__(".set noreorder\n"
+			     "1: ll   %[tmp], 0(%[addr])\n"
+			     "   bnez %[tmp], 1b\n"
+			     "   li   %[tmp], 1\n"
+			     "   sc   %[tmp], 0(%[addr])\n"
+			     "   beqz %[tmp], 1b\n"
+			     "   nop\n"
+			     ".set reorder\n"
+			     : [tmp] "=&r"(tmp)
+			     : [addr] "r"(lock_addr)
+			     : "memory");
+}
+
+/* Release the global resource lock. */
+static inline void __cvmx_global_resource_unlock(void)
+{
+	u64 lock_addr =
+		(1ull << 63) | (__cvmx_global_resources_addr +
+				offsetof(struct cvmx_global_resources, rlock));
+	CVMX_SYNCW;
+	__asm__ __volatile__("sw $0, 0(%[addr])\n"
+			     :
+			     : [addr] "r"(lock_addr)
+			     : "memory");
+	CVMX_SYNCW;
+}
+
+static u64 __cvmx_alloc_bootmem_for_global_resources(int sz)
+{
+	void *tmp;
+
+	tmp = cvmx_bootmem_alloc_range(sz, CVMX_CACHE_LINE_SIZE, 0, 0);
+	return cvmx_ptr_to_phys(tmp);
+}
+
+static inline void __cvmx_get_tagname(struct global_resource_tag *rtag,
+				      char *tagname)
+{
+	int i, j, k;
+
+	j = 0;
+	k = 8;
+	for (i = 7; i >= 0; i--, j++, k++) {
+		tagname[j] = (rtag->lo >> (i * 8)) & 0xff;
+		tagname[k] = (rtag->hi >> (i * 8)) & 0xff;
+	}
+	/* NUL-terminate: callers allocate MAX_RESOURCE_TAG_LEN + 1 bytes */
+	tagname[k] = '\0';
+}
+
+static u64 __cvmx_global_resources_init(void)
+{
+	struct cvmx_bootmem_named_block_desc *block_desc;
+	int sz = sizeof(struct cvmx_global_resources) +
+		 CVMX_RESOURCES_ENTRIES_SIZE;
+	s64 tmp_phys;
+	int count = 0;
+	u64 base = 0;
+
+	cvmx_bootmem_lock();
+
+	block_desc = (struct cvmx_bootmem_named_block_desc *)
+		__cvmx_bootmem_find_named_block_flags(
+			CVMX_GLOBAL_RESOURCES_DATA_NAME,
+			CVMX_BOOTMEM_FLAG_NO_LOCKING);
+	if (!block_desc) {
+		debug("%s: allocating global resources\n", __func__);
+
+		tmp_phys = cvmx_bootmem_phy_named_block_alloc(
+			sz, 0, 0, CVMX_CACHE_LINE_SIZE,
+			CVMX_GLOBAL_RESOURCES_DATA_NAME,
+			CVMX_BOOTMEM_FLAG_NO_LOCKING);
+		if (tmp_phys < 0) {
+			cvmx_printf(
+				"ERROR: %s: failed to allocate global resource name block. sz=%d\n",
+				__func__, sz);
+			goto end;
+		}
+		__cvmx_global_resources_addr = (u64)tmp_phys;
+
+		debug("%s: memset global resources %llu\n", __func__,
+		      CAST_ULL(__cvmx_global_resources_addr));
+
+		base = (1ull << 63) | __cvmx_global_resources_addr;
+		for (count = 0; count < (sz / 8); count++) {
+			cvmx_write64_uint64(base, 0);
+			base += 8;
+		}
+	} else {
+		debug("%s:found global resource\n", __func__);
+		__cvmx_global_resources_addr = block_desc->base_addr;
+	}
+end:
+	cvmx_bootmem_unlock();
+	debug("__cvmx_global_resources_addr=%llu sz=%d\n",
+	      CAST_ULL(__cvmx_global_resources_addr), sz);
+	return __cvmx_global_resources_addr;
+}
+
+u64 cvmx_get_global_resource(struct global_resource_tag tag, int no_lock)
+{
+	u64 entry_cnt = 0;
+	u64 resource_entry_addr = 0;
+	int count = 0;
+	u64 rphys_addr = 0;
+	u64 tag_lo = 0, tag_hi = 0;
+
+	if (__cvmx_global_resources_addr == 0)
+		__cvmx_global_resources_init();
+	if (!no_lock)
+		__cvmx_global_resource_lock();
+
+	entry_cnt = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
+	while (entry_cnt > 0) {
+		resource_entry_addr = CVMX_GET_RESOURCE_ENTRY(count);
+		tag_lo = CVMX_RESOURCE_TAG_GET_FIELD(resource_entry_addr, lo);
+		tag_hi = CVMX_RESOURCE_TAG_GET_FIELD(resource_entry_addr, hi);
+
+		if (tag_lo == tag.lo && tag_hi == tag.hi) {
+			debug("%s: Found global resource entry\n", __func__);
+			break;
+		}
+		entry_cnt--;
+		count++;
+	}
+
+	if (entry_cnt == 0) {
+		debug("%s: no matching global resource entry found\n",
+		      __func__);
+		if (!no_lock)
+			__cvmx_global_resource_unlock();
+		return 0;
+	}
+	rphys_addr =
+		CVMX_RESOURCE_ENTRY_GET_FIELD(resource_entry_addr, phys_addr);
+	if (!no_lock)
+		__cvmx_global_resource_unlock();
+
+	return rphys_addr;
+}
+
+u64 cvmx_create_global_resource(struct global_resource_tag tag, u64 size,
+				int no_lock, int *_new_)
+{
+	u64 entry_count = 0;
+	u64 resource_entry_addr = 0;
+	u64 phys_addr;
+
+	if (__cvmx_global_resources_addr == 0)
+		__cvmx_global_resources_init();
+
+	if (!no_lock)
+		__cvmx_global_resource_lock();
+
+	phys_addr =
+		cvmx_get_global_resource(tag, CVMX_GLOBAL_RESOURCE_NO_LOCKING);
+	if (phys_addr != 0) {
+		/* we already have the resource, return it */
+		*_new_ = 0;
+		goto end;
+	}
+
+	*_new_ = 1;
+	entry_count = CVMX_GLOBAL_RESOURCES_GET_FIELD(entry_cnt);
+	if (entry_count >= CVMX_MAX_GLOBAL_RESOURCES) {
+		char tagname[MAX_RESOURCE_TAG_LEN + 1];
+
+		__cvmx_get_tagname(&tag, tagname);
+		cvmx_printf(
+			"ERROR: %s: reached global resources limit for %s\n",
+			__func__, tagname);
+		phys_addr = 0;
+		goto end;
+	}
+
+	/* Allocate bootmem for the resource */
+	phys_addr = __cvmx_alloc_bootmem_for_global_resources(size);
+	if (!phys_addr) {
+		char tagname[MAX_RESOURCE_TAG_LEN + 1];
+
+		__cvmx_get_tagname(&tag, tagname);
+		debug("ERROR: %s: out of memory %s, size=%d\n", __func__,
+		      tagname, (int)size);
+		goto end;
+	}
+
+	resource_entry_addr = CVMX_GET_RESOURCE_ENTRY(entry_count);
+	CVMX_RESOURCE_ENTRY_SET_FIELD(resource_entry_addr, phys_addr,
+				      phys_addr);
+	CVMX_RESOURCE_ENTRY_SET_FIELD(resource_entry_addr, size, size);
+	CVMX_RESOURCE_TAG_SET_FIELD(resource_entry_addr, lo, tag.lo);
+	CVMX_RESOURCE_TAG_SET_FIELD(resource_entry_addr, hi, tag.hi);
+	/* update entry_cnt */
+	entry_count += 1;
+	CVMX_GLOBAL_RESOURCES_SET_FIELD(entry_cnt, entry_count);
+
+end:
+	if (!no_lock)
+		__cvmx_global_resource_unlock();
+
+	return phys_addr;
+}
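+
+/*
+ * Example usage (illustrative; the tag value is hypothetical):
+ *
+ *	struct global_resource_tag tag = { .hi = 0x1234, .lo = 0x5678 };
+ *	int is_new;
+ *	u64 paddr = cvmx_create_global_resource(tag, 4096, 0, &is_new);
+ *
+ *	if (!paddr)
+ *		printf("resource creation failed\n");
+ */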
+
+int cvmx_create_global_resource_range(struct global_resource_tag tag,
+				      int nelements)
+{
+	int sz = cvmx_range_memory_size(nelements);
+	int _new_;
+	u64 addr;
+	int rv = 0;
+
+	if (__cvmx_global_resources_addr == 0)
+		__cvmx_global_resources_init();
+
+	__cvmx_global_resource_lock();
+	addr = cvmx_create_global_resource(tag, sz, 1, &_new_);
+	if (!addr) {
+		__cvmx_global_resource_unlock();
+		return -1;
+	}
+	if (_new_)
+		rv = cvmx_range_init(addr, nelements);
+	__cvmx_global_resource_unlock();
+	return rv;
+}
+
+int cvmx_allocate_global_resource_range(struct global_resource_tag tag,
+					u64 owner, int nelements, int alignment)
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int base;
+
+	if (addr == 0) {
+		char tagname[256];
+
+		__cvmx_get_tagname(&tag, tagname);
+		cvmx_printf("ERROR: %s: cannot find resource %s\n", __func__,
+			    tagname);
+		return -1;
+	}
+	__cvmx_global_resource_lock();
+	base = cvmx_range_alloc(addr, owner, nelements, alignment);
+	__cvmx_global_resource_unlock();
+	return base;
+}
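+
+/*
+ * Example usage (illustrative; 'tag' and 'owner' are hypothetical):
+ * create a 64-element range once, then allocate four contiguous
+ * elements from it:
+ *
+ *	cvmx_create_global_resource_range(tag, 64);
+ *	int base = cvmx_allocate_global_resource_range(tag, owner, 4, 1);
+ */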
+
+int cvmx_resource_alloc_reverse(struct global_resource_tag tag, u64 owner)
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int rv;
+
+	if (addr == 0) {
+		char tagname[256];
+
+		__cvmx_get_tagname(&tag, tagname);
+		debug("ERROR: cannot find resource %s\n", tagname);
+		return -1;
+	}
+	__cvmx_global_resource_lock();
+	rv = cvmx_range_alloc_ordered(addr, owner, 1, 1, 1);
+	__cvmx_global_resource_unlock();
+	return rv;
+}
+
+int cvmx_reserve_global_resource_range(struct global_resource_tag tag,
+				       u64 owner, int base, int nelements)
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int start;
+
+	__cvmx_global_resource_lock();
+	start = cvmx_range_reserve(addr, owner, base, nelements);
+	__cvmx_global_resource_unlock();
+	return start;
+}
+
+int cvmx_free_global_resource_range_with_base(struct global_resource_tag tag,
+					      int base, int nelements)
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int rv;
+
+	/* Resource was not created, nothing to release */
+	if (addr == 0)
+		return 0;
+
+	__cvmx_global_resource_lock();
+	rv = cvmx_range_free_with_base(addr, base, nelements);
+	__cvmx_global_resource_unlock();
+	return rv;
+}
+
+int cvmx_free_global_resource_range_multiple(struct global_resource_tag tag,
+					     int bases[], int nelements)
+{
+	u64 addr = cvmx_get_global_resource(tag, 1);
+	int rv;
+
+	/* Resource was not created, nothing to release */
+	if (addr == 0)
+		return 0;
+
+	__cvmx_global_resource_lock();
+	rv = cvmx_range_free_mutiple(addr, bases, nelements);
+	__cvmx_global_resource_unlock();
+	return rv;
+}
+
+void cvmx_app_id_init(void *bootmem)
+{
+	u64 *p = (u64 *)bootmem;
+
+	*p = 0;
+}
+
+u64 cvmx_allocate_app_id(void)
+{
+	u64 *vptr;
+
+	vptr = (u64 *)cvmx_bootmem_alloc_named_range_once(sizeof(cvmx_app_id),
+							  0, 1 << 31, 128,
+							  "cvmx_app_id",
+							  cvmx_app_id_init);
+
+	cvmx_app_id = __atomic_add_fetch(vptr, 1, __ATOMIC_SEQ_CST);
+
+	debug("CVMX_APP_ID = %lx\n", (unsigned long)cvmx_app_id);
+	return cvmx_app_id;
+}
+
+u64 cvmx_get_app_id(void)
+{
+	if (cvmx_app_id == 0)
+		cvmx_allocate_app_id();
+	return cvmx_app_id;
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-agl.c b/arch/mips/mach-octeon/cvmx-helper-agl.c
new file mode 100644
index 0000000..7eb99ac
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-agl.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for AGL (RGMII) initialization, configuration,
+ * and monitoring.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-agl.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-pko-defs.h>
+
+int __cvmx_helper_agl_enumerate(int xiface)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		union cvmx_agl_prtx_ctl agl_prtx_ctl;
+
+		agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(0));
+		if (agl_prtx_ctl.s.mode == 0) /* RGMII */
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Convert interface to port to access CSRs.
+ *
+ * @param xiface  Interface to probe
+ * @return  The port corresponding to the interface
+ */
+int cvmx_helper_agl_get_port(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN70XX))
+		return xi.interface - 4;
+	return -1;
+}
+
+/**
+ * @INTERNAL
+ * Probe a RGMII interface and determine the number of ports
+ * connected to it. The RGMII interface should still be down
+ * after this call.
+ *
+ * @param interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_agl_probe(int interface)
+{
+	int port = cvmx_helper_agl_get_port(interface);
+	union cvmx_agl_gmx_bist gmx_bist;
+	union cvmx_agl_gmx_prtx_cfg gmx_prtx_cfg;
+	union cvmx_agl_prtx_ctl agl_prtx_ctl;
+	int result;
+
+	result = __cvmx_helper_agl_enumerate(interface);
+	if (result == 0)
+		return 0;
+
+	/* Check BIST status */
+	gmx_bist.u64 = csr_rd(CVMX_AGL_GMX_BIST);
+	if (gmx_bist.u64)
+		printf("Management port AGL failed BIST (0x%016llx) on AGL%d\n",
+		       CAST64(gmx_bist.u64), port);
+
+	/* Disable the external input/output */
+	gmx_prtx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
+	gmx_prtx_cfg.s.en = 0;
+	csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), gmx_prtx_cfg.u64);
+
+	/* Set the rgx_ref_clk MUX with AGL_PRTx_CTL[REFCLK_SEL]. The default
+	 * value is 0 (RGMII REFCLK). Using RGMII RXC (1) or sclk/4 (2) is
+	 * recommended to save cost.
+	 */
+
+	agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+	agl_prtx_ctl.s.clkrst = 0;
+	agl_prtx_ctl.s.dllrst = 0;
+	agl_prtx_ctl.s.clktx_byp = 0;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		bool tx_enable_bypass;
+		int tx_delay;
+
+		agl_prtx_ctl.s.refclk_sel =
+			cvmx_helper_get_agl_refclk_sel(interface, port);
+		agl_prtx_ctl.s.clkrx_set =
+			cvmx_helper_get_agl_rx_clock_skew(interface, port);
+		agl_prtx_ctl.s.clkrx_byp =
+			cvmx_helper_get_agl_rx_clock_delay_bypass(interface,
+								  port);
+		cvmx_helper_cfg_get_rgmii_tx_clk_delay(
+			interface, port, &tx_enable_bypass, &tx_delay);
+		agl_prtx_ctl.s.clktx_byp = tx_enable_bypass;
+		agl_prtx_ctl.s.clktx_set = tx_delay;
+	}
+	csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+	/* Force write out before wait */
+	csr_rd(CVMX_AGL_PRTX_CTL(port));
+	udelay(500);
+
+	/* Enable the compensation controller */
+	agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+	agl_prtx_ctl.s.drv_byp = 0;
+	csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+	/* Force write out before wait */
+	csr_rd(CVMX_AGL_PRTX_CTL(port));
+
+	if (!OCTEON_IS_OCTEON3()) {
+		/* Enable the interface */
+		agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+		agl_prtx_ctl.s.enable = 1;
+		csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+		/* Read the value back to force the previous write */
+		agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+	}
+
+	/* Enable the compensation controller */
+	agl_prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(port));
+	agl_prtx_ctl.s.comp = 1;
+	csr_wr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
+	/* Force write out before wait */
+	csr_rd(CVMX_AGL_PRTX_CTL(port));
+
+	/* Wait for the compensation state to lock. */
+	udelay(500);
+
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Bringup and enable a RGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_agl_enable(int interface)
+{
+	int port = cvmx_helper_agl_get_port(interface);
+	int ipd_port = cvmx_helper_get_ipd_port(interface, port);
+	union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
+	union cvmx_pko_reg_read_idx read_idx;
+	int do_link_set = 1;
+	int i;
+
+	/* Setup PKO for AGL interface. Back pressure is not supported. */
+	pko_mem_port_ptrs.u64 = 0;
+	read_idx.u64 = 0;
+	read_idx.s.inc = 1;
+	csr_wr(CVMX_PKO_REG_READ_IDX, read_idx.u64);
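+	/* With the auto-increment bit set, each read of
+	 * CVMX_PKO_MEM_PORT_PTRS below returns the next port entry.
+	 */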
+
+	for (i = 0; i < 40; i++) {
+		pko_mem_port_ptrs.u64 = csr_rd(CVMX_PKO_MEM_PORT_PTRS);
+		if (pko_mem_port_ptrs.s.pid == 24) {
+			pko_mem_port_ptrs.s.eid = 10;
+			pko_mem_port_ptrs.s.bp_port = 40;
+			csr_wr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
+			break;
+		}
+	}
+
+	cvmx_agl_enable(port);
+	if (do_link_set)
+		cvmx_agl_link_set(port, cvmx_agl_link_get(ipd_port));
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_agl_link_get(int ipd_port)
+{
+	return cvmx_agl_link_get(ipd_port);
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port  IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_agl_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int port = cvmx_helper_agl_get_port(interface);
+
+	return cvmx_agl_link_set(port, link_info);
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-bgx.c b/arch/mips/mach-octeon/cvmx-helper-bgx.c
new file mode 100644
index 0000000..7d6e178
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-bgx.c
@@ -0,0 +1,2737 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions to configure the BGX MAC.
+ */
+
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+#include <mach/cvmx-global-resources.h>
+#include <mach/cvmx-pko-internal-ports-range.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pip.h>
+
+/* Enable this define to see BGX error messages */
+/* #define DEBUG_BGX */
+
+/* Set this variable to trace the functions called while initializing BGX */
+static const int debug;
+
+/**
+ * cvmx_helper_bgx_override_autoneg(int xiface, int index) is a function pointer
+ * to override enabling/disabling of autonegotiation for SGMII, 10G-KR or 40G-KR4
+ * interfaces. This function is called when interface is initialized.
+ */
+int (*cvmx_helper_bgx_override_autoneg)(int xiface, int index) = NULL;
+
+/*
+ * cvmx_helper_bgx_override_fec(int xiface) is a function pointer
+ * to override enabling/disabling of FEC for 10G interfaces. This function
+ * is called when interface is initialized.
+ */
+int (*cvmx_helper_bgx_override_fec)(int xiface, int index) = NULL;
+
+/**
+ * Delay after enabling an interface based on the mode.  Different modes take
+ * different amounts of time.
+ */
+static void
+__cvmx_helper_bgx_interface_enable_delay(cvmx_helper_interface_mode_t mode)
+{
+	switch (mode) {
+	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XFI:
+		mdelay(250);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		mdelay(100);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+		mdelay(50);
+		break;
+	default:
+		mdelay(50);
+		break;
+	}
+}
+
+/**
+ * @INTERNAL
+ *
+ * Returns number of ports based on interface
+ * @param xiface Which xiface
+ * @return Number of ports based on xiface
+ */
+int __cvmx_helper_bgx_enumerate(int xiface)
+{
+	cvmx_bgxx_cmr_tx_lmacs_t lmacs;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	lmacs.u64 = csr_rd_node(xi.node, CVMX_BGXX_CMR_TX_LMACS(xi.interface));
+	return lmacs.s.lmacs;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Returns mode of each BGX LMAC (port).
+ * This is different than 'cvmx_helper_interface_get_mode()' which
+ * provides mode of an entire interface, but when BGX is in "mixed"
+ * mode this function should be called instead to get the protocol
+ * for each port (BGX LMAC) individually.
+ * Both function return the same enumerated mode.
+ *
+ * @param xiface is the global interface identifier
+ * @param index is the interface port index
+ * @returns mode of the individual port
+ */
+cvmx_helper_interface_mode_t cvmx_helper_bgx_get_mode(int xiface, int index)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
+
+	cmr_config.u64 = csr_rd_node(
+		xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+
+	switch (cmr_config.s.lmac_type) {
+	case 0:
+		return CVMX_HELPER_INTERFACE_MODE_SGMII;
+	case 1:
+		return CVMX_HELPER_INTERFACE_MODE_XAUI;
+	case 2:
+		return CVMX_HELPER_INTERFACE_MODE_RXAUI;
+	case 3:
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+			return cvmx_helper_interface_get_mode(xiface);
+		pmd_control.u64 = csr_rd_node(
+			xi.node,
+			CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
+		if (pmd_control.s.train_en)
+			return CVMX_HELPER_INTERFACE_MODE_10G_KR;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_XFI;
+		break;
+	case 4:
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+			return cvmx_helper_interface_get_mode(xiface);
+		pmd_control.u64 = csr_rd_node(
+			xi.node,
+			CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
+		if (pmd_control.s.train_en)
+			return CVMX_HELPER_INTERFACE_MODE_40G_KR4;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_XLAUI;
+		break;
+	case 5:
+		return CVMX_HELPER_INTERFACE_MODE_RGMII;
+	default:
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+}
+
+static int __cvmx_helper_bgx_rgmii_speed(cvmx_helper_link_info_t link_info)
+{
+	cvmx_xcv_reset_t xcv_reset;
+	cvmx_xcv_ctl_t xcv_ctl;
+	cvmx_xcv_batch_crd_ret_t crd_ret;
+	cvmx_xcv_dll_ctl_t dll_ctl;
+	cvmx_xcv_comp_ctl_t comp_ctl;
+	int speed;
+	int up = link_info.s.link_up;
+	int do_credits;
+
+	if (link_info.s.speed == 100)
+		speed = 1;
+	else if (link_info.s.speed == 10)
+		speed = 0;
+	else
+		speed = 2;
+
+	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+	xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
+	do_credits = up && !xcv_reset.s.enable;
+
+	if (xcv_ctl.s.lpbk_int) {
+		xcv_reset.s.clkrst = 0;
+		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+	}
+
+	if (up && (!xcv_reset.s.enable || xcv_ctl.s.speed != speed)) {
+		if (debug)
+			debug("%s: *** Enabling XCV block\n", __func__);
+		/* Enable the XCV block */
+		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+		xcv_reset.s.enable = 1;
+		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+		/* Set operating mode */
+		xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
+		xcv_ctl.s.speed = speed;
+		csr_wr(CVMX_XCV_CTL, xcv_ctl.u64);
+
+		/* Configure DLL - enable or bypass */
+		/* TX no bypass, RX bypass */
+		dll_ctl.u64 = csr_rd(CVMX_XCV_DLL_CTL);
+		dll_ctl.s.clkrx_set = 0;
+		dll_ctl.s.clkrx_byp = 1;
+		dll_ctl.s.clktx_byp = 0;
+		csr_wr(CVMX_XCV_DLL_CTL, dll_ctl.u64);
+
+		/* Enable */
+		dll_ctl.u64 = csr_rd(CVMX_XCV_DLL_CTL);
+		dll_ctl.s.refclk_sel = 0;
+		csr_wr(CVMX_XCV_DLL_CTL, dll_ctl.u64);
+		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+		xcv_reset.s.dllrst = 0;
+		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+		/* A delay seems to be needed so XCV_DLL_CTL[CLK_SET] takes effect */
+		udelay(10);
+
+		comp_ctl.u64 = csr_rd(CVMX_XCV_COMP_CTL);
+		//comp_ctl.s.drv_pctl = 0;
+		//comp_ctl.s.drv_nctl = 0;
+		comp_ctl.s.drv_byp = 0;
+		csr_wr(CVMX_XCV_COMP_CTL, comp_ctl.u64);
+
+		/* enable */
+		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+		xcv_reset.s.comp = 1;
+		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+		/* setup the RXC */
+		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+		xcv_reset.s.clkrst = !xcv_ctl.s.lpbk_int;
+		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+		/* Bring the datapaths out of reset
+		 * - the datapath resets will disengage BGX from the RGMII
+		 *   interface
+		 * - XCV will continue to return TX credits for each tick that
+		 *   is sent on the TX data path
+		 */
+		xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+		xcv_reset.s.tx_dat_rst_n = 1;
+		xcv_reset.s.rx_dat_rst_n = 1;
+		csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+	} else if (debug) {
+		debug("%s: *** Not enabling XCV\n", __func__);
+		debug("  up: %s, xcv_reset.s.enable: %d, xcv_ctl.s.speed: %d, speed: %d\n",
+		      up ? "true" : "false", (unsigned int)xcv_reset.s.enable,
+		      (unsigned int)xcv_ctl.s.speed, speed);
+	}
+
+	/* Enable the packet flow
+	 * - the packet resets will only disengage on packet boundaries
+	 * - XCV will continue to return TX credits for each tick that is
+	 *   sent on the TX datapath
+	 */
+	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+	xcv_reset.s.tx_pkt_rst_n = up;
+	xcv_reset.s.rx_pkt_rst_n = up;
+	csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+
+	/* Full reset when link is down */
+	if (!up) {
+		if (debug)
+			debug("%s: *** Disabling XCV reset\n", __func__);
+		/* wait 2*MTU in time */
+		mdelay(10);
+		/* reset the world */
+		csr_wr(CVMX_XCV_RESET, 0);
+	}
+
+	/* grant PKO TX credits */
+	if (do_credits) {
+		crd_ret.u64 = csr_rd(CVMX_XCV_BATCH_CRD_RET);
+		crd_ret.s.crd_ret = 1;
+		csr_wr(CVMX_XCV_BATCH_CRD_RET, crd_ret.u64);
+	}
+
+	return 0;
+}
+
+static void __cvmx_bgx_common_init_pknd(int xiface, int index)
+{
+	int num_ports;
+	int num_chl = 16;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int node = xi.node;
+	int pknd;
+	cvmx_bgxx_cmrx_rx_bp_on_t bgx_rx_bp_on;
+	cvmx_bgxx_cmrx_rx_id_map_t cmr_rx_id_map;
+	cvmx_bgxx_cmr_chan_msk_and_t chan_msk_and;
+	cvmx_bgxx_cmr_chan_msk_or_t chan_msk_or;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	num_ports = cvmx_helper_ports_on_interface(xiface);
+	/* Modify bp_on mark, depending on number of LMACS on that interface
+	 * and write it for every port
+	 */
+	bgx_rx_bp_on.u64 = 0;
+	bgx_rx_bp_on.s.mark = (CVMX_BGX_RX_FIFO_SIZE / (num_ports * 4 * 16));
+
+	/* Setup pkind */
+	pknd = cvmx_helper_get_pknd(xiface, index);
+	cmr_rx_id_map.u64 = csr_rd_node(
+		node, CVMX_BGXX_CMRX_RX_ID_MAP(index, xi.interface));
+	cmr_rx_id_map.s.pknd = pknd;
+	/* Change the default reassembly ID (RID); at most 14 RIDs are allowed */
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		cmr_rx_id_map.s.rid = ((4 * xi.interface) + 2 + index);
+	csr_wr_node(node, CVMX_BGXX_CMRX_RX_ID_MAP(index, xi.interface),
+		    cmr_rx_id_map.u64);
+	/* Set backpressure channel mask AND/OR registers */
+	chan_msk_and.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMR_CHAN_MSK_AND(xi.interface));
+	chan_msk_or.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMR_CHAN_MSK_OR(xi.interface));
+	chan_msk_and.s.msk_and |= ((1 << num_chl) - 1) << (16 * index);
+	chan_msk_or.s.msk_or |= ((1 << num_chl) - 1) << (16 * index);
+	csr_wr_node(node, CVMX_BGXX_CMR_CHAN_MSK_AND(xi.interface),
+		    chan_msk_and.u64);
+	csr_wr_node(node, CVMX_BGXX_CMR_CHAN_MSK_OR(xi.interface),
+		    chan_msk_or.u64);
+	/* set rx back pressure (bp_on) on value */
+	csr_wr_node(node, CVMX_BGXX_CMRX_RX_BP_ON(index, xi.interface),
+		    bgx_rx_bp_on.u64);
+}
+
+/**
+ * @INTERNAL
+ * Probe a SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call. This is used by interfaces using the bgx mac.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_bgx_probe(int xiface)
+{
+	return __cvmx_helper_bgx_enumerate(xiface);
+}
+
+/**
+ * @INTERNAL
+ * Return the size of the BGX TX_FIFO for a given LMAC,
+ * or 0 if the requested LMAC is inactive.
+ *
+ * TBD: We also need to add a "__cvmx_helper_bgx_speed()" function to
+ * return the speed of each LMAC.
+ */
+int __cvmx_helper_bgx_fifo_size(int xiface, unsigned int lmac)
+{
+	cvmx_bgxx_cmr_tx_lmacs_t lmacs;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	unsigned int tx_fifo_size = CVMX_BGX_TX_FIFO_SIZE;
+
+	/* FIXME: Add validation for interface# < BGX_count */
+	lmacs.u64 = csr_rd_node(xi.node, CVMX_BGXX_CMR_TX_LMACS(xi.interface));
+
+	switch (lmacs.s.lmacs) {
+	case 1:
+		if (lmac > 0)
+			return 0;
+		else
+			return tx_fifo_size;
+	case 2:
+		if (lmac > 1)
+			return 0;
+		else
+			return tx_fifo_size >> 1;
+	case 3:
+		if (lmac > 2)
+			return 0;
+		else
+			return tx_fifo_size >> 2;
+	case 4:
+		if (lmac > 3)
+			return 0;
+		else
+			return tx_fifo_size >> 2;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * @INTERNAL
+ * Perform initialization required only once for an SGMII port.
+ *
+ * @param xiface Interface to init
+ * @param index     Index of port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_sgmii_hardware_init_one_time(int xiface, int index)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int node = xi.node;
+	const u64 clock_mhz = 1200; /* todo: fixme */
+	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
+	cvmx_bgxx_gmp_pcs_linkx_timer_t gmp_timer;
+
+	if (!cvmx_helper_is_port_valid(xi.interface, index))
+		return 0;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	/*
+	 * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
+	 * appropriate value. 1000BASE-X specifies a 10ms
+	 * interval. SGMII specifies a 1.6ms interval.
+	 */
+	gmp_misc_ctl.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+	/* Adjust the MAC mode if requested by device tree */
+	gmp_misc_ctl.s.mac_phy = cvmx_helper_get_mac_phy_mode(xiface, index);
+	gmp_misc_ctl.s.mode = cvmx_helper_get_1000x_mode(xiface, index);
+	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
+		    gmp_misc_ctl.u64);
+
+	gmp_timer.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_PCS_LINKX_TIMER(index, xi.interface));
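+	/* The link timer counts in units of 1024 clock cycles, so
+	 * count = interval_in_us * clock_mhz / 1024.
+	 */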
+	if (gmp_misc_ctl.s.mode)
+		/* 1000BASE-X */
+		gmp_timer.s.count = (10000ull * clock_mhz) >> 10;
+	else
+		/* SGMII */
+		gmp_timer.s.count = (1600ull * clock_mhz) >> 10;
+
+	csr_wr_node(node, CVMX_BGXX_GMP_PCS_LINKX_TIMER(index, xi.interface),
+		    gmp_timer.u64);
+
+	/*
+	 * Write the advertisement register to be used as the
+	 * tx_Config_Reg<D15:D0> of the autonegotiation.  In
+	 * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
+	 * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
+	 * PCS*_SGM*_AN_ADV_REG.  In SGMII MAC mode,
+	 * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
+	 * step can be skipped.
+	 */
+	if (gmp_misc_ctl.s.mode) {
+		/* 1000BASE-X */
+		cvmx_bgxx_gmp_pcs_anx_adv_t gmp_an_adv;
+
+		gmp_an_adv.u64 = csr_rd_node(
+			node, CVMX_BGXX_GMP_PCS_ANX_ADV(index, xi.interface));
+		gmp_an_adv.s.rem_flt = 0;
+		gmp_an_adv.s.pause = 3;
+		gmp_an_adv.s.hfd = 1;
+		gmp_an_adv.s.fd = 1;
+		csr_wr_node(node,
+			    CVMX_BGXX_GMP_PCS_ANX_ADV(index, xi.interface),
+			    gmp_an_adv.u64);
+	} else {
+		if (gmp_misc_ctl.s.mac_phy) {
+			/* PHY Mode */
+			cvmx_bgxx_gmp_pcs_sgmx_an_adv_t gmp_sgmx_an_adv;
+
+			gmp_sgmx_an_adv.u64 =
+				csr_rd_node(node, CVMX_BGXX_GMP_PCS_SGMX_AN_ADV(
+							  index, xi.interface));
+			gmp_sgmx_an_adv.s.dup = 1;
+			gmp_sgmx_an_adv.s.speed = 2;
+			csr_wr_node(node,
+				    CVMX_BGXX_GMP_PCS_SGMX_AN_ADV(index,
+								  xi.interface),
+				    gmp_sgmx_an_adv.u64);
+		} else {
+			/* MAC Mode - Nothing to do */
+		}
+	}
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bring up the SGMII interface to be ready for packet I/O but
+ * leave I/O disabled using the GMX override. This function
+ * follows the bringup documented in 10.6.3 of the manual.
+ *
+ * @param xiface Interface to bringup
+ * @param num_ports Number of ports on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_sgmii_hardware_init(int xiface, int num_ports)
+{
+	int index;
+	int do_link_set = 1;
+
+	for (index = 0; index < num_ports; index++) {
+		int xipd_port = cvmx_helper_get_ipd_port(xiface, index);
+		cvmx_helper_interface_mode_t mode;
+
+		if (!cvmx_helper_is_port_valid(xiface, index))
+			continue;
+
+		__cvmx_helper_bgx_port_init(xipd_port, 0);
+
+		mode = cvmx_helper_bgx_get_mode(xiface, index);
+		if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+			continue;
+
+		if (do_link_set)
+			__cvmx_helper_bgx_sgmii_link_set(
+				xipd_port,
+				__cvmx_helper_bgx_sgmii_link_get(xipd_port));
+	}
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bringup and enable a SGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled. This is used by interfaces using
+ * the bgx mac.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_bgx_sgmii_enable(int xiface)
+{
+	int num_ports;
+
+	num_ports = cvmx_helper_ports_on_interface(xiface);
+	__cvmx_helper_bgx_sgmii_hardware_init(xiface, num_ports);
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Initialize the SERDES link for the first time or after a loss
+ * of link.
+ *
+ * @param xiface Interface to init
+ * @param index     Index of port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_sgmii_hardware_init_link(int xiface, int index)
+{
+	cvmx_bgxx_gmp_pcs_mrx_control_t gmp_control;
+	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	int phy_mode, mode_1000x;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+	int node = xi.node;
+	int autoneg = 0;
+
+	if (!cvmx_helper_is_port_valid(xiface, index))
+		return 0;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	gmp_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
+	/* Take PCS through a reset sequence */
+	gmp_control.s.reset = 1;
+	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
+		    gmp_control.u64);
+
+	/* Wait until GMP_PCS_MRX_CONTROL[reset] comes out of reset */
+	if (CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
+		    cvmx_bgxx_gmp_pcs_mrx_control_t, reset, ==, 0, 10000)) {
+		debug("SGMII%d: Timeout waiting for port %d to finish reset\n",
+		      interface, index);
+		return -1;
+	}
+
+	cmr_config.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+
+	gmp_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
+	if (cvmx_helper_get_port_phy_present(xiface, index)) {
+		gmp_control.s.pwr_dn = 0;
+	} else {
+		gmp_control.s.spdmsb = 1;
+		gmp_control.s.spdlsb = 0;
+		gmp_control.s.pwr_dn = 0;
+	}
+	/* Write GMP_PCS_MR*_CONTROL[RST_AN]=1 to ensure a fresh SGMII
+	 * negotiation starts.
+	 */
+	autoneg = cvmx_helper_get_port_autonegotiation(xiface, index);
+	gmp_control.s.rst_an = 1;
+	gmp_control.s.an_en = (cmr_config.s.lmac_type != 5) && autoneg;
+	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
+		    gmp_control.u64);
+
+	phy_mode = cvmx_helper_get_mac_phy_mode(xiface, index);
+	mode_1000x = cvmx_helper_get_1000x_mode(xiface, index);
+
+	gmp_misc_ctl.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+	gmp_misc_ctl.s.mac_phy = phy_mode;
+	gmp_misc_ctl.s.mode = mode_1000x;
+	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
+		    gmp_misc_ctl.u64);
+
+	if (phy_mode || !autoneg)
+		/* In PHY mode we can't query the link status so we just
+		 * assume that the link is up
+		 */
+		return 0;
+
+	/* Wait for GMP_PCS_MRX_CONTROL[an_cpt] to be set, indicating that
+	 * SGMII autonegotiation is complete. In MAC mode this isn't an
+	 * ethernet link, but a link between OCTEON and PHY.
+	 */
+	if (cmr_config.s.lmac_type != 5 &&
+	    CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_GMP_PCS_MRX_STATUS(index, xi.interface),
+		    cvmx_bgxx_gmp_pcs_mrx_status_t, an_cpt, ==, 1, 10000)) {
+		debug("SGMII%d: Port %d link timeout\n", interface, index);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Configure an SGMII link to the specified speed after the SERDES
+ * link is up.
+ *
+ * @param xiface Interface to init
+ * @param index     Index of port on the interface
+ * @param link_info Link state to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_sgmii_hardware_init_link_speed(
+	int xiface, int index, cvmx_helper_link_info_t link_info)
+{
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_miscx_ctl;
+	cvmx_bgxx_gmp_gmi_prtx_cfg_t gmp_prtx_cfg;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int node = xi.node;
+
+	if (!cvmx_helper_is_port_valid(xiface, index))
+		return 0;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	/* Disable GMX before we make any changes. */
+	cmr_config.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+	cmr_config.s.data_pkt_tx_en = 0;
+	cmr_config.s.data_pkt_rx_en = 0;
+	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+		    cmr_config.u64);
+
+	/* Wait for GMX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
+		    cvmx_bgxx_gmp_gmi_prtx_cfg_t, rx_idle, ==, 1, 10000) ||
+	    CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
+		    cvmx_bgxx_gmp_gmi_prtx_cfg_t, tx_idle, ==, 1, 10000)) {
+		debug("SGMII%d:%d: Timeout waiting for port %d to be idle\n",
+		      node, xi.interface, index);
+		return -1;
+	}
+
+	/* Read GMX CFG again to make sure the disable completed */
+	gmp_prtx_cfg.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface));
+
+	/*
+	 * Get the misc control for PCS. We will need to set the
+	 * duplication amount.
+	 */
+	gmp_miscx_ctl.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+
+	/*
+	 * Use GMXENO to force the link down if the status we get says
+	 * it should be down.
+	 */
+	gmp_miscx_ctl.s.gmxeno = !link_info.s.link_up;
+
+	/* Only change the duplex setting if the link is up */
+	if (link_info.s.link_up)
+		gmp_prtx_cfg.s.duplex = link_info.s.full_duplex;
+
+	/* Do speed based setting for GMX */
+	switch (link_info.s.speed) {
+	case 10:
+		gmp_prtx_cfg.s.speed = 0;
+		gmp_prtx_cfg.s.speed_msb = 1;
+		gmp_prtx_cfg.s.slottime = 0;
+		/* Setting from GMX-603 */
+		gmp_miscx_ctl.s.samp_pt = 25;
+		csr_wr_node(node,
+			    CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
+			    64);
+		csr_wr_node(node,
+			    CVMX_BGXX_GMP_GMI_TXX_BURST(index, xi.interface),
+			    0);
+		break;
+	case 100:
+		gmp_prtx_cfg.s.speed = 0;
+		gmp_prtx_cfg.s.speed_msb = 0;
+		gmp_prtx_cfg.s.slottime = 0;
+		gmp_miscx_ctl.s.samp_pt = 0x5;
+		csr_wr_node(node,
+			    CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
+			    64);
+		csr_wr_node(node,
+			    CVMX_BGXX_GMP_GMI_TXX_BURST(index, xi.interface),
+			    0);
+		break;
+	case 1000:
+		gmp_prtx_cfg.s.speed = 1;
+		gmp_prtx_cfg.s.speed_msb = 0;
+		gmp_prtx_cfg.s.slottime = 1;
+		gmp_miscx_ctl.s.samp_pt = 1;
+		csr_wr_node(node,
+			    CVMX_BGXX_GMP_GMI_TXX_SLOT(index, xi.interface),
+			    512);
+		if (gmp_prtx_cfg.s.duplex)
+			/* full duplex */
+			csr_wr_node(node,
+				    CVMX_BGXX_GMP_GMI_TXX_BURST(index,
+								xi.interface),
+				    0);
+		else
+			/* half duplex */
+			csr_wr_node(node,
+				    CVMX_BGXX_GMP_GMI_TXX_BURST(index,
+								xi.interface),
+				    8192);
+		break;
+	default:
+		break;
+	}
+
+	/* Write the new misc control for PCS */
+	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
+		    gmp_miscx_ctl.u64);
+
+	/* Write the new GMX settings with the port still disabled */
+	csr_wr_node(node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface),
+		    gmp_prtx_cfg.u64);
+
+	/* Read GMX CFG again to make sure the config completed */
+	csr_rd_node(node, CVMX_BGXX_GMP_GMI_PRTX_CFG(index, xi.interface));
+
+	/* Enable back BGX. */
+	cmr_config.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+	if (debug)
+		debug("%s: Enabling tx and rx packets on %d:%d\n", __func__,
+		      xi.interface, index);
+	cmr_config.s.data_pkt_tx_en = 1;
+	cmr_config.s.data_pkt_rx_en = 1;
+	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+		    cmr_config.u64);
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set(). This is used by
+ * interfaces using the bgx mac.
+ *
+ * @param xipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_bgx_sgmii_link_get(int xipd_port)
+{
+	cvmx_helper_link_info_t result;
+	cvmx_bgxx_gmp_pcs_mrx_control_t gmp_control;
+	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	int node = xi.node;
+	int index = cvmx_helper_get_interface_index_num(xp.port);
+
+	result.u64 = 0;
+
+	if (!cvmx_helper_is_port_valid(xiface, index))
+		return result;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	gmp_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
+	if (gmp_control.s.loopbck1) {
+		int qlm = cvmx_qlm_lmac(xiface, index);
+		int speed;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+			speed = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
+		else
+			speed = cvmx_qlm_get_gbaud_mhz(qlm);
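+		/* SGMII uses 8b/10b line coding, so the usable data rate
+		 * is 8/10 of the QLM baud rate
+		 */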
+		/* Force 1Gbps full duplex link for internal loopback */
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = speed * 8 / 10;
+		return result;
+	}
+
+	gmp_misc_ctl.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+	if (gmp_misc_ctl.s.mac_phy ||
+	    cvmx_helper_get_port_force_link_up(xiface, index)) {
+		int qlm = cvmx_qlm_lmac(xiface, index);
+		int speed;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+			speed = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
+		else
+			speed = cvmx_qlm_get_gbaud_mhz(qlm);
+		/* PHY Mode */
+		/* Note that this also works for 1000base-X mode */
+
+		result.s.speed = speed * 8 / 10;
+		result.s.full_duplex = 1;
+		result.s.link_up = 1;
+		return result;
+	}
+
+	/* MAC Mode */
+	return __cvmx_helper_board_link_get(xipd_port);
+}
+
+/**
+ * This sequence brings down the link for the XCV RGMII interface
+ *
+ * @param interface	Interface (BGX) number.  Port index is always 0
+ */
+static void __cvmx_helper_bgx_rgmii_link_set_down(int interface)
+{
+	union cvmx_xcv_reset xcv_reset;
+	union cvmx_bgxx_cmrx_config cmr_config;
+	union cvmx_bgxx_gmp_pcs_mrx_control mr_control;
+	union cvmx_bgxx_cmrx_rx_fifo_len rx_fifo_len;
+	union cvmx_bgxx_cmrx_tx_fifo_len tx_fifo_len;
+
+	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+	xcv_reset.s.rx_pkt_rst_n = 0;
+	csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+	csr_rd(CVMX_XCV_RESET);
+	mdelay(10); /* Wait for 1 MTU */
+
+	cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(0, interface));
+	cmr_config.s.data_pkt_rx_en = 0;
+	csr_wr(CVMX_BGXX_CMRX_CONFIG(0, interface), cmr_config.u64);
+
+	/* Wait for RX and TX to be idle */
+	do {
+		rx_fifo_len.u64 =
+			csr_rd(CVMX_BGXX_CMRX_RX_FIFO_LEN(0, interface));
+		tx_fifo_len.u64 =
+			csr_rd(CVMX_BGXX_CMRX_TX_FIFO_LEN(0, interface));
+	} while (rx_fifo_len.s.fifo_len > 0 && tx_fifo_len.s.lmac_idle != 1);
+
+	cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(0, interface));
+	cmr_config.s.data_pkt_tx_en = 0;
+	csr_wr(CVMX_BGXX_CMRX_CONFIG(0, interface), cmr_config.u64);
+
+	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+	xcv_reset.s.tx_pkt_rst_n = 0;
+	csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+	mr_control.u64 = csr_rd(CVMX_BGXX_GMP_PCS_MRX_CONTROL(0, interface));
+	mr_control.s.pwr_dn = 1;
+	csr_wr(CVMX_BGXX_GMP_PCS_MRX_CONTROL(0, interface), mr_control.u64);
+}
+
+/**
+ * Sets a BGX SGMII link down.
+ *
+ * @param node	Octeon node number
+ * @param iface	BGX interface number
+ * @param index	BGX port index
+ */
+static void __cvmx_helper_bgx_sgmii_link_set_down(int node, int iface,
+						  int index)
+{
+	union cvmx_bgxx_gmp_pcs_miscx_ctl gmp_misc_ctl;
+	union cvmx_bgxx_gmp_pcs_mrx_control gmp_control;
+	union cvmx_bgxx_cmrx_config cmr_config;
+
+	cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface));
+	cmr_config.s.data_pkt_tx_en = 0;
+	cmr_config.s.data_pkt_rx_en = 0;
+	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface), cmr_config.u64);
+
+	gmp_misc_ctl.u64 =
+		csr_rd_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface));
+
+	/* Disable autonegotiation only when in MAC mode. */
+	if (gmp_misc_ctl.s.mac_phy == 0) {
+		gmp_control.u64 = csr_rd_node(
+			node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, iface));
+		gmp_control.s.an_en = 0;
+		csr_wr_node(node, CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, iface),
+			    gmp_control.u64);
+	}
+
+	/* Use GMXENO to force the link down.  It will get reenabled later... */
+	gmp_misc_ctl.s.gmxeno = 1;
+	csr_wr_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface),
+		    gmp_misc_ctl.u64);
+	csr_rd_node(node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, iface));
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead. This is used by interfaces
+ * using the bgx mac.
+ *
+ * @param xipd_port  IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_bgx_sgmii_link_set(int xipd_port,
+				     cvmx_helper_link_info_t link_info)
+{
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	int node = xi.node;
+	int index = cvmx_helper_get_interface_index_num(xp.port);
+	const int iface = xi.interface;
+	int rc = 0;
+
+	if (!cvmx_helper_is_port_valid(xiface, index))
+		return 0;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface));
+	if (link_info.s.link_up) {
+		cmr_config.s.enable = 1;
+		csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, iface),
+			    cmr_config.u64);
+		/* Apply workaround for errata BGX-22429 */
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && index) {
+			cvmx_bgxx_cmrx_config_t cmr0;
+
+			cmr0.u64 = csr_rd_node(node,
+					       CVMX_BGXX_CMRX_CONFIG(0, iface));
+			cmr0.s.enable = 1;
+			csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(0, iface),
+				    cmr0.u64);
+		}
+		__cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
+	} else if (cvmx_helper_bgx_is_rgmii(xi.interface, index)) {
+		if (debug)
+			debug("%s: Bringing down XCV RGMII interface %d\n",
+			      __func__, xi.interface);
+		__cvmx_helper_bgx_rgmii_link_set_down(xi.interface);
+	} else { /* Link is down, not RGMII */
+		__cvmx_helper_bgx_sgmii_link_set_down(node, iface, index);
+		return 0;
+	}
+	rc = __cvmx_helper_bgx_sgmii_hardware_init_link_speed(xiface, index,
+							      link_info);
+	if (cvmx_helper_bgx_is_rgmii(xiface, index))
+		rc = __cvmx_helper_bgx_rgmii_speed(link_info);
+
+	return rc;
+}
+
+/**
+ * @INTERNAL
+ * Bring up the XAUI interface. After this call, packet I/O should be
+ * fully functional.
+ *
+ * @param index port on interface to bring up
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_bgx_xaui_init(int index, int xiface)
+{
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
+	cvmx_bgxx_spux_misc_control_t spu_misc_control;
+	cvmx_bgxx_spux_control1_t spu_control1;
+	cvmx_bgxx_spux_an_control_t spu_an_control;
+	cvmx_bgxx_spux_an_adv_t spu_an_adv;
+	cvmx_bgxx_spux_fec_control_t spu_fec_control;
+	cvmx_bgxx_spu_dbg_control_t spu_dbg_control;
+	cvmx_bgxx_smux_tx_append_t smu_tx_append;
+	cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
+	cvmx_helper_interface_mode_t mode;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+	int node = xi.node;
+	int use_auto_neg = 0;
+	int kr_mode = 0;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+	if (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
+	    mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4) {
+		kr_mode = 1;
+		if (cvmx_helper_bgx_override_autoneg)
+			use_auto_neg =
+				cvmx_helper_bgx_override_autoneg(xiface, index);
+		else
+			use_auto_neg = cvmx_helper_get_port_autonegotiation(
+				xiface, index);
+	}
+
+	/* NOTE: This code was moved first, out of order compared to the HRM
+ * because the RESET causes all SPU registers to lose their value
+	 */
+	/* 4. Next, bring up the SMU/SPU and the BGX reconciliation layer
+	 * logic:
+	 */
+	/* 4a. Take SMU/SPU through a reset sequence. Write
+	 * BGX(0..5)_SPU(0..3)_CONTROL1[RESET] = 1. Read
+	 * BGX(0..5)_SPU(0..3)_CONTROL1[RESET] until it changes value to 0. Keep
+	 * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1 to disable
+	 * reception.
+	 */
+	spu_control1.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+	spu_control1.s.reset = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+		    spu_control1.u64);
+
+	/* 1. Wait for PCS to come out of reset */
+	if (CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+		    cvmx_bgxx_spux_control1_t, reset, ==, 0, 10000)) {
+		debug("BGX%d:%d: SPU stuck in reset\n", node, interface);
+		return -1;
+	}
+
+	/* 2. Write BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] to 0,
+	 * BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 1 and
+	 * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1.
+	 */
+	spu_control1.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+	spu_control1.s.lo_pwr = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+		    spu_control1.u64);
+
+	spu_misc_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+	spu_misc_control.s.rx_packet_dis = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
+		    spu_misc_control.u64);
+
+	/* 3. At this point, it may be appropriate to disable all BGX and
+	 * SMU/SPU interrupts, as a number of them will occur during bring-up
+	 * of the link.
+	 * - zero BGX(0..5)_SMU(0..3)_RX_INT
+	 * - zero BGX(0..5)_SMU(0..3)_TX_INT
+	 * - zero BGX(0..5)_SPU(0..3)_INT
+	 */
+	csr_wr_node(node, CVMX_BGXX_SMUX_RX_INT(index, xi.interface),
+		    csr_rd_node(node,
+				CVMX_BGXX_SMUX_RX_INT(index, xi.interface)));
+	csr_wr_node(node, CVMX_BGXX_SMUX_TX_INT(index, xi.interface),
+		    csr_rd_node(node,
+				CVMX_BGXX_SMUX_TX_INT(index, xi.interface)));
+	csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, xi.interface),
+		    csr_rd_node(node, CVMX_BGXX_SPUX_INT(index, xi.interface)));
+
+	/* 4. Configure the BGX LMAC. */
+	/* 4a. Configure the LMAC type (40GBASE-R/10GBASE-R/RXAUI/XAUI) and
+	 * SerDes selection in the BGX(0..5)_CMR(0..3)_CONFIG register, but keep
+	 * the ENABLE, DATA_PKT_TX_EN and DATA_PKT_RX_EN bits clear.
+	 */
+	/* Already done in bgx_setup_one_time */
+
+	/* 4b. Write BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 1 and
+	 * BGX(0..5)_SPU(0..3)_MISC_CONTROL[RX_PACKET_DIS] = 1.
+	 */
+	/* 4b. Initialize the selected SerDes lane(s) in the QLM. See Section
+	 * 28.1.2.2 in the GSER chapter.
+	 */
+	/* Already done in QLM setup */
+
+	/* 4c. For 10GBASE-KR or 40GBASE-KR, enable link training by writing
+	 * BGX(0..5)_SPU(0..3)_BR_PMD_CONTROL[TRAIN_EN] = 1.
+	 */
+
+	if (kr_mode && !OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+		csr_wr_node(node,
+			    CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, interface), 0);
+		csr_wr_node(node,
+			    CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, interface), 0);
+		csr_wr_node(node,
+			    CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, interface), 0);
+		pmd_control.u64 = csr_rd_node(
+			node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, interface));
+		pmd_control.s.train_en = 1;
+		csr_wr_node(node,
+			    CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, interface),
+			    pmd_control.u64);
+	}
+
+	/* 4d. Program all other relevant BGX configuration while
+	 * BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] = 0. This includes all things
+	 * described in this chapter.
+	 */
+	/* Always add FCS to PAUSE frames */
+	smu_tx_append.u64 = csr_rd_node(
+		node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface));
+	smu_tx_append.s.fcs_c = 1;
+	csr_wr_node(node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface),
+		    smu_tx_append.u64);
+
+	/* 4e. If Forward Error Correction is desired for 10GBASE-R or
+	 * 40GBASE-R, enable it by writing
+	 * BGX(0..5)_SPU(0..3)_FEC_CONTROL[FEC_EN] = 1.
+	 */
+	/* FEC is optional for 10GBASE-KR, 40GBASE-KR4, and XLAUI. We're going
+	 * to disable it by default
+	 */
+	spu_fec_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface));
+	if (cvmx_helper_bgx_override_fec)
+		spu_fec_control.s.fec_en =
+			cvmx_helper_bgx_override_fec(xiface, index);
+	else
+		spu_fec_control.s.fec_en =
+			cvmx_helper_get_port_fec(xiface, index);
+	csr_wr_node(node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface),
+		    spu_fec_control.u64);
+
+	/* 4f. If Auto-Negotiation is desired, configure and enable
+	 * Auto-Negotiation as described in Section 33.6.2.
+	 */
+	spu_an_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface));
+	/* Disable extended next pages */
+	spu_an_control.s.xnp_en = 0;
+	spu_an_control.s.an_en = use_auto_neg;
+	csr_wr_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface),
+		    spu_an_control.u64);
+
+	spu_fec_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_SPUX_FEC_CONTROL(index, xi.interface));
+	spu_an_adv.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_AN_ADV(index, xi.interface));
+	spu_an_adv.s.fec_req = spu_fec_control.s.fec_en;
+	spu_an_adv.s.fec_able = 1;
+	spu_an_adv.s.a100g_cr10 = 0;
+	spu_an_adv.s.a40g_cr4 = 0;
+	spu_an_adv.s.a40g_kr4 = (mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4);
+	spu_an_adv.s.a10g_kr = (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR);
+	spu_an_adv.s.a10g_kx4 = 0;
+	spu_an_adv.s.a1g_kx = 0;
+	spu_an_adv.s.xnp_able = 0;
+	spu_an_adv.s.rf = 0;
+	csr_wr_node(node, CVMX_BGXX_SPUX_AN_ADV(index, xi.interface),
+		    spu_an_adv.u64);
+
+	/* 3. Set BGX(0..5)_SPU_DBG_CONTROL[AN_ARB_LINK_CHK_EN] = 1. */
+	spu_dbg_control.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface));
+	spu_dbg_control.s.an_nonce_match_dis = 1; /* Needed for loopback */
+	spu_dbg_control.s.an_arb_link_chk_en |= kr_mode;
+	csr_wr_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface),
+		    spu_dbg_control.u64);
+
+	/* 4. Execute the link bring-up sequence in Section 33.6.3. */
+
+	/* 5. If the auto-negotiation protocol is successful,
+	 * BGX(0..5)_SPU(0..3)_AN_ADV[AN_COMPLETE] is set along with
+	 * BGX(0..5)_SPU(0..3)_INT[AN_COMPLETE] when the link is up.
+	 */
+
+	/* 3h. Set BGX(0..5)_CMR(0..3)_CONFIG[ENABLE] = 1 and
+	 * BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 0 to enable the LMAC.
+	 */
+	cmr_config.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+	cmr_config.s.enable = 1;
+	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+		    cmr_config.u64);
+	/* Apply workaround for errata BGX-22429 */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && index) {
+		cvmx_bgxx_cmrx_config_t cmr0;
+
+		cmr0.u64 = csr_rd_node(node,
+				       CVMX_BGXX_CMRX_CONFIG(0, xi.interface));
+		cmr0.s.enable = 1;
+		csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(0, xi.interface),
+			    cmr0.u64);
+	}
+
+	spu_control1.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+	spu_control1.s.lo_pwr = 0;
+	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+		    spu_control1.u64);
+
+	/* 4g. Set the polarity and lane swapping of the QLM SerDes. Refer to
+	 * Section 33.4.1, BGX(0..5)_SPU(0..3)_MISC_CONTROL[XOR_TXPLRT,XOR_RXPLRT]
+	 * and BGX(0..5)_SPU(0..3)_MISC_CONTROL[TXPLRT,RXPLRT].
+	 */
+
+	/* 4c. Write BGX(0..5)_SPU(0..3)_CONTROL1[LO_PWR] = 0. */
+	spu_control1.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+	spu_control1.s.lo_pwr = 0;
+	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+		    spu_control1.u64);
+
+	/* 4d. Select Deficit Idle Count mode and unidirectional enable/disable
+	 * via BGX(0..5)_SMU(0..3)_TX_CTL[DIC_EN,UNI_EN].
+	 */
+	smu_tx_ctl.u64 =
+		csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
+	smu_tx_ctl.s.dic_en = 1;
+	smu_tx_ctl.s.uni_en = 0;
+	csr_wr_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface),
+		    smu_tx_ctl.u64);
+
+	{
+		/* Calculate the number of s-clk cycles per usec. */
+		const u64 clock_mhz = 1200; /* todo: fixme */
+		cvmx_bgxx_spu_dbg_control_t dbg_control;
+
+		dbg_control.u64 = csr_rd_node(
+			node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface));
+		dbg_control.s.us_clk_period = clock_mhz - 1;
+		csr_wr_node(node, CVMX_BGXX_SPU_DBG_CONTROL(xi.interface),
+			    dbg_control.u64);
+	}
+	/* The PHY often takes at least 100ms to stabilize */
+	__cvmx_helper_bgx_interface_enable_delay(mode);
+	return 0;
+}
+
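+/**
+ * @INTERNAL
+ * Start link training on a BGX port. The stale training state is
+ * cleared manually first, as per Errata BGX-20968, and
+ * autonegotiation is disabled before training is (re)started.
+ *
+ * @param node	Octeon node number
+ * @param unit	BGX interface number
+ * @param index	BGX port index
+ */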
+static void __cvmx_bgx_start_training(int node, int unit, int index)
+{
+	cvmx_bgxx_spux_int_t spu_int;
+	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
+	cvmx_bgxx_spux_an_control_t an_control;
+
+	/* Clear the training interrupts (W1C) */
+	spu_int.u64 = 0;
+	spu_int.s.training_failure = 1;
+	spu_int.s.training_done = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, unit), spu_int.u64);
+
+	/* These registers aren't cleared when training is restarted. Manually
+	 * clear them as per Errata BGX-20968.
+	 */
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, unit), 0);
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, unit), 0);
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, unit), 0);
+
+	/* Disable autonegotiation */
+	an_control.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, unit));
+	an_control.s.an_en = 0;
+	csr_wr_node(node, CVMX_BGXX_SPUX_AN_CONTROL(index, unit),
+		    an_control.u64);
+	udelay(1);
+
+	/* Restart training */
+	pmd_control.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
+	pmd_control.s.train_en = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit),
+		    pmd_control.u64);
+
+	udelay(1);
+	pmd_control.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
+	pmd_control.s.train_restart = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit),
+		    pmd_control.u64);
+}
+
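+/**
+ * @INTERNAL
+ * Restart link training on a BGX port after a training failure,
+ * again clearing the stale training state per Errata BGX-20968.
+ *
+ * @param node	Octeon node number
+ * @param unit	BGX interface number
+ * @param index	BGX port index
+ */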
+static void __cvmx_bgx_restart_training(int node, int unit, int index)
+{
+	cvmx_bgxx_spux_int_t spu_int;
+	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
+
+	/* Clear the training interrupts (W1C) */
+	spu_int.u64 = 0;
+	spu_int.s.training_failure = 1;
+	spu_int.s.training_done = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_INT(index, unit), spu_int.u64);
+
+	udelay(1700); /* Wait 1.7 msec */
+
+	/* These registers aren't cleared when training is restarted. Manually
+	 * clear them as per Errata BGX-20968.
+	 */
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LP_CUP(index, unit), 0);
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_CUP(index, unit), 0);
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_LD_REP(index, unit), 0);
+
+	/* Restart training */
+	pmd_control.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit));
+	pmd_control.s.train_restart = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, unit),
+		    pmd_control.u64);
+}
+
+/**
+ * @INTERNAL
+ * Wrapper function to configure the BGX; it does not enable it.
+ *
+ * @param xipd_port IPD/PKO port to configure.
+ * @param phy_pres  If set, enable disparity; only applies to the RXAUI interface
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_bgx_port_init(int xipd_port, int phy_pres)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	int index = cvmx_helper_get_interface_index_num(xp.port);
+	cvmx_helper_interface_mode_t mode;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+	__cvmx_bgx_common_init_pknd(xiface, index);
+
+	if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+	    mode == CVMX_HELPER_INTERFACE_MODE_RGMII) {
+		cvmx_bgxx_gmp_gmi_txx_thresh_t gmi_tx_thresh;
+		cvmx_bgxx_gmp_gmi_txx_append_t gmp_txx_append;
+		cvmx_bgxx_gmp_gmi_txx_sgmii_ctl_t gmp_sgmii_ctl;
+
+		/* Set TX Threshold */
+		gmi_tx_thresh.u64 = 0;
+		gmi_tx_thresh.s.cnt = 0x20;
+		csr_wr_node(xi.node,
+			    CVMX_BGXX_GMP_GMI_TXX_THRESH(index, xi.interface),
+			    gmi_tx_thresh.u64);
+		__cvmx_helper_bgx_sgmii_hardware_init_one_time(xiface, index);
+		gmp_txx_append.u64 = csr_rd_node(
+			xi.node,
+			CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface));
+		gmp_sgmii_ctl.u64 = csr_rd_node(
+			xi.node,
+			CVMX_BGXX_GMP_GMI_TXX_SGMII_CTL(index, xi.interface));
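+		/* Word-align the TX data only when the preamble is not
+		 * being appended
+		 */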
+		gmp_sgmii_ctl.s.align = gmp_txx_append.s.preamble ? 0 : 1;
+		csr_wr_node(xi.node,
+			    CVMX_BGXX_GMP_GMI_TXX_SGMII_CTL(index,
+							    xi.interface),
+			    gmp_sgmii_ctl.u64);
+		if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII) {
+			/* Disable XCV interface when initialized */
+			union cvmx_xcv_reset xcv_reset;
+
+			if (debug)
+				debug("%s: Disabling RGMII XCV interface\n",
+				      __func__);
+			xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+			xcv_reset.s.enable = 0;
+			xcv_reset.s.tx_pkt_rst_n = 0;
+			xcv_reset.s.rx_pkt_rst_n = 0;
+			csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+		}
+	} else {
+		int res, cred;
+		cvmx_bgxx_smux_tx_thresh_t smu_tx_thresh;
+
+		res = __cvmx_helper_bgx_xaui_init(index, xiface);
+		if (res == -1) {
+#ifdef DEBUG_BGX
+			debug("Failed to enable XAUI for %d:BGX(%d,%d)\n",
+			      xi.node, xi.interface, index);
+#endif
+			return res;
+		}
+		/* See BGX_SMU_TX_THRESH register description */
+		cred = __cvmx_helper_bgx_fifo_size(xiface, index) >> 4;
+		smu_tx_thresh.u64 = 0;
+		smu_tx_thresh.s.cnt = cred - 10;
+		csr_wr_node(xi.node,
+			    CVMX_BGXX_SMUX_TX_THRESH(index, xi.interface),
+			    smu_tx_thresh.u64);
+		if (debug)
+			debug("%s: BGX%d:%d TX-thresh=%d\n", __func__,
+			      xi.interface, index,
+			      (unsigned int)smu_tx_thresh.s.cnt);
+
+		/* Set disparity for RXAUI interface as described in the
+		 * Marvell RXAUI Interface specification.
+		 */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI && phy_pres) {
+			cvmx_bgxx_spux_misc_control_t misc_control;
+
+			misc_control.u64 = csr_rd_node(
+				xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(
+						 index, xi.interface));
+			misc_control.s.intlv_rdisp = 1;
+			csr_wr_node(xi.node,
+				    CVMX_BGXX_SPUX_MISC_CONTROL(index,
+								xi.interface),
+				    misc_control.u64);
+		}
+	}
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again. This is used by
+ * interfaces using the bgx mac.
+ *
+ * @param xipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ *                 Non zero if you want internal loopback
+ * @param enable_external
+ *                 Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_bgx_sgmii_configure_loopback(int xipd_port,
+					       int enable_internal,
+					       int enable_external)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	int node = xi.node;
+	int index = cvmx_helper_get_interface_index_num(xp.port);
+	cvmx_bgxx_gmp_pcs_mrx_control_t gmp_mrx_control;
+	cvmx_bgxx_gmp_pcs_miscx_ctl_t gmp_misc_ctl;
+
+	if (!cvmx_helper_is_port_valid(xiface, index))
+		return 0;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	if (cvmx_helper_bgx_is_rgmii(xi.interface, index)) {
+		cvmx_xcv_ctl_t xcv_ctl;
+		cvmx_helper_link_info_t link_info;
+
+		xcv_ctl.u64 = csr_rd(CVMX_XCV_CTL);
+		xcv_ctl.s.lpbk_int = enable_internal;
+		xcv_ctl.s.lpbk_ext = enable_external;
+		csr_wr(CVMX_XCV_CTL, xcv_ctl.u64);
+
+		/* Initialize link and speed */
+		__cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
+		link_info = __cvmx_helper_bgx_sgmii_link_get(xipd_port);
+		__cvmx_helper_bgx_sgmii_hardware_init_link_speed(xiface, index,
+								 link_info);
+		__cvmx_helper_bgx_rgmii_speed(link_info);
+	} else {
+		gmp_mrx_control.u64 = csr_rd_node(
+			node,
+			CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface));
+		gmp_mrx_control.s.loopbck1 = enable_internal;
+		csr_wr_node(node,
+			    CVMX_BGXX_GMP_PCS_MRX_CONTROL(index, xi.interface),
+			    gmp_mrx_control.u64);
+
+		gmp_misc_ctl.u64 = csr_rd_node(
+			node, CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface));
+		gmp_misc_ctl.s.loopbck2 = enable_external;
+		csr_wr_node(node,
+			    CVMX_BGXX_GMP_PCS_MISCX_CTL(index, xi.interface),
+			    gmp_misc_ctl.u64);
+		__cvmx_helper_bgx_sgmii_hardware_init_link(xiface, index);
+	}
+
+	return 0;
+}
+
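+/**
+ * @INTERNAL
+ * Bring up the link on one port of a XAUI-family (XAUI/RXAUI/XFI/
+ * XLAUI/KR) interface: wait for autonegotiation and link training
+ * where used, perform RX equalization where needed, then verify the
+ * PCS/MAC status before enabling packet transmit and receive.
+ *
+ * @param index  Index of port on the interface
+ * @param xiface Interface the port belongs to
+ *
+ * @return Zero on success, negative on failure (the caller may retry)
+ */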
+static int __cvmx_helper_bgx_xaui_link_init(int index, int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int node = xi.node;
+	cvmx_bgxx_spux_status1_t spu_status1;
+	cvmx_bgxx_spux_status2_t spu_status2;
+	cvmx_bgxx_spux_br_status2_t br_status2;
+	cvmx_bgxx_spux_int_t spu_int;
+	cvmx_bgxx_spux_misc_control_t spu_misc_control;
+	cvmx_bgxx_spux_an_control_t spu_an_control;
+	cvmx_bgxx_spux_an_status_t spu_an_status;
+	cvmx_bgxx_spux_br_pmd_control_t pmd_control;
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	cvmx_helper_interface_mode_t mode;
+	int use_training = 0;
+	int rgmii_first = 0;
+	int qlm = cvmx_qlm_lmac(xiface, index);
+	int use_ber = 0;
+	u64 err_blks;
+	u64 ber_cnt;
+	u64 error_debounce;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	rgmii_first = cvmx_helper_bgx_is_rgmii(xi.interface, index);
+
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+	if (mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
+	    mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4)
+		use_training = 1;
+
+	if ((mode == CVMX_HELPER_INTERFACE_MODE_XFI ||
+	     mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+	     mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
+	     mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4))
+		use_ber = 1;
+
+	/* Disable packet reception, CMR as well as SPU block */
+	cmr_config.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+	cmr_config.s.data_pkt_tx_en = 0;
+	cmr_config.s.data_pkt_rx_en = 0;
+	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+		    cmr_config.u64);
+	spu_misc_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+	spu_misc_control.s.rx_packet_dis = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
+		    spu_misc_control.u64);
+
+	spu_an_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_SPUX_AN_CONTROL(index, xi.interface));
+	if (spu_an_control.s.an_en) {
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+			cvmx_bgxx_spux_int_t spu_int;
+
+			spu_int.u64 = csr_rd_node(
+				node, CVMX_BGXX_SPUX_INT(index, xi.interface));
+			if (!spu_int.s.an_link_good) {
+				static u64 restart_auto_neg[2][6][4] = {
+					[0 ... 1][0 ... 5] = { [0 ... 3] = 0 }
+				};
+				u64 now = get_timer(0);
+				u64 next_restart =
+					restart_auto_neg[node][xi.interface]
+							[index] +
+					2000;
+
+				if (now >= next_restart)
+					return -1;
+
+				restart_auto_neg[node][xi.interface][index] =
+					now;
+
+				/* Clear the auto negotiation (W1C) */
+				spu_int.u64 = 0;
+				spu_int.s.an_complete = 1;
+				spu_int.s.an_link_good = 1;
+				spu_int.s.an_page_rx = 1;
+				csr_wr_node(node,
+					    CVMX_BGXX_SPUX_INT(index,
+							       xi.interface),
+					    spu_int.u64);
+				/* Restart auto negotiation */
+				spu_an_control.u64 = csr_rd_node(
+					node, CVMX_BGXX_SPUX_AN_CONTROL(
+						      index, xi.interface));
+				spu_an_control.s.an_restart = 1;
+				csr_wr_node(node,
+					    CVMX_BGXX_SPUX_AN_CONTROL(
+						    index, xi.interface),
+					    spu_an_control.u64);
+				return -1;
+			}
+		} else {
+			spu_an_status.u64 = csr_rd_node(
+				node,
+				CVMX_BGXX_SPUX_AN_STATUS(index, xi.interface));
+			if (!spu_an_status.s.an_complete) {
+				static u64 restart_auto_neg[2][6][4] = {
+					[0 ... 1][0 ... 5] = { [0 ... 3] = 0 }
+				};
+				u64 now = get_timer(0);
+				u64 next_restart =
+					restart_auto_neg[node][xi.interface]
+							[index] +
+					2000;
+				if (now >= next_restart) {
+#ifdef DEBUG_BGX
+					debug("WARNING: BGX%d:%d: Waiting for autoneg to complete\n",
+					      xi.interface, index);
+#endif
+					return -1;
+				}
+
+				restart_auto_neg[node][xi.interface][index] =
+					now;
+				/* Restart auto negotiation */
+				spu_an_control.u64 = csr_rd_node(
+					node, CVMX_BGXX_SPUX_AN_CONTROL(
+						      index, xi.interface));
+				spu_an_control.s.an_restart = 1;
+				csr_wr_node(node,
+					    CVMX_BGXX_SPUX_AN_CONTROL(
+						    index, xi.interface),
+					    spu_an_control.u64);
+				return -1;
+			}
+		}
+	}
+
+	if (use_training) {
+		spu_int.u64 = csr_rd_node(
+			node, CVMX_BGXX_SPUX_INT(index, xi.interface));
+		pmd_control.u64 = csr_rd_node(
+			node,
+			CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, xi.interface));
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
+		    pmd_control.s.train_en == 0) {
+			__cvmx_bgx_start_training(node, xi.interface, index);
+			return -1;
+		}
+		cvmx_qlm_gser_errata_27882(node, qlm, index);
+		spu_int.u64 = csr_rd_node(
+			node, CVMX_BGXX_SPUX_INT(index, xi.interface));
+
+		if (spu_int.s.training_failure &&
+		    !OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+			__cvmx_bgx_restart_training(node, xi.interface, index);
+			return -1;
+		}
+		if (!spu_int.s.training_done) {
+			debug("Waiting for link training\n");
+			return -1;
+		}
+	}
+
+	/* (GSER-21957) For >= 5 Gbaud non-KR channels (DXAUI, RXAUI, XFI
+	 * and XLAUI), GSER RX equalization must be performed when the link
+	 * is receiving data for the first time
+	 */
+	if (use_training == 0) {
+		int lane = index;
+		cvmx_bgxx_spux_control1_t control1;
+
+		cmr_config.u64 = csr_rd_node(
+			node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+		control1.u64 = csr_rd_node(
+			node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+		if (control1.s.loopbck) {
+			/* Skip RX equalization when in loopback */
+		} else if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+			   mode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
+			lane = -1;
+			if (__cvmx_qlm_rx_equalization(node, qlm, lane)) {
+#ifdef DEBUG_BGX
+				debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
+				      node, xi.interface, index, qlm);
+#endif
+				return -1;
+			}
+			/* If BGX2 uses both DLMs, then configure the other DLM also. */
+			if (OCTEON_IS_MODEL(OCTEON_CN73XX) &&
+			    xi.interface == 2) {
+				if (__cvmx_qlm_rx_equalization(node, 6, lane)) {
+#ifdef DEBUG_BGX
+					debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
+					      node, xi.interface, index, qlm);
+#endif
+					return -1;
+				}
+			}
+			/* RXAUI */
+		} else if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) {
+			lane = index * 2;
+			if (OCTEON_IS_MODEL(OCTEON_CN73XX) && index >= 2 &&
+			    xi.interface == 2) {
+				lane = 0;
+			}
+			if (rgmii_first)
+				lane--;
+			if (__cvmx_qlm_rx_equalization(node, qlm, lane) ||
+			    __cvmx_qlm_rx_equalization(node, qlm, lane + 1)) {
+#ifdef DEBUG_BGX
+				debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
+				      node, xi.interface, index, qlm);
+#endif
+				return -1;
+			}
+			/* XFI */
+		} else if (cmr_config.s.lmac_type != 5) {
+			if (rgmii_first)
+				lane--;
+			if (OCTEON_IS_MODEL(OCTEON_CN73XX) && index >= 2 &&
+			    xi.interface == 2) {
+				lane = index - 2;
+			} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX) &&
+				   index >= 2) {
+				lane = index - 2;
+			}
+			if (__cvmx_qlm_rx_equalization(node, qlm, lane)) {
+#ifdef DEBUG_BGX
+				debug("%d:%d:%d: Waiting for RX Equalization on QLM%d\n",
+				      node, xi.interface, index, qlm);
+#endif
+				return -1;
+			}
+		}
+	}
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+		    cvmx_bgxx_spux_control1_t, reset, ==, 0, 10000)) {
+#ifdef DEBUG_BGX
+		debug("ERROR: %d:BGX%d:%d: PCS in reset", node, xi.interface,
+		      index);
+#endif
+		return -1;
+	}
+
+	if (use_ber) {
+		if (CVMX_WAIT_FOR_FIELD64_NODE(
+			    node,
+			    CVMX_BGXX_SPUX_BR_STATUS1(index, xi.interface),
+			    cvmx_bgxx_spux_br_status1_t, blk_lock, ==, 1,
+			    10000)) {
+#ifdef DEBUG_BGX
+			debug("ERROR: %d:BGX%d:%d: BASE-R PCS block not locked\n",
+			      node, xi.interface, index);
+
+			if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+			    mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4) {
+				cvmx_bgxx_spux_br_algn_status_t bstatus;
+
+				bstatus.u64 = csr_rd_node(
+					node, CVMX_BGXX_SPUX_BR_ALGN_STATUS(
+						      index, xi.interface));
+				debug("ERROR: %d:BGX%d:%d: LANE BLOCK_LOCK:%x LANE MARKER_LOCK:%x\n",
+				      node, xi.interface, index,
+				      bstatus.s.block_lock,
+				      bstatus.s.marker_lock);
+			}
+#endif
+			return -1;
+		}
+	} else {
+		/* (5) Check to make sure that the link appears up and stable.
+		 */
+		/* Wait for PCS to be aligned */
+		if (CVMX_WAIT_FOR_FIELD64_NODE(
+			    node, CVMX_BGXX_SPUX_BX_STATUS(index, xi.interface),
+			    cvmx_bgxx_spux_bx_status_t, alignd, ==, 1, 10000)) {
+#ifdef DEBUG_BGX
+			debug("ERROR: %d:BGX%d:%d: PCS not aligned\n", node,
+			      xi.interface, index);
+#endif
+			return -1;
+		}
+	}
+
+	if (use_ber) {
+		/* Set the BGXX_SPUX_BR_STATUS2.latched_lock bit (latching low).
+		 * This will be checked prior to enabling packet tx and rx,
+		 * ensuring block lock is sustained throughout the BGX link-up
+		 * procedure
+		 */
+		br_status2.u64 = csr_rd_node(
+			node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+		br_status2.s.latched_lock = 1;
+		csr_wr_node(node,
+			    CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface),
+			    br_status2.u64);
+	}
+
+	/* Clear rcvflt bit (latching high) and read it back */
+	spu_status2.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
+	spu_status2.s.rcvflt = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface),
+		    spu_status2.u64);
+
+	spu_status2.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
+	if (spu_status2.s.rcvflt) {
+#ifdef DEBUG_BGX
+		debug("ERROR: %d:BGX%d:%d: Receive fault, need to retry\n",
+		      node, xi.interface, index);
+#endif
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && use_training)
+			__cvmx_bgx_restart_training(node, xi.interface, index);
+		/* debug("training restarting\n"); */
+		return -1;
+	}
+
+	/* Wait for MAC RX to be ready */
+	if (CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface),
+		    cvmx_bgxx_smux_rx_ctl_t, status, ==, 0, 10000)) {
+#ifdef DEBUG_BGX
+		debug("ERROR: %d:BGX%d:%d: RX not ready\n", node, xi.interface,
+		      index);
+#endif
+		return -1;
+	}
+
+	/* Wait for BGX RX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_SMUX_CTRL(index, xi.interface),
+		    cvmx_bgxx_smux_ctrl_t, rx_idle, ==, 1, 10000)) {
+#ifdef DEBUG_BGX
+		debug("ERROR: %d:BGX%d:%d: RX not idle\n", node, xi.interface,
+		      index);
+#endif
+		return -1;
+	}
+
+	/* Wait for GMX TX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_SMUX_CTRL(index, xi.interface),
+		    cvmx_bgxx_smux_ctrl_t, tx_idle, ==, 1, 10000)) {
+#ifdef DEBUG_BGX
+		debug("ERROR: %d:BGX%d:%d: TX not idle\n", node, xi.interface,
+		      index);
+#endif
+		return -1;
+	}
+
+	/* rcvflt should still be 0 */
+	spu_status2.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS2(index, xi.interface));
+	if (spu_status2.s.rcvflt) {
+#ifdef DEBUG_BGX
+		debug("ERROR: %d:BGX%d:%d: Receive fault, need to retry\n",
+		      node, xi.interface, index);
+#endif
+		return -1;
+	}
+
+	/* Receive link is latching low. Force it high and verify it */
+	spu_status1.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
+	spu_status1.s.rcv_lnk = 1;
+	csr_wr_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface),
+		    spu_status1.u64);
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(
+		    node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface),
+		    cvmx_bgxx_spux_status1_t, rcv_lnk, ==, 1, 10000)) {
+#ifdef DEBUG_BGX
+		debug("ERROR: %d:BGX%d:%d: Receive link down\n", node,
+		      xi.interface, index);
+#endif
+		return -1;
+	}
+
+	if (use_ber) {
+		/* Clearing BER_CNT and ERR_BLKs */
+		br_status2.u64 = csr_rd_node(
+			node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+
+		/* If set, clear the LATCHED_BER by writing it to a one.  */
+		if (br_status2.s.latched_ber)
+			csr_wr_node(node,
+				    CVMX_BGXX_SPUX_BR_STATUS2(index,
+							      xi.interface),
+				    br_status2.u64);
+
+		error_debounce = get_timer(0);
+
+		/* Clear error counts */
+		err_blks = 0;
+		ber_cnt = 0;
+
+		/* Verify that the link is up and error-free for 100ms */
+		while (get_timer(error_debounce) < 100) {
+			spu_status1.u64 = csr_rd_node(
+				node,
+				CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
+			/* Checking that Receive link is still up (rcv_lnk = 1 (up)) */
+			if (!spu_status1.s.rcv_lnk) {
+#ifdef DEBUG_BGX
+				debug("ERROR: %d:BGX%d:%d: Receive link down\n",
+				      node, xi.interface, index);
+#endif
+				return -1;
+			}
+
+			/* Checking if latched_ber = 1 (BER >= 10e^4) */
+			br_status2.u64 = csr_rd_node(
+				node,
+				CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+			err_blks += br_status2.s.err_blks;
+			ber_cnt += br_status2.s.ber_cnt;
+
+			if (br_status2.s.latched_ber) {
+#ifdef DEBUG_BGX
+				debug("ERROR: %d:BGX%d:%d: BER test failed, BER >= 10e^4, need to retry\n",
+				      node, xi.interface, index);
+#endif
+				return -1;
+			}
+			/* Checking that latched BLOCK_LOCK is still set (Block Lock never lost) */
+			if (!br_status2.s.latched_lock) {
+#ifdef DEBUG_BGX
+				debug("ERROR: %d:BGX%d:%d: BASE-R PCS block lock lost, need to retry\n",
+				      node, xi.interface, index);
+#endif
+				return -1;
+			}
+
+			/* Check error counters. They must be 0, as this
+			 * error rate is much higher than 1E-12
+			 */
+			if (err_blks > 0) {
+#ifdef DEBUG_BGX
+				debug("ERROR: %d:BGX%d:%d: BASE-R errored-blocks (%llu) detected, need to retry\n",
+				      node, xi.interface, index,
+				      (unsigned long long)err_blks);
+#endif
+				return -1;
+			}
+
+			if (ber_cnt > 0) {
+#ifdef DEBUG_BGX
+				debug("ERROR: %d:BGX%d:%d: BASE-R bit-errors (%llu) detected, need to retry\n",
+				      node, xi.interface, index,
+				      (unsigned long long)ber_cnt);
+#endif
+				return -1;
+			}
+
+			udelay(1000);
+		}
+
+		/* Clear out the BGX error counters/bits. These errors are
+		 * expected as part of the BGX link up procedure
+		 */
+		/* BIP_ERR counters clear as part of this read */
+		csr_rd_node(node,
+			    CVMX_BGXX_SPUX_BR_BIP_ERR_CNT(index, xi.interface));
+		/* BER_CNT and ERR_BLKs clear as part of this read */
+		br_status2.u64 = csr_rd_node(
+			node, CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+	}
+
+	/* (7) Enable packet transmit and receive */
+	spu_misc_control.u64 = csr_rd_node(
+		node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+	spu_misc_control.s.rx_packet_dis = 0;
+	csr_wr_node(node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface),
+		    spu_misc_control.u64);
+
+	if (debug)
+		debug("%s: Enabling tx and rx data packets\n", __func__);
+	cmr_config.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+	cmr_config.s.data_pkt_tx_en = 1;
+	cmr_config.s.data_pkt_rx_en = 1;
+	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+		    cmr_config.u64);
+	return 0;
+}
+
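+/**
+ * @INTERNAL
+ * Bring up and enable all ports on a XAUI-family interface. Each
+ * port is initialized and its link bring-up is retried up to 5
+ * times, since RX equalization or autonegotiation can take a while.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero
+ */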
+int __cvmx_helper_bgx_xaui_enable(int xiface)
+{
+	int index;
+	cvmx_helper_interface_mode_t mode;
+	int num_ports = cvmx_helper_ports_on_interface(xiface);
+
+	for (index = 0; index < num_ports; index++) {
+		int res;
+		int xipd_port = cvmx_helper_get_ipd_port(xiface, index);
+		int phy_pres;
+		struct cvmx_xiface xi =
+			cvmx_helper_xiface_to_node_interface(xiface);
+		static int count
+			[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
+			[CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] = {
+				[0 ... CVMX_MAX_NODES -
+				 1][0 ... CVMX_HELPER_MAX_IFACE -
+				    1] = { [0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE -
+					    1] = 0 }
+			};
+
+		mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+		/* Set disparity for RXAUI interface as described in the
+		 * Marvell RXAUI Interface specification.
+		 */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
+		    (cvmx_helper_get_port_phy_present(xiface, index)))
+			phy_pres = 1;
+		else
+			phy_pres = 0;
+		__cvmx_helper_bgx_port_init(xipd_port, phy_pres);
+
+retry_link:
+		res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
+		/* RX equalization or autonegotiation can take a little
+		 * longer, so retry the link up to 5 times for now
+		 */
+		if (res == -1 && count[xi.node][xi.interface][index] < 5) {
+			count[xi.node][xi.interface][index]++;
+#ifdef DEBUG_BGX
+			debug("%d:BGX(%d,%d): Failed to get link, retrying\n",
+			      xi.node, xi.interface, index);
+#endif
+			goto retry_link;
+		}
+
+		if (res == -1) {
+#ifdef DEBUG_BGX
+			debug("%d:BGX(%d,%d): Failed to get link\n", xi.node,
+			      xi.interface, index);
+#endif
+			continue;
+		}
+	}
+	return 0;
+}
+
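+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port on a XAUI-family
+ * interface. If the link is down, an attempt is made to initialize
+ * it again.
+ *
+ * @param xipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */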
+cvmx_helper_link_info_t __cvmx_helper_bgx_xaui_link_get(int xipd_port)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	int index = cvmx_helper_get_interface_index_num(xp.port);
+	cvmx_bgxx_spux_status1_t spu_status1;
+	cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
+	cvmx_bgxx_smux_rx_ctl_t smu_rx_ctl;
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	cvmx_helper_link_info_t result;
+	cvmx_helper_interface_mode_t mode;
+	cvmx_bgxx_spux_misc_control_t spu_misc_control;
+	cvmx_bgxx_spux_br_status2_t br_status2;
+
+	result.u64 = 0;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+	if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+		return __cvmx_helper_bgx_sgmii_link_get(xipd_port);
+
+	/* Reading current rx/tx link status */
+	spu_status1.u64 = csr_rd_node(
+		xi.node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
+	smu_tx_ctl.u64 = csr_rd_node(
+		xi.node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
+	smu_rx_ctl.u64 = csr_rd_node(
+		xi.node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface));
+	/* Reading tx/rx packet enables */
+	cmr_config.u64 = csr_rd_node(
+		xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+	spu_misc_control.u64 = csr_rd_node(
+		xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+
+	if (smu_tx_ctl.s.ls == 0 && smu_rx_ctl.s.status == 0 &&
+	    cmr_config.s.data_pkt_tx_en == 1 &&
+	    cmr_config.s.data_pkt_rx_en == 1 &&
+	    spu_misc_control.s.rx_packet_dis == 0 &&
+	    spu_status1.s.rcv_lnk) {
+		int lanes;
+		int qlm = cvmx_qlm_lmac(xiface, index);
+		u64 speed;
+
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+			speed = cvmx_qlm_get_gbaud_mhz_node(xi.node, qlm);
+		else
+			speed = cvmx_qlm_get_gbaud_mhz(qlm);
+
+		cmr_config.u64 = csr_rd_node(
+			xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
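+		/* Convert the per-lane baud rate to a data rate: XAUI and
+		 * RXAUI use 8b/10b line coding while XFI and XLAUI use
+		 * 64b/66b; the +5 and +33 terms round to the nearest Mbps
+		 */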
+		switch (cmr_config.s.lmac_type) {
+		default:
+		case 1: // XAUI
+			speed = (speed * 8 + 5) / 10;
+			lanes = 4;
+			break;
+		case 2: // RXAUI
+			speed = (speed * 8 + 5) / 10;
+			lanes = 2;
+			break;
+		case 3: // XFI
+			speed = (speed * 64 + 33) / 66;
+			lanes = 1;
+			break;
+		case 4: // XLAUI
+			/* Adjust the speed when XLAUI is configured at 6.250Gbps */
+			if (speed == 6250)
+				speed = 6445;
+			speed = (speed * 64 + 33) / 66;
+			lanes = 4;
+			break;
+		}
+
+		if (debug)
+			debug("%s: baud: %llu, lanes: %d\n", __func__,
+			      (unsigned long long)speed, lanes);
+		speed *= lanes;
+		result.s.speed = speed;
+	} else {
+		int res;
+		u64 err_blks = 0;
+		u64 ber_cnt = 0;
+
+		/* Check for err_blk and ber errors if 10G or 40G */
+		if ((mode == CVMX_HELPER_INTERFACE_MODE_XFI ||
+		     mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+		     mode == CVMX_HELPER_INTERFACE_MODE_10G_KR ||
+		     mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4)) {
+			br_status2.u64 = csr_rd_node(
+				xi.node,
+				CVMX_BGXX_SPUX_BR_STATUS2(index, xi.interface));
+			err_blks = br_status2.s.err_blks;
+			ber_cnt = br_status2.s.ber_cnt;
+		}
+
+		/* Checking if the link is up and error-free but we are receiving remote-faults */
+		if (smu_tx_ctl.s.ls != 1 && smu_rx_ctl.s.status != 1 &&
+		    cmr_config.s.data_pkt_tx_en == 1 &&
+		    cmr_config.s.data_pkt_rx_en == 1 &&
+		    spu_misc_control.s.rx_packet_dis == 0 &&
+		    err_blks == 0 && ber_cnt == 0 &&
+		    spu_status1.s.rcv_lnk) {
+			result.s.init_success = 1;
+#ifdef DEBUG_BGX
+			debug("Receiving remote-fault ordered sets %d:BGX(%d,%d)\n",
+			      xi.node, xi.interface, index);
+#endif
+
+		} else {
+			res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
+			if (res == -1) {
+#ifdef DEBUG_BGX
+				debug("Failed to get %d:BGX(%d,%d) link\n",
+				      xi.node, xi.interface, index);
+#endif
+			} else {
+#ifdef DEBUG_BGX
+				debug("Link initialization successful %d:BGX(%d,%d)\n",
+				      xi.node, xi.interface, index);
+#endif
+				result.s.init_success = 1;
+			}
+		}
+	}
+
+	return result;
+}
+
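+/**
+ * @INTERNAL
+ * Configure a XAUI-family IPD/PKO port for the specified link state.
+ * If the link should be up but is not, the bring-up sequence is run
+ * again.
+ *
+ * @param xipd_port IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */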
+int __cvmx_helper_bgx_xaui_link_set(int xipd_port,
+				    cvmx_helper_link_info_t link_info)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	int node = xi.node;
+	int index = cvmx_helper_get_interface_index_num(xp.port);
+	cvmx_bgxx_smux_tx_ctl_t smu_tx_ctl;
+	cvmx_bgxx_smux_rx_ctl_t smu_rx_ctl;
+	cvmx_bgxx_spux_status1_t spu_status1;
+	cvmx_helper_interface_mode_t mode;
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	cvmx_bgxx_spux_misc_control_t spu_misc_control;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+	if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+		return __cvmx_helper_bgx_sgmii_link_set(xipd_port, link_info);
+
+	/* Reading current rx/tx link status */
+	smu_tx_ctl.u64 =
+		csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, xi.interface));
+	smu_rx_ctl.u64 =
+		csr_rd_node(node, CVMX_BGXX_SMUX_RX_CTL(index, xi.interface));
+	spu_status1.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_STATUS1(index, xi.interface));
+	/* Reading tx/rx packet enables */
+	cmr_config.u64 = csr_rd_node(
+		xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+	spu_misc_control.u64 = csr_rd_node(
+		xi.node, CVMX_BGXX_SPUX_MISC_CONTROL(index, xi.interface));
+
+	/* If the link shouldn't be up, then just return */
+	if (!link_info.s.link_up)
+		return 0;
+
+	/* Do nothing if both RX and TX are happy and packet
+	 * transmission/reception is enabled
+	 */
+	if (smu_tx_ctl.s.ls == 0 && smu_rx_ctl.s.status == 0 &&
+	    cmr_config.s.data_pkt_tx_en == 1 &&
+	    cmr_config.s.data_pkt_rx_en == 1 &&
+	    spu_misc_control.s.rx_packet_dis == 0 && spu_status1.s.rcv_lnk)
+		return 0;
+
+	/* Bring the link up */
+	return __cvmx_helper_bgx_xaui_link_init(index, xiface);
+}
+
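+/**
+ * @INTERNAL
+ * Configure a XAUI-family port for internal and/or external
+ * loopback, then re-run the link bring-up sequence.
+ *
+ * @param xipd_port IPD/PKO port to loopback
+ * @param enable_internal
+ *                 Non zero if you want internal loopback
+ * @param enable_external
+ *                 Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure
+ */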
+int __cvmx_helper_bgx_xaui_configure_loopback(int xipd_port,
+					      int enable_internal,
+					      int enable_external)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+	int node = xi.node;
+	int index = cvmx_helper_get_interface_index_num(xp.port);
+	cvmx_bgxx_spux_control1_t spu_control1;
+	cvmx_bgxx_smux_ext_loopback_t smu_ext_loopback;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	/* INT_BEAT_GEN must be set for loopback if the QLMs are not clocked.
+	 * Set it whenever we use internal loopback
+	 */
+	if (enable_internal) {
+		cvmx_bgxx_cmrx_config_t cmr_config;
+
+		cmr_config.u64 = csr_rd_node(
+			node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+		cmr_config.s.int_beat_gen = 1;
+		csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+			    cmr_config.u64);
+	}
+	/* Set the internal loop */
+	spu_control1.u64 =
+		csr_rd_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface));
+	spu_control1.s.loopbck = enable_internal;
+	csr_wr_node(node, CVMX_BGXX_SPUX_CONTROL1(index, xi.interface),
+		    spu_control1.u64);
+	/* Set the external loop */
+	smu_ext_loopback.u64 = csr_rd_node(
+		node, CVMX_BGXX_SMUX_EXT_LOOPBACK(index, xi.interface));
+	smu_ext_loopback.s.en = enable_external;
+	csr_wr_node(node, CVMX_BGXX_SMUX_EXT_LOOPBACK(index, xi.interface),
+		    smu_ext_loopback.u64);
+
+	return __cvmx_helper_bgx_xaui_link_init(index, xiface);
+}
+
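+/**
+ * @INTERNAL
+ * Bring up and enable all ports on an interface whose ports use a
+ * mix of SGMII/RGMII and XAUI-family modes, dispatching to the
+ * appropriate init code for each port.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero
+ */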
+int __cvmx_helper_bgx_mixed_enable(int xiface)
+{
+	int index;
+	int num_ports = cvmx_helper_ports_on_interface(xiface);
+	cvmx_helper_interface_mode_t mode;
+
+	for (index = 0; index < num_ports; index++) {
+		int xipd_port, phy_pres = 0;
+
+		if (!cvmx_helper_is_port_valid(xiface, index))
+			continue;
+
+		mode = cvmx_helper_bgx_get_mode(xiface, index);
+
+		xipd_port = cvmx_helper_get_ipd_port(xiface, index);
+
+		if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
+		    (cvmx_helper_get_port_phy_present(xiface, index)))
+			phy_pres = 1;
+
+		if (__cvmx_helper_bgx_port_init(xipd_port, phy_pres))
+			continue;
+
+		/* For RGMII interfaces, initialize the link after PKO is set up */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+			continue;
+		/* Call SGMII init code for lmac_type = 0|5 */
+		else if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII) {
+			int do_link_set = 1;
+
+			if (do_link_set)
+				__cvmx_helper_bgx_sgmii_link_set(
+					xipd_port,
+					__cvmx_helper_bgx_sgmii_link_get(
+						xipd_port));
+			/* All other LMAC types call the XAUI init code */
+		} else {
+			int res;
+			struct cvmx_xiface xi =
+				cvmx_helper_xiface_to_node_interface(xiface);
+			static int count
+				[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
+				[CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] = {
+					[0 ... CVMX_MAX_NODES -
+					 1][0 ... CVMX_HELPER_MAX_IFACE -
+					    1] = { [0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE -
+						    1] = 0 }
+				};
+
+retry_link:
+			res = __cvmx_helper_bgx_xaui_link_init(index, xiface);
+			/* RX equalization or autonegotiation can take a little
+			 * longer, so retry the link up to 5 times for now
+			 */
+			if (res == -1 &&
+			    count[xi.node][xi.interface][index] < 5) {
+				count[xi.node][xi.interface][index]++;
+				goto retry_link;
+			}
+
+			if (res == -1) {
+#ifdef DEBUG_BGX
+				debug("Failed to get %d:BGX(%d,%d) link\n",
+				      xi.node, xi.interface, index);
+#endif
+				continue;
+			}
+		}
+	}
+	return 0;
+}
+
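+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port on a mixed-mode
+ * interface, dispatching to the SGMII or XAUI handler based on the
+ * port mode.
+ *
+ * @param xipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */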
+cvmx_helper_link_info_t __cvmx_helper_bgx_mixed_link_get(int xipd_port)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	int index = cvmx_helper_get_interface_index_num(xipd_port);
+	cvmx_helper_interface_mode_t mode;
+
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+	if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+	    mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+		return __cvmx_helper_bgx_sgmii_link_get(xipd_port);
+	else
+		return __cvmx_helper_bgx_xaui_link_get(xipd_port);
+}
+
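+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port on a mixed-mode interface for the
+ * specified link state, dispatching to the SGMII or XAUI handler
+ * based on the port mode.
+ *
+ * @param xipd_port  IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */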
+int __cvmx_helper_bgx_mixed_link_set(int xipd_port,
+				     cvmx_helper_link_info_t link_info)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	int index = cvmx_helper_get_interface_index_num(xipd_port);
+	cvmx_helper_interface_mode_t mode;
+
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+	if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+	    mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+		return __cvmx_helper_bgx_sgmii_link_set(xipd_port, link_info);
+	else
+		return __cvmx_helper_bgx_xaui_link_set(xipd_port, link_info);
+}
+
+int __cvmx_helper_bgx_mixed_configure_loopback(int xipd_port,
+					       int enable_internal,
+					       int enable_external)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	int index = cvmx_helper_get_interface_index_num(xipd_port);
+	cvmx_helper_interface_mode_t mode;
+
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+	if (mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+	    mode == CVMX_HELPER_INTERFACE_MODE_RGMII)
+		return __cvmx_helper_bgx_sgmii_configure_loopback(
+			xipd_port, enable_internal, enable_external);
+	else
+		return __cvmx_helper_bgx_xaui_configure_loopback(
+			xipd_port, enable_internal, enable_external);
+}
+
+/**
+ * @INTERNAL
+ * Configure Priority-Based Flow Control (a.k.a. PFC/CBFC)
+ * on a specific BGX interface/port.
+ */
+void __cvmx_helper_bgx_xaui_config_pfc(unsigned int node,
+				       unsigned int interface,
+				       unsigned int index, bool pfc_enable)
+{
+	int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_bgxx_smux_cbfc_ctl_t cbfc_ctl;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	cbfc_ctl.u64 =
+		csr_rd_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, xi.interface));
+
+	/* Enable all PFC controls if requested */
+	cbfc_ctl.s.rx_en = pfc_enable;
+	cbfc_ctl.s.tx_en = pfc_enable;
+	if (debug)
+		debug("%s: CVMX_BGXX_SMUX_CBFC_CTL(%d,%d)=%#llx\n", __func__,
+		      index, xi.interface, (unsigned long long)cbfc_ctl.u64);
+	csr_wr_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, xi.interface),
+		    cbfc_ctl.u64);
+}
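+
+/*
+ * Illustrative usage sketch (values assumed, not taken from this patch):
+ * enabling PFC on node 0, BGX interface 0, LMAC 0 is a single call once
+ * the port has been brought up:
+ *
+ *	__cvmx_helper_bgx_xaui_config_pfc(0, 0, 0, true);
+ */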
+
+/**
+ * Function to control the generation of FCS and padding by the BGX
+ *
+ * @param node       node number
+ * @param interface  interface number
+ * @param index      port index on the interface
+ * @param fcs_enable true to append an FCS to transmitted packets
+ * @param pad_enable true to pad short transmitted packets to the minimum size
+ */
+void cvmx_helper_bgx_tx_options(unsigned int node, unsigned int interface,
+				unsigned int index, bool fcs_enable,
+				bool pad_enable)
+{
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	cvmx_bgxx_gmp_gmi_txx_append_t gmp_txx_append;
+	cvmx_bgxx_gmp_gmi_txx_min_pkt_t gmp_min_pkt;
+	cvmx_bgxx_smux_tx_min_pkt_t smu_min_pkt;
+	cvmx_bgxx_smux_tx_append_t smu_tx_append;
+	int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	if (!cvmx_helper_is_port_valid(xiface, index))
+		return;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d, fcs: %s, pad: %s\n", __func__,
+		      xi.node, xi.interface, index,
+		      fcs_enable ? "true" : "false",
+		      pad_enable ? "true" : "false");
+
+	cmr_config.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+
+	(void)cmr_config; /* In case we need LMAC_TYPE later */
+
+	/* Setting options for both BGX subsystems, regardless of LMAC type */
+
+	/* Set GMP (SGMII) Tx options */
+	gmp_min_pkt.u64 = 0;
+	/* per HRM Sec 34.3.4.4 */
+	gmp_min_pkt.s.min_size = 59;
+	csr_wr_node(node, CVMX_BGXX_GMP_GMI_TXX_MIN_PKT(index, xi.interface),
+		    gmp_min_pkt.u64);
+	gmp_txx_append.u64 = csr_rd_node(
+		node, CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface));
+	gmp_txx_append.s.fcs = fcs_enable;
+	gmp_txx_append.s.pad = pad_enable;
+	csr_wr_node(node, CVMX_BGXX_GMP_GMI_TXX_APPEND(index, xi.interface),
+		    gmp_txx_append.u64);
+
+	/* Set SMUX (XAUI/XFI) Tx options */
+	/* HRM Sec 33.3.4.3 should read 64 */
+	smu_min_pkt.u64 = 0;
+	smu_min_pkt.s.min_size = 0x40;
+	csr_wr_node(node, CVMX_BGXX_SMUX_TX_MIN_PKT(index, xi.interface),
+		    smu_min_pkt.u64);
+	smu_tx_append.u64 = csr_rd_node(
+		node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface));
+	smu_tx_append.s.fcs_d = fcs_enable; /* Set data-packet FCS */
+	smu_tx_append.s.pad = pad_enable;
+	csr_wr_node(node, CVMX_BGXX_SMUX_TX_APPEND(index, xi.interface),
+		    smu_tx_append.u64);
+}
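+
+/*
+ * Illustrative usage sketch (values assumed): a typical caller enables
+ * both FCS generation and padding on transmit for node 0, interface 0,
+ * port 0:
+ *
+ *	cvmx_helper_bgx_tx_options(0, 0, 0, true, true);
+ */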
+
+/**
+ * Set mac for the ipd_port
+ *
+ * @param xipd_port ipd_port to set the mac
+ * @param bcst      If set, accept all broadcast packets
+ * @param mcst      Multicast mode
+ *		    0 = Force reject all multicast packets
+ *		    1 = Force accept all multicast packets
+ *		    2 = use the address filter CAM.
+ * @param mac       mac address for the ipd_port, or 0 to disable MAC filtering
+ */
+void cvmx_helper_bgx_set_mac(int xipd_port, int bcst, int mcst, u64 mac)
+{
+	int xiface = cvmx_helper_get_interface_num(xipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int node = xi.node;
+	int index;
+	cvmx_bgxx_cmr_rx_adrx_cam_t adr_cam;
+	cvmx_bgxx_cmrx_rx_adr_ctl_t adr_ctl;
+	cvmx_bgxx_cmrx_config_t cmr_config;
+	int saved_state_tx, saved_state_rx;
+
+	index = cvmx_helper_get_interface_index_num(xipd_port);
+
+	if (!cvmx_helper_is_port_valid(xiface, index))
+		return;
+
+	if (debug)
+		debug("%s: interface %u:%d/%d\n", __func__, xi.node,
+		      xi.interface, index);
+
+	cmr_config.u64 =
+		csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+	saved_state_tx = cmr_config.s.data_pkt_tx_en;
+	saved_state_rx = cmr_config.s.data_pkt_rx_en;
+	cmr_config.s.data_pkt_tx_en = 0;
+	cmr_config.s.data_pkt_rx_en = 0;
+	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+		    cmr_config.u64);
+
+	/* Set the mac */
+	adr_cam.u64 = 0;
+	adr_cam.s.id = index;
+
+	if (mac != 0ull)
+		adr_cam.s.en = 1;
+	adr_cam.s.adr = mac;
+
+	csr_wr_node(node, CVMX_BGXX_CMR_RX_ADRX_CAM(index * 8, xi.interface),
+		    adr_cam.u64);
+
+	adr_ctl.u64 = csr_rd_node(
+		node, CVMX_BGXX_CMRX_RX_ADR_CTL(index, xi.interface));
+	if (mac != 0ull)
+		adr_ctl.s.cam_accept =
+			1; /* Accept the packet on DMAC CAM address */
+	else
+		adr_ctl.s.cam_accept = 0; /* No filtering, promiscuous */
+
+	adr_ctl.s.mcst_mode = mcst;   /* Use the address filter CAM */
+	adr_ctl.s.bcst_accept = bcst; /* Accept all broadcast packets */
+	csr_wr_node(node, CVMX_BGXX_CMRX_RX_ADR_CTL(index, xi.interface),
+		    adr_ctl.u64);
+	/* Set SMAC for PAUSE frames */
+	csr_wr_node(node, CVMX_BGXX_GMP_GMI_SMACX(index, xi.interface), mac);
+
+	/* Restore back the interface state */
+	cmr_config.s.data_pkt_tx_en = saved_state_tx;
+	cmr_config.s.data_pkt_rx_en = saved_state_rx;
+	csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+		    cmr_config.u64);
+
+	/* Wait 100ms after bringing up the link to give the PHY some time */
+	if (cmr_config.s.enable) {
+		cvmx_helper_interface_mode_t mode;
+
+		mode = cvmx_helper_bgx_get_mode(xiface, index);
+		__cvmx_helper_bgx_interface_enable_delay(mode);
+	}
+}
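+
+/*
+ * Illustrative usage sketch (MAC value assumed): program the unicast MAC
+ * for an ipd port, accepting broadcasts and filtering multicast through
+ * the address filter CAM (mcst = 2, per the comment above):
+ *
+ *	cvmx_helper_bgx_set_mac(xipd_port, 1, 2, 0x0002f7000102ull);
+ */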
+
+/**
+ * Disables the sending of flow control (pause) frames on the specified
+ * BGX port(s).
+ *
+ * @param xiface Which xiface
+ * @param port_mask Mask (4bits) of which ports on the interface to disable
+ *                  backpressure on.
+ *                  1 => disable backpressure
+ *                  0 => enable backpressure
+ *
+ * @return 0 on success
+ *         -1 on error
+ *
+ * FIXME: Should change the API to handle a single port in every
+ * invocation, for consistency with other API calls.
+ */
+int cvmx_bgx_set_backpressure_override(int xiface, unsigned int port_mask)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_bgxx_cmr_rx_ovr_bp_t rx_ovr_bp;
+	int node = xi.node;
+
+	if (xi.interface >= CVMX_HELPER_MAX_GMX)
+		return 0;
+
+	if (debug)
+		debug("%s: interface %u:%d port_mask=%#x\n", __func__, xi.node,
+		      xi.interface, port_mask);
+
+	/* Check for valid arguments */
+	rx_ovr_bp.u64 = 0;
+	rx_ovr_bp.s.en = port_mask; /* Per port Enable back pressure override */
+	rx_ovr_bp.s.ign_fifo_bp =
+		port_mask; /* Ignore the RX FIFO full when computing BP */
+
+	csr_wr_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), rx_ovr_bp.u64);
+	return 0;
+}
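+
+/*
+ * Illustrative usage sketch: disabling backpressure on all four LMACs of
+ * an interface sets all mask bits (1 => disable backpressure):
+ *
+ *	cvmx_bgx_set_backpressure_override(xiface, 0xf);
+ */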
+
+int cvmx_bgx_set_flowctl_mode(int xipd_port, cvmx_qos_proto_t qos,
+			      cvmx_qos_pkt_mode_t fc_mode)
+{
+	int node, xiface, iface, index, mode;
+	struct cvmx_xiface xi;
+	const struct {
+		int bck;
+		int drp;
+	} fcmode[4] = { [CVMX_QOS_PKT_MODE_HWONLY] = { 1, 1 },
+			[CVMX_QOS_PKT_MODE_SWONLY] = { 0, 0 },
+			[CVMX_QOS_PKT_MODE_HWSW] = { 1, 0 },
+			[CVMX_QOS_PKT_MODE_DROP] = { 0, 1 } };
+
+	xiface = cvmx_helper_get_interface_num(xipd_port);
+	xi = cvmx_helper_xiface_to_node_interface(xiface);
+	node = xi.node;
+	iface = xi.interface;
+
+	if (xi.interface >= CVMX_HELPER_MAX_GMX)
+		return 0;
+
+	index = cvmx_helper_get_interface_index_num(xipd_port);
+	mode = cvmx_helper_bgx_get_mode(xiface, index);
+	switch (mode) {
+	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XFI:
+	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XAUI: {
+		cvmx_bgxx_smux_tx_ctl_t txctl;
+		cvmx_bgxx_smux_cbfc_ctl_t cbfc;
+		cvmx_bgxx_smux_rx_frm_ctl_t frmctl;
+		cvmx_bgxx_smux_hg2_control_t hg2ctl;
+
+		txctl.u64 =
+			csr_rd_node(node, CVMX_BGXX_SMUX_TX_CTL(index, iface));
+		cbfc.u64 = csr_rd_node(node,
+				       CVMX_BGXX_SMUX_CBFC_CTL(index, iface));
+		frmctl.u64 = csr_rd_node(
+			node, CVMX_BGXX_SMUX_RX_FRM_CTL(index, iface));
+		hg2ctl.u64 = csr_rd_node(
+			node, CVMX_BGXX_SMUX_HG2_CONTROL(index, iface));
+		switch (qos) {
+		case CVMX_QOS_PROTO_PAUSE:
+			cbfc.u64 = 0;
+			hg2ctl.u64 = 0;
+			frmctl.s.ctl_bck = fcmode[fc_mode].bck;
+			frmctl.s.ctl_drp = fcmode[fc_mode].drp;
+			frmctl.s.ctl_mcst = 1;
+			txctl.s.l2p_bp_conv = 1;
+			break;
+		case CVMX_QOS_PROTO_PFC:
+			hg2ctl.u64 = 0;
+			hg2ctl.s.logl_en = 0xff;
+			frmctl.s.ctl_bck = fcmode[fc_mode].bck;
+			frmctl.s.ctl_drp = fcmode[fc_mode].drp;
+			frmctl.s.ctl_mcst = 1;
+			cbfc.s.bck_en = fcmode[fc_mode].bck;
+			cbfc.s.drp_en = fcmode[fc_mode].drp;
+			cbfc.s.phys_en = 0;
+			cbfc.s.logl_en = 0xff;
+			cbfc.s.tx_en = 1;
+			cbfc.s.rx_en = 1;
+			break;
+		case CVMX_QOS_PROTO_NONE:
+			cbfc.u64 = 0;
+			hg2ctl.u64 = 0;
+			frmctl.s.ctl_bck = fcmode[CVMX_QOS_PKT_MODE_DROP].bck;
+			frmctl.s.ctl_drp = fcmode[CVMX_QOS_PKT_MODE_DROP].drp;
+			txctl.s.l2p_bp_conv = 0;
+			break;
+		default:
+			break;
+		}
+		csr_wr_node(node, CVMX_BGXX_SMUX_CBFC_CTL(index, iface),
+			    cbfc.u64);
+		csr_wr_node(node, CVMX_BGXX_SMUX_RX_FRM_CTL(index, iface),
+			    frmctl.u64);
+		csr_wr_node(node, CVMX_BGXX_SMUX_HG2_CONTROL(index, iface),
+			    hg2ctl.u64);
+		csr_wr_node(node, CVMX_BGXX_SMUX_TX_CTL(index, iface),
+			    txctl.u64);
+		break;
+	}
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_RGMII: {
+		cvmx_bgxx_gmp_gmi_rxx_frm_ctl_t gmi_frmctl;
+
+		gmi_frmctl.u64 = csr_rd_node(
+			node, CVMX_BGXX_GMP_GMI_RXX_FRM_CTL(index, iface));
+		switch (qos) {
+		case CVMX_QOS_PROTO_PAUSE:
+			gmi_frmctl.s.ctl_bck = fcmode[fc_mode].bck;
+			gmi_frmctl.s.ctl_drp = fcmode[fc_mode].drp;
+			gmi_frmctl.s.ctl_mcst = 1;
+			break;
+		case CVMX_QOS_PROTO_NONE:
+			gmi_frmctl.s.ctl_bck =
+				fcmode[CVMX_QOS_PKT_MODE_DROP].bck;
+			gmi_frmctl.s.ctl_drp =
+				fcmode[CVMX_QOS_PKT_MODE_DROP].drp;
+			break;
+		default:
+			break;
+		}
+		csr_wr_node(node, CVMX_BGXX_GMP_GMI_RXX_FRM_CTL(index, iface),
+			    gmi_frmctl.u64);
+	}
+	} /*switch*/
+
+	return 0;
+}
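+
+/*
+ * Illustrative usage sketch: enable standard PAUSE flow control handled
+ * purely in hardware (both sending backpressure and dropping on receive,
+ * per the fcmode table above) on an ipd port:
+ *
+ *	cvmx_bgx_set_flowctl_mode(xipd_port, CVMX_QOS_PROTO_PAUSE,
+ *				  CVMX_QOS_PKT_MODE_HWONLY);
+ */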
diff --git a/arch/mips/mach-octeon/cvmx-helper-board.c b/arch/mips/mach-octeon/cvmx-helper-board.c
new file mode 100644
index 0000000..6dcc4e5
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-board.c
@@ -0,0 +1,1824 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ */
+
+#include <i2c.h>
+#include <log.h>
+#include <malloc.h>
+#include <net.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon_fdt.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-gpio.h>
+
+#include <mach/cvmx-smix-defs.h>
+#include <mach/cvmx-mdio.h>
+#include <mach/cvmx-qlm.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static bool sfp_parsed;
+
+static int __cvmx_helper_78xx_parse_phy(struct cvmx_phy_info *phy_info,
+					int ipd_port);
+static int __get_phy_info_from_dt(cvmx_phy_info_t *phy_info, int ipd_port);
+
+/**
+ * Writes to a Microsemi VSC7224 16-bit register
+ *
+ * @param[in]	i2c_bus	i2c bus data structure (must be enabled)
+ * @param	addr	Address of VSC7224 on the i2c bus
+ * @param	reg	8-bit register number to write to
+ * @param	val	16-bit value to write
+ *
+ * @return	0 for success
+ */
+static int cvmx_write_vsc7224_reg(const struct cvmx_fdt_i2c_bus_info *i2c_bus,
+				  u8 addr, u8 reg, u16 val)
+{
+	struct udevice *dev;
+	u8 buffer[2];
+	int ret;
+
+	ret = i2c_get_chip(i2c_bus->i2c_bus, addr, 1, &dev);
+	if (ret) {
+		debug("Cannot find I2C device: %d\n", ret);
+		return -1;
+	}
+
+	buffer[0] = val >> 8;
+	buffer[1] = val & 0xff;
+	ret = dm_i2c_write(dev, reg, buffer, 2);
+	if (ret) {
+		debug("Cannot write I2C device: %d\n", ret);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Reads from a Microsemi VSC7224 16-bit register
+ *
+ * @param[in]	i2c_bus	i2c bus data structure (must be enabled)
+ * @param	addr	Address of VSC7224 on the i2c bus
+ * @param	reg	8-bit register number to read from
+ *
+ * @return	16-bit register value, or negative value on error
+ */
+static int cvmx_read_vsc7224_reg(const struct cvmx_fdt_i2c_bus_info *i2c_bus,
+				 u8 addr, u8 reg)
+{
+	struct udevice *dev;
+	u8 buffer[2];
+	int ret;
+
+	ret = i2c_get_chip(i2c_bus->i2c_bus, addr, 1, &dev);
+	if (ret) {
+		debug("Cannot find I2C device: %d\n", ret);
+		return -1;
+	}
+
+	ret = dm_i2c_read(dev, reg, buffer, 2);
+	if (ret) {
+		debug("Cannot read I2C device: %d\n", ret);
+		return -1;
+	}
+
+	return (buffer[0] << 8) | buffer[1];
+}
+
+/**
+ * Function called whenever mod_abs/mod_prs has changed for Microsemi VSC7224
+ *
+ * @param	sfp	pointer to SFP data structure
+ * @param	val	1 if absent, 0 if present, otherwise not set
+ * @param	data	user-defined data
+ *
+ * @return	0 for success, -1 on error
+ */
+int cvmx_sfp_vsc7224_mod_abs_changed(struct cvmx_fdt_sfp_info *sfp, int val,
+				     void *data)
+{
+	int err;
+	struct cvmx_sfp_mod_info *mod_info;
+	int length;
+	struct cvmx_vsc7224 *vsc7224;
+	struct cvmx_vsc7224_chan *vsc7224_chan;
+	struct cvmx_vsc7224_tap *taps, *match = NULL;
+	int i;
+
+	debug("%s(%s, %d, %p): Module %s\n", __func__, sfp->name, val, data,
+	      val ? "absent" : "present");
+	if (val)
+		return 0;
+
+	/* We're here if we detect that the module is now present */
+	err = cvmx_sfp_read_i2c_eeprom(sfp);
+	if (err) {
+		debug("%s: Error reading the SFP module eeprom for %s\n",
+		      __func__, sfp->name);
+		return err;
+	}
+	mod_info = &sfp->sfp_info;
+
+	if (!mod_info->valid || !sfp->valid) {
+		debug("%s: Module data is invalid\n", __func__);
+		return -1;
+	}
+
+	vsc7224_chan = sfp->vsc7224_chan;
+	while (vsc7224_chan) {
+		/* We don't do any rx tuning */
+		if (!vsc7224_chan->is_tx) {
+			vsc7224_chan = vsc7224_chan->next;
+			continue;
+		}
+
+		/* Walk through all the channels */
+		taps = vsc7224_chan->taps;
+		if (mod_info->limiting)
+			length = 0;
+		else
+			length = mod_info->max_copper_cable_len;
+		debug("%s: limiting: %d, length: %d\n", __func__,
+		      mod_info->limiting, length);
+
+		/* Find a matching length in the taps table */
+		for (i = 0; i < vsc7224_chan->num_taps; i++) {
+			if (length >= taps->len)
+				match = taps;
+			taps++;
+		}
+		if (!match) {
+			debug("%s(%s, %d, %p): Error: no matching tap for length %d\n",
+			      __func__, sfp->name, val, data, length);
+			return -1;
+		}
+		debug("%s(%s): Applying %cx taps to vsc7224 %s:%d for cable length %d+\n",
+		      __func__, sfp->name, vsc7224_chan->is_tx ? 't' : 'r',
+		      vsc7224_chan->vsc7224->name, vsc7224_chan->lane,
+		      match->len);
+		/* Program the taps */
+		vsc7224 = vsc7224_chan->vsc7224;
+		cvmx_write_vsc7224_reg(vsc7224->i2c_bus, vsc7224->i2c_addr,
+				       0x7f, vsc7224_chan->lane);
+		if (!vsc7224_chan->maintap_disable)
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x99,
+					       match->main_tap);
+		if (!vsc7224_chan->pretap_disable)
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x9a,
+					       match->pre_tap);
+		if (!vsc7224_chan->posttap_disable)
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x9b,
+					       match->post_tap);
+
+		/* Re-use val and disable taps if needed */
+		if (vsc7224_chan->maintap_disable ||
+		    vsc7224_chan->pretap_disable ||
+		    vsc7224_chan->posttap_disable) {
+			val = cvmx_read_vsc7224_reg(vsc7224->i2c_bus,
+						    vsc7224->i2c_addr, 0x97);
+			if (vsc7224_chan->maintap_disable)
+				val |= 0x800;
+			if (vsc7224_chan->pretap_disable)
+				val |= 0x1000;
+			if (vsc7224_chan->posttap_disable)
+				val |= 0x400;
+			cvmx_write_vsc7224_reg(vsc7224->i2c_bus,
+					       vsc7224->i2c_addr, 0x97, val);
+		}
+		vsc7224_chan = vsc7224_chan->next;
+	}
+
+	return err;
+}
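+
+/*
+ * Sketch of how this callback is expected to be wired up. The registration
+ * helper name below is an assumption (it is not defined in this file): the
+ * SFP layer invokes the handler whenever mod_abs changes, passing the
+ * user-defined data pointer through:
+ *
+ *	cvmx_sfp_register_mod_abs_changed(sfp,
+ *					  cvmx_sfp_vsc7224_mod_abs_changed,
+ *					  NULL);
+ */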
+
+/**
+ * Update the mod_abs and error LED
+ *
+ * @param	ipd_port	ipd port number
+ * @param	sfp_info	SFP information for the port
+ * @param	link		link information
+ */
+static void __cvmx_helper_update_sfp(int ipd_port,
+				     struct cvmx_fdt_sfp_info *sfp_info,
+				     cvmx_helper_link_info_t link)
+{
+	debug("%s(%d): checking mod_abs\n", __func__, ipd_port);
+
+	cvmx_sfp_check_mod_abs(sfp_info, sfp_info->mod_abs_data);
+}
+
+static void cvmx_sfp_update_link(struct cvmx_fdt_sfp_info *sfp,
+				 cvmx_helper_link_info_t link)
+{
+	while (sfp) {
+		debug("%s(%s): checking mod_abs\n", __func__, sfp->name);
+		if (link.s.link_up && sfp->last_mod_abs)
+			cvmx_sfp_check_mod_abs(sfp, sfp->mod_abs_data);
+		sfp = sfp->next_iface_sfp;
+	}
+}
+
+/**
+ * @INTERNAL
+ * This function is used to get the link state of ethernet ports. It uses the
+ * device tree information to determine the PHY address and type of PHY. The
+ * only supported PHYs are Marvell and Broadcom.
+ *
+ * @param ipd_port IPD input port associated with the port we want to get link
+ *                 status for.
+ *
+ * @return The port's link status. If the link isn't fully resolved, this must
+ *         return zero.
+ */
+cvmx_helper_link_info_t __cvmx_helper_board_link_get_from_dt(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	cvmx_phy_info_t *phy_info = NULL;
+	cvmx_phy_info_t local_phy_info;
+	int xiface = 0, index = 0;
+	bool use_inband = false;
+	struct cvmx_fdt_sfp_info *sfp_info;
+	const void *fdt_addr = CASTPTR(const void *, gd->fdt_blob);
+
+	result.u64 = 0;
+
+	if (ipd_port >= 0) {
+		int mode;
+
+		xiface = cvmx_helper_get_interface_num(ipd_port);
+		index = cvmx_helper_get_interface_index_num(ipd_port);
+		mode = cvmx_helper_interface_get_mode(xiface);
+		if (!cvmx_helper_get_port_autonegotiation(xiface, index)) {
+			result.s.link_up = 1;
+			result.s.full_duplex = 1;
+			switch (mode) {
+			case CVMX_HELPER_INTERFACE_MODE_RGMII:
+			case CVMX_HELPER_INTERFACE_MODE_GMII:
+			case CVMX_HELPER_INTERFACE_MODE_SGMII:
+			case CVMX_HELPER_INTERFACE_MODE_QSGMII:
+			case CVMX_HELPER_INTERFACE_MODE_AGL:
+			case CVMX_HELPER_INTERFACE_MODE_SPI:
+				if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+					struct cvmx_xiface xi =
+						cvmx_helper_xiface_to_node_interface(
+							xiface);
+					u64 gbaud = cvmx_qlm_get_gbaud_mhz(0);
+
+					result.s.speed = gbaud * 8 / 10;
+					if (cvmx_qlm_get_dlm_mode(
+						    0, xi.interface) ==
+					    CVMX_QLM_MODE_SGMII)
+						result.s.speed >>= 1;
+					else
+						result.s.speed >>= 2;
+				} else {
+					result.s.speed = 1000;
+				}
+				break;
+			case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+			case CVMX_HELPER_INTERFACE_MODE_XAUI:
+			case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+			case CVMX_HELPER_INTERFACE_MODE_XFI:
+				result.s.speed = 10000;
+				break;
+			case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+			case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+				result.s.speed = 40000;
+				break;
+			default:
+				break;
+			}
+
+			sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
+			/* Initialize the SFP info if it hasn't already been
+			 * done.
+			 */
+			if (!sfp_info && !sfp_parsed) {
+				cvmx_sfp_parse_device_tree(fdt_addr);
+				sfp_parsed = true;
+				cvmx_sfp_read_all_modules();
+				sfp_info = cvmx_helper_cfg_get_sfp_info(xiface,
+									index);
+			}
+			/* If the link is down or the link is up but we still
+			 * register the module as being absent, re-check
+			 * mod_abs.
+			 */
+			cvmx_sfp_update_link(sfp_info, result);
+
+			cvmx_helper_update_link_led(xiface, index, result);
+
+			return result;
+		}
+		phy_info = cvmx_helper_get_port_phy_info(xiface, index);
+		if (!phy_info) {
+			debug("%s: phy info not saved in config, allocating for 0x%x:%d\n",
+			      __func__, xiface, index);
+
+			phy_info = (cvmx_phy_info_t *)cvmx_bootmem_alloc(
+				sizeof(*phy_info), 0);
+			if (!phy_info) {
+				debug("%s: Out of memory\n", __func__);
+				return result;
+			}
+			memset(phy_info, 0, sizeof(*phy_info));
+			phy_info->phy_addr = -1;
+			debug("%s: Setting phy info for 0x%x:%d to %p\n",
+			      __func__, xiface, index, phy_info);
+			cvmx_helper_set_port_phy_info(xiface, index, phy_info);
+		}
+	} else {
+		/* For management ports we don't store the PHY information
+		 * so we use a local copy instead.
+		 */
+		phy_info = &local_phy_info;
+		memset(phy_info, 0, sizeof(*phy_info));
+		phy_info->phy_addr = -1;
+	}
+
+	if (phy_info->phy_addr == -1) {
+		if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+			if (__cvmx_helper_78xx_parse_phy(phy_info, ipd_port)) {
+				phy_info->phy_addr = -1;
+				use_inband = true;
+			}
+		} else if (__get_phy_info_from_dt(phy_info, ipd_port) < 0) {
+			phy_info->phy_addr = -1;
+			use_inband = true;
+		}
+	}
+
+	/* If we can't get the PHY info from the device tree then try
+	 * the inband state.
+	 */
+	if (use_inband) {
+		result.s.full_duplex = 1;
+		result.s.link_up = 1;
+		result.s.speed = 1000;
+		return result;
+	}
+
+	if (phy_info->phy_addr < 0)
+		return result;
+
+	if (phy_info->link_function)
+		result = phy_info->link_function(phy_info);
+	else
+		result = cvmx_helper_link_get(ipd_port);
+
+	sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
+	while (sfp_info) {
+		/* If the link is down or the link is up but we still register
+		 * the module as being absent, re-check mod_abs.
+		 */
+		if (!result.s.link_up ||
+		    (result.s.link_up && sfp_info->last_mod_abs))
+			__cvmx_helper_update_sfp(ipd_port, sfp_info, result);
+		sfp_info = sfp_info->next_iface_sfp;
+	}
+
+	return result;
+}
+
+cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+
+	/* Unless we fix it later, all links are defaulted to down */
+	result.u64 = 0;
+
+	return __cvmx_helper_board_link_get_from_dt(ipd_port);
+}
+
+void cvmx_helper_update_link_led(int xiface, int index,
+				 cvmx_helper_link_info_t result)
+{
+}
+
+void cvmx_helper_leds_show_error(struct cvmx_phy_gpio_leds *leds, bool error)
+{
+}
+
+int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
+{
+	return supported_ports;
+}
+
+/**
+ * Returns the Ethernet node offset in the device tree
+ *
+ * @param     fdt_addr - pointer to flat device tree in memory
+ * @param     aliases    - offset of alias in device tree
+ * @param     ipd_port - ipd port number to look up
+ *
+ * @returns   offset of Ethernet node if >= 0, error if -1
+ */
+int __pip_eth_node(const void *fdt_addr, int aliases, int ipd_port)
+{
+	char name_buffer[20];
+	const char *pip_path;
+	int pip, iface, eth;
+	int interface_num = cvmx_helper_get_interface_num(ipd_port);
+	int interface_index = cvmx_helper_get_interface_index_num(ipd_port);
+	cvmx_helper_interface_mode_t interface_mode =
+		cvmx_helper_interface_get_mode(interface_num);
+
+	/* The following are not found in the device tree */
+	switch (interface_mode) {
+	case CVMX_HELPER_INTERFACE_MODE_ILK:
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+	case CVMX_HELPER_INTERFACE_MODE_SRIO:
+		debug("ERROR: No node expected for interface: %d, port: %d, mode: %s\n",
+		      interface_index, ipd_port,
+		      cvmx_helper_interface_mode_to_string(interface_mode));
+		return -1;
+	default:
+		break;
+	}
+	pip_path = (const char *)fdt_getprop(fdt_addr, aliases, "pip", NULL);
+	if (!pip_path) {
+		debug("ERROR: pip path not found in device tree\n");
+		return -1;
+	}
+	pip = fdt_path_offset(fdt_addr, pip_path);
+	debug("ipd_port=%d pip_path=%s pip=%d ", ipd_port, pip_path, pip);
+	if (pip < 0) {
+		debug("ERROR: pip not found in device tree\n");
+		return -1;
+	}
+	snprintf(name_buffer, sizeof(name_buffer), "interface@%d",
+		 interface_num);
+	iface = fdt_subnode_offset(fdt_addr, pip, name_buffer);
+	debug("iface=%d ", iface);
+	if (iface < 0) {
+		debug("ERROR : pip intf %d not found in device tree\n",
+		      interface_num);
+		return -1;
+	}
+	snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x",
+		 interface_index);
+	eth = fdt_subnode_offset(fdt_addr, iface, name_buffer);
+	debug("eth=%d\n", eth);
+	if (eth < 0) {
+		debug("ERROR : pip interface@%d ethernet@%d not found in device tree\n",
+		      interface_num, interface_index);
+		return -1;
+	}
+	return eth;
+}
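+
+/*
+ * Illustrative lookup path (numbers assumed): for an ipd_port that maps to
+ * interface 1, index 0, the node resolved above is reached via the "pip"
+ * alias as <pip>/interface@1/ethernet@0.
+ */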
+
+int __mix_eth_node(const void *fdt_addr, int aliases, int interface_index)
+{
+	char name_buffer[20];
+	const char *mix_path;
+	int mix;
+
+	snprintf(name_buffer, sizeof(name_buffer), "mix%d", interface_index);
+	mix_path =
+		(const char *)fdt_getprop(fdt_addr, aliases, name_buffer, NULL);
+	if (!mix_path) {
+		debug("ERROR: mix%d path not found in device tree\n",
+		      interface_index);
+		return -1;
+	}
+	mix = fdt_path_offset(fdt_addr, mix_path);
+	if (mix < 0) {
+		debug("ERROR: %s not found in device tree\n", mix_path);
+		return -1;
+	}
+	return mix;
+}
+
+static int __mdiobus_addr_to_unit(u32 addr)
+{
+	int unit = (addr >> 7) & 3;
+
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX) && !OCTEON_IS_MODEL(OCTEON_CN78XX))
+		unit >>= 1;
+	return unit;
+}
+
+/**
+ * Parse the muxed MDIO interface information from the device tree
+ *
+ * @param phy_info - pointer to phy info data structure to update
+ * @param mdio_offset - offset of MDIO bus
+ * @param mux_offset - offset of MUX, parent of mdio_offset
+ *
+ * @return 0 for success or -1
+ */
+static int __get_muxed_mdio_info_from_dt(cvmx_phy_info_t *phy_info,
+					 int mdio_offset, int mux_offset)
+{
+	const void *fdt_addr = CASTPTR(const void *, gd->fdt_blob);
+	int phandle;
+	int smi_offset;
+	int gpio_offset;
+	u64 smi_addr = 0;
+	int len;
+	u32 *pgpio_handle;
+	int gpio_count = 0;
+	u32 *prop_val;
+	int offset;
+	const char *prop_name;
+
+	debug("%s(%p, 0x%x, 0x%x)\n", __func__, phy_info, mdio_offset,
+	      mux_offset);
+
+	/* Get register value to put onto the GPIO lines to select */
+	phy_info->gpio_value =
+		cvmx_fdt_get_int(fdt_addr, mdio_offset, "reg", -1);
+	if (phy_info->gpio_value < 0) {
+		debug("Could not get register value for muxed MDIO bus from DT\n");
+		return -1;
+	}
+
+	smi_offset = cvmx_fdt_lookup_phandle(fdt_addr, mux_offset,
+					     "mdio-parent-bus");
+	if (smi_offset < 0) {
+		debug("Invalid SMI offset for muxed MDIO interface in device tree\n");
+		return -1;
+	}
+	smi_addr = cvmx_fdt_get_uint64(fdt_addr, smi_offset, "reg", 0);
+
+	/* Convert SMI address to a MDIO interface */
+	switch (smi_addr) {
+	case 0x1180000001800:
+	case 0x1180000003800: /* 68XX address */
+		phy_info->mdio_unit = 0;
+		break;
+	case 0x1180000001900:
+	case 0x1180000003880:
+		phy_info->mdio_unit = 1;
+		break;
+	case 0x1180000003900:
+		phy_info->mdio_unit = 2;
+		break;
+	case 0x1180000003980:
+		phy_info->mdio_unit = 3;
+		break;
+	default:
+		phy_info->mdio_unit = 1;
+		break;
+	}
+	/* Find the GPIO MUX controller */
+	pgpio_handle =
+		(u32 *)fdt_getprop(fdt_addr, mux_offset, "gpios", &len);
+	if (!pgpio_handle || len < 12 || (len % 12) != 0 ||
+	    len > CVMX_PHY_MUX_MAX_GPIO * 12) {
+		debug("Invalid GPIO for muxed MDIO controller in DT\n");
+		return -1;
+	}
+
+	for (gpio_count = 0; gpio_count < len / 12; gpio_count++) {
+		phandle = fdt32_to_cpu(pgpio_handle[gpio_count * 3]);
+		phy_info->gpio[gpio_count] =
+			fdt32_to_cpu(pgpio_handle[gpio_count * 3 + 1]);
+		gpio_offset = fdt_node_offset_by_phandle(fdt_addr, phandle);
+		if (gpio_offset < 0) {
+			debug("Cannot access parent GPIO node in DT\n");
+			return -1;
+		}
+		if (!fdt_node_check_compatible(fdt_addr, gpio_offset,
+					       "cavium,octeon-3860-gpio")) {
+			phy_info->gpio_type[gpio_count] = GPIO_OCTEON;
+		} else if (!fdt_node_check_compatible(fdt_addr, gpio_offset,
+						      "nxp,pca8574")) {
+			/* GPIO is a TWSI GPIO unit which might sit behind
+			 * another mux.
+			 */
+			phy_info->gpio_type[gpio_count] = GPIO_PCA8574;
+			prop_val = (u32 *)fdt_getprop(
+				fdt_addr, gpio_offset, "reg", NULL);
+			if (!prop_val) {
+				debug("Could not find TWSI address of nxp pca8574 GPIO from DT\n");
+				return -1;
+			}
+			/* Get the TWSI address of the GPIO unit */
+			phy_info->cvmx_gpio_twsi[gpio_count] =
+				fdt32_to_cpu(*prop_val);
+			/* Get the selector on the GPIO mux if present */
+			offset = fdt_parent_offset(fdt_addr, gpio_offset);
+			prop_val = (u32 *)fdt_getprop(fdt_addr, offset,
+						      "reg", NULL);
+			if (prop_val) {
+				phy_info->gpio_parent_mux_select =
+					fdt32_to_cpu(*prop_val);
+				/* Go up another level */
+				offset = fdt_parent_offset(fdt_addr, offset);
+				if (!fdt_node_check_compatible(fdt_addr, offset,
+							       "nxp,pca9548")) {
+					prop_val = (u32 *)fdt_getprop(
+						fdt_addr, offset, "reg", NULL);
+					if (!prop_val) {
+						debug("Could not read MUX TWSI address from DT\n");
+						return -1;
+					}
+					phy_info->gpio_parent_mux_twsi =
+						fdt32_to_cpu(*prop_val);
+				}
+			}
+		} else {
+			prop_name = (char *)fdt_getprop(fdt_addr, gpio_offset,
+							"compatible", NULL);
+			debug("Unknown GPIO type %s\n", prop_name);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Converts a BGX address to the node, interface and port number
+ *
+ * @param bgx_addr	Address of CSR register
+ *
+ * @return node, interface and port number, will be -1 for invalid address.
+ */
+static struct cvmx_xiface __cvmx_bgx_reg_addr_to_xiface(u64 bgx_addr)
+{
+	struct cvmx_xiface xi = { -1, -1 };
+
+	xi.node = cvmx_csr_addr_to_node(bgx_addr);
+	bgx_addr = cvmx_csr_addr_strip_node(bgx_addr);
+	if ((bgx_addr & 0xFFFFFFFFF0000000) != 0x00011800E0000000) {
+		debug("%s: Invalid BGX address 0x%llx\n", __func__,
+		      (unsigned long long)bgx_addr);
+		xi.node = -1;
+		return xi;
+	}
+	xi.interface = (bgx_addr >> 24) & 0x0F;
+
+	return xi;
+}
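+
+/*
+ * Worked example: for the BGX2 base address 0x11800e2000000 the node bits
+ * are zero and (bgx_addr >> 24) & 0xf = 0x11800e2 & 0xf = 2, i.e. node 0,
+ * interface 2.
+ */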
+
+static cvmx_helper_link_info_t
+__get_marvell_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	int phy_status;
+	u32 phy_addr = phy_info->phy_addr;
+
+	result.u64 = 0;
+	/* Set to page 0 */
+	cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, 22, 0);
+	/* All the speed information can be read from register 17 in one go. */
+	phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
+
+	/* If the resolve bit 11 isn't set, see if autoneg is turned off
+	 * (bit 12, reg 0). The resolve bit doesn't get set properly when
+	 * autoneg is off, so force it
+	 */
+	if ((phy_status & (1 << 11)) == 0) {
+		int auto_status =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0);
+		if ((auto_status & (1 << 12)) == 0)
+			phy_status |= 1 << 11;
+	}
+
+	/* Link is up = Speed/Duplex Resolved + RT-Link Up + G-Link Up. */
+	if ((phy_status & 0x0c08) == 0x0c08) {
+		result.s.link_up = 1;
+		result.s.full_duplex = ((phy_status >> 13) & 1);
+		switch ((phy_status >> 14) & 3) {
+		case 0: /* 10 Mbps */
+			result.s.speed = 10;
+			break;
+		case 1: /* 100 Mbps */
+			result.s.speed = 100;
+			break;
+		case 2: /* 1 Gbps */
+			result.s.speed = 1000;
+			break;
+		case 3: /* Illegal */
+			result.u64 = 0;
+			break;
+		}
+	}
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of broadcom PHY
+ *
+ * @param phy_info	PHY information
+ */
+static cvmx_helper_link_info_t
+__get_broadcom_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	u32 phy_addr = phy_info->phy_addr;
+	int phy_status;
+
+	result.u64 = 0;
+	/* Below we are going to read SMI/MDIO register 0x19 which works
+	 * on Broadcom parts
+	 */
+	phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x19);
+	switch ((phy_status >> 8) & 0x7) {
+	case 0:
+		result.u64 = 0;
+		break;
+	case 1:
+		result.s.link_up = 1;
+		result.s.full_duplex = 0;
+		result.s.speed = 10;
+		break;
+	case 2:
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 10;
+		break;
+	case 3:
+		result.s.link_up = 1;
+		result.s.full_duplex = 0;
+		result.s.speed = 100;
+		break;
+	case 4:
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 100;
+		break;
+	case 5:
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 100;
+		break;
+	case 6:
+		result.s.link_up = 1;
+		result.s.full_duplex = 0;
+		result.s.speed = 1000;
+		break;
+	case 7:
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 1000;
+		break;
+	}
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Get link state of generic gigabit PHY
+ *
+ * @param phy_info - information about the PHY
+ *
+ * @returns link status of the PHY
+ */
+static cvmx_helper_link_info_t
+__cvmx_get_generic_8023_c22_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	u32 phy_addr = phy_info->phy_addr;
+	int phy_basic_control;	 /* Register 0x0 */
+	int phy_basic_status;	 /* Register 0x1 */
+	int phy_anog_adv;	 /* Register 0x4 */
+	int phy_link_part_avail; /* Register 0x5 */
+	int phy_control;	 /* Register 0x9 */
+	int phy_status;		 /* Register 0xA */
+
+	result.u64 = 0;
+
+	phy_basic_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 1);
+	if (!(phy_basic_status & 0x4)) /* Check if link is up */
+		return result;	       /* Link is down, return link down */
+
+	result.s.link_up = 1;
+	phy_basic_control = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0);
+	/* Check if autonegotiation is enabled and completed */
+	if ((phy_basic_control & (1 << 12)) && (phy_basic_status & (1 << 5))) {
+		phy_status =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0xA);
+		phy_control =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x9);
+
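+		/* Reg 0x9 (1000BASE-T control) advertises 1000M half/full
+		 * in bits 8/9; reg 0xA (1000BASE-T status) reports the link
+		 * partner's 1000M half/full ability in bits 10/11. Shifting
+		 * the control value left by 2 aligns the two fields so the
+		 * AND below leaves only the speeds both sides support.
+		 */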
+		phy_status &= phy_control << 2;
+		phy_link_part_avail =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x5);
+		phy_anog_adv =
+			cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x4);
+		phy_link_part_avail &= phy_anog_adv;
+
+		if (phy_status & 0xC00) { /* Gigabit full or half */
+			result.s.speed = 1000;
+			result.s.full_duplex = !!(phy_status & 0x800);
+		} else if (phy_link_part_avail &
+			   0x0180) { /* 100 full or half */
+			result.s.speed = 100;
+			result.s.full_duplex = !!(phy_link_part_avail & 0x100);
+		} else if (phy_link_part_avail & 0x0060) {
+			result.s.speed = 10;
+			result.s.full_duplex = !!(phy_link_part_avail & 0x0040);
+		}
+	} else {
+		/* Not autonegotiated */
+		result.s.full_duplex = !!(phy_basic_control & (1 << 8));
+
+		if (phy_basic_control & (1 << 6))
+			result.s.speed = 1000;
+		else if (phy_basic_control & (1 << 13))
+			result.s.speed = 100;
+		else
+			result.s.speed = 10;
+	}
+	return result;
+}
+
+static cvmx_helper_link_info_t
+__cvmx_get_qualcomm_s17_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	u32 phy_addr = phy_info->phy_addr;
+	int phy_status;
+	int auto_status;
+
+	result.u64 = 0;
+
+	phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
+
+	/* If bit 11 isn't set see if autonegotiation is turned off
+	 * (bit 12, reg 0).  The resolved bit doesn't get set properly when
+	 * autonegotiation is off, so force it.
+	 */
+	if ((phy_status & (1 << 11)) == 0) {
+		auto_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0);
+		if ((auto_status & (1 << 12)) == 0)
+			phy_status |= 1 << 11;
+	}
+	/* Only return a link if the PHY has finished autonegotiation and set
+	 * the resolved bit (bit 11).
+	 */
+	if (phy_status & (1 << 11)) {
+		result.s.link_up = 1;
+		result.s.full_duplex = !!(phy_status & (1 << 13));
+		switch ((phy_status >> 14) & 3) {
+		case 0: /* 10Mbps */
+			result.s.speed = 10;
+			break;
+		case 1: /* 100Mbps */
+			result.s.speed = 100;
+			break;
+		case 2: /* 1Gbps */
+			result.s.speed = 1000;
+			break;
+		default: /* Illegal */
+			result.u64 = 0;
+			break;
+		}
+	}
+	debug("   link: %s, duplex: %s, speed: %lu\n",
+	      result.s.link_up ? "up" : "down",
+	      result.s.full_duplex ? "full" : "half",
+	      (unsigned long)result.s.speed);
+	return result;
+}
+
+static cvmx_helper_link_info_t
+__get_generic_8023_c45_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+	int phy_status;
+	int pma_ctrl1;
+	u32 phy_addr = phy_info->phy_addr;
+
+	result.u64 = 0;
+	pma_ctrl1 = cvmx_mdio_45_read(phy_addr >> 8, phy_addr & 0xff, 1, 0);
+	if ((pma_ctrl1 & 0x207c) == 0x2040)
+		result.s.speed = 10000;
+	/* PMA Status 1 (1x0001) */
+	phy_status = cvmx_mdio_45_read(phy_addr >> 8, phy_addr & 0xff, 1, 0xa);
+	if (phy_status < 0)
+		return result;
+
+	result.s.full_duplex = 1;
+	if ((phy_status & 1) == 0)
+		return result;
+	phy_status = cvmx_mdio_45_read(phy_addr >> 8, phy_addr & 0xff, 4, 0x18);
+	if (phy_status < 0)
+		return result;
+	result.s.link_up = (phy_status & 0x1000) ? 1 : 0;
+
+	return result;
+}
+
+static cvmx_helper_link_info_t
+__cvmx_get_cortina_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+
+	result.s.link_up = 1;
+	result.s.full_duplex = 1;
+	result.s.speed = 1000;
+	return result;
+}
+
+static cvmx_helper_link_info_t
+__get_vitesse_vsc8490_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+
+	result.s.link_up = 1;
+	result.s.full_duplex = 1;
+	result.s.speed = 1000;
+	return result;
+}
+
+static cvmx_helper_link_info_t
+__get_aquantia_phy_link_state(cvmx_phy_info_t *phy_info)
+{
+	cvmx_helper_link_info_t result;
+
+	result.s.link_up = 1;
+	result.s.full_duplex = 1;
+	result.s.speed = 1000;
+	return result;
+}
+
+static int __cvmx_helper_78xx_parse_phy(struct cvmx_phy_info *phy_info,
+					int ipd_port)
+{
+	const void *fdt_addr = CASTPTR(const void *, gd->fdt_blob);
+	const char *compat;
+	int phy;
+	int parent;
+	u64 mdio_base;
+	int node, bus;
+	int phy_addr;
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	int xiface = cvmx_helper_get_interface_num(ipd_port);
+	int compat_len = 0;
+
+	debug("%s(0x%p, %d) ENTER\n", __func__, phy_info, ipd_port);
+
+	phy = cvmx_helper_get_phy_fdt_node_offset(xiface, index);
+	debug("%s: xiface: 0x%x, index: %d, ipd_port: %d, phy fdt offset: %d\n",
+	      __func__, xiface, index, ipd_port, phy);
+	if (phy < 0) {
+		/* If this is the first time through we need to first parse the
+		 * device tree to get the node offsets.
+		 */
+		debug("No config present, calling __cvmx_helper_parse_bgx_dt\n");
+		if (__cvmx_helper_parse_bgx_dt(fdt_addr)) {
+			printf("Error: could not parse BGX device tree\n");
+			return -1;
+		}
+		if (__cvmx_fdt_parse_vsc7224(fdt_addr)) {
+			debug("Error: could not parse Microsemi VSC7224 in DT\n");
+			return -1;
+		}
+		if (octeon_has_feature(OCTEON_FEATURE_BGX_XCV) &&
+		    __cvmx_helper_parse_bgx_rgmii_dt(fdt_addr)) {
+			printf("Error: could not parse BGX XCV device tree\n");
+			return -1;
+		}
+		phy = cvmx_helper_get_phy_fdt_node_offset(xiface, index);
+		if (phy < 0) {
+			debug("%s: Could not get PHY node offset for IPD port 0x%x, xiface: 0x%x, index: %d\n",
+			      __func__, ipd_port, xiface, index);
+			return -1;
+		}
+		debug("%s: phy: %d (%s)\n", __func__, phy,
+		      fdt_get_name(fdt_addr, phy, NULL));
+	}
+
+	compat = (const char *)fdt_getprop(fdt_addr, phy, "compatible",
+					   &compat_len);
+	if (!compat) {
+		printf("ERROR: %d:%d:no compatible prop in phy\n", xiface,
+		       index);
+		return -1;
+	}
+
+	debug("  compatible: %s\n", compat);
+
+	phy_info->fdt_offset = phy;
+	phy_addr = cvmx_fdt_get_int(fdt_addr, phy, "reg", -1);
+	if (phy_addr == -1) {
+		printf("Error: %d:%d:could not get PHY address\n", xiface,
+		       index);
+		return -1;
+	}
+	debug("  PHY address: %d, compat: %s\n", phy_addr, compat);
+
+	if (!memcmp("marvell", compat, strlen("marvell"))) {
+		phy_info->phy_type = MARVELL_GENERIC_PHY;
+		phy_info->link_function = __get_marvell_phy_link_state;
+	} else if (!memcmp("broadcom", compat, strlen("broadcom"))) {
+		phy_info->phy_type = BROADCOM_GENERIC_PHY;
+		phy_info->link_function = __get_broadcom_phy_link_state;
+	} else if (!memcmp("cortina", compat, strlen("cortina"))) {
+		phy_info->phy_type = CORTINA_PHY;
+		phy_info->link_function = __cvmx_get_cortina_phy_link_state;
+	} else if (!strcmp("vitesse,vsc8490", compat)) {
+		phy_info->phy_type = VITESSE_VSC8490_PHY;
+		phy_info->link_function = __get_vitesse_vsc8490_phy_link_state;
+	} else if (fdt_stringlist_contains(compat, compat_len,
+					   "ethernet-phy-ieee802.3-c22")) {
+		phy_info->phy_type = GENERIC_8023_C22_PHY;
+		phy_info->link_function =
+			__cvmx_get_generic_8023_c22_phy_link_state;
+	} else if (fdt_stringlist_contains(compat, compat_len,
+					   "ethernet-phy-ieee802.3-c45")) {
+		phy_info->phy_type = GENERIC_8023_C45_PHY;
+		phy_info->link_function = __get_generic_8023_c45_phy_link_state;
+	}
+
+	phy_info->ipd_port = ipd_port;
+	phy_info->phy_sub_addr = 0;
+	phy_info->direct_connect = 1;
+
+	parent = fdt_parent_offset(fdt_addr, phy);
+	if (!fdt_node_check_compatible(fdt_addr, parent,
+				       "ethernet-phy-nexus")) {
+		debug("  nexus PHY found\n");
+		if (phy_info->phy_type == CORTINA_PHY) {
+			/* The Cortina CS422X uses the same PHY device for
+			 * multiple ports for XFI.  In this case we use a
+			 * nexus and each PHY address is the slice or
+			 * sub-address and the actual PHY address is the
+			 * nexus address.
+			 */
+			phy_info->phy_sub_addr = phy_addr;
+			phy_addr =
+				cvmx_fdt_get_int(fdt_addr, parent, "reg", -1);
+			debug("  Cortina PHY real address: 0x%x\n", phy_addr);
+		}
+		parent = fdt_parent_offset(fdt_addr, parent);
+	}
+
+	debug("  Parent: %s\n", fdt_get_name(fdt_addr, parent, NULL));
+	if (!fdt_node_check_compatible(fdt_addr, parent,
+				       "cavium,octeon-3860-mdio")) {
+		debug("  Found Octeon MDIO\n");
+		mdio_base = cvmx_fdt_get_uint64(fdt_addr, parent, "reg",
+						FDT_ADDR_T_NONE);
+		debug("  MDIO address: 0x%llx\n",
+		      (unsigned long long)mdio_base);
+
+		mdio_base = cvmx_fdt_translate_address(fdt_addr, parent,
+						       (u32 *)&mdio_base);
+		debug("  Translated: 0x%llx\n", (unsigned long long)mdio_base);
+		if (mdio_base == FDT_ADDR_T_NONE) {
+			printf("Could not get MDIO base address from reg field\n");
+			return -1;
+		}
+		__cvmx_mdio_addr_to_node_bus(mdio_base, &node, &bus);
+		if (bus < 0) {
+			printf("Invalid MDIO address 0x%llx, could not detect bus and node\n",
+			       (unsigned long long)mdio_base);
+			return -1;
+		}
+		debug("  MDIO node: %d, bus: %d\n", node, bus);
+		phy_info->mdio_unit = (node << 2) | (bus & 3);
+		phy_info->phy_addr = phy_addr | (phy_info->mdio_unit << 8);
+	} else {
+		printf("%s: Error: incompatible MDIO bus %s for IPD port %d\n",
+		       __func__,
+		       (const char *)fdt_get_name(fdt_addr, parent, NULL),
+		       ipd_port);
+		return -1;
+	}
+
+	debug("%s: EXIT 0\n", __func__);
+
+	return 0;
+}
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. The phy address is obtained from the device tree.
+ *
+ * @param[out] phy_info - PHY information data structure updated
+ * @param ipd_port Octeon IPD port to get the MII address for.
+ *
+ * @return MII PHY address and bus number, -1 on error, -2 if PHY info missing (OK).
+ */
+static int __get_phy_info_from_dt(cvmx_phy_info_t *phy_info, int ipd_port)
+{
+	const void *fdt_addr = CASTPTR(const void *, gd->fdt_blob);
+	int aliases, eth, phy, phy_parent, ret, i;
+	int mdio_parent;
+	const char *phy_compatible_str;
+	const char *host_mode_str = NULL;
+	int interface;
+	int phy_addr_offset = 0;
+
+	debug("%s(%p, %d)\n", __func__, phy_info, ipd_port);
+
+	if (octeon_has_feature(OCTEON_FEATURE_BGX))
+		return __cvmx_helper_78xx_parse_phy(phy_info, ipd_port);
+
+	phy_info->phy_addr = -1;
+	phy_info->phy_sub_addr = 0;
+	phy_info->ipd_port = ipd_port;
+	phy_info->direct_connect = -1;
+	phy_info->phy_type = (cvmx_phy_type_t)-1;
+	for (i = 0; i < CVMX_PHY_MUX_MAX_GPIO; i++)
+		phy_info->gpio[i] = -1;
+	phy_info->mdio_unit = -1;
+	phy_info->gpio_value = -1;
+	phy_info->gpio_parent_mux_twsi = -1;
+	phy_info->gpio_parent_mux_select = -1;
+	phy_info->link_function = NULL;
+	phy_info->fdt_offset = -1;
+	if (!fdt_addr) {
+		debug("No device tree found.\n");
+		return -1;
+	}
+
+	aliases = fdt_path_offset(fdt_addr, "/aliases");
+	if (aliases < 0) {
+		debug("Error: No /aliases node in device tree.\n");
+		return -1;
+	}
+	if (ipd_port < 0) {
+		int interface_index =
+			ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT;
+		eth = __mix_eth_node(fdt_addr, aliases, interface_index);
+	} else {
+		eth = __pip_eth_node(fdt_addr, aliases, ipd_port);
+	}
+	if (eth < 0) {
+		debug("ERROR : cannot find interface for ipd_port=%d\n",
+		      ipd_port);
+		return -1;
+	}
+
+	interface = cvmx_helper_get_interface_num(ipd_port);
+	/* Get handle to phy */
+	phy = cvmx_fdt_lookup_phandle(fdt_addr, eth, "phy-handle");
+	if (phy < 0) {
+		cvmx_helper_interface_mode_t if_mode;
+
+		/* Note that it's OK for RXAUI and ILK to not have a PHY
+		 * connected (i.e. EBB boards in loopback).
+		 */
+		debug("Cannot get phy-handle for ipd_port: %d\n", ipd_port);
+		if_mode = cvmx_helper_interface_get_mode(interface);
+		if (if_mode != CVMX_HELPER_INTERFACE_MODE_RXAUI &&
+		    if_mode != CVMX_HELPER_INTERFACE_MODE_ILK) {
+			debug("ERROR : phy handle not found in device tree ipd_port=%d\n",
+			      ipd_port);
+			return -1;
+		} else {
+			return -2;
+		}
+	}
+
+	phy_compatible_str =
+		(const char *)fdt_getprop(fdt_addr, phy, "compatible", NULL);
+	if (!phy_compatible_str) {
+		debug("ERROR: no compatible prop in phy\n");
+		return -1;
+	}
+	debug("Checking compatible string \"%s\" for ipd port %d\n",
+	      phy_compatible_str, ipd_port);
+	phy_info->fdt_offset = phy;
+	if (!memcmp("marvell", phy_compatible_str, strlen("marvell"))) {
+		debug("Marvell PHY detected for ipd_port %d\n", ipd_port);
+		phy_info->phy_type = MARVELL_GENERIC_PHY;
+		phy_info->link_function = __get_marvell_phy_link_state;
+	} else if (!memcmp("broadcom", phy_compatible_str,
+			   strlen("broadcom"))) {
+		phy_info->phy_type = BROADCOM_GENERIC_PHY;
+		phy_info->link_function = __get_broadcom_phy_link_state;
+		debug("Broadcom PHY detected for ipd_port %d\n", ipd_port);
+	} else if (!memcmp("vitesse", phy_compatible_str, strlen("vitesse"))) {
+		debug("Vitesse PHY detected for ipd_port %d\n", ipd_port);
+		if (!fdt_node_check_compatible(fdt_addr, phy,
+					       "vitesse,vsc8490")) {
+			phy_info->phy_type = VITESSE_VSC8490_PHY;
+			debug("Vitesse VSC8490 detected\n");
+			phy_info->link_function =
+				__get_vitesse_vsc8490_phy_link_state;
+		} else if (!fdt_node_check_compatible(
+				   fdt_addr, phy,
+				   "ethernet-phy-ieee802.3-c22")) {
+			phy_info->phy_type = GENERIC_8023_C22_PHY;
+			phy_info->link_function =
+				__cvmx_get_generic_8023_c22_phy_link_state;
+			debug("Vitesse 802.3 c22 detected\n");
+		} else {
+			phy_info->phy_type = GENERIC_8023_C45_PHY;
+			phy_info->link_function =
+				__get_generic_8023_c45_phy_link_state;
+			debug("Vitesse 802.3 c45 detected\n");
+		}
+	} else if (!memcmp("aquantia", phy_compatible_str,
+			   strlen("aquantia"))) {
+		phy_info->phy_type = AQUANTIA_PHY;
+		phy_info->link_function = __get_aquantia_phy_link_state;
+		debug("Aquantia c45 PHY detected\n");
+	} else if (!memcmp("cortina", phy_compatible_str, strlen("cortina"))) {
+		phy_info->phy_type = CORTINA_PHY;
+		phy_info->link_function = __cvmx_get_cortina_phy_link_state;
+		host_mode_str = (const char *)fdt_getprop(
+			fdt_addr, phy, "cortina,host-mode", NULL);
+		debug("Cortina PHY detected for ipd_port %d\n", ipd_port);
+	} else if (!memcmp("ti", phy_compatible_str, strlen("ti"))) {
+		phy_info->phy_type = GENERIC_8023_C45_PHY;
+		phy_info->link_function = __get_generic_8023_c45_phy_link_state;
+		debug("TI PHY detected for ipd_port %d\n", ipd_port);
+	} else if (!fdt_node_check_compatible(fdt_addr, phy,
+					      "atheros,ar8334") ||
+		   !fdt_node_check_compatible(fdt_addr, phy,
+					      "qualcomm,qca8334") ||
+		   !fdt_node_check_compatible(fdt_addr, phy,
+					      "atheros,ar8337") ||
+		   !fdt_node_check_compatible(fdt_addr, phy,
+					      "qualcomm,qca8337")) {
+		phy_info->phy_type = QUALCOMM_S17;
+		phy_info->link_function =
+			__cvmx_get_qualcomm_s17_phy_link_state;
+		debug("Qualcomm QCA833X switch detected\n");
+	} else if (!fdt_node_check_compatible(fdt_addr, phy,
+					      "ethernet-phy-ieee802.3-c22")) {
+		phy_info->phy_type = GENERIC_8023_C22_PHY;
+		phy_info->link_function =
+			__cvmx_get_generic_8023_c22_phy_link_state;
+		debug("Generic 802.3 c22 PHY detected\n");
+	} else if (!fdt_node_check_compatible(fdt_addr, phy,
+					      "ethernet-phy-ieee802.3-c45")) {
+		phy_info->phy_type = GENERIC_8023_C45_PHY;
+		phy_info->link_function = __get_generic_8023_c45_phy_link_state;
+		debug("Generic 802.3 c45 PHY detected\n");
+	} else {
+		debug("Unknown PHY compatibility\n");
+		phy_info->phy_type = (cvmx_phy_type_t)-1;
+		phy_info->link_function = NULL;
+	}
+
+	phy_info->host_mode = CVMX_PHY_HOST_MODE_UNKNOWN;
+	if (host_mode_str) {
+		if (strcmp(host_mode_str, "rxaui") == 0)
+			phy_info->host_mode = CVMX_PHY_HOST_MODE_RXAUI;
+		else if (strcmp(host_mode_str, "xaui") == 0)
+			phy_info->host_mode = CVMX_PHY_HOST_MODE_XAUI;
+		else if (strcmp(host_mode_str, "sgmii") == 0)
+			phy_info->host_mode = CVMX_PHY_HOST_MODE_SGMII;
+		else if (strcmp(host_mode_str, "qsgmii") == 0)
+			phy_info->host_mode = CVMX_PHY_HOST_MODE_QSGMII;
+		else
+			debug("Unknown PHY host mode\n");
+	}
+
+	/* Check if PHY parent is the octeon MDIO bus. Some boards are connected
+	 * though a MUX and for them direct_connect_to_phy will be 0
+	 */
+	phy_parent = fdt_parent_offset(fdt_addr, phy);
+	if (phy_parent < 0) {
+		debug("ERROR : cannot find phy parent for ipd_port=%d ret=%d\n",
+		      ipd_port, phy_parent);
+		return -1;
+	}
+	/* For multi-phy devices and devices on a MUX, go to the parent */
+	ret = fdt_node_check_compatible(fdt_addr, phy_parent,
+					"ethernet-phy-nexus");
+	if (ret == 0) {
+		/* It's a nexus so check the grandparent. */
+		phy_addr_offset =
+			cvmx_fdt_get_int(fdt_addr, phy_parent, "reg", 0);
+		phy_parent = fdt_parent_offset(fdt_addr, phy_parent);
+	}
+
+	/* Check for a muxed MDIO interface */
+	mdio_parent = fdt_parent_offset(fdt_addr, phy_parent);
+	ret = fdt_node_check_compatible(fdt_addr, mdio_parent,
+					"cavium,mdio-mux");
+	if (ret == 0) {
+		ret = __get_muxed_mdio_info_from_dt(phy_info, phy_parent,
+						    mdio_parent);
+		if (ret) {
+			printf("Error reading mdio mux information for ipd port %d\n",
+			       ipd_port);
+			return -1;
+		}
+	}
+	ret = fdt_node_check_compatible(fdt_addr, phy_parent,
+					"cavium,octeon-3860-mdio");
+	if (ret == 0) {
+		u32 *mdio_reg_base =
+			(u32 *)fdt_getprop(fdt_addr, phy_parent, "reg", 0);
+		phy_info->direct_connect = 1;
+		if (mdio_reg_base == 0) {
+			debug("ERROR : unable to get reg property in phy mdio\n");
+			return -1;
+		}
+		phy_info->mdio_unit =
+			__mdiobus_addr_to_unit(fdt32_to_cpu(mdio_reg_base[1]));
+		debug("phy parent=%s reg_base=%08x mdio_unit=%d\n",
+		      fdt_get_name(fdt_addr, phy_parent, NULL),
+		      (int)mdio_reg_base[1], phy_info->mdio_unit);
+	} else {
+		phy_info->direct_connect = 0;
+		/* The PHY is not directly connected to the Octeon MDIO bus.
+		 * SE doesn't have abstractions for MDIO MUXes or MDIO MUX
+		 * drivers, so board-specific code is needed for the
+		 * non-direct cases. For now the MDIO unit is defaulted to 1.
+		 */
+		debug("%s PHY at address: %d is not directly connected\n",
+		      __func__, phy_info->phy_addr);
+	}
+
+	phy_info->phy_addr = cvmx_fdt_get_int(fdt_addr, phy, "reg", -1);
+	if (phy_info->phy_addr < 0) {
+		debug("ERROR: Could not read phy address from reg in DT\n");
+		return -1;
+	}
+	phy_info->phy_addr += phy_addr_offset;
+	phy_info->phy_addr |= phy_info->mdio_unit << 8;
+	debug("%s(%p, %d) => 0x%x\n", __func__, phy_info, ipd_port,
+	      phy_info->phy_addr);
+	return phy_info->phy_addr;
+}
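+
+/*
+ * Worked example of the encoding returned above (values assumed): a PHY at
+ * address 8 behind MDIO unit 1 yields (1 << 8) | 8 = 0x108, matching the
+ * phy_addr >> 8 / phy_addr & 0xff split used by the link-state readers.
+ */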
+
+/**
+ * @INTERNAL
+ * Parse the device tree and set whether a port is valid or not.
+ *
+ * @param fdt_addr	Pointer to device tree
+ *
+ * @return 0 for success, -1 on error.
+ */
+int __cvmx_helper_parse_bgx_dt(const void *fdt_addr)
+{
+	int port_index;
+	struct cvmx_xiface xi;
+	int fdt_port_node = -1;
+	int fdt_interface_node;
+	int fdt_phy_node;
+	u64 reg_addr;
+	int xiface;
+	struct cvmx_phy_info *phy_info;
+	static bool parsed;
+	int err;
+	int ipd_port;
+
+	if (parsed) {
+		debug("%s: Already parsed\n", __func__);
+		return 0;
+	}
+	while ((fdt_port_node = fdt_node_offset_by_compatible(
+			fdt_addr, fdt_port_node,
+			"cavium,octeon-7890-bgx-port")) >= 0) {
+		/* Get the port number */
+		port_index =
+			cvmx_fdt_get_int(fdt_addr, fdt_port_node, "reg", -1);
+		if (port_index < 0) {
+			debug("Error: missing reg field for bgx port in device tree\n");
+			return -1;
+		}
+		debug("%s: Parsing BGX port %d\n", __func__, port_index);
+		/* Get the interface number */
+		fdt_interface_node = fdt_parent_offset(fdt_addr, fdt_port_node);
+		if (fdt_interface_node < 0) {
+			debug("Error: device tree corrupt!\n");
+			return -1;
+		}
+		if (fdt_node_check_compatible(fdt_addr, fdt_interface_node,
+					      "cavium,octeon-7890-bgx")) {
+			debug("Error: incompatible Ethernet MAC Nexus in device tree!\n");
+			return -1;
+		}
+		reg_addr =
+			cvmx_fdt_get_addr(fdt_addr, fdt_interface_node, "reg");
+		debug("%s: BGX interface address: 0x%llx\n", __func__,
+		      (unsigned long long)reg_addr);
+		if (reg_addr == FDT_ADDR_T_NONE) {
+			debug("Device tree BGX node has invalid address 0x%llx\n",
+			      (unsigned long long)reg_addr);
+			return -1;
+		}
+		reg_addr = cvmx_fdt_translate_address(fdt_addr,
+						      fdt_interface_node,
+						      (u32 *)&reg_addr);
+		xi = __cvmx_bgx_reg_addr_to_xiface(reg_addr);
+		if (xi.node < 0) {
+			debug("Device tree BGX node has invalid address 0x%llx\n",
+			      (unsigned long long)reg_addr);
+			return -1;
+		}
+		debug("%s: Found BGX node %d, interface %d\n", __func__,
+		      xi.node, xi.interface);
+		xiface = cvmx_helper_node_interface_to_xiface(xi.node,
+							      xi.interface);
+		cvmx_helper_set_port_fdt_node_offset(xiface, port_index,
+						     fdt_port_node);
+		cvmx_helper_set_port_valid(xiface, port_index, true);
+
+		if (fdt_getprop(fdt_addr, fdt_port_node,
+				"cavium,sgmii-mac-phy-mode", NULL))
+			cvmx_helper_set_mac_phy_mode(xiface, port_index, true);
+		else
+			cvmx_helper_set_mac_phy_mode(xiface, port_index, false);
+
+		if (fdt_getprop(fdt_addr, fdt_port_node, "cavium,force-link-up",
+				NULL))
+			cvmx_helper_set_port_force_link_up(xiface, port_index,
+							   true);
+		else
+			cvmx_helper_set_port_force_link_up(xiface, port_index,
+							   false);
+
+		if (fdt_getprop(fdt_addr, fdt_port_node,
+				"cavium,sgmii-mac-1000x-mode", NULL))
+			cvmx_helper_set_1000x_mode(xiface, port_index, true);
+		else
+			cvmx_helper_set_1000x_mode(xiface, port_index, false);
+
+		if (fdt_getprop(fdt_addr, fdt_port_node,
+				"cavium,disable-autonegotiation", NULL))
+			cvmx_helper_set_port_autonegotiation(xiface, port_index,
+							     false);
+		else
+			cvmx_helper_set_port_autonegotiation(xiface, port_index,
+							     true);
+
+		fdt_phy_node = cvmx_fdt_lookup_phandle(fdt_addr, fdt_port_node,
+						       "phy-handle");
+		if (fdt_phy_node >= 0) {
+			cvmx_helper_set_phy_fdt_node_offset(xiface, port_index,
+							    fdt_phy_node);
+			debug("%s: Setting PHY fdt node offset for interface 0x%x, port %d to %d\n",
+			      __func__, xiface, port_index, fdt_phy_node);
+			debug("%s: PHY node name: %s\n", __func__,
+			      fdt_get_name(fdt_addr, fdt_phy_node, NULL));
+			cvmx_helper_set_port_phy_present(xiface, port_index,
+							 true);
+			ipd_port = cvmx_helper_get_ipd_port(xiface, port_index);
+			if (ipd_port >= 0) {
+				debug("%s: Allocating phy info for 0x%x:%d\n",
+				      __func__, xiface, port_index);
+				phy_info =
+					(cvmx_phy_info_t *)cvmx_bootmem_alloc(
+						sizeof(*phy_info), 0);
+				if (!phy_info) {
+					debug("%s: Out of memory\n", __func__);
+					return -1;
+				}
+				memset(phy_info, 0, sizeof(*phy_info));
+				phy_info->phy_addr = -1;
+				err = __get_phy_info_from_dt(phy_info,
+							     ipd_port);
+				if (err) {
+					debug("%s: Error parsing phy info for ipd port %d\n",
+					      __func__, ipd_port);
+					return -1;
+				}
+				cvmx_helper_set_port_phy_info(
+					xiface, port_index, phy_info);
+				debug("%s: Saved phy info\n", __func__);
+			}
+		} else {
+			cvmx_helper_set_phy_fdt_node_offset(xiface, port_index,
+							    -1);
+			debug("%s: No PHY fdt node offset for interface 0x%x, port %d to %d\n",
+			      __func__, xiface, port_index, fdt_phy_node);
+			cvmx_helper_set_port_phy_present(xiface, port_index,
+							 false);
+		}
+	}
+	if (!sfp_parsed)
+		if (cvmx_sfp_parse_device_tree(fdt_addr))
+			debug("%s: Error parsing SFP device tree\n", __func__);
+	parsed = true;
+	return 0;
+}
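+
+/*
+ * Illustrative sketch (assumed layout, not part of this patch): a BGX port
+ * node carrying the optional properties parsed above might look like:
+ *
+ *	ethernet-mac@0 {
+ *		compatible = "cavium,octeon-7890-bgx-port";
+ *		reg = <0>;
+ *		phy-handle = <&sgmii_phy>;	/* hypothetical label */
+ *		cavium,sgmii-mac-phy-mode;
+ *		cavium,disable-autonegotiation;
+ *	};
+ */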
+
+int __cvmx_helper_parse_bgx_rgmii_dt(const void *fdt_addr)
+{
+	u64 reg_addr;
+	struct cvmx_xiface xi;
+	int fdt_port_node = -1;
+	int fdt_interface_node;
+	int fdt_phy_node;
+	int port_index;
+	int xiface;
+
+	/* There's only one xcv (RGMII) interface, so just search for the one
+	 * that's part of a BGX entry.
+	 */
+	while ((fdt_port_node = fdt_node_offset_by_compatible(
+			fdt_addr, fdt_port_node, "cavium,octeon-7360-xcv")) >=
+	       0) {
+		fdt_interface_node = fdt_parent_offset(fdt_addr, fdt_port_node);
+		if (fdt_interface_node < 0) {
+			printf("Error: device tree corrupt!\n");
+			return -1;
+		}
+		debug("%s: XCV parent node compatible: %s\n", __func__,
+		      (char *)fdt_getprop(fdt_addr, fdt_interface_node,
+					  "compatible", NULL));
+		if (!fdt_node_check_compatible(fdt_addr, fdt_interface_node,
+					       "cavium,octeon-7890-bgx"))
+			break;
+	}
+	if (fdt_port_node == -FDT_ERR_NOTFOUND) {
+		debug("No XCV/RGMII interface found in device tree\n");
+		return 0;
+	} else if (fdt_port_node < 0) {
+		debug("%s: Error %d parsing device tree\n", __func__,
+		      fdt_port_node);
+		return -1;
+	}
+	port_index = cvmx_fdt_get_int(fdt_addr, fdt_port_node, "reg", -1);
+	if (port_index != 0) {
+		printf("%s: Error: port index (reg) must be 0, not %d.\n",
+		       __func__, port_index);
+		return -1;
+	}
+	reg_addr = cvmx_fdt_get_addr(fdt_addr, fdt_interface_node, "reg");
+	if (reg_addr == FDT_ADDR_T_NONE) {
+		printf("%s: Error: could not get BGX interface address\n",
+		       __func__);
+		return -1;
+	}
+	/* We don't have to bother translating since only 78xx supports OCX and
+	 * doesn't support RGMII.
+	 */
+	xi = __cvmx_bgx_reg_addr_to_xiface(reg_addr);
+	debug("%s: xi.node: %d, xi.interface: 0x%x, addr: 0x%llx\n", __func__,
+	      xi.node, xi.interface, (unsigned long long)reg_addr);
+	if (xi.node < 0) {
+		printf("%s: Device tree BGX node has invalid address 0x%llx\n",
+		       __func__, (unsigned long long)reg_addr);
+		return -1;
+	}
+	debug("%s: Found XCV (RGMII) interface on interface %d\n", __func__,
+	      xi.interface);
+	debug("  phy handle: 0x%x\n",
+	      cvmx_fdt_get_int(fdt_addr, fdt_port_node, "phy-handle", -1));
+	fdt_phy_node =
+		cvmx_fdt_lookup_phandle(fdt_addr, fdt_port_node, "phy-handle");
+	debug("%s: phy-handle node: 0x%x\n", __func__, fdt_phy_node);
+	xiface = cvmx_helper_node_interface_to_xiface(xi.node, xi.interface);
+
+	cvmx_helper_set_port_fdt_node_offset(xiface, port_index, fdt_port_node);
+	if (fdt_phy_node >= 0) {
+		debug("%s: Setting PHY fdt node offset for interface 0x%x, port %d to %d\n",
+		      __func__, xiface, port_index, fdt_phy_node);
+		debug("%s: PHY node name: %s\n", __func__,
+		      fdt_get_name(fdt_addr, fdt_phy_node, NULL));
+		cvmx_helper_set_phy_fdt_node_offset(xiface, port_index,
+						    fdt_phy_node);
+		cvmx_helper_set_port_phy_present(xiface, port_index, true);
+	} else {
+		cvmx_helper_set_phy_fdt_node_offset(xiface, port_index, -1);
+		debug("%s: No PHY fdt node offset for interface 0x%x, port %d to %d\n",
+		      __func__, xiface, port_index, fdt_phy_node);
+		cvmx_helper_set_port_phy_present(xiface, port_index, false);
+	}
+
+	return 0;
+}
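+
+/*
+ * Illustrative sketch (assumed layout): the XCV port searched for above is
+ * expected to sit under a BGX nexus, e.g.:
+ *
+ *	ethernet-mac-nexus@11800e0000000 {
+ *		compatible = "cavium,octeon-7890-bgx";
+ *		ethernet-mac@0 {
+ *			compatible = "cavium,octeon-7360-xcv";
+ *			reg = <0>;
+ *			phy-handle = <&rgmii_phy>;	/* hypothetical label */
+ *		};
+ *	};
+ */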
+
+/**
+ * Return whether a port is present on an interface
+ *
+ * @param fdt_addr - address of the flat device tree
+ * @param ipd_port - IPD port number
+ *
+ * @return 1 if port is present, 0 if not present, -1 if error
+ */
+int __cvmx_helper_board_get_port_from_dt(void *fdt_addr, int ipd_port)
+{
+	int port_index;
+	int aliases;
+	const char *pip_path;
+	char name_buffer[24];
+	int pip, iface, eth;
+	cvmx_helper_interface_mode_t mode;
+	int xiface = cvmx_helper_get_interface_num(ipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	u32 val;
+	int phy_node_offset;
+	int parse_bgx_dt_err;
+	int parse_vsc7224_err;
+
+	debug("%s(%p, %d)\n", __func__, fdt_addr, ipd_port);
+	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+		static int fdt_ports_initialized;
+
+		port_index = cvmx_helper_get_interface_index_num(ipd_port);
+
+		if (!fdt_ports_initialized) {
+			if (octeon_has_feature(OCTEON_FEATURE_BGX_XCV)) {
+				if (!__cvmx_helper_parse_bgx_rgmii_dt(fdt_addr))
+					fdt_ports_initialized = 1;
+				parse_bgx_dt_err =
+					__cvmx_helper_parse_bgx_dt(fdt_addr);
+				parse_vsc7224_err =
+					__cvmx_fdt_parse_vsc7224(fdt_addr);
+				if (!parse_bgx_dt_err && !parse_vsc7224_err)
+					fdt_ports_initialized = 1;
+			} else {
+				debug("%s: Error parsing FDT\n", __func__);
+				return -1;
+			}
+		}
+
+		return cvmx_helper_is_port_valid(xiface, port_index);
+	}
+
+	mode = cvmx_helper_interface_get_mode(xiface);
+
+	switch (mode) {
+	/* Device tree has information about the following mode types. */
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
+	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+	case CVMX_HELPER_INTERFACE_MODE_AGL:
+	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XFI:
+		aliases = 1;
+		break;
+	default:
+		aliases = 0;
+		break;
+	}
+
+	/* Device tree information is only present for interfaces that have a PHY */
+	if (!aliases)
+		return 1;
+
+	port_index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	aliases = fdt_path_offset(fdt_addr, "/aliases");
+	if (aliases < 0) {
+		debug("%s: ERROR: /aliases not found in device tree fdt_addr=%p\n",
+		      __func__, fdt_addr);
+		return -1;
+	}
+
+	pip_path = (const char *)fdt_getprop(fdt_addr, aliases, "pip", NULL);
+	if (!pip_path) {
+		debug("%s: ERROR: interface %x pip path not found in device tree\n",
+		      __func__, xiface);
+		return -1;
+	}
+	pip = fdt_path_offset(fdt_addr, pip_path);
+	if (pip < 0) {
+		debug("%s: ERROR: interface %x pip not found in device tree\n",
+		      __func__, xiface);
+		return -1;
+	}
+	snprintf(name_buffer, sizeof(name_buffer), "interface@%d",
+		 xi.interface);
+	iface = fdt_subnode_offset(fdt_addr, pip, name_buffer);
+	if (iface < 0)
+		return 0;
+	snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", port_index);
+	eth = fdt_subnode_offset(fdt_addr, iface, name_buffer);
+	debug("%s: eth subnode offset %d from %s\n", __func__, eth,
+	      name_buffer);
+
+	if (eth < 0)
+		return -1;
+
+	cvmx_helper_set_port_fdt_node_offset(xiface, port_index, eth);
+
+	phy_node_offset = cvmx_fdt_get_int(fdt_addr, eth, "phy", -1);
+	cvmx_helper_set_phy_fdt_node_offset(xiface, port_index,
+					    phy_node_offset);
+
+	if (fdt_getprop(fdt_addr, eth, "cavium,sgmii-mac-phy-mode", NULL))
+		cvmx_helper_set_mac_phy_mode(xiface, port_index, true);
+	else
+		cvmx_helper_set_mac_phy_mode(xiface, port_index, false);
+
+	if (fdt_getprop(fdt_addr, eth, "cavium,force-link-up", NULL))
+		cvmx_helper_set_port_force_link_up(xiface, port_index, true);
+	else
+		cvmx_helper_set_port_force_link_up(xiface, port_index, false);
+
+	if (fdt_getprop(fdt_addr, eth, "cavium,sgmii-mac-1000x-mode", NULL))
+		cvmx_helper_set_1000x_mode(xiface, port_index, true);
+	else
+		cvmx_helper_set_1000x_mode(xiface, port_index, false);
+
+	if (fdt_getprop(fdt_addr, eth, "cavium,disable-autonegotiation", NULL))
+		cvmx_helper_set_port_autonegotiation(xiface, port_index, false);
+	else
+		cvmx_helper_set_port_autonegotiation(xiface, port_index, true);
+
+	if (mode == CVMX_HELPER_INTERFACE_MODE_AGL) {
+		bool tx_bypass = false;
+
+		if (fdt_getprop(fdt_addr, eth, "cavium,rx-clk-delay-bypass",
+				NULL))
+			cvmx_helper_set_agl_rx_clock_delay_bypass(
+				xiface, port_index, true);
+		else
+			cvmx_helper_set_agl_rx_clock_delay_bypass(
+				xiface, port_index, false);
+
+		val = cvmx_fdt_get_int(fdt_addr, eth, "cavium,rx-clk-skew", 0);
+		cvmx_helper_set_agl_rx_clock_skew(xiface, port_index, val);
+
+		if (fdt_getprop(fdt_addr, eth, "cavium,tx-clk-delay-bypass",
+				NULL))
+			tx_bypass = true;
+
+		val = cvmx_fdt_get_int(fdt_addr, eth, "tx-clk-delay", 0);
+		cvmx_helper_cfg_set_rgmii_tx_clk_delay(xiface, port_index,
+						       tx_bypass, val);
+
+		val = cvmx_fdt_get_int(fdt_addr, eth, "cavium,refclk-sel", 0);
+		cvmx_helper_set_agl_refclk_sel(xiface, port_index, val);
+	}
+
+	return (eth >= 0);
+}
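+
+/*
+ * Illustrative sketch (assumed layout): on non-BGX parts the lookup above
+ * resolves the port node through the "pip" alias, e.g.:
+ *
+ *	aliases {
+ *		pip = &pip;
+ *	};
+ *
+ *	pip: pip@11800a0000000 {
+ *		interface@0 {
+ *			ethernet@0 {
+ *				phy = <&phy0>;	/* hypothetical label */
+ *			};
+ *		};
+ *	};
+ */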
+
+/**
+ * Given the address of the MDIO registers, output the CPU node and MDIO bus
+ *
+ * @param	addr	64-bit address of MDIO registers (from device tree)
+ * @param[out]	node	CPU node number (78xx)
+ * @param[out]	bus	MDIO bus number
+ */
+void __cvmx_mdio_addr_to_node_bus(u64 addr, int *node, int *bus)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		if (node)
+			*node = cvmx_csr_addr_to_node(addr);
+		addr = cvmx_csr_addr_strip_node(addr);
+	} else {
+		if (node)
+			*node = 0;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		switch (addr) {
+		case 0x0001180000003800:
+			*bus = 0;
+			break;
+		case 0x0001180000003880:
+			*bus = 1;
+			break;
+		case 0x0001180000003900:
+			*bus = 2;
+			break;
+		case 0x0001180000003980:
+			*bus = 3;
+			break;
+		default:
+			*bus = -1;
+			printf("%s: Invalid SMI bus address 0x%llx\n", __func__,
+			       (unsigned long long)addr);
+			break;
+		}
+	} else if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+		   OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+		switch (addr) {
+		case 0x0001180000003800:
+			*bus = 0;
+			break;
+		case 0x0001180000003880:
+			*bus = 1;
+			break;
+		default:
+			*bus = -1;
+			printf("%s: Invalid SMI bus address 0x%llx\n", __func__,
+			       (unsigned long long)addr);
+			break;
+		}
+	} else {
+		switch (addr) {
+		case 0x0001180000001800:
+			*bus = 0;
+			break;
+		case 0x0001180000001900:
+			*bus = 1;
+			break;
+		default:
+			*bus = -1;
+			printf("%s: Invalid SMI bus address 0x%llx\n", __func__,
+			       (unsigned long long)addr);
+			break;
+		}
+	}
+}
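+
+/*
+ * Usage sketch (hypothetical caller): the SMI1 controller at
+ * 0x0001180000003880 on a CN73XX resolves to node 0, bus 1:
+ *
+ *	int node, bus;
+ *
+ *	__cvmx_mdio_addr_to_node_bus(0x0001180000003880ull, &node, &bus);
+ *
+ * yields node == 0 and bus == 1.
+ */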
diff --git a/arch/mips/mach-octeon/cvmx-helper-cfg.c b/arch/mips/mach-octeon/cvmx-helper-cfg.c
index 494108f..a3f4ff0 100644
--- a/arch/mips/mach-octeon/cvmx-helper-cfg.c
+++ b/arch/mips/mach-octeon/cvmx-helper-cfg.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
  *
  * Helper Functions for the Configuration Framework
  */
@@ -100,7 +100,6 @@
 static int cvmx_cfg_max_pko_engines; /* # of PKO DMA engines allocated */
 static int cvmx_pko_queue_alloc(u64 port, int count);
 static void cvmx_init_port_cfg(void);
-static const int dbg;
 
 int __cvmx_helper_cfg_pknd(int xiface, int index)
 {
@@ -184,16 +183,6 @@
 	return cvmx_cfg_max_pko_engines;
 }
 
-int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val)
-{
-	if (opt >= CVMX_HELPER_CFG_OPT_MAX)
-		return -1;
-
-	cvmx_cfg_opts[opt] = val;
-
-	return 0;
-}
-
 uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt)
 {
 	if (opt >= CVMX_HELPER_CFG_OPT_MAX)
@@ -298,18 +287,6 @@
 	return 0;
 }
 
-int cvmx_helper_pko_queue_config_get(int node, cvmx_user_static_pko_queue_config_t *cfg)
-{
-	*cfg = __cvmx_pko_queue_static_config[node];
-	return 0;
-}
-
-int cvmx_helper_pko_queue_config_set(int node, cvmx_user_static_pko_queue_config_t *cfg)
-{
-	__cvmx_pko_queue_static_config[node] = *cfg;
-	return 0;
-}
-
 static int queue_range_init;
 
 int init_cvmx_pko_que_range(void)
@@ -376,91 +353,6 @@
 	return 0;
 }
 
-/*
- * return the queues for "port"
- *
- * @param  port   the port for which the queues are returned
- *
- * Return:  0 on success
- *         -1 on failure
- */
-int cvmx_pko_queue_free(uint64_t port)
-{
-	int ret_val = -1;
-
-	init_cvmx_pko_que_range();
-	if (port >= CVMX_HELPER_CFG_MAX_PKO_QUEUES) {
-		debug("ERROR: %s port=%d > %d", __func__, (int)port,
-		      CVMX_HELPER_CFG_MAX_PKO_QUEUES);
-		return -1;
-	}
-
-	ret_val = cvmx_free_global_resource_range_with_base(
-		CVMX_GR_TAG_PKO_QUEUES, cvmx_pko_queue_table[port].ccppp_queue_base,
-		cvmx_pko_queue_table[port].ccppp_num_queues);
-	if (ret_val != 0)
-		return ret_val;
-
-	cvmx_pko_queue_table[port].ccppp_num_queues = 0;
-	cvmx_pko_queue_table[port].ccppp_queue_base = CVMX_HELPER_CFG_INVALID_VALUE;
-	ret_val = 0;
-	return ret_val;
-}
-
-void cvmx_pko_queue_free_all(void)
-{
-	int i;
-
-	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
-		if (cvmx_pko_queue_table[i].ccppp_queue_base !=
-		    CVMX_HELPER_CFG_INVALID_VALUE)
-			cvmx_pko_queue_free(i);
-}
-
-void cvmx_pko_queue_show(void)
-{
-	int i;
-
-	cvmx_show_global_resource_range(CVMX_GR_TAG_PKO_QUEUES);
-	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
-		if (cvmx_pko_queue_table[i].ccppp_queue_base !=
-		    CVMX_HELPER_CFG_INVALID_VALUE)
-			debug("port=%d que_base=%d que_num=%d\n", i,
-			      (int)cvmx_pko_queue_table[i].ccppp_queue_base,
-			      (int)cvmx_pko_queue_table[i].ccppp_num_queues);
-}
-
-void cvmx_helper_cfg_show_cfg(void)
-{
-	int i, j;
-
-	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
-		debug("%s: interface%d mode %10s nports%4d\n", __func__, i,
-		      cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(i)),
-		      cvmx_helper_interface_enumerate(i));
-
-		for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
-			debug("\tpknd[%i][%d]%d", i, j,
-			      __cvmx_helper_cfg_pknd(i, j));
-			debug(" pko_port_base[%i][%d]%d", i, j,
-			      __cvmx_helper_cfg_pko_port_base(i, j));
-			debug(" pko_port_num[%i][%d]%d\n", i, j,
-			      __cvmx_helper_cfg_pko_port_num(i, j));
-		}
-	}
-
-	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
-		if (__cvmx_helper_cfg_pko_queue_base(i) !=
-		    CVMX_HELPER_CFG_INVALID_VALUE) {
-			debug("%s: pko_port%d qbase%d nqueues%d interface%d index%d\n",
-			      __func__, i, __cvmx_helper_cfg_pko_queue_base(i),
-			      __cvmx_helper_cfg_pko_queue_num(i),
-			      __cvmx_helper_cfg_pko_port_interface(i),
-			      __cvmx_helper_cfg_pko_port_index(i));
-		}
-	}
-}
-
 /*
  * initialize cvmx_cfg_pko_port_map
  */
@@ -515,141 +407,6 @@
 	cvmx_cfg_max_pko_engines = pko_eid;
 }
 
-void cvmx_helper_cfg_set_jabber_and_frame_max(void)
-{
-	int interface, port;
-	/*Set the frame max size and jabber size to 65535. */
-	const unsigned int max_frame = 65535;
-
-	// FIXME: should support node argument for remote node init
-	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
-		int ipd_port;
-		int node = cvmx_get_node_num();
-
-		for (interface = 0;
-		     interface < cvmx_helper_get_number_of_interfaces();
-		     interface++) {
-			int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-			cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
-			int num_ports = cvmx_helper_ports_on_interface(xiface);
-
-			// FIXME: should be an easier way to determine
-			// that an interface is Ethernet/BGX
-			switch (imode) {
-			case CVMX_HELPER_INTERFACE_MODE_SGMII:
-			case CVMX_HELPER_INTERFACE_MODE_XAUI:
-			case CVMX_HELPER_INTERFACE_MODE_RXAUI:
-			case CVMX_HELPER_INTERFACE_MODE_XLAUI:
-			case CVMX_HELPER_INTERFACE_MODE_XFI:
-			case CVMX_HELPER_INTERFACE_MODE_10G_KR:
-			case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
-				for (port = 0; port < num_ports; port++) {
-					ipd_port = cvmx_helper_get_ipd_port(xiface, port);
-					cvmx_pki_set_max_frm_len(ipd_port, max_frame);
-					cvmx_helper_bgx_set_jabber(xiface, port, max_frame);
-				}
-				break;
-			default:
-				break;
-			}
-		}
-	} else {
-		/*Set the frame max size and jabber size to 65535. */
-		for (interface = 0; interface < cvmx_helper_get_number_of_interfaces();
-		     interface++) {
-			int xiface = cvmx_helper_node_interface_to_xiface(cvmx_get_node_num(),
-									  interface);
-			/*
-			 * Set the frame max size and jabber size to 65535, as the defaults
-			 * are too small.
-			 */
-			cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
-			int num_ports = cvmx_helper_ports_on_interface(xiface);
-
-			switch (imode) {
-			case CVMX_HELPER_INTERFACE_MODE_SGMII:
-			case CVMX_HELPER_INTERFACE_MODE_QSGMII:
-			case CVMX_HELPER_INTERFACE_MODE_XAUI:
-			case CVMX_HELPER_INTERFACE_MODE_RXAUI:
-				for (port = 0; port < num_ports; port++)
-					csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
-				/* Set max and min value for frame check */
-				cvmx_pip_set_frame_check(interface, -1);
-				break;
-
-			case CVMX_HELPER_INTERFACE_MODE_RGMII:
-			case CVMX_HELPER_INTERFACE_MODE_GMII:
-				/* Set max and min value for frame check */
-				cvmx_pip_set_frame_check(interface, -1);
-				for (port = 0; port < num_ports; port++) {
-					csr_wr(CVMX_GMXX_RXX_FRM_MAX(port, interface), 65535);
-					csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
-				}
-				break;
-			case CVMX_HELPER_INTERFACE_MODE_ILK:
-				/* Set max and min value for frame check */
-				cvmx_pip_set_frame_check(interface, -1);
-				for (port = 0; port < num_ports; port++) {
-					int ipd_port = cvmx_helper_get_ipd_port(interface, port);
-
-					cvmx_ilk_enable_la_header(ipd_port, 0);
-				}
-				break;
-			case CVMX_HELPER_INTERFACE_MODE_SRIO:
-				/* Set max and min value for frame check */
-				cvmx_pip_set_frame_check(interface, -1);
-				break;
-			case CVMX_HELPER_INTERFACE_MODE_AGL:
-				/* Set max and min value for frame check */
-				cvmx_pip_set_frame_check(interface, -1);
-				csr_wr(CVMX_AGL_GMX_RXX_FRM_MAX(0), 65535);
-				csr_wr(CVMX_AGL_GMX_RXX_JABBER(0), 65535);
-				break;
-			default:
-				break;
-			}
-		}
-	}
-}
-
-/**
- * Enable storing short packets only in the WQE
- * unless NO_WPTR is set, which already has the same effect
- */
-void cvmx_helper_cfg_store_short_packets_in_wqe(void)
-{
-	int interface, port;
-	cvmx_ipd_ctl_status_t ipd_ctl_status;
-	unsigned int dyn_rs = 1;
-
-	if (octeon_has_feature(OCTEON_FEATURE_PKI))
-		return;
-
-	/* NO_WPTR combines WQE with 1st MBUF, RS is redundant */
-	ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
-	if (ipd_ctl_status.s.no_wptr) {
-		dyn_rs = 0;
-		/* Note: consider also setting 'ignrs' wtn NO_WPTR is set */
-	}
-
-	for (interface = 0; interface < cvmx_helper_get_number_of_interfaces(); interface++) {
-		int num_ports = cvmx_helper_ports_on_interface(interface);
-
-		for (port = 0; port < num_ports; port++) {
-			cvmx_pip_port_cfg_t port_cfg;
-			int pknd = port;
-
-			if (octeon_has_feature(OCTEON_FEATURE_PKND))
-				pknd = cvmx_helper_get_pknd(interface, port);
-			else
-				pknd = cvmx_helper_get_ipd_port(interface, port);
-			port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
-			port_cfg.s.dyn_rs = dyn_rs;
-			csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
-		}
-	}
-}
-
 int __cvmx_helper_cfg_pko_port_interface(int pko_port)
 {
 	return cvmx_cfg_pko_port_map[pko_port].ccppl_interface;
@@ -716,16 +473,6 @@
 	return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_base_port;
 }
 
-int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port)
-{
-	int ipd_y, ipd_x;
-
-	ipd_y = IPD2PKO_CACHE_Y(ipd_port);
-	ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);
-
-	return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_nports;
-}
-
 /**
  * Return the number of queues to be assigned to this pko_port
  *
@@ -980,8 +727,6 @@
 		rc = __cvmx_helper_parse_bgx_dt(fdt_addr);
 		if (!rc)
 			rc = __cvmx_fdt_parse_vsc7224(fdt_addr);
-		if (!rc)
-			rc = __cvmx_fdt_parse_avsp5410(fdt_addr);
 		if (!rc && octeon_has_feature(OCTEON_FEATURE_BGX_XCV))
 			rc = __cvmx_helper_parse_bgx_rgmii_dt(fdt_addr);
 
@@ -1030,44 +775,6 @@
 	return 0;
 }
 
-typedef int (*cvmx_import_config_t)(void);
-cvmx_import_config_t cvmx_import_app_config;
-
-int __cvmx_helper_init_port_config_data_local(void)
-{
-	int rv = 0;
-	int dbg = 0;
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-
-	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
-		if (cvmx_import_app_config) {
-			rv = (*cvmx_import_app_config)();
-			if (rv != 0) {
-				debug("failed to import config\n");
-				return -1;
-			}
-		}
-
-		cvmx_helper_cfg_init_pko_port_map();
-		__cvmx_helper_cfg_init_ipd2pko_cache();
-	} else {
-		if (cvmx_import_app_config) {
-			rv = (*cvmx_import_app_config)();
-			if (rv != 0) {
-				debug("failed to import config\n");
-				return -1;
-			}
-		}
-	}
-	if (dbg) {
-		cvmx_helper_cfg_show_cfg();
-		cvmx_pko_queue_show();
-	}
-	return rv;
-}
-
 /*
  * This call is made from Linux octeon_ethernet driver
  * to setup the PKO with a specific queue count and
@@ -1077,9 +784,8 @@
 {
 	int rv, p, port_start, cnt;
 
-	if (dbg)
-		debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
-		      queue_cnt);
+	debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
+	      queue_cnt);
 
 	if (!port_cfg_data_initialized)
 		cvmx_init_port_cfg();
@@ -1122,6 +828,7 @@
 				struct cvmx_srio_port_param *sr;
 
 				pcfg = &cvmx_cfg_port[node][i][j];
+
 				memset(pcfg, 0, sizeof(*pcfg));
 
 				pcfg->port_fdt_node = CVMX_HELPER_CFG_INVALID_VALUE;
@@ -1188,8 +895,7 @@
 	int pknd = 0, bpid = 0;
 	const int use_static_config = 1;
 
-	if (dbg)
-		printf("%s:\n", __func__);
+	debug("%s:\n", __func__);
 
 	if (!port_cfg_data_initialized)
 		cvmx_init_port_cfg();
@@ -1295,10 +1001,11 @@
 		__cvmx_helper_cfg_init_ipd2pko_cache();
 	}
 
-	if (dbg) {
-		cvmx_helper_cfg_show_cfg();
-		cvmx_pko_queue_show();
-	}
+#ifdef DEBUG
+	cvmx_helper_cfg_show_cfg();
+	cvmx_pko_queue_show();
+#endif
+
 	return rv;
 }
 
@@ -1337,39 +1044,6 @@
 }
 
 /**
- * Search for a port based on its FDT node offset
- *
- * @param	of_offset	Node offset of port to search for
- * @param[out]	xiface		xinterface of match
- * @param[out]	index		port index of match
- *
- * Return:	0 if found, -1 if not found
- */
-int cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(int of_offset, int *xiface, int *index)
-{
-	int iface;
-	int i;
-	int node;
-	struct cvmx_cfg_port_param *pcfg = NULL;
-	*xiface = -1;
-	*index = -1;
-
-	for (node = 0; node < CVMX_MAX_NODES; node++) {
-		for (iface = 0; iface < CVMX_HELPER_MAX_IFACE; iface++) {
-			for (i = 0; i < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE; i++) {
-				pcfg = &cvmx_cfg_port[node][iface][i];
-				if (pcfg->valid && pcfg->port_fdt_node == of_offset) {
-					*xiface = cvmx_helper_node_interface_to_xiface(node, iface);
-					*index = i;
-					return 0;
-				}
-			}
-		}
-	}
-	return -1;
-}
-
-/**
  * @INTERNAL
  * Store the FDT node offset in the device tree of a phy
  *
@@ -1441,23 +1115,6 @@
 
 /**
  * @INTERNAL
- * Override default forward error correction for a port
- *
- * @param xiface	node and interface
- * @param index		port index
- * @param enable	true to enable fec, false to disable it
- */
-void cvmx_helper_set_port_fec(int xiface, int index, bool enable)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	cvmx_cfg_port[xi.node][xi.interface][index].enable_fec = enable;
-}
-
-/**
- * @INTERNAL
  * Returns if forward error correction is enabled or not.
  *
  * @param xiface	node and interface
@@ -1476,161 +1133,6 @@
 
 /**
  * @INTERNAL
- * Configure the SRIO RX interface AGC settings for host mode
- *
- * @param xiface	node and interface
- * @param index		lane
- * @param long_run	true for long run, false for short run
- * @param agc_override	true to put AGC in manual mode
- * @param ctle_zero	RX equalizer peaking control (default 0x6)
- * @param agc_pre_ctle	AGC pre-CTLE gain (default 0x5)
- * @param agc_post_ctle	AGC post-CTLE gain (default 0x4)
- *
- * NOTE: This must be called before SRIO is initialized to take effect
- */
-void cvmx_helper_set_srio_rx(int xiface, int index, bool long_run, bool ctle_zero_override,
-			     u8 ctle_zero, bool agc_override, uint8_t agc_pre_ctle,
-			     uint8_t agc_post_ctle)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-	struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
-	struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	sr->srio_rx_ctle_zero_override = ctle_zero_override;
-	sr->srio_rx_ctle_zero = ctle_zero;
-	sr->srio_rx_ctle_agc_override = agc_override;
-	sr->srio_rx_agc_pre_ctle = agc_pre_ctle;
-	sr->srio_rx_agc_post_ctle = agc_post_ctle;
-}
-
-/**
- * @INTERNAL
- * Get the SRIO RX interface AGC settings for host mode
- *
- * @param xiface	node and interface
- * @param index		lane
- * @param long_run	true for long run, false for short run
- * @param[out] agc_override	true to put AGC in manual mode
- * @param[out] ctle_zero	RX equalizer peaking control (default 0x6)
- * @param[out] agc_pre_ctle	AGC pre-CTLE gain (default 0x5)
- * @param[out] agc_post_ctle	AGC post-CTLE gain (default 0x4)
- */
-void cvmx_helper_get_srio_rx(int xiface, int index, bool long_run, bool *ctle_zero_override,
-			     u8 *ctle_zero, bool *agc_override, uint8_t *agc_pre_ctle,
-			     uint8_t *agc_post_ctle)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-	struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
-	struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	if (ctle_zero_override)
-		*ctle_zero_override = sr->srio_rx_ctle_zero_override;
-	if (ctle_zero)
-		*ctle_zero = sr->srio_rx_ctle_zero;
-	if (agc_override)
-		*agc_override = sr->srio_rx_ctle_agc_override;
-	if (agc_pre_ctle)
-		*agc_pre_ctle = sr->srio_rx_agc_pre_ctle;
-	if (agc_post_ctle)
-		*agc_post_ctle = sr->srio_rx_agc_post_ctle;
-}
-
-/**
- * @INTERNAL
- * Configure the SRIO TX interface for host mode
- *
- * @param xiface		node and interface
- * @param index			lane
- * @param long_run		true for long run, false for short run
- * @param tx_swing		tx swing value to use (default 0x7), -1 to not
- *				override.
- * @param tx_gain		PCS SDS TX gain (default 0x3), -1 to not
- *				override
- * @param tx_premptap_override	true to override preemphasis control
- * @param tx_premptap_pre	preemphasis pre tap value (default 0x0)
- * @param tx_premptap_post	preemphasis post tap value (default 0xF)
- * @param tx_vboost		vboost enable (1 = enable, -1 = don't override)
- *				hardware default is 1.
- *
- * NOTE: This must be called before SRIO is initialized to take effect
- */
-void cvmx_helper_set_srio_tx(int xiface, int index, bool long_run, int tx_swing, int tx_gain,
-			     bool tx_premptap_override, uint8_t tx_premptap_pre,
-			     u8 tx_premptap_post, int tx_vboost)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-	struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
-	struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-
-	sr->srio_tx_swing_override = (tx_swing != -1);
-	sr->srio_tx_swing = tx_swing != -1 ? tx_swing : 0x7;
-	sr->srio_tx_gain_override = (tx_gain != -1);
-	sr->srio_tx_gain = tx_gain != -1 ? tx_gain : 0x3;
-	sr->srio_tx_premptap_override = tx_premptap_override;
-	sr->srio_tx_premptap_pre = tx_premptap_override ? tx_premptap_pre : 0;
-	sr->srio_tx_premptap_post = tx_premptap_override ? tx_premptap_post : 0xF;
-	sr->srio_tx_vboost_override = tx_vboost != -1;
-	sr->srio_tx_vboost = (tx_vboost != -1) ? tx_vboost : 1;
-}
-
-/**
- * @INTERNAL
- * Get the SRIO TX interface settings for host mode
- *
- * @param xiface			node and interface
- * @param index				lane
- * @param long_run			true for long run, false for short run
- * @param[out] tx_swing_override	true to override pcs_sds_txX_swing
- * @param[out] tx_swing			tx swing value to use (default 0x7)
- * @param[out] tx_gain_override		true to override default gain
- * @param[out] tx_gain			PCS SDS TX gain (default 0x3)
- * @param[out] tx_premptap_override	true to override preemphasis control
- * @param[out] tx_premptap_pre		preemphasis pre tap value (default 0x0)
- * @param[out] tx_premptap_post		preemphasis post tap value (default 0xF)
- * @param[out] tx_vboost_override	override vboost setting
- * @param[out] tx_vboost		vboost enable (default true)
- */
-void cvmx_helper_get_srio_tx(int xiface, int index, bool long_run, bool *tx_swing_override,
-			     u8 *tx_swing, bool *tx_gain_override, uint8_t *tx_gain,
-			     bool *tx_premptap_override, uint8_t *tx_premptap_pre,
-			     u8 *tx_premptap_post, bool *tx_vboost_override, bool *tx_vboost)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-	struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
-	struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-
-	if (tx_swing_override)
-		*tx_swing_override = sr->srio_tx_swing_override;
-	if (tx_swing)
-		*tx_swing = sr->srio_tx_swing;
-	if (tx_gain_override)
-		*tx_gain_override = sr->srio_tx_gain_override;
-	if (tx_gain)
-		*tx_gain = sr->srio_tx_gain;
-	if (tx_premptap_override)
-		*tx_premptap_override = sr->srio_tx_premptap_override;
-	if (tx_premptap_pre)
-		*tx_premptap_pre = sr->srio_tx_premptap_pre;
-	if (tx_premptap_post)
-		*tx_premptap_post = sr->srio_tx_premptap_post;
-	if (tx_vboost_override)
-		*tx_vboost_override = sr->srio_tx_vboost_override;
-	if (tx_vboost)
-		*tx_vboost = sr->srio_tx_vboost;
-}
-
-/**
- * @INTERNAL
  * Sets the PHY info data structure
  *
  * @param xiface	node and interface
@@ -1685,23 +1187,6 @@
 
 /**
  * @INTERNAL
- * Sets a pointer to the PHY LED configuration (if local GPIOs drive them)
- *
- * @param xiface	node and interface
- * @param index		portindex
- * @param leds		pointer to led data structure
- */
-void cvmx_helper_set_port_phy_leds(int xiface, int index, struct cvmx_phy_gpio_leds *leds)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds = leds;
-}
-
-/**
- * @INTERNAL
  * Disables RGMII TX clock bypass and sets delay value
  *
  * @param xiface	node and interface
@@ -1744,59 +1229,6 @@
 }
 
 /**
- * @INTERNAL
- * Retrieve the SFP node offset in the device tree
- *
- * @param xiface	node and interface
- * @param index		port index
- *
- * Return: offset in device tree or -1 if error or not defined.
- */
-int cvmx_helper_cfg_get_sfp_fdt_offset(int xiface, int index)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	return cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset;
-}
-
-/**
- * @INTERNAL
- * Sets the SFP node offset
- *
- * @param xiface	node and interface
- * @param index		port index
- * @param sfp_of_offset	Offset of SFP node in device tree
- */
-void cvmx_helper_cfg_set_sfp_fdt_offset(int xiface, int index, int sfp_of_offset)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset = sfp_of_offset;
-}
-
-/**
- * Get data structure defining the Microsemi VSC7224 channel info
- * or NULL if not present
- *
- * @param xiface	node and interface
- * @param index		port index
- *
- * Return: pointer to vsc7224 data structure or NULL if not present
- */
-struct cvmx_vsc7224_chan *cvmx_helper_cfg_get_vsc7224_chan_info(int xiface, int index)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	return cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan;
-}
-
-/**
  * Sets the Microsemi VSC7224 channel info data structure
  *
  * @param	xiface	node and interface
@@ -1814,40 +1246,6 @@
 }
 
 /**
- * Get data structure defining the Avago AVSP5410 phy info
- * or NULL if not present
- *
- * @param xiface	node and interface
- * @param index		port index
- *
- * Return: pointer to avsp5410 data structure or NULL if not present
- */
-struct cvmx_avsp5410 *cvmx_helper_cfg_get_avsp5410_info(int xiface, int index)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	return cvmx_cfg_port[xi.node][xi.interface][index].avsp5410;
-}
-
-/**
- * Sets the Avago AVSP5410 phy info data structure
- *
- * @param	xiface	node and interface
- * @param	index	port index
- * @param[in]	avsp5410_info	Avago AVSP5410 data structure
- */
-void cvmx_helper_cfg_set_avsp5410_info(int xiface, int index, struct cvmx_avsp5410 *avsp5410_info)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	cvmx_cfg_port[xi.node][xi.interface][index].avsp5410 = avsp5410_info;
-}
-
-/**
  * Gets the SFP data associated with a port
  *
  * @param	xiface	node and interface
@@ -1879,36 +1277,3 @@
 		cvmx_init_port_cfg();
 	cvmx_cfg_port[xi.node][xi.interface][index].sfp_info = sfp_info;
 }
-
-/**
- * Returns a pointer to the phy device associated with a port
- *
- * @param	xiface		node and interface
- * @param	index		port index
- *
- * return	pointer to phy device or NULL if none
- */
-struct phy_device *cvmx_helper_cfg_get_phy_device(int xiface, int index)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	return cvmx_cfg_port[xi.node][xi.interface][index].phydev;
-}
-
-/**
- * Sets the phy device associated with a port
- *
- * @param	xiface		node and interface
- * @param	index		port index
- * @param[in]	phydev		phy device to assiciate
- */
-void cvmx_helper_cfg_set_phy_device(int xiface, int index, struct phy_device *phydev)
-{
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	if (!port_cfg_data_initialized)
-		cvmx_init_port_cfg();
-	cvmx_cfg_port[xi.node][xi.interface][index].phydev = phydev;
-}
diff --git a/arch/mips/mach-octeon/cvmx-helper-fdt.c b/arch/mips/mach-octeon/cvmx-helper-fdt.c
index 3177dfb..400c73a 100644
--- a/arch/mips/mach-octeon/cvmx-helper-fdt.c
+++ b/arch/mips/mach-octeon/cvmx-helper-fdt.c
@@ -1,14 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
  *
  * FDT Helper functions similar to those provided to U-Boot.
  */
 
+#include <dm.h>
+#include <i2c.h>
 #include <log.h>
 #include <malloc.h>
 #include <net.h>
 #include <linux/delay.h>
+#include <asm-generic/gpio.h>
 
 #include <mach/cvmx-regs.h>
 #include <mach/cvmx-csr.h>
@@ -19,35 +22,6 @@
 #include <mach/cvmx-helper-board.h>
 #include <mach/cvmx-helper-cfg.h>
 #include <mach/cvmx-helper-fdt.h>
-#include <mach/cvmx-helper-gpio.h>
-
-/** Structure used to get type of GPIO from device tree */
-struct gpio_compat {
-	char *compatible;	  /** Compatible string */
-	enum cvmx_gpio_type type; /** Type */
-	int8_t size;		  /** (max) Number of pins */
-};
-
-#define GPIO_REG_PCA953X_IN	0
-#define GPIO_REG_PCA953X_OUT	1
-#define GPIO_REG_PCA953X_INVERT 2
-#define GPIO_REG_PCA953X_DIR	3
-
-#define GPIO_REG_PCA957X_IN	0
-#define GPIO_REG_PCA957X_INVERT 1
-#define GPIO_REG_PCA957X_CFG	4
-#define GPIO_REG_PCA957X_OUT	5
-
-enum cvmx_i2c_mux_type { I2C_MUX, I2C_SWITCH };
-
-/** Structure used to get type of GPIO from device tree */
-struct mux_compat {
-	char *compatible;		 /** Compatible string */
-	enum cvmx_i2c_bus_type type;	 /** Mux chip type */
-	enum cvmx_i2c_mux_type mux_type; /** Type of mux */
-	u8 enable;			 /** Enable bit for mux */
-	u8 size;			 /** (max) Number of channels */
-};
 
 /**
  * Local allocator to handle both SE and U-Boot that also zeroes out memory
@@ -57,45 +31,19 @@
  * Return:	pointer to allocated memory or NULL if out of memory.
  *		Alignment is set to 8-bytes.
  */
-void *__cvmx_fdt_alloc(size_t size)
+static void *cvmx_fdt_alloc(size_t size)
 {
 	return calloc(size, 1);
 }
 
-/**
- * Free allocated memory.
- *
- * @param	ptr	pointer to memory to free
- *
- * NOTE: This only works in U-Boot since SE does not really have a freeing
- *	 mechanism.  In SE the memory is zeroed out.
- */
-void __cvmx_fdt_free(void *ptr, size_t size)
-{
-	free(ptr);
-}
-
-/**
- * Look up a phandle and follow it to its node then return the offset of that
- * node.
- *
- * @param[in]	fdt_addr	pointer to FDT blob
- * @param	node		node to read phandle from
- * @param[in]	prop_name	name of property to find
- * @param[in,out] lenp		Number of phandles, input max number
- * @param[out]	nodes		Array of phandle nodes
- *
- * Return:	-ve error code on error or 0 for success
- */
-int cvmx_fdt_lookup_phandles(const void *fdt_addr, int node,
-			     const char *prop_name, int *lenp,
-			     int *nodes)
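+/**
+ * Look up a phandle and follow it to its node, returning the ofnode of each
+ * target.
+ *
+ * @param	node		node to read the phandle property from
+ * @param[in]	prop_name	name of property to find
+ * @param[in,out] lenp		Number of phandles, input max number
+ * @param[out]	nodes		Array of ofnode entries, one per phandle
+ *
+ * Return:	-ve error code on error or 0 for success
+ */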
+int cvmx_ofnode_lookup_phandles(ofnode node, const char *prop_name, int *lenp,
+				ofnode *nodes)
 {
 	const u32 *phandles;
 	int count;
 	int i;
 
-	phandles = fdt_getprop(fdt_addr, node, prop_name, &count);
+	phandles = ofnode_get_property(node, prop_name, &count);
 	if (!phandles || count < 0)
 		return -FDT_ERR_NOTFOUND;
 
@@ -104,8 +52,8 @@
 		count = *lenp;
 
 	for (i = 0; i < count; i++)
-		nodes[i] = fdt_node_offset_by_phandle(fdt_addr,
-						      fdt32_to_cpu(phandles[i]));
+		nodes[i] = ofnode_get_by_phandle(fdt32_to_cpu(phandles[i]));
+
 	*lenp = count;
 	return 0;
 }
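+
+/*
+ * Usage sketch (hypothetical caller): resolve up to four phandle targets
+ * from a "some-phandles" property (property name is illustrative only):
+ *
+ *	ofnode targets[4];
+ *	int count = 4;
+ *
+ *	if (!cvmx_ofnode_lookup_phandles(node, "some-phandles", &count,
+ *					 targets))
+ *		debug("found %d phandle targets\n", count);
+ */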
@@ -143,126 +91,6 @@
 }
 
 /**
- * Get the total size of the flat device tree
- *
- * @param[in]	fdt_addr	Address of FDT
- *
- * Return:	Size of flat device tree in bytes or error if negative.
- */
-int cvmx_fdt_get_fdt_size(const void *fdt_addr)
-{
-	int rc;
-
-	rc = fdt_check_header(fdt_addr);
-	if (rc)
-		return rc;
-	return fdt_totalsize(fdt_addr);
-}
-
-/**
- * Returns if a node is compatible with one of the items in the string list
- *
- * @param[in]	fdt_addr	Pointer to flat device tree
- * @param	node		Node offset to check
- * @param[in]	strlist		Array of FDT device compatibility strings,
- *				must end with NULL or empty string.
- *
- * Return:	0 if at least one item matches, 1 if no matches
- */
-int cvmx_fdt_node_check_compatible_list(const void *fdt_addr, int node, const char *const *strlist)
-{
-	while (*strlist && **strlist) {
-		if (!fdt_node_check_compatible(fdt_addr, node, *strlist))
-			return 0;
-		strlist++;
-	}
-	return 1;
-}
-
-/**
- * Given a FDT node, return the next compatible node.
- *
- * @param[in]	fdt_addr	Pointer to flat device tree
- * @param	start_offset	Starting node offset or -1 to find the first
- * @param	strlist		Array of FDT device compatibility strings, must
- *				end with NULL or empty string.
- *
- * Return:	next matching node or -1 if no more matches.
- */
-int cvmx_fdt_node_offset_by_compatible_list(const void *fdt_addr, int startoffset,
-					    const char *const *strlist)
-{
-	int offset;
-
-	for (offset = fdt_next_node(fdt_addr, startoffset, NULL); offset >= 0;
-	     offset = fdt_next_node(fdt_addr, offset, NULL)) {
-		if (!cvmx_fdt_node_check_compatible_list(fdt_addr, offset, strlist))
-			return offset;
-	}
-	return -1;
-}
-
-/**
- * Attaches a PHY to a SFP or QSFP.
- *
- * @param	sfp		sfp to attach PHY to
- * @param	phy_info	phy descriptor to attach or NULL to detach
- */
-void cvmx_sfp_attach_phy(struct cvmx_fdt_sfp_info *sfp, struct cvmx_phy_info *phy_info)
-{
-	sfp->phy_info = phy_info;
-	if (phy_info)
-		phy_info->sfp_info = sfp;
-}
-
-/**
- * Assigns an IPD port to a SFP slot
- *
- * @param	sfp		Handle to SFP data structure
- * @param	ipd_port	Port to assign it to
- *
- * Return:	0 for success, -1 on error
- */
-int cvmx_sfp_set_ipd_port(struct cvmx_fdt_sfp_info *sfp, int ipd_port)
-{
-	int i;
-
-	if (sfp->is_qsfp) {
-		int xiface;
-		cvmx_helper_interface_mode_t mode;
-
-		xiface = cvmx_helper_get_interface_num(ipd_port);
-		mode = cvmx_helper_interface_get_mode(xiface);
-		sfp->ipd_port[0] = ipd_port;
-
-		switch (mode) {
-		case CVMX_HELPER_INTERFACE_MODE_SGMII:
-		case CVMX_HELPER_INTERFACE_MODE_XFI:
-		case CVMX_HELPER_INTERFACE_MODE_10G_KR:
-			for (i = 1; i < 4; i++)
-				sfp->ipd_port[i] = cvmx_helper_get_ipd_port(xiface, i);
-			break;
-		case CVMX_HELPER_INTERFACE_MODE_XLAUI:
-		case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
-			sfp->ipd_port[0] = ipd_port;
-			for (i = 1; i < 4; i++)
-				sfp->ipd_port[i] = -1;
-			break;
-		default:
-			debug("%s: Interface mode %s for interface 0x%x, ipd_port %d not supported for QSFP\n",
-			      __func__, cvmx_helper_interface_mode_to_string(mode), xiface,
-			      ipd_port);
-			return -1;
-		}
-	} else {
-		sfp->ipd_port[0] = ipd_port;
-		for (i = 1; i < 4; i++)
-			sfp->ipd_port[i] = -1;
-	}
-	return 0;
-}
-
-/**
  * Parses all of the channels assigned to a VSC7224 device
  *
  * @param[in]		fdt_addr	Address of flat device tree
@@ -271,10 +99,10 @@
  *
  * Return:	0 for success, -1 on error
  */
-static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
+static int cvmx_fdt_parse_vsc7224_channels(ofnode node,
 					   struct cvmx_vsc7224 *vsc7224)
 {
-	int parent_offset = of_offset;
+	struct ofnode_phandle_args phandle;
 	int err = 0;
 	int reg;
 	int num_chan = 0;
@@ -289,35 +117,33 @@
 	bool is_tx;
 	bool is_qsfp;
 	const char *mac_str;
+	ofnode node_chan;
 
-	debug("%s(%p, %d, %s)\n", __func__, fdt_addr, of_offset, vsc7224->name);
-	do {
-		/* Walk through all channels */
-		of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
-							  "vitesse,vsc7224-channel");
-		if (of_offset == -FDT_ERR_NOTFOUND) {
-			break;
-		} else if (of_offset < 0) {
-			debug("%s: Failed finding compatible channel\n",
-			      __func__);
-			err = -1;
+	debug("%s(%x, %s)\n", __func__, ofnode_to_offset(node), vsc7224->name);
+	ofnode_for_each_compatible_node(node_chan, "vitesse,vsc7224-channel") {
+		if (!ofnode_valid(node_chan)) {
+			debug("%s: Error parsing FDT node %s\n",
+			      __func__, ofnode_get_name(node));
 			break;
 		}
-		if (fdt_parent_offset(fdt_addr, of_offset) != parent_offset)
+
+		if (ofnode_to_offset(ofnode_get_parent(node_chan)) !=
+		    ofnode_to_offset(node))
 			break;
-		reg = cvmx_fdt_get_int(fdt_addr, of_offset, "reg", -1);
+
+		reg = ofnode_get_addr(node_chan);
 		if (reg < 0 || reg > 3) {
 			debug("%s: channel reg is either not present or out of range\n",
 			      __func__);
 			err = -1;
 			break;
 		}
-		is_tx = cvmx_fdt_get_bool(fdt_addr, of_offset, "direction-tx");
+		is_tx = ofnode_read_bool(node_chan, "direction-tx");
 
 		debug("%s(%s): Adding %cx channel %d\n",
 		      __func__, vsc7224->name, is_tx ? 't' : 'r',
 		      reg);
-		tap_values = (const uint32_t *)fdt_getprop(fdt_addr, of_offset, "taps", &len);
+		tap_values = ofnode_get_property(node_chan, "taps", &len);
 		if (!tap_values) {
 			debug("%s: Error: no taps defined for vsc7224 channel %d\n",
 			      __func__, reg);
@@ -341,8 +167,8 @@
 		num_taps = len / 16;
 		debug("%s: Adding %d taps\n", __func__, num_taps);
 
-		channel = __cvmx_fdt_alloc(sizeof(*channel) +
-					   num_taps * sizeof(struct cvmx_vsc7224_tap));
+		channel = cvmx_fdt_alloc(sizeof(*channel) +
+					 num_taps * sizeof(struct cvmx_vsc7224_tap));
 		if (!channel) {
 			debug("%s: Out of memory\n", __func__);
 			err = -1;
@@ -351,11 +177,12 @@
 		vsc7224->channel[reg] = channel;
 		channel->num_taps = num_taps;
 		channel->lane = reg;
-		channel->of_offset = of_offset;
+		channel->of_offset = ofnode_to_offset(node_chan);
 		channel->is_tx = is_tx;
-		channel->pretap_disable = cvmx_fdt_get_bool(fdt_addr, of_offset, "pretap-disable");
-		channel->posttap_disable =
-			cvmx_fdt_get_bool(fdt_addr, of_offset, "posttap-disable");
+		channel->pretap_disable = ofnode_read_bool(node_chan,
+							   "pretap-disable");
+		channel->posttap_disable = ofnode_read_bool(node_chan,
+							    "posttap-disable");
 		channel->vsc7224 = vsc7224;
 		/* Read all the tap values */
 		for (i = 0; i < num_taps; i++) {
@@ -371,9 +198,9 @@
 		channel->ipd_port = -1;
 
 		mac_str = "sfp-mac";
-		if (fdt_getprop(fdt_addr, of_offset, mac_str, NULL)) {
+		if (ofnode_get_property(node_chan, mac_str, NULL)) {
 			is_qsfp = false;
-		} else if (fdt_getprop(fdt_addr, of_offset, "qsfp-mac", NULL)) {
+		} else if (ofnode_get_property(node_chan, "qsfp-mac", NULL)) {
 			is_qsfp = true;
 			mac_str = "qsfp-mac";
 		} else {
@@ -381,52 +208,59 @@
 			      vsc7224->name, reg);
 			return -1;
 		}
-		of_mac = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, mac_str);
-		if (of_mac < 0) {
+
+		err = ofnode_parse_phandle_with_args(node_chan, mac_str, NULL,
+						     0, 0, &phandle);
+		if (err) {
 			debug("%s: Error %d with MAC %s phandle for %s\n", __func__, of_mac,
 			      mac_str, vsc7224->name);
 			return -1;
 		}
 
-		debug("%s: Found mac at offset %d\n", __func__, of_mac);
-		err = cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_mac, &xiface, &index);
-		if (!err) {
-			channel->xiface = xiface;
-			channel->index = index;
-			channel->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
+		debug("%s: Found mac at %s\n", __func__,
+		      ofnode_get_name(phandle.node));
 
-			debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
-			      xiface, index, channel->ipd_port);
-			if (channel->ipd_port >= 0) {
-				cvmx_helper_cfg_set_vsc7224_chan_info(xiface, index, channel);
-				debug("%s: Storing config channel for xiface 0x%x, index %d\n",
-				      __func__, xiface, index);
-			}
-			sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
-			if (!sfp_info) {
-				debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d for channel %d\n",
-				      __func__, xiface, index, channel->lane);
-				continue;
-			}
+		xiface = (ofnode_get_addr(ofnode_get_parent(phandle.node))
+			  >> 24) & 0x0f;
+		index = ofnode_get_addr(phandle.node);
+		channel->xiface = xiface;
+		channel->index = index;
+		channel->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
 
-			/* Link it */
-			channel->next = sfp_info->vsc7224_chan;
-			if (sfp_info->vsc7224_chan)
-				sfp_info->vsc7224_chan->prev = channel;
-			sfp_info->vsc7224_chan = channel;
-			sfp_info->is_vsc7224 = true;
-			debug("%s: Registering VSC7224 %s channel %d with SFP %s\n", __func__,
-			      vsc7224->name, channel->lane, sfp_info->name);
-			if (!sfp_info->mod_abs_changed) {
-				debug("%s: Registering cvmx_sfp_vsc7224_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
-				      __func__, &cvmx_sfp_vsc7224_mod_abs_changed, xiface, index);
-				cvmx_sfp_register_mod_abs_changed(
-					sfp_info,
-					&cvmx_sfp_vsc7224_mod_abs_changed,
-					NULL);
-			}
+		debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
+		      xiface, index, channel->ipd_port);
+		if (channel->ipd_port >= 0) {
+			cvmx_helper_cfg_set_vsc7224_chan_info(xiface, index, channel);
+			debug("%s: Storing config channel for xiface 0x%x, index %d\n",
+			      __func__, xiface, index);
+		}
+		sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
+		if (!sfp_info) {
+			debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d for channel %d\n",
+			      __func__, xiface, index, channel->lane);
+			continue;
+		}
+
+		/* Link it */
+		channel->next = sfp_info->vsc7224_chan;
+		if (sfp_info->vsc7224_chan)
+			sfp_info->vsc7224_chan->prev = channel;
+		sfp_info->vsc7224_chan = channel;
+		sfp_info->is_vsc7224 = true;
+		debug("%s: Registering VSC7224 %s channel %d with SFP %s\n", __func__,
+		      vsc7224->name, channel->lane, sfp_info->name);
+		if (!sfp_info->mod_abs_changed) {
+			debug("%s: Registering cvmx_sfp_vsc7224_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
+			      __func__, &cvmx_sfp_vsc7224_mod_abs_changed, xiface, index);
+			cvmx_sfp_register_mod_abs_changed(
+				sfp_info,
+				&cvmx_sfp_vsc7224_mod_abs_changed,
+				NULL);
 		}
-	} while (!err && num_chan < 4);
+
+		if (num_chan >= 4)
+			break;
+	}
 
 	return err;
 }
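+
+/*
+ * Illustrative sketch (assumed layout): a channel node consumed by the
+ * parser above; "taps" carries four cells per tap entry (16 bytes each):
+ *
+ *	vsc7224-channel@0 {
+ *		compatible = "vitesse,vsc7224-channel";
+ *		reg = <0>;
+ *		direction-tx;
+ *		sfp-mac = <&sfp0_eth>;			/* hypothetical label */
+ *		taps = <0x0085 0x0060 0x5440 0x00e0>;	/* example values */
+ *	};
+ */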
@@ -441,12 +275,17 @@
  */
 int __cvmx_fdt_parse_vsc7224(const void *fdt_addr)
 {
-	int of_offset = -1;
 	struct cvmx_vsc7224 *vsc7224 = NULL;
-	struct cvmx_fdt_gpio_info *gpio_info = NULL;
+	ofnode node;
 	int err = 0;
-	int of_parent;
 	static bool parsed;
+	const int *init_array;
+	struct udevice *dev;
+	u16 value;
+	int reg;
+	int len;
+	int ret;
+	int i;
 
 	debug("%s(%p)\n", __func__, fdt_addr);
 
@@ -454,30 +293,23 @@
 		debug("%s: Already parsed\n", __func__);
 		return 0;
 	}
-	do {
-		of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
-							  "vitesse,vsc7224");
-		debug("%s: of_offset: %d\n", __func__, of_offset);
-		if (of_offset == -FDT_ERR_NOTFOUND) {
-			break;
-		} else if (of_offset < 0) {
-			err = -1;
-			debug("%s: Error %d parsing FDT\n",
-			      __func__, of_offset);
+
+	ofnode_for_each_compatible_node(node, "vitesse,vsc7224") {
+		if (!ofnode_valid(node)) {
+			debug("%s: Error parsing FDT node %s\n",
+			      __func__, ofnode_get_name(node));
 			break;
 		}
 
-		vsc7224 = __cvmx_fdt_alloc(sizeof(*vsc7224));
-
+		vsc7224 = cvmx_fdt_alloc(sizeof(*vsc7224));
 		if (!vsc7224) {
 			debug("%s: Out of memory!\n", __func__);
 			return -1;
 		}
-		vsc7224->of_offset = of_offset;
-		vsc7224->i2c_addr = cvmx_fdt_get_int(fdt_addr, of_offset,
-						     "reg", -1);
-		of_parent = fdt_parent_offset(fdt_addr, of_offset);
-		vsc7224->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, of_parent);
+
+		vsc7224->of_offset = ofnode_to_offset(node);
+		vsc7224->i2c_addr = ofnode_get_addr(node);
+		vsc7224->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(node));
 		if (vsc7224->i2c_addr < 0) {
 			debug("%s: Error: reg field missing\n", __func__);
 			err = -1;
@@ -488,168 +320,90 @@
 			err = -1;
 			break;
 		}
-		vsc7224->name = fdt_get_name(fdt_addr, of_offset, NULL);
+		vsc7224->name = ofnode_get_name(node);
 		debug("%s: Adding %s\n", __func__, vsc7224->name);
-		if (fdt_getprop(fdt_addr, of_offset, "reset", NULL)) {
-			gpio_info = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "reset");
-			vsc7224->reset_gpio = gpio_info;
-		}
-		if (fdt_getprop(fdt_addr, of_offset, "los", NULL)) {
-			gpio_info = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "los");
-			vsc7224->los_gpio = gpio_info;
-		}
-		debug("%s: Parsing channels\n", __func__);
-		err = cvmx_fdt_parse_vsc7224_channels(fdt_addr, of_offset, vsc7224);
+
+		err = gpio_request_by_name_nodev(node, "reset", 0,
+						 &vsc7224->reset_gpio,
+						 GPIOD_IS_OUT);
 		if (err) {
-			debug("%s: Error parsing VSC7224 channels\n", __func__);
-			break;
+			printf("%s: reset GPIO not found in DT!\n", __func__);
+			return -ENODEV;
 		}
-	} while (of_offset > 0);
 
-	if (err) {
-		debug("%s(): Error\n", __func__);
-		if (vsc7224) {
-			if (vsc7224->reset_gpio)
-				__cvmx_fdt_free(vsc7224->reset_gpio, sizeof(*vsc7224->reset_gpio));
-			if (vsc7224->los_gpio)
-				__cvmx_fdt_free(vsc7224->los_gpio, sizeof(*vsc7224->los_gpio));
-			if (vsc7224->i2c_bus)
-				cvmx_fdt_free_i2c_bus(vsc7224->i2c_bus);
-			__cvmx_fdt_free(vsc7224, sizeof(*vsc7224));
+		err = gpio_request_by_name_nodev(node, "los", 0,
+						 &vsc7224->los_gpio,
+						 GPIOD_IS_IN);
+		if (err) {
+			printf("%s: los GPIO not found in DT!\n", __func__);
+			return -ENODEV;
 		}
-	}
-	if (!err)
-		parsed = true;
-
-	return err;
-}
-
-/**
- * @INTERNAL
- * Parses all instances of the Avago AVSP5410 gearbox phy
- *
- * @param[in]	fdt_addr	Address of flat device tree
- *
- * Return:	0 for success, error otherwise
- */
-int __cvmx_fdt_parse_avsp5410(const void *fdt_addr)
-{
-	int of_offset = -1;
-	struct cvmx_avsp5410 *avsp5410 = NULL;
-	struct cvmx_fdt_sfp_info *sfp_info;
-	int err = 0;
-	int of_parent;
-	static bool parsed;
-	int of_mac;
-	int xiface, index;
-	bool is_qsfp;
-	const char *mac_str;
-
-	debug("%s(%p)\n", __func__, fdt_addr);
 
-	if (parsed) {
-		debug("%s: Already parsed\n", __func__);
-		return 0;
-	}
+		/*
+		 * This reset sequence was taken from the NIC23 board-specific
+		 * code; it belongs here in the common code instead.
+		 */
+		debug("%s: Putting device in reset\n", __func__);
+		dm_gpio_set_value(&vsc7224->reset_gpio, 1);
+		mdelay(10);
+		debug("%s: Taking device out of reset\n", __func__);
+		dm_gpio_set_value(&vsc7224->reset_gpio, 0);
+		mdelay(50);
 
-	do {
-		of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
-							  "avago,avsp-5410");
-		debug("%s: of_offset: %d\n", __func__, of_offset);
-		if (of_offset == -FDT_ERR_NOTFOUND) {
-			break;
-		} else if (of_offset < 0) {
-			err = -1;
-			debug("%s: Error %d parsing FDT\n", __func__, of_offset);
-			break;
+		init_array = ofnode_get_property(node, "vitesse,reg-init",
+						 &len);
+		if (!init_array) {
+			debug("%s: No initialization array\n", __func__);
+			continue;
 		}
-
-		avsp5410 = __cvmx_fdt_alloc(sizeof(*avsp5410));
-
-		if (!avsp5410) {
-			debug("%s: Out of memory!\n", __func__);
+		if ((len % 8) != 0) {
+			printf("%s: Error: register init string should be an array of reg number followed by value\n",
+			       __func__);
 			return -1;
 		}
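+
+		/*
+		 * Illustrative sketch (assumed format): the property is a
+		 * flat list of <register value> pairs, e.g.:
+		 *
+		 *	vitesse,reg-init = <0x7f 0x0000>,
+		 *			   <0x01 0x0c09>;
+		 *
+		 * The values above are hypothetical examples only.
+		 */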
-		avsp5410->of_offset = of_offset;
-		avsp5410->i2c_addr = cvmx_fdt_get_int(fdt_addr, of_offset,
-						      "reg", -1);
-		of_parent = fdt_parent_offset(fdt_addr, of_offset);
-		avsp5410->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, of_parent);
-		if (avsp5410->i2c_addr < 0) {
-			debug("%s: Error: reg field missing\n", __func__);
-			err = -1;
-			break;
-		}
-		if (!avsp5410->i2c_bus) {
-			debug("%s: Error getting i2c bus\n", __func__);
-			err = -1;
-			break;
-		}
-		avsp5410->name = fdt_get_name(fdt_addr, of_offset, NULL);
-		debug("%s: Adding %s\n", __func__, avsp5410->name);
 
-		/* Now find out which interface it's mapped to */
-		avsp5410->ipd_port = -1;
-
-		mac_str = "sfp-mac";
-		if (fdt_getprop(fdt_addr, of_offset, mac_str, NULL)) {
-			is_qsfp = false;
-		} else if (fdt_getprop(fdt_addr, of_offset, "qsfp-mac", NULL)) {
-			is_qsfp = true;
-			mac_str = "qsfp-mac";
-		} else {
-			debug("%s: Error: MAC not found for %s\n", __func__, avsp5410->name);
+		ret = i2c_get_chip(vsc7224->i2c_bus->i2c_bus,
+				   vsc7224->i2c_addr, 1, &dev);
+		if (ret) {
+			debug("Cannot find I2C device: %d\n", ret);
 			return -1;
 		}
-		of_mac = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, mac_str);
-		if (of_mac < 0) {
-			debug("%s: Error %d with MAC %s phandle for %s\n", __func__, of_mac,
-			      mac_str, avsp5410->name);
-			return -1;
-		}
 
-		debug("%s: Found mac at offset %d\n", __func__, of_mac);
-		err = cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_mac, &xiface, &index);
-		if (!err) {
-			avsp5410->xiface = xiface;
-			avsp5410->index = index;
-			avsp5410->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
+		for (i = 0; i < len / sizeof(int); i += 2) {
+			u8 buffer[2];
 
-			debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
-			      xiface, index, avsp5410->ipd_port);
-			if (avsp5410->ipd_port >= 0) {
-				cvmx_helper_cfg_set_avsp5410_info(xiface, index, avsp5410);
-				debug("%s: Storing config phy for xiface 0x%x, index %d\n",
-				      __func__, xiface, index);
-			}
-			sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
-			if (!sfp_info) {
-				debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d\n",
-				      __func__, xiface, index);
-				continue;
+			reg = fdt32_to_cpu(init_array[i]);
+			value = fdt32_to_cpu(init_array[i + 1]);
+			buffer[0] = value >> 8;
+			buffer[1] = value & 0xff;
+			ret = dm_i2c_write(dev, reg, buffer, 2);
+			if (ret) {
+				debug("Cannot write I2C device: %d\n", ret);
+				return -1;
 			}
 
-			sfp_info->is_avsp5410 = true;
-			sfp_info->avsp5410 = avsp5410;
-			debug("%s: Registering AVSP5410 %s with SFP %s\n", __func__, avsp5410->name,
-			      sfp_info->name);
-			if (!sfp_info->mod_abs_changed) {
-				debug("%s: Registering cvmx_sfp_avsp5410_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
-				      __func__, &cvmx_sfp_avsp5410_mod_abs_changed, xiface, index);
-				cvmx_sfp_register_mod_abs_changed(
-					sfp_info,
-					&cvmx_sfp_avsp5410_mod_abs_changed,
-					NULL);
-			}
+			debug("  Wrote 0x%02x <= 0x%02x%02x\n", reg,
+			      buffer[0], buffer[1]);
 		}
-	} while (of_offset > 0);
+
+		debug("%s: Parsing channels\n", __func__);
+		err = cvmx_fdt_parse_vsc7224_channels(node, vsc7224);
+		if (err) {
+			debug("%s: Error parsing VSC7224 channels\n", __func__);
+			break;
+		}
+	}
 
 	if (err) {
 		debug("%s(): Error\n", __func__);
-		if (avsp5410) {
-			if (avsp5410->i2c_bus)
-				cvmx_fdt_free_i2c_bus(avsp5410->i2c_bus);
-			__cvmx_fdt_free(avsp5410, sizeof(*avsp5410));
+		if (vsc7224) {
+			dm_gpio_free(vsc7224->reset_gpio.dev,
+				     &vsc7224->reset_gpio);
+			dm_gpio_free(vsc7224->los_gpio.dev,
+				     &vsc7224->los_gpio);
+			if (vsc7224->i2c_bus)
+				cvmx_fdt_free_i2c_bus(vsc7224->i2c_bus);
+			free(vsc7224);
 		}
 	}
 	if (!err)
@@ -659,312 +413,76 @@
 }
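For reference, a minimal sketch of how a "vitesse,reg-init" payload is laid
out and consumed by the loop above. The register/value pairs and the helper
name are hypothetical; only the pair encoding (two big-endian cells per
write, 16-bit value split high byte first) comes from the code:

	static int example_apply_reg_init(struct udevice *dev)
	{
		/* Pairs of big-endian cells: <register, 16-bit value> */
		static const u32 init[] = {
			cpu_to_fdt32(0x01), cpu_to_fdt32(0x0400),
			cpu_to_fdt32(0x94), cpu_to_fdt32(0x0001),
		};
		int i, ret;

		for (i = 0; i < ARRAY_SIZE(init); i += 2) {
			u16 val = fdt32_to_cpu(init[i + 1]);
			u8 buf[2] = { val >> 8, val & 0xff };

			ret = dm_i2c_write(dev, fdt32_to_cpu(init[i]), buf, 2);
			if (ret)
				return ret;
		}
		return 0;
	}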
 
 /**
- * Parse QSFP GPIOs for SFP
- *
- * @param[in]	fdt_addr	Pointer to flat device tree
- * @param	of_offset	Offset of QSFP node
- * @param[out]	sfp_info	Pointer to sfp info to fill in
- *
- * Return:	0 for success
- */
-static int cvmx_parse_qsfp(const void *fdt_addr, int of_offset, struct cvmx_fdt_sfp_info *sfp_info)
-{
-	sfp_info->select = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "select");
-	sfp_info->mod_abs = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "mod_prs");
-	sfp_info->reset = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "reset");
-	sfp_info->interrupt = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "interrupt");
-	sfp_info->lp_mode = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "lp_mode");
-	return 0;
-}
-
-/**
- * Parse SFP GPIOs for SFP
- *
- * @param[in]	fdt_addr	Pointer to flat device tree
- * @param	of_offset	Offset of SFP node
- * @param[out]	sfp_info	Pointer to sfp info to fill in
+ * Given the parent node of an i2c device, build up a list describing the
+ * bus, which can contain i2c muxes and switches.
  *
- * Return:	0 for success
- */
-static int cvmx_parse_sfp(const void *fdt_addr, int of_offset, struct cvmx_fdt_sfp_info *sfp_info)
-{
-	sfp_info->mod_abs = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "mod_abs");
-	sfp_info->rx_los = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "rx_los");
-	sfp_info->tx_disable = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "tx_disable");
-	sfp_info->tx_error = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "tx_error");
-	return 0;
-}
-
-/**
- * Parse SFP/QSFP EEPROM and diag
+ * @param[in]	node		ofnode of the parent node of an i2c device in
+ *				the device tree.
  *
- * @param[in]	fdt_addr	Pointer to flat device tree
- * @param	of_offset	Offset of SFP node
- * @param[out]	sfp_info	Pointer to sfp info to fill in
+ * @return	pointer to a list of i2c devices starting from the root, which
+ *		can include i2c muxes and switches, or NULL on error.  Note
+ *		that all entries are allocated on the heap.
  *
- * Return:	0 for success, -1 on error
+ * @see cvmx_fdt_free_i2c_bus()
  */
-static int cvmx_parse_sfp_eeprom(const void *fdt_addr, int of_offset,
-				 struct cvmx_fdt_sfp_info *sfp_info)
+struct cvmx_fdt_i2c_bus_info *cvmx_ofnode_get_i2c_bus(ofnode node)
 {
-	int of_eeprom;
-	int of_diag;
+	struct cvmx_fdt_i2c_bus_info *businfo = NULL;
+	struct udevice *bus;
+	int ret;
 
-	debug("%s(%p, %d, %s)\n", __func__, fdt_addr, of_offset, sfp_info->name);
-	of_eeprom = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, "eeprom");
-	if (of_eeprom < 0) {
-		debug("%s: Missing \"eeprom\" from device tree for %s\n", __func__, sfp_info->name);
-		return -1;
+	businfo = cvmx_fdt_alloc(sizeof(*businfo));
+	if (!businfo) {
+		debug("Out of memory\n");
+		return NULL;
 	}
 
-	sfp_info->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, fdt_parent_offset(fdt_addr, of_eeprom));
-	sfp_info->i2c_eeprom_addr = cvmx_fdt_get_int(fdt_addr, of_eeprom, "reg", 0x50);
+	debug("%s: Found node %s\n", __func__, ofnode_get_name(node));
+	businfo->of_offset = ofnode_to_offset(node);
 
-	debug("%s(%p, %d, %s, %d)\n", __func__, fdt_addr, of_offset, sfp_info->name,
-	      sfp_info->i2c_eeprom_addr);
-
-	if (!sfp_info->i2c_bus) {
-		debug("%s: Error: could not determine i2c bus for eeprom for %s\n", __func__,
-		      sfp_info->name);
-		return -1;
-	}
-	of_diag = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, "diag");
-	if (of_diag >= 0)
-		sfp_info->i2c_diag_addr = cvmx_fdt_get_int(fdt_addr, of_diag, "reg", 0x51);
-	else
-		sfp_info->i2c_diag_addr = 0x51;
-	return 0;
-}
-
-/**
- * Parse SFP information from device tree
- *
- * @param[in]	fdt_addr	Address of flat device tree
- *
- * Return: pointer to sfp info or NULL if error
- */
-struct cvmx_fdt_sfp_info *cvmx_helper_fdt_parse_sfp_info(const void *fdt_addr, int of_offset)
-{
-	struct cvmx_fdt_sfp_info *sfp_info = NULL;
-	int err = -1;
-	bool is_qsfp;
-
-	if (!fdt_node_check_compatible(fdt_addr, of_offset, "ethernet,sfp-slot")) {
-		is_qsfp = false;
-	} else if (!fdt_node_check_compatible(fdt_addr, of_offset, "ethernet,qsfp-slot")) {
-		is_qsfp = true;
-	} else {
-		debug("%s: Error: incompatible sfp/qsfp slot, compatible=%s\n", __func__,
-		      (char *)fdt_getprop(fdt_addr, of_offset, "compatible", NULL));
-		goto error_exit;
-	}
-
-	debug("%s: %ssfp module found at offset %d\n", __func__, is_qsfp ? "q" : "", of_offset);
-	sfp_info = __cvmx_fdt_alloc(sizeof(*sfp_info));
-	if (!sfp_info) {
-		debug("%s: Error: out of memory\n", __func__);
-		goto error_exit;
-	}
-	sfp_info->name = fdt_get_name(fdt_addr, of_offset, NULL);
-	sfp_info->of_offset = of_offset;
-	sfp_info->is_qsfp = is_qsfp;
-	sfp_info->last_mod_abs = -1;
-	sfp_info->last_rx_los = -1;
-
-	if (is_qsfp)
-		err = cvmx_parse_qsfp(fdt_addr, of_offset, sfp_info);
-	else
-		err = cvmx_parse_sfp(fdt_addr, of_offset, sfp_info);
-	if (err) {
-		debug("%s: Error in %s parsing %ssfp GPIO info\n", __func__, sfp_info->name,
-		      is_qsfp ? "q" : "");
-		goto error_exit;
-	}
-	debug("%s: Parsing %ssfp module eeprom\n", __func__, is_qsfp ? "q" : "");
-	err = cvmx_parse_sfp_eeprom(fdt_addr, of_offset, sfp_info);
-	if (err) {
-		debug("%s: Error parsing eeprom info for %s\n", __func__, sfp_info->name);
-		goto error_exit;
+	/*
+	 * Get I2C bus and probe it automatically - needed for later use
+	 */
+	ret = device_get_global_by_ofnode(node, &bus);
+	if (!bus || ret) {
+		printf("Cannot find a I2C bus\n");
+		return NULL;
 	}
 
-	/* Register default check for mod_abs changed */
-	if (!err)
-		cvmx_sfp_register_check_mod_abs(sfp_info, cvmx_sfp_check_mod_abs, NULL);
+	businfo->i2c_bus = bus;
 
-error_exit:
-	/* Note: we don't free any data structures on error since it gets
-	 * rather complicated with i2c buses and whatnot.
-	 */
-	return err ? NULL : sfp_info;
+	return businfo;
 }
 
 /**
- * @INTERNAL
- * Parse a slice of the Inphi/Cortina CS4343 in the device tree
+ * Return the Octeon bus number for a bus descriptor
  *
- * @param[in]	fdt_addr	Address of flat device tree
- * @param	of_offset	fdt offset of slice
- * @param	phy_info	phy_info data structure
+ * @param[in]	bus	bus descriptor
  *
- * Return:	slice number if non-negative, otherwise error
+ * @return	Octeon twsi bus number or -1 on error
  */
-static int cvmx_fdt_parse_cs4343_slice(const void *fdt_addr, int of_offset,
-				       struct cvmx_phy_info *phy_info)
+int cvmx_fdt_i2c_get_root_bus(const struct cvmx_fdt_i2c_bus_info *bus)
 {
-	struct cvmx_cs4343_slice_info *slice;
-	int reg;
-	int reg_offset;
-
-	reg = cvmx_fdt_get_int(fdt_addr, of_offset, "reg", -1);
-	reg_offset = cvmx_fdt_get_int(fdt_addr, of_offset, "slice_offset", -1);
-
-	if (reg < 0 || reg >= 4) {
-		debug("%s(%p, %d, %p): Error: reg %d undefined or out of range\n", __func__,
-		      fdt_addr, of_offset, phy_info, reg);
-		return -1;
-	}
-	if (reg_offset % 0x1000 || reg_offset > 0x3000 || reg_offset < 0) {
-		debug("%s(%p, %d, %p): Error: reg_offset 0x%x undefined or out of range\n",
-		      __func__, fdt_addr, of_offset, phy_info, reg_offset);
+	if (bus->type != CVMX_I2C_BUS_OCTEON)
 		return -1;
-	}
-	if (!phy_info->cs4343_info) {
-		debug("%s: Error: phy info cs4343 datastructure is NULL\n", __func__);
-		return -1;
-	}
-	debug("%s(%p, %d, %p): %s, reg: %d, slice offset: 0x%x\n", __func__, fdt_addr, of_offset,
-	      phy_info, fdt_get_name(fdt_addr, of_offset, NULL), reg, reg_offset);
-	slice = &phy_info->cs4343_info->slice[reg];
-	slice->name = fdt_get_name(fdt_addr, of_offset, NULL);
-	slice->mphy = phy_info->cs4343_info;
-	slice->phy_info = phy_info;
-	slice->of_offset = of_offset;
-	slice->slice_no = reg;
-	slice->reg_offset = reg_offset;
-	/* SR settings */
-	slice->sr_stx_cmode_res = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-cmode-res", 3);
-	slice->sr_stx_drv_lower_cm =
-		cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-drv-lower-cm", 8);
-	slice->sr_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-level", 0x1c);
-	slice->sr_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-pre-peak", 1);
-	slice->sr_stx_muxsubrate_sel =
-		cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-muxsubrate-sel", 0);
-	slice->sr_stx_post_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-post-peak", 8);
-	/* CX settings */
-	slice->cx_stx_cmode_res = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-cmode-res", 3);
-	slice->cx_stx_drv_lower_cm =
-		cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-drv-lower-cm", 8);
-	slice->cx_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-level", 0x1c);
-	slice->cx_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-pre-peak", 1);
-	slice->cx_stx_muxsubrate_sel =
-		cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-muxsubrate-sel", 0);
-	slice->cx_stx_post_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-post-peak", 0xC);
-	/* 1000Base-X settings */
-	/* CX settings */
-	slice->basex_stx_cmode_res =
-		cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-cmode-res", 3);
-	slice->basex_stx_drv_lower_cm =
-		cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-drv-lower-cm", 8);
-	slice->basex_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset,
-						  "basex-stx-level", 0x1c);
-	slice->basex_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset,
-						     "basex-stx-pre-peak", 1);
-	slice->basex_stx_muxsubrate_sel =
-		cvmx_fdt_get_int(fdt_addr, of_offset,
-				 "basex-stx-muxsubrate-sel", 0);
-	slice->basex_stx_post_peak =
-		cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-post-peak", 8);
-	/* Get the link LED gpio pin */
-	slice->link_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
-					    "link-led-gpio", -1);
-	slice->error_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
-					     "error-led-gpio", -1);
-	slice->los_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
-					   "los-input-gpio", -1);
-	slice->link_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
-						 "link-led-gpio-inverted");
-	slice->error_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
-						  "error-led-gpio-inverted");
-	slice->los_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
-						"los-input-gpio-inverted");
-	/* Convert GPIOs to be die based if they're not already */
-	if (slice->link_gpio > 4 && slice->link_gpio <= 8)
-		slice->link_gpio -= 4;
-	if (slice->error_gpio > 4 && slice->error_gpio <= 8)
-		slice->error_gpio -= 4;
-	if (slice->los_gpio > 4 && slice->los_gpio <= 8)
-		slice->los_gpio -= 4;
-
-	return reg;
+	return bus->channel;
 }
 
 /**
- * @INTERNAL
- * Parses either a CS4343 phy or a slice of the phy from the device tree
- * @param[in]	fdt_addr	Address of FDT
- * @param	of_offset	offset of slice or phy in device tree
- * @param	phy_info	phy_info data structure to fill in
+ * Frees all entries for an i2c bus descriptor
  *
- * Return:	0 for success, -1 on error
+ * @param	bus	bus to free
+ *
+ * @return	0
  */
-int cvmx_fdt_parse_cs4343(const void *fdt_addr, int of_offset, struct cvmx_phy_info *phy_info)
+int cvmx_fdt_free_i2c_bus(struct cvmx_fdt_i2c_bus_info *bus)
 {
-	int of_slice = -1;
-	struct cvmx_cs4343_info *cs4343;
-	int err = -1;
-	int reg;
-
-	debug("%s(%p, %d, %p): %s (%s)\n", __func__,
-	      fdt_addr, of_offset, phy_info,
-	      fdt_get_name(fdt_addr, of_offset, NULL),
-	      (const char *)fdt_getprop(fdt_addr, of_offset, "compatible", NULL));
+	struct cvmx_fdt_i2c_bus_info *last;
 
-	if (!phy_info->cs4343_info)
-		phy_info->cs4343_info = __cvmx_fdt_alloc(sizeof(struct cvmx_cs4343_info));
-	if (!phy_info->cs4343_info) {
-		debug("%s: Error: out of memory!\n", __func__);
-		return -1;
-	}
-	cs4343 = phy_info->cs4343_info;
-	/* If we're passed to a slice then process only that slice */
-	if (!fdt_node_check_compatible(fdt_addr, of_offset, "cortina,cs4343-slice")) {
-		err = 0;
-		of_slice = of_offset;
-		of_offset = fdt_parent_offset(fdt_addr, of_offset);
-		reg = cvmx_fdt_parse_cs4343_slice(fdt_addr, of_slice, phy_info);
-		if (reg >= 0)
-			phy_info->cs4343_slice_info = &cs4343->slice[reg];
-		else
-			err = reg;
-	} else if (!fdt_node_check_compatible(fdt_addr, of_offset,
-					      "cortina,cs4343")) {
-		/* Walk through and process all of the slices */
-		of_slice =
-			fdt_node_offset_by_compatible(fdt_addr, of_offset, "cortina,cs4343-slice");
-		while (of_slice > 0 && fdt_parent_offset(fdt_addr, of_slice) ==
-		       of_offset) {
-			debug("%s: Parsing slice %s\n", __func__,
-			      fdt_get_name(fdt_addr, of_slice, NULL));
-			err = cvmx_fdt_parse_cs4343_slice(fdt_addr, of_slice,
-							  phy_info);
-			if (err < 0)
-				break;
-			of_slice = fdt_node_offset_by_compatible(fdt_addr,
-								 of_slice,
-								 "cortina,cs4343-slice");
-		}
-	} else {
-		debug("%s: Error: unknown compatible string %s for %s\n", __func__,
-		      (const char *)fdt_getprop(fdt_addr, of_offset,
-						"compatible", NULL),
-		      fdt_get_name(fdt_addr, of_offset, NULL));
+	while (bus) {
+		last = bus;
+		bus = bus->child;
+		free(last);
 	}
-
-	if (err >= 0) {
-		cs4343->name = fdt_get_name(fdt_addr, of_offset, NULL);
-		cs4343->phy_info = phy_info;
-		cs4343->of_offset = of_offset;
-	}
-
-	return err < 0 ? -1 : 0;
+	return 0;
 }
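A hedged usage sketch tying the three helpers above together; parent_node is
a hypothetical ofnode for the device's parent bus node:

	struct cvmx_fdt_i2c_bus_info *bi = cvmx_ofnode_get_i2c_bus(parent_node);

	if (bi) {
		/* -1 unless the root descriptor is a native Octeon TWSI bus */
		int twsi = cvmx_fdt_i2c_get_root_bus(bi);

		debug("root TWSI bus: %d\n", twsi);
		cvmx_fdt_free_i2c_bus(bi); /* walks and frees the whole chain */
	}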
diff --git a/arch/mips/mach-octeon/cvmx-helper-fpa.c b/arch/mips/mach-octeon/cvmx-helper-fpa.c
new file mode 100644
index 0000000..e870829
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-fpa.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Helper functions for FPA setup.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+#include <mach/cvmx-pip.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+#include <mach/cvmx-helper-pko.h>
+
+/**
+ * @INTERNAL
+ * OBSOLETE
+ *
+ * Allocate memory for and initialize a single FPA pool.
+ *
+ * @param pool    Pool to initialize
+ * @param buffer_size  Size of buffers to allocate in bytes
+ * @param buffers Number of buffers to put in the pool. Zero is allowed
+ * @param name    String name of the pool for debugging purposes
+ * @return Zero on success, non-zero on failure
+ *
+ * This function exists only for the transition and will be removed.
+ */
+int __cvmx_helper_initialize_fpa_pool(int pool, u64 buffer_size, u64 buffers,
+				      const char *name)
+{
+	return cvmx_fpa_setup_pool(pool, name, NULL, buffer_size, buffers);
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-ilk.c b/arch/mips/mach-octeon/cvmx-helper-ilk.c
new file mode 100644
index 0000000..9e882f1
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-ilk.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for ILK initialization, configuration,
+ * and monitoring.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+int __cvmx_helper_ilk_enumerate(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	xi.interface -= CVMX_ILK_GBL_BASE();
+	return cvmx_ilk_chans[xi.node][xi.interface];
+}
+
+/**
+ * @INTERNAL
+ * Initialize all tx calendar entries to the xoff state.
+ * Initialize all rx calendar entries to the xon state. The rx calendar entries
+ * must be in the xon state to allow new pko pipe assignments. If a calendar
+ * entry is assigned a different pko pipe while in the xoff state, the old pko
+ * pipe will stay in the xoff state even when no longer used by ilk.
+ *
+ * @param intf Interface whose calendars are to be initialized.
+ */
+static void __cvmx_ilk_clear_cal_cn78xx(int intf)
+{
+	cvmx_ilk_txx_cal_entryx_t tx_entry;
+	cvmx_ilk_rxx_cal_entryx_t rx_entry;
+	int i;
+	int node = (intf >> 4) & 0xf;
+	int interface = (intf & 0xf);
+
+	/* Initialize all tx calendar entries to off */
+	tx_entry.u64 = 0;
+	tx_entry.s.ctl = XOFF;
+	for (i = 0; i < CVMX_ILK_MAX_CAL; i++) {
+		csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(i, interface),
+			    tx_entry.u64);
+	}
+
+	/* Initialize all rx calendar entries to on */
+	rx_entry.u64 = 0;
+	rx_entry.s.ctl = XON;
+	for (i = 0; i < CVMX_ILK_MAX_CAL; i++) {
+		csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(i, interface),
+			    rx_entry.u64);
+	}
+}
+
+/**
+ * @INTERNAL
+ * Initialize all tx calendar entries to the xoff state.
+ * Initialize all rx calendar entries to the xon state. The rx calendar entries
+ * must be in the xon state to allow new pko pipe assignments. If a calendar
+ * entry is assigned a different pko pipe while in the xoff state, the old pko
+ * pipe will stay in the xoff state even when no longer used by ilk.
+ *
+ * @param interface Interface whose calendars are to be initialized.
+ */
+static void __cvmx_ilk_clear_cal_cn68xx(int interface)
+{
+	cvmx_ilk_txx_idx_cal_t tx_idx;
+	cvmx_ilk_txx_mem_cal0_t tx_cal0;
+	cvmx_ilk_txx_mem_cal1_t tx_cal1;
+	cvmx_ilk_rxx_idx_cal_t rx_idx;
+	cvmx_ilk_rxx_mem_cal0_t rx_cal0;
+	cvmx_ilk_rxx_mem_cal1_t rx_cal1;
+	int i;
+
+	/*
+	 * First we initialize the tx calendar starting from entry 0,
+	 * incrementing the entry with every write.
+	 */
+	tx_idx.u64 = 0;
+	tx_idx.s.inc = 1;
+	csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), tx_idx.u64);
+
+	/* Set state to xoff for all entries */
+	tx_cal0.u64 = 0;
+	tx_cal0.s.entry_ctl0 = XOFF;
+	tx_cal0.s.entry_ctl1 = XOFF;
+	tx_cal0.s.entry_ctl2 = XOFF;
+	tx_cal0.s.entry_ctl3 = XOFF;
+
+	tx_cal1.u64 = 0;
+	tx_cal1.s.entry_ctl4 = XOFF;
+	tx_cal1.s.entry_ctl5 = XOFF;
+	tx_cal1.s.entry_ctl6 = XOFF;
+	tx_cal1.s.entry_ctl7 = XOFF;
+
+	/* Write all 288 entries */
+	for (i = 0; i < CVMX_ILK_MAX_CAL_IDX; i++) {
+		csr_wr(CVMX_ILK_TXX_MEM_CAL0(interface), tx_cal0.u64);
+		csr_wr(CVMX_ILK_TXX_MEM_CAL1(interface), tx_cal1.u64);
+	}
+
+	/*
+	 * Next we initialize the rx calendar starting from entry 0,
+	 * incrementing the entry with every write.
+	 */
+	rx_idx.u64 = 0;
+	rx_idx.s.inc = 1;
+	csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), rx_idx.u64);
+
+	/* Set state to xon for all entries */
+	rx_cal0.u64 = 0;
+	rx_cal0.s.entry_ctl0 = XON;
+	rx_cal0.s.entry_ctl1 = XON;
+	rx_cal0.s.entry_ctl2 = XON;
+	rx_cal0.s.entry_ctl3 = XON;
+
+	rx_cal1.u64 = 0;
+	rx_cal1.s.entry_ctl4 = XON;
+	rx_cal1.s.entry_ctl5 = XON;
+	rx_cal1.s.entry_ctl6 = XON;
+	rx_cal1.s.entry_ctl7 = XON;
+
+	/* Write all 288 entries */
+	for (i = 0; i < CVMX_ILK_MAX_CAL_IDX; i++) {
+		csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), rx_cal0.u64);
+		csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), rx_cal1.u64);
+	}
+}
+
+/**
+ * @INTERNAL
+ * Initialize all calendar entries.
+ *
+ * @param interface Interface whose calendars are to be initialized.
+ */
+void __cvmx_ilk_clear_cal(int interface)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		__cvmx_ilk_clear_cal_cn68xx(interface);
+	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		__cvmx_ilk_clear_cal_cn78xx(interface);
+}
+
+void __cvmx_ilk_write_tx_cal_entry_cn68xx(int interface, int channel,
+					  unsigned char bpid)
+{
+	cvmx_ilk_txx_idx_cal_t tx_idx;
+	cvmx_ilk_txx_mem_cal0_t tx_cal0;
+	cvmx_ilk_txx_mem_cal1_t tx_cal1;
+	int entry;
+	int window;
+	int window_entry;
+
+	/*
+	 * The calendar has 288 entries. Each calendar entry represents a
+	 * channel's flow control state or the link flow control state.
+	 * Starting with the first entry, every sixteenth entry is used for the
+	 * link flow control state. The other 15 entries are used for the
+	 * channels' flow control state:
+	 * entry 0   ----> link flow control state
+	 * entry 1   ----> channel 0 flow control state
+	 * entry 2   ----> channel 1 flow control state
+	 * ...
+	 * entry 15  ----> channel 14 flow control state
+	 * entry 16  ----> link flow control state
+	 * entry 17  ----> channel 15 flow control state
+	 *
+	 * Also, the calendar is accessed via windows into it. Each window maps
+	 * to 8 entries.
+	 */
+	entry = 1 + channel + (channel / 15);
+	window = entry / 8;
+	window_entry = entry % 8;
+
+	/* Indicate the window we need to access */
+	tx_idx.u64 = 0;
+	tx_idx.s.index = window;
+	csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), tx_idx.u64);
+
+	/* Get the window's current value */
+	tx_cal0.u64 = csr_rd(CVMX_ILK_TXX_MEM_CAL0(interface));
+	tx_cal1.u64 = csr_rd(CVMX_ILK_TXX_MEM_CAL1(interface));
+
+	/* Force every sixteenth entry as link flow control state */
+	if ((window & 1) == 0)
+		tx_cal0.s.entry_ctl0 = LINK;
+
+	/* Update the entry */
+	switch (window_entry) {
+	case 0:
+		tx_cal0.s.entry_ctl0 = 0;
+		tx_cal0.s.bpid0 = bpid;
+		break;
+	case 1:
+		tx_cal0.s.entry_ctl1 = 0;
+		tx_cal0.s.bpid1 = bpid;
+		break;
+	case 2:
+		tx_cal0.s.entry_ctl2 = 0;
+		tx_cal0.s.bpid2 = bpid;
+		break;
+	case 3:
+		tx_cal0.s.entry_ctl3 = 0;
+		tx_cal0.s.bpid3 = bpid;
+		break;
+	case 4:
+		tx_cal1.s.entry_ctl4 = 0;
+		tx_cal1.s.bpid4 = bpid;
+		break;
+	case 5:
+		tx_cal1.s.entry_ctl5 = 0;
+		tx_cal1.s.bpid5 = bpid;
+		break;
+	case 6:
+		tx_cal1.s.entry_ctl6 = 0;
+		tx_cal1.s.bpid6 = bpid;
+		break;
+	case 7:
+		tx_cal1.s.entry_ctl7 = 0;
+		tx_cal1.s.bpid7 = bpid;
+		break;
+	}
+
+	/* Write the window's new value */
+	csr_wr(CVMX_ILK_TXX_MEM_CAL0(interface), tx_cal0.u64);
+	csr_wr(CVMX_ILK_TXX_MEM_CAL1(interface), tx_cal1.u64);
+}
+
+void __cvmx_ilk_write_tx_cal_entry_cn78xx(int intf, int channel,
+					  unsigned char bpid)
+{
+	cvmx_ilk_txx_cal_entryx_t tx_cal;
+	int calendar_16_block = channel / 15;
+	int calendar_16_index = channel % 15 + 1;
+	int index = calendar_16_block * 16 + calendar_16_index;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	/* Program the link status on first channel */
+	if (calendar_16_index == 1) {
+		tx_cal.u64 = 0;
+		tx_cal.s.ctl = 1;
+		csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(index - 1, interface),
+			    tx_cal.u64);
+	}
+	tx_cal.u64 = 0;
+	tx_cal.s.ctl = 0;
+	tx_cal.s.channel = channel;
+	csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(index, interface),
+		    tx_cal.u64);
+}
+
+/**
+ * @INTERNAL
+ * Set up the channel's tx calendar entry.
+ *
+ * @param interface Interface the channel belongs to
+ * @param channel Channel whose calendar entry is to be updated
+ * @param bpid BPID assigned to the channel
+ */
+void __cvmx_ilk_write_tx_cal_entry(int interface, int channel,
+				   unsigned char bpid)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		__cvmx_ilk_write_tx_cal_entry_cn68xx(interface, channel, bpid);
+	else
+		__cvmx_ilk_write_tx_cal_entry_cn78xx(interface, channel, bpid);
+}
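To make the two calendar layouts concrete, a small sanity check of the index
arithmetic used above (illustration only, not part of the patch):

	static void example_cal_mapping(void)
	{
		int channel;

		for (channel = 0; channel < 16; channel++) {
			/* CN68XX: windows of 8; entries 0, 16, ... carry link state */
			int entry68 = 1 + channel + (channel / 15);
			/* CN78XX: blocks of 16; the first slot of each block is the link */
			int entry78 = (channel / 15) * 16 + (channel % 15) + 1;

			printf("ch %2d -> entry %2d (window %d, slot %d); cn78xx entry %2d\n",
			       channel, entry68, entry68 / 8, entry68 % 8, entry78);
		}
	}

Both formulas place channel 14 at entry 15 and channel 15 at entry 17,
skipping entry 16 for the link flow control state.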
+
+void __cvmx_ilk_write_rx_cal_entry_cn78xx(int intf, int channel,
+					  unsigned char bpid)
+{
+	cvmx_ilk_rxx_cal_entryx_t rx_cal;
+	int calendar_16_block = channel / 15;
+	int calendar_16_index = channel % 15 + 1;
+	int index = calendar_16_block * 16 + calendar_16_index;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	/* Program the link status on first channel */
+	if (calendar_16_index == 1) {
+		rx_cal.u64 = 0;
+		rx_cal.s.ctl = 1;
+		csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(index - 1, interface),
+			    rx_cal.u64);
+	}
+	rx_cal.u64 = 0;
+	rx_cal.s.ctl = 0;
+	rx_cal.s.channel = channel;
+	csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(index, interface),
+		    rx_cal.u64);
+}
+
+void __cvmx_ilk_write_rx_cal_entry_cn68xx(int interface, int channel,
+					  unsigned char pipe)
+{
+	cvmx_ilk_rxx_idx_cal_t rx_idx;
+	cvmx_ilk_rxx_mem_cal0_t rx_cal0;
+	cvmx_ilk_rxx_mem_cal1_t rx_cal1;
+	int entry;
+	int window;
+	int window_entry;
+
+	/*
+	 * The calendar has 288 entries. Each calendar entry represents a
+	 * channel's flow control state or the link flow control state.
+	 * Starting with the first entry, every sixteenth entry is used for the
+	 * link flow control state. The other 15 entries are used for the
+	 * channels' flow control state:
+	 * entry 0   ----> link flow control state
+	 * entry 1   ----> channel 0 flow control state
+	 * entry 2   ----> channel 1 flow control state
+	 * ...
+	 * entry 15  ----> channel 14 flow control state
+	 * entry 16  ----> link flow control state
+	 * entry 17  ----> channel 15 flow control state
+	 *
+	 * Also, the calendar is accessed via windows into it. Each window maps
+	 * to 8 entries.
+	 */
+	entry = 1 + channel + (channel / 15);
+	window = entry / 8;
+	window_entry = entry % 8;
+
+	/* Indicate the window we need to access */
+	rx_idx.u64 = 0;
+	rx_idx.s.index = window;
+	csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), rx_idx.u64);
+
+	/* Get the window's current value */
+	rx_cal0.u64 = csr_rd(CVMX_ILK_RXX_MEM_CAL0(interface));
+	rx_cal1.u64 = csr_rd(CVMX_ILK_RXX_MEM_CAL1(interface));
+
+	/* Force every sixteenth entry as link flow control state */
+	if ((window & 1) == 0)
+		rx_cal0.s.entry_ctl0 = LINK;
+
+	/* Update the entry */
+	switch (window_entry) {
+	case 0:
+		rx_cal0.s.entry_ctl0 = 0;
+		rx_cal0.s.port_pipe0 = pipe;
+		break;
+	case 1:
+		rx_cal0.s.entry_ctl1 = 0;
+		rx_cal0.s.port_pipe1 = pipe;
+		break;
+	case 2:
+		rx_cal0.s.entry_ctl2 = 0;
+		rx_cal0.s.port_pipe2 = pipe;
+		break;
+	case 3:
+		rx_cal0.s.entry_ctl3 = 0;
+		rx_cal0.s.port_pipe3 = pipe;
+		break;
+	case 4:
+		rx_cal1.s.entry_ctl4 = 0;
+		rx_cal1.s.port_pipe4 = pipe;
+		break;
+	case 5:
+		rx_cal1.s.entry_ctl5 = 0;
+		rx_cal1.s.port_pipe5 = pipe;
+		break;
+	case 6:
+		rx_cal1.s.entry_ctl6 = 0;
+		rx_cal1.s.port_pipe6 = pipe;
+		break;
+	case 7:
+		rx_cal1.s.entry_ctl7 = 0;
+		rx_cal1.s.port_pipe7 = pipe;
+		break;
+	}
+
+	/* Write the window's new value */
+	csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), rx_cal0.u64);
+	csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), rx_cal1.u64);
+}
+
+/**
+ * @INTERNAL
+ * Set up the channel's rx calendar entry.
+ *
+ * @param interface Interface the channel belongs to
+ * @param channel Channel whose calendar entry is to be updated
+ * @param pipe PKO pipe assigned to the channel
+ */
+void __cvmx_ilk_write_rx_cal_entry(int interface, int channel,
+				   unsigned char pipe)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		__cvmx_ilk_write_rx_cal_entry_cn68xx(interface, channel, pipe);
+	else
+		__cvmx_ilk_write_rx_cal_entry_cn78xx(interface, channel, pipe);
+}
+
+/**
+ * @INTERNAL
+ * Probe an ILK interface and determine the number of ports
+ * connected to it. The ILK interface should still be down
+ * after this call.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_ilk_probe(int xiface)
+{
+	int res = 0;
+	int interface;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	interface = xi.interface - CVMX_ILK_GBL_BASE();
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return 0;
+
+	/* the configuration should be done only once */
+	if (cvmx_ilk_get_intf_ena(xiface))
+		return cvmx_ilk_chans[xi.node][interface];
+
+	/* configure lanes and enable the link */
+	res = cvmx_ilk_start_interface(((xi.node << 4) | interface),
+				       cvmx_ilk_lane_mask[xi.node][interface]);
+	if (res < 0)
+		return 0;
+
+	res = __cvmx_helper_ilk_enumerate(xiface);
+
+	return res;
+}
+
+static int __cvmx_helper_ilk_init_port_cn68xx(int xiface)
+{
+	int i, j, res = -1;
+	static int pipe_base, pknd_base;
+	static cvmx_ilk_pipe_chan_t *pch, *tmp;
+	static cvmx_ilk_chan_pknd_t *chpknd, *tmp1;
+	static cvmx_ilk_cal_entry_t *calent, *tmp2;
+	int enable_rx_cal = 1;
+	int interface;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int intf;
+	int num_chans;
+
+	interface = xi.interface - CVMX_ILK_GBL_BASE();
+	intf = (xi.node << 4) | interface;
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return 0;
+
+	num_chans = cvmx_ilk_chans[0][interface];
+
+	/* set up channel to pkind mapping */
+	if (pknd_base == 0)
+		pknd_base = cvmx_helper_get_pknd(xiface, 0);
+
+	/* set up the group of pipes available to ilk */
+	if (pipe_base == 0)
+		pipe_base =
+			__cvmx_pko_get_pipe(interface + CVMX_ILK_GBL_BASE(), 0);
+
+	if (pipe_base == -1) {
+		pipe_base = 0;
+		return 0;
+	}
+
+	res = cvmx_ilk_set_pipe(xiface, pipe_base,
+				cvmx_ilk_chans[0][interface]);
+	if (res < 0)
+		return 0;
+
+	/* set up pipe to channel mapping */
+	i = pipe_base;
+	if (!pch) {
+		pch = (cvmx_ilk_pipe_chan_t *)cvmx_bootmem_alloc(
+			num_chans * sizeof(cvmx_ilk_pipe_chan_t),
+			sizeof(cvmx_ilk_pipe_chan_t));
+		if (!pch)
+			return 0;
+	}
+
+	memset(pch, 0, num_chans * sizeof(cvmx_ilk_pipe_chan_t));
+	tmp = pch;
+	for (j = 0; j < num_chans; j++) {
+		tmp->pipe = i++;
+		tmp->chan = j;
+		tmp++;
+	}
+	res = cvmx_ilk_tx_set_channel(interface, pch,
+				      cvmx_ilk_chans[0][interface]);
+	if (res < 0) {
+		res = 0;
+		goto err_free_pch;
+	}
+	pipe_base += cvmx_ilk_chans[0][interface];
+	i = pknd_base;
+	if (!chpknd) {
+		chpknd = (cvmx_ilk_chan_pknd_t *)cvmx_bootmem_alloc(
+			CVMX_ILK_MAX_PKNDS * sizeof(cvmx_ilk_chan_pknd_t),
+			sizeof(cvmx_ilk_chan_pknd_t));
+		if (!chpknd) {
+			pipe_base -= cvmx_ilk_chans[xi.node][interface];
+			res = 0;
+			goto err_free_pch;
+		}
+	}
+
+	memset(chpknd, 0, CVMX_ILK_MAX_PKNDS * sizeof(cvmx_ilk_chan_pknd_t));
+	tmp1 = chpknd;
+	for (j = 0; j < cvmx_ilk_chans[xi.node][interface]; j++) {
+		tmp1->chan = j;
+		tmp1->pknd = i++;
+		tmp1++;
+	}
+
+	res = cvmx_ilk_rx_set_pknd(xiface, chpknd,
+				   cvmx_ilk_chans[xi.node][interface]);
+	if (res < 0) {
+		pipe_base -= cvmx_ilk_chans[xi.node][interface];
+		res = 0;
+		goto err_free_chpknd;
+	}
+	pknd_base += cvmx_ilk_chans[xi.node][interface];
+
+	/* Set up tx calendar */
+	if (!calent) {
+		calent = (cvmx_ilk_cal_entry_t *)cvmx_bootmem_alloc(
+			CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t),
+			sizeof(cvmx_ilk_cal_entry_t));
+		if (!calent) {
+			pipe_base -= cvmx_ilk_chans[xi.node][interface];
+			pknd_base -= cvmx_ilk_chans[xi.node][interface];
+			res = 0;
+			goto err_free_chpknd;
+		}
+	}
+
+	memset(calent, 0, CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t));
+	tmp1 = chpknd;
+	tmp2 = calent;
+	for (j = 0; j < cvmx_ilk_chans[xi.node][interface]; j++) {
+		tmp2->pipe_bpid = tmp1->pknd;
+		tmp2->ent_ctrl = PIPE_BPID;
+		tmp1++;
+		tmp2++;
+	}
+	res = cvmx_ilk_cal_setup_tx(intf, cvmx_ilk_chans[xi.node][interface],
+				    calent, 1);
+	if (res < 0) {
+		pipe_base -= cvmx_ilk_chans[xi.node][interface];
+		pknd_base -= cvmx_ilk_chans[xi.node][interface];
+		res = 0;
+		goto err_free_calent;
+	}
+
+	/* Set up the rx calendar. The memory allocated above can be reused
+	 * because the maximum number of pkinds is always less than the
+	 * maximum number of pipes.
+	 */
+	memset(calent, 0, CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t));
+	tmp = pch;
+	tmp2 = calent;
+	for (j = 0; j < cvmx_ilk_chans[0][interface]; j++) {
+		tmp2->pipe_bpid = tmp->pipe;
+		tmp2->ent_ctrl = PIPE_BPID;
+		tmp++;
+		tmp2++;
+	}
+	if (cvmx_ilk_use_la_mode(interface, 0))
+		enable_rx_cal = cvmx_ilk_la_mode_enable_rx_calendar(interface);
+	else
+		enable_rx_cal = 1;
+
+	res = cvmx_ilk_cal_setup_rx(intf, cvmx_ilk_chans[xi.node][interface],
+				    calent, CVMX_ILK_RX_FIFO_WM, enable_rx_cal);
+	if (res < 0) {
+		pipe_base -= cvmx_ilk_chans[xi.node][interface];
+		pknd_base -= cvmx_ilk_chans[xi.node][interface];
+		res = 0;
+		goto err_free_calent;
+	}
+	goto out;
+
+err_free_calent:
+	/* no free() for cvmx_bootmem_alloc() */
+
+err_free_chpknd:
+	/* no free() for cvmx_bootmem_alloc() */
+
+err_free_pch:
+	/* no free() for cvmx_bootmem_alloc() */
+out:
+	return res;
+}
+
+static int __cvmx_helper_ilk_init_port_cn78xx(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface;
+	int intf;
+
+	interface = xi.interface - CVMX_ILK_GBL_BASE();
+	intf = (xi.node << 4) | interface;
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return 0;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		struct cvmx_pki_style_config style_cfg;
+		int num_channels = cvmx_ilk_chans[xi.node][interface];
+		int index, i;
+
+		for (i = 0; i < num_channels; i++) {
+			int pknd;
+
+			index = (i % 8);
+
+			/* Set jabber to allow max sized packets */
+			if (i == 0)
+				csr_wr_node(xi.node,
+					    CVMX_ILK_RXX_JABBER(interface),
+					    0xfff8);
+
+			/* Setup PKND */
+			pknd = cvmx_helper_get_pknd(xiface, index);
+			csr_wr_node(xi.node, CVMX_ILK_RXX_CHAX(i, interface),
+				    pknd);
+			cvmx_pki_read_style_config(
+				0, pknd, CVMX_PKI_CLUSTER_ALL, &style_cfg);
+			style_cfg.parm_cfg.qpg_port_sh = 0;
+			/* 256 channels */
+			style_cfg.parm_cfg.qpg_port_msb = 8;
+			cvmx_pki_write_style_config(
+				0, pknd, CVMX_PKI_CLUSTER_ALL, &style_cfg);
+		}
+
+		cvmx_ilk_cal_setup_tx(intf, num_channels, NULL, 1);
+		cvmx_ilk_cal_setup_rx(intf, num_channels, NULL,
+				      CVMX_ILK_RX_FIFO_WM, 1);
+	}
+	return 0;
+}
+
+static int __cvmx_helper_ilk_init_port(int xiface)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return __cvmx_helper_ilk_init_port_cn68xx(xiface);
+	else
+		return __cvmx_helper_ilk_init_port_cn78xx(xiface);
+}
+
+/**
+ * @INTERNAL
+ * Bring up and enable an ILK interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_ilk_enable(int xiface)
+{
+	if (__cvmx_helper_ilk_init_port(xiface) < 0)
+		return -1;
+
+	return cvmx_ilk_enable(xiface);
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by ILK link status.
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	int xiface = cvmx_helper_get_interface_num(ipd_port);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface;
+	int retry_count = 0;
+	cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+	cvmx_ilk_rxx_int_t ilk_rxx_int;
+	int lane_mask = 0;
+	int i;
+	int node = xi.node;
+
+	result.u64 = 0;
+	interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+retry:
+	retry_count++;
+	if (retry_count > 200)
+		goto fail;
+
+	/* Read RX config and status bits */
+	ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+	ilk_rxx_int.u64 = csr_rd_node(node, CVMX_ILK_RXX_INT(interface));
+
+	if (ilk_rxx_cfg1.s.rx_bdry_lock_ena == 0) {
+		/* (GSER-21957) GSER RX Equalization may make >= 5gbaud non-KR
+		 * channel better
+		 */
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+			int qlm, lane_mask;
+
+			for (qlm = 4; qlm < 8; qlm++) {
+				lane_mask = 1 << (qlm - 4) * 4;
+				if (lane_mask &
+				    cvmx_ilk_lane_mask[node][interface]) {
+					if (__cvmx_qlm_rx_equalization(
+						    node, qlm, -1))
+						goto retry;
+				}
+			}
+		}
+
+		/* Clear the boundary lock status bit */
+		ilk_rxx_int.u64 = 0;
+		ilk_rxx_int.s.word_sync_done = 1;
+		csr_wr_node(node, CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+		/* We need to start looking for word boundary lock */
+		ilk_rxx_cfg1.s.rx_bdry_lock_ena =
+			cvmx_ilk_lane_mask[node][interface];
+		ilk_rxx_cfg1.s.rx_align_ena = 0;
+		csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+			    ilk_rxx_cfg1.u64);
+		//debug("ILK%d: Looking for word boundary lock\n", interface);
+		udelay(50);
+		goto retry;
+	}
+
+	if (ilk_rxx_cfg1.s.rx_align_ena == 0) {
+		if (ilk_rxx_int.s.word_sync_done) {
+			/* Clear the lane align status bits */
+			ilk_rxx_int.u64 = 0;
+			ilk_rxx_int.s.lane_align_fail = 1;
+			ilk_rxx_int.s.lane_align_done = 1;
+			csr_wr_node(node, CVMX_ILK_RXX_INT(interface),
+				    ilk_rxx_int.u64);
+
+			ilk_rxx_cfg1.s.rx_align_ena = 1;
+			csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+				    ilk_rxx_cfg1.u64);
+			//printf("ILK%d: Looking for lane alignment\n", interface);
+		}
+		udelay(50);
+		goto retry;
+	}
+
+	if (ilk_rxx_int.s.lane_align_fail) {
+		ilk_rxx_cfg1.s.rx_bdry_lock_ena = 0;
+		ilk_rxx_cfg1.s.rx_align_ena = 0;
+		csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+			    ilk_rxx_cfg1.u64);
+		//debug("ILK%d: Lane alignment failed\n", interface);
+		goto fail;
+	}
+
+	lane_mask = ilk_rxx_cfg1.s.rx_bdry_lock_ena;
+
+	if (ilk_rxx_cfg1.s.pkt_ena == 0 && ilk_rxx_int.s.lane_align_done) {
+		cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+
+		ilk_txx_cfg1.u64 =
+			csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+		ilk_rxx_cfg1.u64 =
+			csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+		ilk_rxx_cfg1.s.pkt_ena = ilk_txx_cfg1.s.pkt_ena;
+		csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+			    ilk_rxx_cfg1.u64);
+
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+			/*
+			 * Enable rxf_ctl_perr, rxf_lnk0_perr, rxf_lnk1_perr,
+			 * pop_empty, push_full.
+			 */
+			csr_wr(CVMX_ILK_GBL_INT_EN, 0x1f);
+			/* Enable bad_pipe, bad_seq, txf_err */
+			csr_wr(CVMX_ILK_TXX_INT_EN(interface), 0x7);
+
+			/*
+			 * Enable crc24_err, lane_bad_word,
+			 * pkt_drop_{rid,rxf,sop}
+			 */
+			csr_wr(CVMX_ILK_RXX_INT_EN(interface), 0x1e2);
+		}
+		/* Need to enable ILK interrupts for 78xx */
+
+		for (i = 0; i < CVMX_ILK_MAX_LANES(); i++) {
+			if ((1 << i) & lane_mask) {
+				/* clear pending interrupts, before enabling. */
+				csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i),
+					    0x1ff);
+				/* Enable bad_64b67b, bdry_sync_loss, crc32_err,
+				 * dskew_fifo_ovfl, scrm_sync_loss,
+				 * serdes_lock_loss, stat_msg, ukwn_cntl_word
+				 */
+				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+					csr_wr(CVMX_ILK_RX_LNEX_INT_EN(i),
+					       0x1ff);
+			}
+		}
+
+		//debug("ILK%d: Lane alignment complete\n", interface);
+	}
+
+	/* Enable error interrupts, now link is up */
+	cvmx_error_enable_group(CVMX_ERROR_GROUP_ILK,
+				node | (interface << 2) | (lane_mask << 4));
+
+	result.s.link_up = 1;
+	result.s.full_duplex = 1;
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		int qlm = cvmx_qlm_lmac(xiface, 0);
+
+		result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 64 / 67;
+	} else {
+		result.s.speed =
+			cvmx_qlm_get_gbaud_mhz(1 + interface) * 64 / 67;
+	}
+	result.s.speed *= cvmx_pop(lane_mask);
+
+	return result;
+
+fail:
+	if (ilk_rxx_cfg1.s.pkt_ena) {
+		/* Disable the interface */
+		ilk_rxx_cfg1.s.pkt_ena = 0;
+		csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
+			    ilk_rxx_cfg1.u64);
+
+		/* Disable error interrupts */
+		for (i = 0; i < CVMX_ILK_MAX_LANES(); i++) {
+			/* Disable bad_64b67b, bdry_sync_loss, crc32_err,
+			 * dskew_fifo_ovfl, scrm_sync_loss, serdes_lock_loss,
+			 * stat_msg, ukwn_cntl_word
+			 */
+			if ((1 << i) & lane_mask) {
+				csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i),
+					    0x1ff);
+				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+					csr_wr(CVMX_ILK_RX_LNEX_INT_EN(i),
+					       ~0x1ff);
+			}
+		}
+		/* Disable error interrupts */
+		cvmx_error_enable_group(CVMX_ERROR_GROUP_ILK, 0);
+	}
+
+	return result;
+}
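A hedged caller-side sketch of the bring-up polling above; the IPD port
number is hypothetical, and each call already retries internally up to 200
times before reporting link down:

	cvmx_helper_link_info_t li;
	int ipd_port = 0x800;	/* hypothetical ILK IPD port */

	do {
		li = __cvmx_helper_ilk_link_get(ipd_port);
	} while (!li.s.link_up);

	printf("ILK link up: %d Mbps, full duplex %d\n",
	       li.s.speed, li.s.full_duplex);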
+
+/**
+ * @INTERNAL
+ * Set the link state of an IPD/PKO port.
+ *
+ * @param ipd_port  IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+	/* Do nothing */
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-ipd.c b/arch/mips/mach-octeon/cvmx-helper-ipd.c
new file mode 100644
index 0000000..cb04b63
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-ipd.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * IPD helper functions.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+#include <mach/cvmx-pip.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/** It allocate pools for packet and wqe pools
+ * and sets up the FPA hardware
+ */
+int __cvmx_helper_ipd_setup_fpa_pools(void)
+{
+	cvmx_fpa_global_initialize();
+	if (cvmx_ipd_cfg.packet_pool.buffer_count == 0)
+		return 0;
+	__cvmx_helper_initialize_fpa_pool(cvmx_ipd_cfg.packet_pool.pool_num,
+					  cvmx_ipd_cfg.packet_pool.buffer_size,
+					  cvmx_ipd_cfg.packet_pool.buffer_count,
+					  "Packet Buffers");
+	if (cvmx_ipd_cfg.wqe_pool.buffer_count == 0)
+		return 0;
+	__cvmx_helper_initialize_fpa_pool(cvmx_ipd_cfg.wqe_pool.pool_num,
+					  cvmx_ipd_cfg.wqe_pool.buffer_size,
+					  cvmx_ipd_cfg.wqe_pool.buffer_count,
+					  "WQE Buffers");
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Set up global settings for IPD/PIP not related to a specific
+ * interface or port. This must be called before IPD is enabled.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_ipd_global_setup(void)
+{
+	/* Set up the packet and WQE pools */
+	__cvmx_helper_ipd_setup_fpa_pools();
+	/* Setup the global packet input options */
+	cvmx_ipd_config(cvmx_ipd_cfg.packet_pool.buffer_size / 8,
+			cvmx_ipd_cfg.first_mbuf_skip / 8,
+			cvmx_ipd_cfg.not_first_mbuf_skip / 8,
+			/* The +8 is to account for the next ptr */
+			(cvmx_ipd_cfg.first_mbuf_skip + 8) / 128,
+			/* The +8 is to account for the next ptr */
+			(cvmx_ipd_cfg.not_first_mbuf_skip + 8) / 128,
+			cvmx_ipd_cfg.wqe_pool.pool_num,
+			(cvmx_ipd_mode_t)(cvmx_ipd_cfg.cache_mode), 1);
+	return 0;
+}
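The divisions above convert byte counts into the units IPD expects (8-byte
words and 128-byte cache lines). A quick worked example with hypothetical
skip sizes:

	static void example_ipd_units(void)
	{
		/* Hypothetical sizes: 2048-byte buffers, 184/40-byte skips */
		int buffer_size = 2048, first_skip = 184, later_skip = 40;

		printf("words per buffer:       %d\n", buffer_size / 8);	/* 256 */
		printf("first-skip words:       %d\n", first_skip / 8);	/* 23 */
		/* The +8 accounts for the next-buffer pointer, as noted above */
		printf("first-skip cache lines: %d\n", (first_skip + 8) / 128);	/* 1 */
		printf("later-skip cache lines: %d\n", (later_skip + 8) / 128);	/* 0 */
	}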
+
+/**
+ * Enable or disable FCS stripping for all the ports on an interface.
+ *
+ * @param xiface interface to configure
+ * @param nports number of ports on the interface
+ * @param has_fcs 0 to disable FCS stripping, non-zero to enable it
+ */
+static int cvmx_helper_fcs_op(int xiface, int nports, int has_fcs)
+{
+	u64 port_bit;
+	int index;
+	int pknd;
+	union cvmx_pip_sub_pkind_fcsx pkind_fcsx;
+	union cvmx_pip_prt_cfgx port_cfg;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	if (!octeon_has_feature(OCTEON_FEATURE_PKND))
+		return 0;
+	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+		cvmx_helper_pki_set_fcs_op(xi.node, xi.interface, nports,
+					   has_fcs);
+		return 0;
+	}
+
+	port_bit = 0;
+	for (index = 0; index < nports; index++)
+		port_bit |= ((u64)1 << cvmx_helper_get_pknd(xiface, index));
+
+	pkind_fcsx.u64 = csr_rd(CVMX_PIP_SUB_PKIND_FCSX(0));
+	if (has_fcs)
+		pkind_fcsx.s.port_bit |= port_bit;
+	else
+		pkind_fcsx.s.port_bit &= ~port_bit;
+	csr_wr(CVMX_PIP_SUB_PKIND_FCSX(0), pkind_fcsx.u64);
+
+	for (pknd = 0; pknd < 64; pknd++) {
+		if ((1ull << pknd) & port_bit) {
+			port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
+			port_cfg.s.crc_en = (has_fcs) ? 1 : 0;
+			csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
+		}
+	}
+
+	return 0;
+}
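A short sketch of the pknd bitmask the loop above constructs; the pknd
values are hypothetical:

	static u64 example_pknd_mask(void)
	{
		const int pknds[] = { 16, 17, 18, 19 };	/* hypothetical pknds */
		u64 port_bit = 0;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(pknds); i++)
			port_bit |= (u64)1 << pknds[i];

		return port_bit;	/* 0x00000000000f0000 */
	}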
+
+/**
+ * @INTERNAL
+ * Configure the IPD/PIP tagging and QoS options for a specific
+ * port. This function determines the POW work queue entry
+ * contents for a port. The setup performed here is controlled by
+ * the defines in executive-config.h.
+ *
+ * @param ipd_port Port/Port kind to configure. This follows the IPD numbering,
+ *                 not the per interface numbering
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_ipd_port_setup(int ipd_port)
+{
+	union cvmx_pip_prt_cfgx port_config;
+	union cvmx_pip_prt_tagx tag_config;
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		int xiface, index, pknd;
+		union cvmx_pip_prt_cfgbx prt_cfgbx;
+
+		xiface = cvmx_helper_get_interface_num(ipd_port);
+		index = cvmx_helper_get_interface_index_num(ipd_port);
+		pknd = cvmx_helper_get_pknd(xiface, index);
+
+		port_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
+		tag_config.u64 = csr_rd(CVMX_PIP_PRT_TAGX(pknd));
+
+		port_config.s.qos = pknd & 0x7;
+
+		/* Default BPID to use for packets on this port-kind */
+		prt_cfgbx.u64 = csr_rd(CVMX_PIP_PRT_CFGBX(pknd));
+		prt_cfgbx.s.bpid = pknd;
+		csr_wr(CVMX_PIP_PRT_CFGBX(pknd), prt_cfgbx.u64);
+	} else {
+		port_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(ipd_port));
+		tag_config.u64 = csr_rd(CVMX_PIP_PRT_TAGX(ipd_port));
+
+		/* Have each port go to a different POW queue */
+		port_config.s.qos = ipd_port & 0x7;
+	}
+
+	/* Process the headers and place the IP header in the work queue */
+	port_config.s.mode =
+		(cvmx_pip_port_parse_mode_t)cvmx_ipd_cfg.port_config.parse_mode;
+
+	tag_config.s.ip6_src_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv6_src_ip;
+	tag_config.s.ip6_dst_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv6_dst_ip;
+	tag_config.s.ip6_sprt_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv6_src_port;
+	tag_config.s.ip6_dprt_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv6_dst_port;
+	tag_config.s.ip6_nxth_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv6_next_header;
+	tag_config.s.ip4_src_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_ip;
+	tag_config.s.ip4_dst_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_ip;
+	tag_config.s.ip4_sprt_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_port;
+	tag_config.s.ip4_dprt_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_port;
+	tag_config.s.ip4_pctl_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.ipv4_protocol;
+	tag_config.s.inc_prt_flag =
+		cvmx_ipd_cfg.port_config.tag_fields.input_port;
+	tag_config.s.tcp6_tag_type =
+		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+	tag_config.s.tcp4_tag_type =
+		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+	tag_config.s.ip6_tag_type =
+		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+	tag_config.s.ip4_tag_type =
+		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+	tag_config.s.non_tag_type =
+		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
+
+	/* Put all packets in group 0. Other groups can be used by the app */
+	tag_config.s.grp = 0;
+
+	cvmx_pip_config_port(ipd_port, port_config, tag_config);
+
+	/* Give the user a chance to override our setting for each port */
+	if (cvmx_override_ipd_port_setup)
+		cvmx_override_ipd_port_setup(ipd_port);
+
+	return 0;
+}
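A hedged sketch of what the tag plumbing above achieves. Assuming
cvmx_ipd_cfg is writable before initialization (as its use here suggests), a
selection like the one below makes every packet of one IPv4 4-tuple flow
hash to the same POW tag, so the flow is processed in order:

	/* Hypothetical tag-field selection, set before calling the helpers */
	cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_ip = 1;
	cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_ip = 1;
	cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_port = 1;
	cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_port = 1;
	/* __cvmx_helper_ipd_port_setup() then mirrors these flags into
	 * PIP_PRT_TAGX so the tag covers the full 4-tuple.
	 */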
+
+/**
+ * @INTERNAL
+ * Set up the IPD/PIP for the ports on an interface. Packet
+ * classification and tagging are set for every port on the
+ * interface. The number of ports on the interface must already
+ * have been probed.
+ *
+ * @param xiface Interface to set up IPD/PIP for
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_ipd_setup_interface(int xiface)
+{
+	cvmx_helper_interface_mode_t mode;
+	int num_ports = cvmx_helper_ports_on_interface(xiface);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int ipd_port = cvmx_helper_get_ipd_port(xiface, 0);
+	int delta;
+
+	if (num_ports == CVMX_HELPER_CFG_INVALID_VALUE)
+		return 0;
+
+	delta = 1;
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		if (xi.interface < CVMX_HELPER_MAX_GMX)
+			delta = 16;
+	}
+
+	while (num_ports--) {
+		if (!cvmx_helper_is_port_valid(xiface, num_ports))
+			continue;
+		if (octeon_has_feature(OCTEON_FEATURE_PKI))
+			__cvmx_helper_pki_port_setup(xi.node, ipd_port);
+		else
+			__cvmx_helper_ipd_port_setup(ipd_port);
+		ipd_port += delta;
+	}
+	/* FCS settings */
+	cvmx_helper_fcs_op(xiface, cvmx_helper_ports_on_interface(xiface),
+			   __cvmx_helper_get_has_fcs(xiface));
+
+	mode = cvmx_helper_interface_get_mode(xiface);
+
+	if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
+		__cvmx_helper_loop_enable(xiface);
+
+	return 0;
+}
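For clarity, a sketch of how the delta above walks IPD port numbers: on
pkind-capable chips, GMX interfaces space their ports 16 apart, while other
interfaces number them consecutively. The base port is hypothetical:

	static void example_port_walk(int base, int num_ports, int delta)
	{
		int i;

		for (i = 0; i < num_ports; i++)
			printf("index %d -> ipd_port %#x\n", i, base + i * delta);
	}

	/* example_port_walk(0x800, 4, 16) -> 0x800 0x810 0x820 0x830 */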
diff --git a/arch/mips/mach-octeon/cvmx-helper-loop.c b/arch/mips/mach-octeon/cvmx-helper-loop.c
new file mode 100644
index 0000000..8eaeac3
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-loop.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ */
+
+#include <log.h>
+#include <malloc.h>
+#include <net.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon_fdt.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-gpio.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-lbk-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+int __cvmx_helper_loop_enumerate(int xiface)
+{
+	return OCTEON_IS_MODEL(OCTEON_CN68XX) ?
+		       8 : (OCTEON_IS_MODEL(OCTEON_CNF71XX) ? 2 : 4);
+}
+
+/**
+ * @INTERNAL
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down
+ * after this call.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_loop_probe(int xiface)
+{
+	return __cvmx_helper_loop_enumerate(xiface);
+}
+
+/**
+ * @INTERNAL
+ * Bring up and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_loop_enable(int xiface)
+{
+	cvmx_pip_prt_cfgx_t port_cfg;
+	int num_ports, index;
+	unsigned long offset;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	num_ports = __cvmx_helper_get_num_ipd_ports(xiface);
+	/*
+	 * We need to disable length checking so packet < 64 bytes and jumbo
+	 * frames don't get errors
+	 */
+	for (index = 0; index < num_ports; index++) {
+		offset = octeon_has_feature(OCTEON_FEATURE_PKND) ?
+			cvmx_helper_get_pknd(xiface, index) :
+			cvmx_helper_get_ipd_port(xiface, index);
+
+		if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+			cvmx_pki_endis_l2_errs(xi.node, offset, 1, 0, 0);
+			cvmx_pki_endis_fcs_check(xi.node, offset, 0, 0);
+		} else {
+			port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(offset));
+			port_cfg.s.maxerr_en = 0;
+			port_cfg.s.minerr_en = 0;
+			csr_wr(CVMX_PIP_PRT_CFGX(offset), port_cfg.u64);
+		}
+	}
+
+	/*
+	 * Disable FCS stripping for loopback ports
+	 */
+	if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs;
+
+		ipd_sub_port_fcs.u64 = csr_rd(CVMX_IPD_SUB_PORT_FCS);
+		ipd_sub_port_fcs.s.port_bit2 = 0;
+		csr_wr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+	}
+	/*
+	 * Set PKND and BPID for loopback ports.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		cvmx_pko_reg_loopback_pkind_t lp_pknd;
+		cvmx_pko_reg_loopback_bpid_t lp_bpid;
+
+		for (index = 0; index < num_ports; index++) {
+			int pknd = cvmx_helper_get_pknd(xiface, index);
+			int bpid = cvmx_helper_get_bpid(xiface, index);
+
+			lp_pknd.u64 = csr_rd(CVMX_PKO_REG_LOOPBACK_PKIND);
+			lp_bpid.u64 = csr_rd(CVMX_PKO_REG_LOOPBACK_BPID);
+
+			if (index == 0)
+				lp_pknd.s.num_ports = num_ports;
+
+			switch (index) {
+			case 0:
+				lp_pknd.s.pkind0 = pknd;
+				lp_bpid.s.bpid0 = bpid;
+				break;
+			case 1:
+				lp_pknd.s.pkind1 = pknd;
+				lp_bpid.s.bpid1 = bpid;
+				break;
+			case 2:
+				lp_pknd.s.pkind2 = pknd;
+				lp_bpid.s.bpid2 = bpid;
+				break;
+			case 3:
+				lp_pknd.s.pkind3 = pknd;
+				lp_bpid.s.bpid3 = bpid;
+				break;
+			case 4:
+				lp_pknd.s.pkind4 = pknd;
+				lp_bpid.s.bpid4 = bpid;
+				break;
+			case 5:
+				lp_pknd.s.pkind5 = pknd;
+				lp_bpid.s.bpid5 = bpid;
+				break;
+			case 6:
+				lp_pknd.s.pkind6 = pknd;
+				lp_bpid.s.bpid6 = bpid;
+				break;
+			case 7:
+				lp_pknd.s.pkind7 = pknd;
+				lp_bpid.s.bpid7 = bpid;
+				break;
+			}
+			csr_wr(CVMX_PKO_REG_LOOPBACK_PKIND, lp_pknd.u64);
+			csr_wr(CVMX_PKO_REG_LOOPBACK_BPID, lp_bpid.u64);
+		}
+	} else if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+		cvmx_lbk_chx_pkind_t lbk_pkind;
+
+		for (index = 0; index < num_ports; index++) {
+			lbk_pkind.u64 = 0;
+			lbk_pkind.s.pkind = cvmx_helper_get_pknd(xiface, index);
+			csr_wr_node(xi.node, CVMX_LBK_CHX_PKIND(index),
+				    lbk_pkind.u64);
+		}
+	}
+
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-npi.c b/arch/mips/mach-octeon/cvmx-helper-npi.c
new file mode 100644
index 0000000..92ee1a8
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-npi.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for NPI initialization, configuration,
+ * and monitoring.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pexp-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-sli-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+static int cvmx_npi_num_pipes = -1;
+
+/**
+ * @INTERNAL
+ * Probe an NPI interface and determine the number of ports
+ * connected to it. The NPI interface should still be down
+ * after this call.
+ *
+ * @param interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_npi_probe(int interface)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return 32;
+	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		return 128;
+	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		return 64;
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bring up and enable an NPI interface. After this call, packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_npi_enable(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+	int port;
+	int num_ports = cvmx_helper_ports_on_interface(interface);
+
+	/*
+	 * On CN50XX, CN52XX, and CN56XX we need to disable length
+	 * checking so packets < 64 bytes and jumbo frames don't get
+	 * errors.
+	 */
+	for (port = 0; port < num_ports; port++) {
+		union cvmx_pip_prt_cfgx port_cfg;
+		int ipd_port =
+			(octeon_has_feature(OCTEON_FEATURE_PKND)) ?
+				cvmx_helper_get_pknd(interface, port) :
+				cvmx_helper_get_ipd_port(interface, port);
+		if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+			unsigned int node = cvmx_get_node_num();
+
+			cvmx_pki_endis_l2_errs(node, ipd_port, 0, 0, 0);
+
+		} else {
+			port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(ipd_port));
+			port_cfg.s.lenerr_en = 0;
+			port_cfg.s.maxerr_en = 0;
+			port_cfg.s.minerr_en = 0;
+			csr_wr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
+		}
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+			/* Set up pknd and bpid */
+			union cvmx_sli_portx_pkind config;
+
+			config.u64 = csr_rd(CVMX_PEXP_SLI_PORTX_PKIND(port));
+			config.s.bpkind = cvmx_helper_get_bpid(interface, port);
+			config.s.pkind = cvmx_helper_get_pknd(interface, port);
+			csr_wr(CVMX_PEXP_SLI_PORTX_PKIND(port), config.u64);
+		}
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		/*
+		 * Set up pko pipes.
+		 */
+		union cvmx_sli_tx_pipe config;
+
+		config.u64 = csr_rd(CVMX_PEXP_SLI_TX_PIPE);
+		config.s.base = __cvmx_pko_get_pipe(interface, 0);
+		config.s.nump =
+			cvmx_npi_num_pipes < 0 ? num_ports : cvmx_npi_num_pipes;
+		csr_wr(CVMX_PEXP_SLI_TX_PIPE, config.u64);
+	}
+
+	/* Enables are controlled by the remote host, so nothing to do here */
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-pki.c b/arch/mips/mach-octeon/cvmx-helper-pki.c
new file mode 100644
index 0000000..51fa4fb
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-pki.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKI helper functions.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pexp-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-sli-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-pki.h>
+
+#include <mach/cvmx-global-resources.h>
+#include <mach/cvmx-pko-internal-ports-range.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pip.h>
+
+static int pki_helper_debug;
+
+bool cvmx_pki_dflt_init[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] = 1 };
+
+static bool cvmx_pki_dflt_bp_en[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] =
+							    true };
+static struct cvmx_pki_cluster_grp_config pki_dflt_clgrp[CVMX_MAX_NODES] = {
+	{ 0, 0xf },
+	{ 0, 0xf }
+};
+
+struct cvmx_pki_pool_config pki_dflt_pool[CVMX_MAX_NODES] = {
+	[0 ... CVMX_MAX_NODES -
+	 1] = { .pool_num = -1, .buffer_size = 2048, .buffer_count = 0 }
+};
+
+struct cvmx_pki_aura_config pki_dflt_aura[CVMX_MAX_NODES] = {
+	[0 ... CVMX_MAX_NODES -
+	 1] = { .aura_num = 0, .pool_num = -1, .buffer_count = 0 }
+};
+
+struct cvmx_pki_style_config pki_dflt_style[CVMX_MAX_NODES] = {
+	[0 ... CVMX_MAX_NODES - 1] = { .parm_cfg = { .lenerr_en = 1,
+						     .maxerr_en = 1,
+						     .minerr_en = 1,
+						     .fcs_strip = 1,
+						     .fcs_chk = 1,
+						     .first_skip = 40,
+						     .mbuff_size = 2048 } }
+};
+
+struct cvmx_pki_sso_grp_config pki_dflt_sso_grp[CVMX_MAX_NODES];
+struct cvmx_pki_qpg_config pki_dflt_qpg[CVMX_MAX_NODES];
+struct cvmx_pki_pkind_config pki_dflt_pkind[CVMX_MAX_NODES];
+u64 pkind_style_map[CVMX_MAX_NODES][CVMX_PKI_NUM_PKIND] = {
+	[0 ... CVMX_MAX_NODES -
+	 1] = { 0,  1,	2,  3,	4,  5,	6,  7,	8,  9,	10, 11, 12, 13, 14, 15,
+		16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+		32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+		48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }
+};
+
+/* Storage for the QoS watcher values before they are written to the PCAM
+ * when the watcher is enabled. Since no cvmx-pip.c file exists, they
+ * ended up here.
+ */
+struct cvmx_pki_legacy_qos_watcher qos_watcher[8];
+
+/** @INTERNAL
+ * This function sets up the default ltype map
+ * @param node    node number
+ */
+void __cvmx_helper_pki_set_dflt_ltype_map(int node)
+{
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_NONE,
+				 CVMX_PKI_BELTYPE_NONE);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_ENET,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_VLAN,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SNAP_PAYLD,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_ARP,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_RARP,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP4,
+				 CVMX_PKI_BELTYPE_IP4);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP4_OPT,
+				 CVMX_PKI_BELTYPE_IP4);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP6,
+				 CVMX_PKI_BELTYPE_IP6);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP6_OPT,
+				 CVMX_PKI_BELTYPE_IP6);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPSEC_ESP,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPFRAG,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPCOMP,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_TCP,
+				 CVMX_PKI_BELTYPE_TCP);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_UDP,
+				 CVMX_PKI_BELTYPE_UDP);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SCTP,
+				 CVMX_PKI_BELTYPE_SCTP);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_UDP_VXLAN,
+				 CVMX_PKI_BELTYPE_UDP);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_GRE,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_NVGRE,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_GTP,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW28,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW29,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW30,
+				 CVMX_PKI_BELTYPE_MISC);
+	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW31,
+				 CVMX_PKI_BELTYPE_MISC);
+}
+
+/** @INTERNAL
+ * This function installs the default VLAN entries to identify
+ * the VLAN and set WQE[vv], WQE[vs] if a VLAN is found. In 78XX
+ * the hardware (PKI) is not hardwired to recognize any 802.1Q
+ * VLAN Ethertypes.
+ *
+ * @param node    node number
+ */
+int __cvmx_helper_pki_install_dflt_vlan(int node)
+{
+	struct cvmx_pki_pcam_input pcam_input;
+	struct cvmx_pki_pcam_action pcam_action;
+	enum cvmx_pki_term field;
+	int index;
+	int bank;
+	u64 cl_mask = CVMX_PKI_CLUSTER_ALL;
+
+	memset(&pcam_input, 0, sizeof(pcam_input));
+	memset(&pcam_action, 0, sizeof(pcam_action));
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+		/* PKI-20858 */
+		int i;
+
+		for (i = 0; i < 4; i++) {
+			union cvmx_pki_clx_ecc_ctl ecc_ctl;
+
+			ecc_ctl.u64 =
+				csr_rd_node(node, CVMX_PKI_CLX_ECC_CTL(i));
+			ecc_ctl.s.pcam_en = 0;
+			ecc_ctl.s.pcam0_cdis = 1;
+			ecc_ctl.s.pcam1_cdis = 1;
+			csr_wr_node(node, CVMX_PKI_CLX_ECC_CTL(i), ecc_ctl.u64);
+		}
+	}
+
+	for (field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
+	     field < CVMX_PKI_PCAM_TERM_ETHTYPE2; field++) {
+		bank = field & 0x01;
+
+		index = cvmx_pki_pcam_entry_alloc(
+			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
+		if (index < 0) {
+			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
+			      node, bank);
+			return -1;
+		}
+		pcam_input.style = 0;
+		pcam_input.style_mask = 0;
+		pcam_input.field = field;
+		pcam_input.field_mask = 0xfd;
+		pcam_input.data = 0x81000000;
+		pcam_input.data_mask = 0xffff0000;
+		pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
+		pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_VLAN;
+		pcam_action.style_add = 0;
+		pcam_action.pointer_advance = 4;
+		cvmx_pki_pcam_write_entry(
+			node, index, cl_mask, pcam_input,
+			pcam_action); /* cluster_mask in pass2 */
+
+		index = cvmx_pki_pcam_entry_alloc(
+			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
+		if (index < 0) {
+			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
+			      node, bank);
+			return -1;
+		}
+		pcam_input.data = 0x88a80000;
+		cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
+					  pcam_action);
+
+		index = cvmx_pki_pcam_entry_alloc(
+			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
+		if (index < 0) {
+			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
+			      node, bank);
+			return -1;
+		}
+		pcam_input.data = 0x92000000;
+		cvmx_pki_pcam_write_entry(
+			node, index, cl_mask, pcam_input,
+			pcam_action); /* cluster_mask in pass2 */
+
+		index = cvmx_pki_pcam_entry_alloc(
+			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
+		if (index < 0) {
+			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
+			      node, bank);
+			return -1;
+		}
+		pcam_input.data = 0x91000000;
+		cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
+					  pcam_action);
+	}
+	return 0;
+}
+
+static int __cvmx_helper_setup_pki_cluster_groups(int node)
+{
+	u64 cl_mask;
+	int cl_group;
+
+	cl_group =
+		cvmx_pki_cluster_grp_alloc(node, pki_dflt_clgrp[node].grp_num);
+	if (cl_group == CVMX_RESOURCE_ALLOC_FAILED)
+		return -1;
+	else if (cl_group == CVMX_RESOURCE_ALREADY_RESERVED) {
+		if (pki_dflt_clgrp[node].grp_num == -1)
+			return -1;
+		else
+			return 0; /* cluster already configured, share it */
+	}
+	cl_mask = pki_dflt_clgrp[node].cluster_mask;
+	if (pki_helper_debug)
+		debug("pki-helper: setup pki cluster grp %d with cl_mask 0x%llx\n",
+		      (int)cl_group, (unsigned long long)cl_mask);
+	cvmx_pki_attach_cluster_to_group(node, cl_group, cl_mask);
+	return 0;
+}
+
+/**
+ * This function sets up pools/auras to be used by PKI
+ * @param node    node number
+ */
+static int __cvmx_helper_pki_setup_fpa_pools(int node)
+{
+	u64 buffer_count;
+	u64 buffer_size;
+
+	if (__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura))
+		return 0; /* aura already configured, share it */
+
+	buffer_count = pki_dflt_pool[node].buffer_count;
+	buffer_size = pki_dflt_pool[node].buffer_size;
+
+	if (buffer_count != 0) {
+		pki_dflt_pool[node].pool = cvmx_fpa3_setup_fill_pool(
+			node, pki_dflt_pool[node].pool_num, "PKI POOL DFLT",
+			buffer_size, buffer_count, NULL);
+		if (!__cvmx_fpa3_pool_valid(pki_dflt_pool[node].pool)) {
+			cvmx_printf("ERROR: %s: Failed to allocate pool %d\n",
+				    __func__, pki_dflt_pool[node].pool_num);
+			return -1;
+		}
+		pki_dflt_pool[node].pool_num = pki_dflt_pool[node].pool.lpool;
+
+		if (pki_helper_debug)
+			debug("%s pool %d with buffer size %d cnt %d\n",
+			      __func__, pki_dflt_pool[node].pool_num,
+			      (int)buffer_size, (int)buffer_count);
+
+		pki_dflt_aura[node].pool_num = pki_dflt_pool[node].pool_num;
+		pki_dflt_aura[node].pool = pki_dflt_pool[node].pool;
+	}
+
+	buffer_count = pki_dflt_aura[node].buffer_count;
+
+	if (buffer_count != 0) {
+		pki_dflt_aura[node].aura = cvmx_fpa3_set_aura_for_pool(
+			pki_dflt_aura[node].pool, pki_dflt_aura[node].aura_num,
+			"PKI DFLT AURA", buffer_size, buffer_count);
+
+		if (!__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura)) {
+			debug("ERROR: %s: Failed to allocate aura %d\n",
+			      __func__, pki_dflt_aura[node].aura_num);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int __cvmx_helper_setup_pki_qpg_table(int node)
+{
+	int offset;
+
+	offset = cvmx_pki_qpg_entry_alloc(node, pki_dflt_qpg[node].qpg_base, 1);
+	if (offset == CVMX_RESOURCE_ALLOC_FAILED)
+		return -1;
+	else if (offset == CVMX_RESOURCE_ALREADY_RESERVED)
+		return 0; /* share the qpg table entry */
+	if (pki_helper_debug)
+		debug("pki-helper: set qpg entry at offset %d with port add %d aura %d grp_ok %d grp_bad %d\n",
+		      offset, pki_dflt_qpg[node].port_add,
+		      pki_dflt_qpg[node].aura_num, pki_dflt_qpg[node].grp_ok,
+		      pki_dflt_qpg[node].grp_bad);
+	cvmx_pki_write_qpg_entry(node, offset, &pki_dflt_qpg[node]);
+	return 0;
+}
+
+int __cvmx_helper_pki_port_setup(int node, int ipd_port)
+{
+	int xiface, index;
+	int pknd, style_num;
+	int rs;
+	struct cvmx_pki_pkind_config pkind_cfg;
+
+	if (!cvmx_pki_dflt_init[node])
+		return 0;
+	xiface = cvmx_helper_get_interface_num(ipd_port);
+	index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	pknd = cvmx_helper_get_pknd(xiface, index);
+	style_num = pkind_style_map[node][pknd];
+
+	/*
+	 * Try to reserve the style; if it is not configured already,
+	 * reserve and configure it.
+	 */
+	rs = cvmx_pki_style_alloc(node, style_num);
+	if (rs < 0) {
+		if (rs == CVMX_RESOURCE_ALLOC_FAILED)
+			return -1;
+	} else {
+		if (pki_helper_debug)
+			debug("pki-helper: set style %d with default parameters\n",
+			      style_num);
+		pkind_style_map[node][pknd] = style_num;
+		/* configure style with default parameters */
+		cvmx_pki_write_style_config(node, style_num,
+					    CVMX_PKI_CLUSTER_ALL,
+					    &pki_dflt_style[node]);
+	}
+	if (pki_helper_debug)
+		debug("pki-helper: set pkind %d with initial style %d\n", pknd,
+		      style_num);
+	/* write pkind configuration */
+	pkind_cfg = pki_dflt_pkind[node];
+	pkind_cfg.initial_style = style_num;
+	cvmx_pki_write_pkind_config(node, pknd, &pkind_cfg);
+	return 0;
+}
+
+int __cvmx_helper_pki_global_setup(int node)
+{
+	__cvmx_helper_pki_set_dflt_ltype_map(node);
+	if (!cvmx_pki_dflt_init[node])
+		return 0;
+	/* Set up the packet pools */
+	__cvmx_helper_pki_setup_fpa_pools(node);
+	/* Set up the default cluster */
+	__cvmx_helper_setup_pki_cluster_groups(node);
+	//__cvmx_helper_pki_setup_sso_groups(node);
+	__cvmx_helper_setup_pki_qpg_table(node);
+	/*
+	 * Errata PKI-19103: backward compatibility has only one aura,
+	 * no head-of-line blocking.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+		cvmx_pki_buf_ctl_t buf_ctl;
+
+		buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+		buf_ctl.s.fpa_wait = 1;
+		csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
+	}
+	return 0;
+}
+
+/**
+ * This function enables the PKI hardware to
+ * start accepting/processing packets.
+ *
+ * @param node    node number
+ */
+void cvmx_helper_pki_enable(int node)
+{
+	if (pki_helper_debug)
+		debug("enable PKI on node %d\n", node);
+	__cvmx_helper_pki_install_dflt_vlan(node);
+	cvmx_pki_setup_clusters(node);
+	if (cvmx_pki_dflt_bp_en[node])
+		cvmx_pki_enable_backpressure(node);
+	cvmx_pki_parse_enable(node, 0);
+	cvmx_pki_enable(node);
+}
+
+/**
+ * This function sets up the QoS table by allocating a QPG entry and writing
+ * the provided parameters to that entry (offset).
+ * @param node	  node number.
+ * @param qpg_cfg       pointer to struct containing qpg configuration
+ */
+int cvmx_helper_pki_set_qpg_entry(int node, struct cvmx_pki_qpg_config *qpg_cfg)
+{
+	int offset;
+
+	offset = cvmx_pki_qpg_entry_alloc(node, qpg_cfg->qpg_base, 1);
+	if (pki_helper_debug)
+		debug("pki-helper:set qpg entry at offset %d\n", offset);
+	if (offset == CVMX_RESOURCE_ALREADY_RESERVED) {
+		debug("INFO:setup_qpg_table: offset %d already reserved\n",
+		      qpg_cfg->qpg_base);
+		return CVMX_RESOURCE_ALREADY_RESERVED;
+	} else if (offset == CVMX_RESOURCE_ALLOC_FAILED) {
+		debug("ERROR:setup_qpg_table: no more entries available\n");
+		return CVMX_RESOURCE_ALLOC_FAILED;
+	}
+	qpg_cfg->qpg_base = offset;
+	cvmx_pki_write_qpg_entry(node, offset, qpg_cfg);
+	return offset;
+}
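+
+/*
+ * Typical use (an illustrative sketch; the field values are hypothetical,
+ * and -1 is assumed, as elsewhere in this file, to let the allocator
+ * pick any free entry):
+ *
+ *	struct cvmx_pki_qpg_config qpg = {
+ *		.qpg_base = -1,
+ *		.aura_num = 0,
+ *		.grp_ok = 0,
+ *		.grp_bad = 0,
+ *	};
+ *	int offset = cvmx_helper_pki_set_qpg_entry(node, &qpg);
+ *
+ * On success the chosen offset is returned and also written back into
+ * qpg.qpg_base; a CVMX_RESOURCE_* error code is returned otherwise.
+ */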
+
+/**
+ * This function gets all the PKI parameters related to that
+ * particular port from hardware.
+ * @param xipd_port	xipd_port port number with node to get parameter of
+ * @param port_cfg	pointer to structure where to store read parameters
+ */
+void cvmx_pki_get_port_config(int xipd_port,
+			      struct cvmx_pki_port_config *port_cfg)
+{
+	int xiface, index, pknd;
+	int style, cl_mask;
+	cvmx_pki_icgx_cfg_t pki_cl_msk;
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+
+	/* get the pkind used by this ipd port */
+	xiface = cvmx_helper_get_interface_num(xipd_port);
+	index = cvmx_helper_get_interface_index_num(xipd_port);
+	pknd = cvmx_helper_get_pknd(xiface, index);
+
+	cvmx_pki_read_pkind_config(xp.node, pknd, &port_cfg->pkind_cfg);
+	style = port_cfg->pkind_cfg.initial_style;
+	pki_cl_msk.u64 = csr_rd_node(
+		xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
+	cl_mask = pki_cl_msk.s.clusters;
+	cvmx_pki_read_style_config(xp.node, style, cl_mask,
+				   &port_cfg->style_cfg);
+}
+
+/**
+ * This function sets all the PKI parameters related to that
+ * particular port in hardware.
+ * @param xipd_port	ipd port number with node to get parameter of
+ * @param port_cfg	pointer to structure containing port parameters
+ */
+void cvmx_pki_set_port_config(int xipd_port,
+			      struct cvmx_pki_port_config *port_cfg)
+{
+	int xiface, index, pknd;
+	int style, cl_mask;
+	cvmx_pki_icgx_cfg_t pki_cl_msk;
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);
+
+	/* get the pkind used by this ipd port */
+	xiface = cvmx_helper_get_interface_num(xipd_port);
+	index = cvmx_helper_get_interface_index_num(xipd_port);
+	pknd = cvmx_helper_get_pknd(xiface, index);
+
+	if (cvmx_pki_write_pkind_config(xp.node, pknd, &port_cfg->pkind_cfg))
+		return;
+	style = port_cfg->pkind_cfg.initial_style;
+	pki_cl_msk.u64 = csr_rd_node(
+		xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
+	cl_mask = pki_cl_msk.s.clusters;
+	cvmx_pki_write_style_config(xp.node, style, cl_mask,
+				    &port_cfg->style_cfg);
+}
+
+/**
+ * This function sets up all the ports of a particular interface
+ * for the chosen FCS mode (only used for backward compatibility).
+ * New applications can control it via init_interface calls.
+ * @param node		node number.
+ * @param interface	interface number.
+ * @param nports	number of ports
+ * @param has_fcs	1 -- enable fcs check and fcs strip.
+ *			0 -- disable fcs check.
+ */
+void cvmx_helper_pki_set_fcs_op(int node, int interface, int nports,
+				int has_fcs)
+{
+	int xiface, index;
+	int pknd;
+	unsigned int cluster = 0;
+	cvmx_pki_clx_pkindx_cfg_t pkind_cfg;
+
+	xiface = cvmx_helper_node_interface_to_xiface(node, interface);
+	for (index = 0; index < nports; index++) {
+		pknd = cvmx_helper_get_pknd(xiface, index);
+		while (cluster < CVMX_PKI_NUM_CLUSTER) {
+			/* Find the cluster in use (pass2) */
+			pkind_cfg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PKINDX_CFG(pknd, cluster));
+			pkind_cfg.s.fcs_pres = has_fcs;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_CFG(pknd, cluster),
+				    pkind_cfg.u64);
+			cluster++;
+		}
+		/* Make sure fcs_strip and fcs_check are also enabled/disabled
+		 * for the style used by that port.
+		 */
+		cvmx_pki_endis_fcs_check(node, pknd, has_fcs, has_fcs);
+		cluster = 0;
+	}
+}
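+
+/*
+ * Example (illustrative only): enable FCS check and strip on all four
+ * ports of interface 0 on node 0:
+ *
+ *	cvmx_helper_pki_set_fcs_op(0, 0, 4, 1);
+ */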
diff --git a/arch/mips/mach-octeon/cvmx-helper-pko.c b/arch/mips/mach-octeon/cvmx-helper-pko.c
new file mode 100644
index 0000000..b9ac22c
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-pko.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Helper Functions for the PKO
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+static s64 pko_fpa_config_pool = -1;
+static u64 pko_fpa_config_size = 1024;
+
+/**
+ * cvmx_override_pko_queue_priority(int ipd_port,
+ * u8 priorities[16]) is a function pointer. It is meant to allow
+ * customization of the PKO queue priorities based on the port
+ * number. Users should set this pointer to a function before
+ * calling any cvmx-helper operations.
+ */
+void (*cvmx_override_pko_queue_priority)(int ipd_port,
+					 uint8_t *priorities) = NULL;
+
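+/*
+ * Illustrative sketch (a hypothetical callback, not part of this file):
+ * an application could give queue 0 top priority and leave the rest in
+ * round-robin by installing its own vector before any helper calls:
+ *
+ *	static void app_queue_prio(int ipd_port, uint8_t *priorities)
+ *	{
+ *		int i;
+ *
+ *		priorities[0] = 8;
+ *		for (i = 1; i < 16; i++)
+ *			priorities[i] = 1;
+ *	}
+ *
+ *	cvmx_override_pko_queue_priority = app_queue_prio;
+ */
+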
+int64_t cvmx_fpa_get_pko_pool(void)
+{
+	return pko_fpa_config_pool;
+}
+
+/**
+ * Get the buffer size of the PKO pool
+ */
+u64 cvmx_fpa_get_pko_pool_block_size(void)
+{
+	return pko_fpa_config_size;
+}
+
+/**
+ * Initialize PKO command queue buffer pool
+ */
+static int cvmx_helper_pko_pool_init(void)
+{
+	u8 pool;
+	unsigned int buf_count;
+	unsigned int pkt_buf_count;
+	int rc;
+
+	/* Reserve pool */
+	pool = cvmx_fpa_get_pko_pool();
+
+	/* Avoid redundant pool creation */
+	if (cvmx_fpa_get_block_size(pool) > 0) {
+#ifdef DEBUG
+		debug("WARNING: %s: pool %d already initialized\n", __func__,
+		      pool);
+#endif
+		/* It is up to the app to have sufficient buffer count */
+		return pool;
+	}
+
+	/* Calculate buffer count: one per queue + 3-word-cmds * max_pkts */
+	pkt_buf_count = cvmx_fpa_get_packet_pool_buffer_count();
+	buf_count = CVMX_PKO_MAX_OUTPUT_QUEUES + (pkt_buf_count * 3) / 8;
+
+	/* Allocate pools for pko command queues */
+	rc = __cvmx_helper_initialize_fpa_pool(pool,
+					       cvmx_fpa_get_pko_pool_block_size(),
+					       buf_count, "PKO Cmd-bufs");
+
+	if (rc < 0)
+		debug("%s: ERROR: in PKO buffer pool\n", __func__);
+
+	pool = rc;
+	return pool;
+}
+
+/**
+ * Initialize the PKO
+ *
+ */
+int cvmx_helper_pko_init(void)
+{
+	int rc;
+
+	rc = cvmx_helper_pko_pool_init();
+	if (rc < 0)
+		return rc;
+
+	__cvmx_helper_init_port_config_data(0);
+
+	cvmx_pko_hw_init(cvmx_fpa_get_pko_pool(),
+			 cvmx_fpa_get_pko_pool_block_size());
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Setup the PKO for the ports on an interface. The number of
+ * queues per port and the priority of each PKO output queue
+ * is set here. PKO must be disabled when this function is called.
+ *
+ * @param interface to setup PKO for
+ *
+ * @return Zero on success, negative on failure
+ *
+ * @note This is for PKO1/PKO2, and is not used for PKO3.
+ */
+int __cvmx_helper_interface_setup_pko(int interface)
+{
+	/*
+	 * Each packet output queue has an associated priority. The
+	 * higher the priority, the more often it can send a packet. A
+	 * priority of 8 means it can send in all 8 rounds of
+	 * contention. We're going to make each queue one less than
+	 * the last.  The vector of priorities has been extended to
+	 * support CN5xxx CPUs, where up to 16 queues can be
+	 * associated to a port.  To keep backward compatibility we
+	 * don't change the initial 8 priorities and replicate them in
+	 * the second half.  With per-core PKO queues (PKO lockless
+	 * operation) all queues have the same priority.
+	 */
+	/* uint8_t priorities[16] = {8,7,6,5,4,3,2,1,8,7,6,5,4,3,2,1}; */
+	u8 priorities[16] = { [0 ... 15] = 8 };
+
+	/*
+	 * Setup the IPD/PIP and PKO for the ports discovered
+	 * above. Here packet classification, tagging and output
+	 * priorities are set.
+	 */
+	int num_ports = cvmx_helper_ports_on_interface(interface);
+
+	while (num_ports--) {
+		int ipd_port;
+
+		if (!cvmx_helper_is_port_valid(interface, num_ports))
+			continue;
+
+		ipd_port = cvmx_helper_get_ipd_port(interface, num_ports);
+		/*
+		 * Give the user a chance to override the per queue
+		 * priorities.
+		 */
+		if (cvmx_override_pko_queue_priority)
+			cvmx_override_pko_queue_priority(ipd_port, priorities);
+
+		cvmx_pko_config_port(ipd_port,
+				     cvmx_pko_get_base_queue(ipd_port),
+				     cvmx_pko_get_num_queues(ipd_port),
+				     priorities);
+		ipd_port++;
+	}
+	return 0;
+	/* NOTE:
+	 * Now this function is called for all chips including 68xx,
+	 * but on the 68xx it does not enable multiple pko_iports per
+	 * eport, while before it was doing 3 pko_iport per eport
+	 * eport, while before it was doing 3 pko_iports per eport,
+	 * but the reason for that is not clear.
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-pko3.c b/arch/mips/mach-octeon/cvmx-helper-pko3.c
new file mode 100644
index 0000000..37fe45d
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-pko3.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKOv3 helper file
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/* channels are present at L2 queue level by default */
+static const enum cvmx_pko3_level_e cvmx_pko_default_channel_level =
+	CVMX_PKO_L2_QUEUES;
+
+static const int debug;
+
+static int __pko_pkt_budget, __pko_pkt_quota;
+
+/* These global variables are relevant for boot CPU only */
+static cvmx_fpa3_gaura_t __cvmx_pko3_aura[CVMX_MAX_NODES];
+
+/* This constant cannot be modified; defined here for clarity only */
+#define CVMX_PKO3_POOL_BUFFER_SIZE 4096 /* 78XX PKO requires 4KB */
+
+/**
+ * @INTERNAL
+ *
+ * Build an owner tag based on interface/port
+ */
+static int __cvmx_helper_pko3_res_owner(int ipd_port)
+{
+	int res_owner;
+	const int res_owner_pfix = 0x19d0 << 14;
+
+	ipd_port &= 0x3fff; /* 12-bit for local CHAN_E value + node */
+
+	res_owner = res_owner_pfix | ipd_port;
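+	/* e.g. ipd_port 0x100 yields owner tag 0x6740000 | 0x100 = 0x6740100 */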
+
+	return res_owner;
+}
+
+/**
+ * Configure an AURA/POOL designated for PKO internal use.
+ *
+ * This pool is used for (a) memory buffers that store PKO descriptor queues,
+ * (b) buffers for use with PKO_SEND_JUMP_S sub-header.
+ *
+ * The buffers of type (a) are never accessed by software, and their number
+ * should be at least equal to 4 times the number of descriptor queues
+ * in use.
+ *
+ * Type (b) buffers are consumed by PKO3 command-composition code,
+ * and are released by the hardware upon completion of transmission.
+ *
+ * @returns -1 if the pool could not be established or 12-bit AURA
+ * that includes the node number for use in PKO3 initialization call.
+ *
+ * NOTE: the Linux kernel should pass its own aura to the PKO3
+ * initialization function so that the buffers can be mapped into kernel
+ * space for when software needs to access their contents.
+ *
+ */
+static int __cvmx_pko3_config_memory(unsigned int node)
+{
+	cvmx_fpa3_gaura_t aura;
+	int aura_num;
+	unsigned int buf_count;
+	bool small_mem;
+	int i, num_intf = 0;
+	const unsigned int pkt_per_buf =
+		(CVMX_PKO3_POOL_BUFFER_SIZE / sizeof(u64) / 16);
+	const unsigned int base_buf_count = 1024 * 4;
+
+	/* Simulator has limited memory, but uses one interface at a time */
+	//	small_mem = cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM;
+	small_mem = false;
+
+	/* Count the number of live interfaces */
+	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
+		int xiface = cvmx_helper_node_interface_to_xiface(node, i);
+
+		if (CVMX_HELPER_INTERFACE_MODE_DISABLED !=
+		    cvmx_helper_interface_get_mode(xiface))
+			num_intf++;
+	}
+
+	buf_count = 1024;
+	__pko_pkt_quota = buf_count * pkt_per_buf;
+	__pko_pkt_budget = __pko_pkt_quota * num_intf;
+	(void)small_mem;
+	(void)base_buf_count;
+
+	if (debug)
+		debug("%s: Creating AURA with %u buffers for up to %d total packets, %d packets per interface\n",
+		      __func__, buf_count, __pko_pkt_budget, __pko_pkt_quota);
+
+	aura = cvmx_fpa3_setup_aura_and_pool(node, -1, "PKO3 AURA", NULL,
+					     CVMX_PKO3_POOL_BUFFER_SIZE,
+					     buf_count);
+
+	if (!__cvmx_fpa3_aura_valid(aura)) {
+		printf("ERROR: %s AURA create failed\n", __func__);
+		return -1;
+	}
+
+	aura_num = aura.node << 10 | aura.laura;
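+	/*
+	 * Example: node 1, local aura 5 encodes as (1 << 10) | 5 = 0x405,
+	 * the node-qualified AURA handle handed to PKO3 initialization.
+	 */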
+
+	/* Store handle for destruction */
+	__cvmx_pko3_aura[node] = aura;
+
+	return aura_num;
+}
+
+/** Initialize a channelized port
+ * This is intended for LOOP, ILK and NPI interfaces which have one MAC
+ * per interface and need a channel per subinterface (e.g. ring).
+ * Each channel then may have 'num_queues' descriptor queues
+ * attached to it, which can also be prioritized or fair.
+ */
+static int __cvmx_pko3_config_chan_interface(int xiface, unsigned int num_chans,
+					     u8 num_queues, bool prioritized)
+{
+	int l1_q_num;
+	int l2_q_base;
+	enum cvmx_pko3_level_e level;
+	int res;
+	int parent_q, child_q;
+	unsigned int chan, dq;
+	int pko_mac_num;
+	u16 ipd_port;
+	int res_owner, prio;
+	unsigned int i;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	unsigned int node = xi.node;
+	char b1[12];
+
+	if (num_queues == 0)
+		num_queues = 1;
+	if ((cvmx_pko3_num_level_queues(CVMX_PKO_DESCR_QUEUES) / num_chans) < 3)
+		num_queues = 1;
+
+	if (prioritized && num_queues > 1)
+		prio = num_queues;
+	else
+		prio = -1;
+
+	if (debug)
+		debug("%s: configuring xiface %u:%u with %u chans %u queues each\n",
+		      __func__, xi.node, xi.interface, num_chans, num_queues);
+
+	/* All channels go to the same MAC */
+	pko_mac_num = __cvmx_pko3_get_mac_num(xiface, 0);
+	if (pko_mac_num < 0) {
+		printf("ERROR: %s: Invalid interface\n", __func__);
+		return -1;
+	}
+
+	/* Resources of all channels on this port have common owner */
+	ipd_port = cvmx_helper_get_ipd_port(xiface, 0);
+
+	/* Build an identifiable owner */
+	res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+
+	/* Start configuration at L1/PQ */
+	level = CVMX_PKO_PORT_QUEUES;
+
+	/* Reserve port queue to make sure the MAC is not already configured */
+	l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+	if (l1_q_num < 0) {
+		printf("ERROR: %s: Reserving L1 PQ\n", __func__);
+		return -1;
+	}
+
+	res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
+	if (res < 0) {
+		printf("ERROR: %s: Configuring L1 PQ\n", __func__);
+		return -1;
+	}
+
+	/* next queue level = L2/SQ */
+	level = __cvmx_pko3_sq_lvl_next(level);
+
+	/* allocate level 2 queues, one per channel */
+	l2_q_base =
+		cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
+	if (l2_q_base < 0) {
+		printf("ERROR: %s: allocation L2 SQ\n", __func__);
+		return -1;
+	}
+
+	/* Configure <num_chans> L2 children for PQ, non-prioritized */
+	res = cvmx_pko3_sq_config_children(node, level, l1_q_num, l2_q_base,
+					   num_chans, -1);
+
+	if (res < 0) {
+		printf("ERROR: %s: Failed channel queues\n", __func__);
+		return -1;
+	}
+
+	/* map channels to l2 queues */
+	for (chan = 0; chan < num_chans; chan++) {
+		ipd_port = cvmx_helper_get_ipd_port(xiface, chan);
+		cvmx_pko3_map_channel(node, l1_q_num, l2_q_base + chan,
+				      ipd_port);
+	}
+
+	/* next queue level = L3/SQ */
+	level = __cvmx_pko3_sq_lvl_next(level);
+	parent_q = l2_q_base;
+
+	do {
+		child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1,
+						num_chans);
+
+		if (child_q < 0) {
+			printf("ERROR: %s: allocating %s\n", __func__,
+			       __cvmx_pko3_sq_str(b1, level, child_q));
+			return -1;
+		}
+
+		for (i = 0; i < num_chans; i++) {
+			res = cvmx_pko3_sq_config_children(
+				node, level, parent_q + i, child_q + i, 1, 1);
+
+			if (res < 0) {
+				printf("ERROR: %s: configuring %s\n", __func__,
+				       __cvmx_pko3_sq_str(b1, level, child_q));
+				return -1;
+			}
+
+		} /* for i */
+
+		parent_q = child_q;
+		level = __cvmx_pko3_sq_lvl_next(level);
+
+		/* Terminate loop on DQ level, it has special handling */
+	} while (level != CVMX_PKO_DESCR_QUEUES &&
+		 level != CVMX_PKO_LEVEL_INVAL);
+
+	if (level != CVMX_PKO_DESCR_QUEUES) {
+		printf("ERROR: %s: level sequence error\n", __func__);
+		return -1;
+	}
+
+	/* Configure DQs, num_dqs per chan */
+	for (chan = 0; chan < num_chans; chan++) {
+		res = cvmx_pko_alloc_queues(node, level, res_owner, -1,
+					    num_queues);
+
+		if (res < 0)
+			goto _fail;
+		dq = res;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0) && (dq & 7))
+			debug("WARNING: %s: DQ# %u not integral of 8\n",
+			      __func__, dq);
+
+		res = cvmx_pko3_sq_config_children(node, level, parent_q + chan,
+						   dq, num_queues, prio);
+		if (res < 0)
+			goto _fail;
+
+		/* register DQ range with the translation table */
+		res = __cvmx_pko3_ipd_dq_register(xiface, chan, dq, num_queues);
+		if (res < 0)
+			goto _fail;
+	}
+
+	return 0;
+_fail:
+	debug("ERROR: %s: configuring queues for xiface %u:%u chan %u\n",
+	      __func__, xi.node, xi.interface, chan);
+	return -1;
+}
+
+/** Initialize a single Ethernet port with PFC-style channels
+ *
+ * One interface can contain multiple ports; this function is per-port.
+ * Here, a physical port is allocated 8 logical channels, one per VLAN
+ * tag priority; one DQ is assigned to each channel, and all 8 DQs
+ * are registered for that IPD port.
+ * Note that the DQs are arranged such that the Ethernet QoS/PCP field
+ * can be used as an offset to the value returned by cvmx_pko_base_queue_get().
+ *
+ * For HighGig2 mode, 16 channels may be desired, instead of 8,
+ * but this function does not support that.
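+ *
+ * Illustrative use (a hypothetical sketch, not an API guarantee): with
+ * this arrangement a sender could pick the DQ for a VLAN PCP value as
+ *
+ *	dq = cvmx_pko_base_queue_get(ipd_port) + pcp;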
+ */
+static int __cvmx_pko3_config_pfc_interface(int xiface, unsigned int port)
+{
+	enum cvmx_pko3_level_e level;
+	int pko_mac_num;
+	int l1_q_num, l2_q_base;
+	int child_q, parent_q;
+	int dq_base;
+	int res;
+	const unsigned int num_chans = 8;
+	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+	unsigned int node = xi.node;
+	u16 ipd_port;
+	int res_owner;
+	char b1[12];
+	unsigned int i;
+
+	if (debug)
+		debug("%s: configuring xiface %u:%u port %u with %u PFC channels\n",
+		      __func__, node, xi.interface, port, num_chans);
+
+	/* Get MAC number for the iface/port */
+	pko_mac_num = __cvmx_pko3_get_mac_num(xiface, port);
+	if (pko_mac_num < 0) {
+		printf("ERROR: %s: Invalid interface\n", __func__);
+		return -1;
+	}
+
+	ipd_port = cvmx_helper_get_ipd_port(xiface, port);
+
+	/* Build an identifiable owner identifier */
+	res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+
+	level = CVMX_PKO_PORT_QUEUES;
+
+	/* Allocate port queue to make sure the MAC is not already configured */
+	l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+	if (l1_q_num < 0) {
+		printf("ERROR: %s: allocation L1 PQ\n", __func__);
+		return -1;
+	}
+
+	res = cvmx_pko3_pq_config(xi.node, pko_mac_num, l1_q_num);
+	if (res < 0) {
+		printf("ERROR: %s: Configuring %s\n", __func__,
+		       __cvmx_pko3_sq_str(b1, level, l1_q_num));
+		return -1;
+	}
+
+	/* Determine the next queue level */
+	level = __cvmx_pko3_sq_lvl_next(level);
+
+	/* Allocate 'num_chans' L2 queues, one per channel */
+	l2_q_base =
+		cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
+	if (l2_q_base < 0) {
+		printf("ERROR: %s: allocation L2 SQ\n", __func__);
+		return -1;
+	}
+
+	/* Configure <num_chans> L2 children for PQ, with static priority */
+	res = cvmx_pko3_sq_config_children(node, level, l1_q_num, l2_q_base,
+					   num_chans, num_chans);
+
+	if (res < 0) {
+		printf("ERROR: %s: Configuring %s for PFC\n", __func__,
+		       __cvmx_pko3_sq_str(b1, level, l1_q_num));
+		return -1;
+	}
+
+	/* Map each of the allocated channels */
+	for (i = 0; i < num_chans; i++) {
+		u16 chan;
+
+		/* Get CHAN_E value for this PFC channel, PCP in low 3 bits */
+		chan = ipd_port | cvmx_helper_prio2qos(i);
+
+		cvmx_pko3_map_channel(node, l1_q_num, l2_q_base + i, chan);
+	}
+
+	/* Iterate through the levels until DQ and allocate 'num_chans'
+	 * consecutive queues at each level and hook them up
+	 * one-to-one with the parent level queues
+	 */
+
+	parent_q = l2_q_base;
+	level = __cvmx_pko3_sq_lvl_next(level);
+
+	do {
+		child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1,
+						num_chans);
+
+		if (child_q < 0) {
+			printf("ERROR: %s: allocating %s\n", __func__,
+			       __cvmx_pko3_sq_str(b1, level, child_q));
+			return -1;
+		}
+
+		for (i = 0; i < num_chans; i++) {
+			res = cvmx_pko3_sq_config_children(
+				node, level, parent_q + i, child_q + i, 1, 1);
+
+			if (res < 0) {
+				printf("ERROR: %s: configuring %s\n", __func__,
+				       __cvmx_pko3_sq_str(b1, level, child_q));
+				return -1;
+			}
+
+		} /* for i */
+
+		parent_q = child_q;
+		level = __cvmx_pko3_sq_lvl_next(level);
+
+		/* Terminate loop on DQ level, it has special handling */
+	} while (level != CVMX_PKO_DESCR_QUEUES &&
+		 level != CVMX_PKO_LEVEL_INVAL);
+
+	if (level != CVMX_PKO_DESCR_QUEUES) {
+		printf("ERROR: %s: level sequence error\n", __func__);
+		return -1;
+	}
+
+	dq_base = cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
+	if (dq_base < 0) {
+		printf("ERROR: %s: allocating %s\n", __func__,
+		       __cvmx_pko3_sq_str(b1, level, dq_base));
+		return -1;
+	}
+
+	/* Configure DQs in QoS order, so that QoS/PCP can be used as an index */
+	for (i = 0; i < num_chans; i++) {
+		int dq_num = dq_base + cvmx_helper_prio2qos(i);
+
+		res = cvmx_pko3_sq_config_children(node, level, parent_q + i,
+						   dq_num, 1, 1);
+		if (res < 0) {
+			printf("ERROR: %s: configuring %s\n", __func__,
+			       __cvmx_pko3_sq_str(b1, level, dq_num));
+			return -1;
+		}
+	}
+
+	/* register entire DQ range with the IPD translation table */
+	__cvmx_pko3_ipd_dq_register(xiface, port, dq_base, num_chans);
+
+	return 0;
+}
+
+/**
+ * Initialize a simple interface with a given number of
+ * fair or prioritized queues.
+ * This function will assign one channel per sub-interface.
+ */
+int __cvmx_pko3_config_gen_interface(int xiface, uint8_t subif, u8 num_queues,
+				     bool prioritized)
+{
+	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+	u8 node = xi.node;
+	int l1_q_num;
+	int parent_q, child_q;
+	int dq;
+	int res, res_owner;
+	int pko_mac_num;
+	enum cvmx_pko3_level_e level;
+	u16 ipd_port;
+	int static_pri;
+	char b1[12];
+
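+	/* NOTE: the queue count is currently forced to 1 here, which makes
+	 * the misconfiguration check below unreachable.
+	 */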
+	num_queues = 1;
+
+	if (num_queues == 0) {
+		num_queues = 1;
+		printf("WARNING: %s: xiface %#x misconfigured\n", __func__,
+		       xiface);
+	}
+
+	/* Configure DQs relative priority (a.k.a. scheduling) */
+	if (prioritized) {
+		/* With 8 queues or fewer, use static priority, else WRR */
+		static_pri = (num_queues < 9) ? num_queues : 0;
+	} else {
+		/* Set equal-RR scheduling among queues */
+		static_pri = -1;
+	}
+
+	if (debug)
+		debug("%s: configuring xiface %u:%u/%u nq=%u %s\n", __func__,
+		      xi.node, xi.interface, subif, num_queues,
+		      (prioritized) ? "qos" : "fair");
+
+	/* Get MAC number for the iface/port */
+	pko_mac_num = __cvmx_pko3_get_mac_num(xiface, subif);
+	if (pko_mac_num < 0) {
+		printf("ERROR: %s: Invalid interface %u:%u\n", __func__,
+		       xi.node, xi.interface);
+		return -1;
+	}
+
+	ipd_port = cvmx_helper_get_ipd_port(xiface, subif);
+
+	if (debug)
+		debug("%s: xiface %u:%u/%u ipd_port=%#03x\n", __func__, xi.node,
+		      xi.interface, subif, ipd_port);
+
+	/* Build an identifiable owner identifier */
+	res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+
+	level = CVMX_PKO_PORT_QUEUES;
+
+	/* Reserve port queue to make sure the MAC is not already configured */
+	l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+	if (l1_q_num < 0) {
+		printf("ERROR %s: xiface %u:%u/%u failed allocation L1 PQ\n",
+		       __func__, xi.node, xi.interface, subif);
+		return -1;
+	}
+
+	res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
+	if (res < 0) {
+		printf("ERROR %s: Configuring L1 PQ\n", __func__);
+		return -1;
+	}
+
+	parent_q = l1_q_num;
+
+	/* Determine the next queue level */
+	level = __cvmx_pko3_sq_lvl_next(level);
+
+	/* Simply chain queues 1-to-1 from L2 to one before DQ level */
+	do {
+		/* allocate next level queue */
+		child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+		if (child_q < 0) {
+			printf("ERROR: %s: allocating %s\n", __func__,
+			       __cvmx_pko3_sq_str(b1, level, child_q));
+			return -1;
+		}
+
+		/* Configure the newly allocated queue */
+		res = cvmx_pko3_sq_config_children(node, level, parent_q,
+						   child_q, 1, 1);
+
+		if (res < 0) {
+			printf("ERROR: %s: configuring %s\n", __func__,
+			       __cvmx_pko3_sq_str(b1, level, child_q));
+			return -1;
+		}
+
+		/* map IPD/channel to L2/L3 queues */
+		if (level == cvmx_pko_default_channel_level)
+			cvmx_pko3_map_channel(node, l1_q_num, child_q,
+					      ipd_port);
+
+		/* Prepare for next level */
+		level = __cvmx_pko3_sq_lvl_next(level);
+		parent_q = child_q;
+
+		/* Terminate loop on DQ level, it has special handling */
+	} while (level != CVMX_PKO_DESCR_QUEUES &&
+		 level != CVMX_PKO_LEVEL_INVAL);
+
+	if (level != CVMX_PKO_DESCR_QUEUES) {
+		printf("ERROR: %s: level sequence error\n", __func__);
+		return -1;
+	}
+
+	/* Allocate descriptor queues for the port */
+	dq = cvmx_pko_alloc_queues(node, level, res_owner, -1, num_queues);
+	if (dq < 0) {
+		printf("ERROR: %s: could not reserve DQs\n", __func__);
+		return -1;
+	}
+
+	res = cvmx_pko3_sq_config_children(node, level, parent_q, dq,
+					   num_queues, static_pri);
+	if (res < 0) {
+		printf("ERROR: %s: configuring %s\n", __func__,
+		       __cvmx_pko3_sq_str(b1, level, dq));
+		return -1;
+	}
+
+	/* register DQ/IPD translation */
+	__cvmx_pko3_ipd_dq_register(xiface, subif, dq, num_queues);
+
+	if (debug)
+		debug("%s: xiface %u:%u/%u qs %u-%u\n", __func__, xi.node,
+		      xi.interface, subif, dq, dq + num_queues - 1);
+	return 0;
+}
+
+/** Initialize the NULL interface
+ *
+ * A NULL interface is a special case in that it is not
+ * one of the enumerated interfaces in the system, and does
+ * not apply to input either. Still, it can be very handy
+ * for dealing with packets that should be discarded in
+ * a generic, streamlined way.
+ *
+ * The Descriptor Queue 0 will be reserved for the NULL interface
+ * and the normalized (i.e. IPD) port number has the all-ones value.
+ */
+static int __cvmx_pko3_config_null_interface(unsigned int node)
+{
+	int l1_q_num;
+	int parent_q, child_q;
+	enum cvmx_pko3_level_e level;
+	int i, res, res_owner;
+	int xiface, ipd_port;
+	int num_dq = 1;	  /* # of DQs for NULL */
+	const int dq = 0; /* Reserve DQ#0 for NULL */
+	char pko_mac_num;
+	char b1[12];
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		pko_mac_num = 0x1C; /* MAC# 28 virtual MAC for NULL */
+	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		pko_mac_num = 0x0F; /* MAC# 15 virtual MAC for NULL */
+	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		pko_mac_num = 0x0A; /* MAC# 10 virtual MAC for NULL */
+	else
+		return -1;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
+		num_dq = 8;
+
+	if (debug)
+		debug("%s: null iface dq=%u-%u\n", __func__, dq,
+		      dq + num_dq - 1);
+
+	ipd_port = cvmx_helper_node_to_ipd_port(node, CVMX_PKO3_IPD_PORT_NULL);
+
+	/* Build an identifiable owner identifier by MAC# for easy release */
+	res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
+	if (res_owner < 0) {
+		debug("%s: ERROR Invalid interface\n", __func__);
+		return -1;
+	}
+
+	level = CVMX_PKO_PORT_QUEUES;
+
+	/* Allocate a port queue */
+	l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+	if (l1_q_num < 0) {
+		debug("%s: ERROR reserving L1 SQ\n", __func__);
+		return -1;
+	}
+
+	res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
+	if (res < 0) {
+		printf("ERROR: %s: PQ/L1 queue configuration\n", __func__);
+		return -1;
+	}
+
+	parent_q = l1_q_num;
+
+	/* Determine the next queue level */
+	level = __cvmx_pko3_sq_lvl_next(level);
+
+	/* Simply chain queues 1-to-1 from L2 to one before DQ level */
+	do {
+		/* allocate next level queue */
+		child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
+
+		if (child_q < 0) {
+			printf("ERROR: %s: allocating %s\n", __func__,
+			       __cvmx_pko3_sq_str(b1, level, child_q));
+			return -1;
+		}
+
+		/* Configure the newly allocated queue */
+		res = cvmx_pko3_sq_config_children(node, level, parent_q,
+						   child_q, 1, 1);
+
+		if (res < 0) {
+			printf("ERROR: %s: configuring %s\n", __func__,
+			       __cvmx_pko3_sq_str(b1, level, child_q));
+			return -1;
+		}
+
+		/* Prepare for next level */
+		level = __cvmx_pko3_sq_lvl_next(level);
+		parent_q = child_q;
+
+		/* Terminate loop on DQ level, it has special handling */
+	} while (level != CVMX_PKO_DESCR_QUEUES &&
+		 level != CVMX_PKO_LEVEL_INVAL);
+
+	if (level != CVMX_PKO_DESCR_QUEUES) {
+		printf("ERROR: %s: level sequence error\n", __func__);
+		return -1;
+	}
+
+	/* Reserve 'num_dq' DQs at 0 by convention */
+	res = cvmx_pko_alloc_queues(node, level, res_owner, dq, num_dq);
+	if (dq != res) {
+		debug("%s: ERROR: could not reserve DQs\n", __func__);
+		return -1;
+	}
+
+	res = cvmx_pko3_sq_config_children(node, level, parent_q, dq, num_dq,
+					   num_dq);
+	if (res < 0) {
+		printf("ERROR: %s: configuring %s\n", __func__,
+		       __cvmx_pko3_sq_str(b1, level, dq));
+		return -1;
+	}
+
+	/* NULL interface does not need to map to a CHAN_E */
+
+	/* register DQ/IPD translation */
+	xiface = cvmx_helper_node_interface_to_xiface(node, __CVMX_XIFACE_NULL);
+	__cvmx_pko3_ipd_dq_register(xiface, 0, dq, num_dq);
+
+	/* open the null DQs here */
+	for (i = 0; i < num_dq; i++) {
+		unsigned int limit = 128; /* NULL never really uses much */
+
+		cvmx_pko_dq_open(node, dq + i);
+		cvmx_pko3_dq_set_limit(node, dq + i, limit);
+	}
+
+	return 0;
+}
+
+/** Open all descriptor queues belonging to an interface/port
+ * @INTERNAL
+ */
+int __cvmx_pko3_helper_dqs_activate(int xiface, int index, bool min_pad)
+{
+	int ipd_port, dq_base, dq_count, i;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	unsigned int limit;
+
+	/* Get local IPD port for the interface */
+	ipd_port = cvmx_helper_get_ipd_port(xiface, index);
+	if (ipd_port < 0) {
+		printf("ERROR: %s: No IPD port for interface %d port %d\n",
+		       __func__, xiface, index);
+		return -1;
+	}
+
+	/* Get DQ# range for the IPD port */
+	dq_base = cvmx_pko3_get_queue_base(ipd_port);
+	dq_count = cvmx_pko3_get_queue_num(ipd_port);
+	if (dq_base < 0 || dq_count <= 0) {
+		printf("ERROR: %s: No descriptor queues for interface %d port %d\n",
+		       __func__, xiface, index);
+		return -1;
+	}
+
+	/* Mask out node from global DQ# */
+	dq_base &= (1 << 10) - 1;
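+	/* e.g. global DQ# 0x405 (node 1) masks down to local DQ# 5 */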
+
+	limit = __pko_pkt_quota / dq_count /
+		cvmx_helper_interface_enumerate(xiface);
+
+	for (i = 0; i < dq_count; i++) {
+		/* FIXME: 2ms at 1Gbps max packet rate, make speed dependent */
+		cvmx_pko_dq_open(xi.node, dq_base + i);
+		cvmx_pko3_dq_options(xi.node, dq_base + i, min_pad);
+
+		if (debug)
+			debug("%s: DQ%u limit %d\n", __func__, dq_base + i,
+			      limit);
+
+		cvmx_pko3_dq_set_limit(xi.node, dq_base + i, limit);
+		__pko_pkt_budget -= limit;
+	}
+
+	if (__pko_pkt_budget < 0)
+		printf("WARNING: %s: PKO buffer deficit %d\n", __func__,
+		       __pko_pkt_budget);
+	else if (debug)
+		debug("%s: PKO remaining packet budget: %d\n", __func__,
+		      __pko_pkt_budget);
+
+	return i;
+}
+
+/** Configure and initialize PKO3 for an interface
+ *
+ * @param xiface is the interface number to configure
+ * @return 0 on success.
+ */
+int cvmx_helper_pko3_init_interface(int xiface)
+{
+	cvmx_helper_interface_mode_t mode;
+	int node, iface, subif, num_ports;
+	bool fcs_enable, pad_enable, pad_enable_pko;
+	u8 fcs_sof_off = 0;
+	u8 num_queues = 1;
+	bool qos = false, pfc = false;
+	int res = -1;
+	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	node = xi.node;
+	iface = xi.interface;
+	mode = cvmx_helper_interface_get_mode(xiface);
+	num_ports = cvmx_helper_interface_enumerate(xiface);
+	subif = 0;
+
+	if ((unsigned int)iface <
+	    NUM_ELEMENTS(__cvmx_pko_queue_static_config[node].pknd.pko_cfg_iface)) {
+		pfc = __cvmx_pko_queue_static_config[node]
+			      .pknd.pko_cfg_iface[iface]
+			      .pfc_enable;
+		num_queues = __cvmx_pko_queue_static_config[node]
+				     .pknd.pko_cfg_iface[iface]
+				     .queues_per_port;
+		qos = __cvmx_pko_queue_static_config[node]
+			      .pknd.pko_cfg_iface[iface]
+			      .qos_enable;
+	}
+
+	/* Force 8 DQs per port for pass 1.0 to circumvent limitations */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
+		num_queues = 8;
+
+	/* For ILK there is one IPD port per channel */
+	if (mode == CVMX_HELPER_INTERFACE_MODE_ILK)
+		num_ports = __cvmx_helper_ilk_enumerate(xiface);
+
+	/* Skip non-existent interfaces */
+	if (num_ports < 1) {
+		debug("ERROR: %s: invalid iface %u:%u\n", __func__, node,
+		      iface);
+		return -1;
+	}
+
+	if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
+		num_queues = __cvmx_pko_queue_static_config[node]
+				     .pknd.pko_cfg_loop.queues_per_port;
+		qos = __cvmx_pko_queue_static_config[node]
+			      .pknd.pko_cfg_loop.qos_enable;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
+			num_queues = 8;
+
+		res = __cvmx_pko3_config_chan_interface(xiface, num_ports,
+							num_queues, qos);
+		if (res < 0)
+			goto __cfg_error;
+	} else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
+		num_queues = __cvmx_pko_queue_static_config[node]
+				     .pknd.pko_cfg_npi.queues_per_port;
+		qos = __cvmx_pko_queue_static_config[node]
+			      .pknd.pko_cfg_npi.qos_enable;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
+			num_queues = 8;
+
+		res = __cvmx_pko3_config_chan_interface(xiface, num_ports,
+							num_queues, qos);
+		if (res < 0)
+			goto __cfg_error;
+	}
+	/* ILK-specific queue configuration */
+	else if (mode == CVMX_HELPER_INTERFACE_MODE_ILK) {
+		unsigned int num_chans = __cvmx_helper_ilk_enumerate(xiface);
+
+		num_queues = 8;
+		qos = true;
+		pfc = false;
+
+		if (num_chans >= 128)
+			num_queues = 1;
+		else if (num_chans >= 64)
+			num_queues = 2;
+		else if (num_chans >= 32)
+			num_queues = 4;
+		else
+			num_queues = 8;
+
+		res = __cvmx_pko3_config_chan_interface(xiface, num_chans,
+							num_queues, qos);
+	}
+	/* Setup all ethernet configured for PFC */
+	else if (pfc) {
+		/* PFC interfaces have 8 prioritized queues */
+		for (subif = 0; subif < num_ports; subif++) {
+			res = __cvmx_pko3_config_pfc_interface(xiface, subif);
+			if (res < 0)
+				goto __cfg_error;
+
+			/* Enable PFC/CBFC on BGX */
+			__cvmx_helper_bgx_xaui_config_pfc(node, iface, subif,
+							  true);
+		}
+	} else {
+		/* All other interfaces follow static configuration */
+		for (subif = 0; subif < num_ports; subif++) {
+			res = __cvmx_pko3_config_gen_interface(xiface, subif,
+							       num_queues, qos);
+			if (res < 0)
+				goto __cfg_error;
+		}
+	}
+
+	fcs_enable = __cvmx_helper_get_has_fcs(xiface);
+	pad_enable = __cvmx_helper_get_pko_padding(xiface);
+
+	/* Do not use PKO PAD/FCS generation on o78p1.x on BGX interfaces */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+		pad_enable_pko = false;
+	else
+		pad_enable_pko = pad_enable;
+
+	if (debug)
+		debug("%s: iface %u:%u FCS=%d pad=%d pko=%d\n", __func__, node,
+		      iface, fcs_enable, pad_enable, pad_enable_pko);
+
+	/* Setup interface options */
+	for (subif = 0; subif < num_ports; subif++) {
+		/* Open interface/port DQs to allow transmission to begin */
+		res = __cvmx_pko3_helper_dqs_activate(xiface, subif,
+						      pad_enable_pko);
+
+		if (res < 0)
+			goto __cfg_error;
+
+		/* ILK has only one MAC, subif == logical-channel */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_ILK && subif > 0)
+			continue;
+
+		/* LOOP has only one MAC, subif == logical-channel */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP && subif > 0)
+			continue;
+
+		/* NPI has only one MAC, subif == 'ring' */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_NPI && subif > 0)
+			continue;
+
+		/* For sRIO there is a 16-byte sRIO header, outside of the FCS */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_SRIO)
+			fcs_sof_off = 16;
+
+		if (iface >= CVMX_HELPER_MAX_GMX) {
+			/* Non-BGX interface, use PKO for FCS/PAD */
+			res = cvmx_pko3_interface_options(xiface, subif,
+							  fcs_enable,
+							  pad_enable_pko,
+							  fcs_sof_off);
+		} else if (pad_enable == pad_enable_pko) {
+			/* BGX interface: FCS/PAD done by PKO */
+			res = cvmx_pko3_interface_options(xiface, subif,
+							  fcs_enable,
+							  pad_enable,
+							  fcs_sof_off);
+			cvmx_helper_bgx_tx_options(node, iface, subif, false,
+						   false);
+		} else {
+			/* BGX interface: FCS/PAD done by BGX */
+			res = cvmx_pko3_interface_options(xiface, subif, false,
+							  false, fcs_sof_off);
+			cvmx_helper_bgx_tx_options(node, iface, subif,
+						   fcs_enable, pad_enable);
+		}
+
+		if (res < 0)
+			debug("WARNING: %s: option set failed on iface %u:%u/%u\n",
+			      __func__, node, iface, subif);
+		if (debug)
+			debug("%s: iface %u:%u/%u fifo size %d\n", __func__,
+			      node, iface, subif,
+			      cvmx_pko3_port_fifo_size(xiface, subif));
+	}
+	return 0;
+
+__cfg_error:
+	debug("ERROR: %s: failed on iface %u:%u/%u\n", __func__, node, iface,
+	      subif);
+	return -1;
+}
+
+/**
+ * Global initialization for PKO3
+ *
+ * Should only be called once on each node
+ *
+ * TBD: Resolve the kernel case.
+ * When Linux eats up the entire memory, bootmem will be unable to
+ * satisfy our request, and the memory needs to come from Linux free pages.
+ */
+int __cvmx_helper_pko3_init_global(unsigned int node, uint16_t gaura)
+{
+	int res;
+
+	res = cvmx_pko3_hw_init_global(node, gaura);
+	if (res < 0) {
+		debug("ERROR: %s:failed block initialization\n", __func__);
+		return res;
+	}
+
+	/* configure channel level */
+	cvmx_pko3_channel_credit_level(node, cvmx_pko_default_channel_level);
+
+	/* add NULL MAC/DQ setup */
+	res = __cvmx_pko3_config_null_interface(node);
+	if (res < 0)
+		debug("ERROR: %s: creating NULL interface\n", __func__);
+
+	return res;
+}
+
+/**
+ * Global initialization for PKO3
+ *
+ * Should only be called once on each node
+ *
+ * When Linux consumes all of the memory, bootmem will be unable to
+ * satisfy our request, and the memory needs to come from Linux free pages.
+ */
+int cvmx_helper_pko3_init_global(unsigned int node)
+{
+	void *ptr;
+	int res = -1;
+	unsigned int aura_num = ~0;
+	cvmx_fpa3_gaura_t aura;
+
+	/* Allocate memory required by PKO3 */
+	res = __cvmx_pko3_config_memory(node);
+	if (res < 0) {
+		debug("ERROR: %s: PKO3 memory allocation error\n", __func__);
+		return res;
+	}
+
+	aura_num = res;
+	aura = __cvmx_pko3_aura[node];
+
+	/* Exercise the FPA to make sure the AURA is functional */
+	ptr = cvmx_fpa3_alloc(aura);
+
+	if (!ptr) {
+		res = -1;
+	} else {
+		cvmx_fpa3_free_nosync(ptr, aura, 0);
+		res = 0;
+	}
+
+	if (res < 0) {
+		debug("ERROR: %s: FPA failure AURA=%u:%d\n", __func__,
+		      aura.node, aura.laura);
+		return -1;
+	}
+
+	res = __cvmx_helper_pko3_init_global(node, aura_num);
+
+	if (res < 0)
+		debug("ERROR: %s: failed to start PPKO\n", __func__);
+
+	return res;
+}
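+
+/*
+ * Usage sketch (illustrative only, not part of the original sources):
+ * PKO3 bring-up calls the global init exactly once per node, before any
+ * interface or DQ is configured.  cvmx_get_node_num() is assumed to be
+ * available in this environment.
+ *
+ *	int node = cvmx_get_node_num();
+ *
+ *	if (cvmx_helper_pko3_init_global(node) < 0)
+ *		printf("PKO3 bring-up failed on node %d\n", node);
+ */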
diff --git a/arch/mips/mach-octeon/cvmx-helper-rgmii.c b/arch/mips/mach-octeon/cvmx-helper-rgmii.c
new file mode 100644
index 0000000..dc19c3b
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-rgmii.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+
+#include <mach/cvmx-hwpko.h>
+
+#include <mach/cvmx-asxx-defs.h>
+#include <mach/cvmx-dbg-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-npi-defs.h>
+#include <mach/cvmx-pko-defs.h>
+
+/**
+ * @INTERNAL
+ * Probe RGMII ports and determine the number present
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of RGMII/GMII/MII ports (0-4).
+ */
+int __cvmx_helper_rgmii_probe(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int num_ports = 0;
+	union cvmx_gmxx_inf_mode mode;
+
+	mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(xi.interface));
+
+	/*
+	 * Neither interface type is supported by this helper yet, so
+	 * report an error in either case.
+	 */
+	if (mode.s.type)
+		debug("ERROR: Unsupported Octeon model in %s\n", __func__);
+	else
+		debug("ERROR: Unsupported Octeon model in %s\n", __func__);
+	return num_ports;
+}
+
+/**
+ * @INTERNAL
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @param xiface PKO Interface to configure (0 or 1)
+ *
+ * @return Zero on success
+ */
+int __cvmx_helper_rgmii_enable(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+	int num_ports = cvmx_helper_ports_on_interface(interface);
+	int port;
+	union cvmx_gmxx_inf_mode mode;
+	union cvmx_asxx_tx_prt_en asx_tx;
+	union cvmx_asxx_rx_prt_en asx_rx;
+
+	mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
+
+	if (num_ports == -1)
+		return -1;
+	if (mode.s.en == 0)
+		return -1;
+
+	/* Configure the ASX registers needed to use the RGMII ports */
+	asx_tx.u64 = 0;
+	asx_tx.s.prt_en = cvmx_build_mask(num_ports);
+	csr_wr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
+
+	asx_rx.u64 = 0;
+	asx_rx.s.prt_en = cvmx_build_mask(num_ports);
+	csr_wr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
+
+	/* Configure the GMX registers needed to use the RGMII ports */
+	for (port = 0; port < num_ports; port++) {
+		/*
+		 * Configure more flexible RGMII preamble
+		 * checking. Pass 1 doesn't support this feature.
+		 */
+		union cvmx_gmxx_rxx_frm_ctl frm_ctl;
+
+		frm_ctl.u64 = csr_rd(CVMX_GMXX_RXX_FRM_CTL(port, interface));
+		/* New field, so must be compile time */
+		frm_ctl.s.pre_free = 1;
+		csr_wr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);
+
+		/*
+		 * Each pause frame transmitted will ask for about 10M
+		 * bit times before resume.  If buffer space comes
+		 * available before that time has expired, an XON
+		 * pause frame (0 time) will be transmitted to restart
+		 * the flow.
+		 */
+		csr_wr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface), 20000);
+		csr_wr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(port, interface),
+		       19000);
+
+		csr_wr(CVMX_ASXX_TX_CLK_SETX(port, interface), 24);
+		csr_wr(CVMX_ASXX_RX_CLK_SETX(port, interface), 24);
+	}
+
+	__cvmx_helper_setup_gmx(interface, num_ports);
+
+	/* enable the ports now */
+	for (port = 0; port < num_ports; port++) {
+		union cvmx_gmxx_prtx_cfg gmx_cfg;
+
+		cvmx_helper_link_autoconf(
+			cvmx_helper_get_ipd_port(interface, port));
+		gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(port, interface));
+		gmx_cfg.s.en = 1;
+		csr_wr(CVMX_GMXX_PRTX_CFG(port, interface), gmx_cfg.u64);
+	}
+	return 0;
+}
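+
+/*
+ * A quick check of the pause timing programmed above, assuming the
+ * registers count in the standard 512-bit-time pause quanta (which
+ * matches the "about 10M bit times" comment):
+ *
+ *	PAUSE_PKT_TIME     = 20000 quanta * 512 = 10,240,000 bit times
+ *	PAUSE_PKT_INTERVAL = 19000 quanta * 512 =  9,728,000 bit times
+ *
+ * The refresh interval is slightly shorter than the requested pause
+ * time, so the peer stays paused until buffer space frees up and an
+ * XON (zero-time) pause frame is sent.
+ */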
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	union cvmx_asxx_prt_loop asxx_prt_loop;
+
+	asxx_prt_loop.u64 = csr_rd(CVMX_ASXX_PRT_LOOP(interface));
+	if (asxx_prt_loop.s.int_loop & (1 << index)) {
+		/* Force 1Gbps full duplex on internal loopback */
+		cvmx_helper_link_info_t result;
+
+		result.u64 = 0;
+		result.s.full_duplex = 1;
+		result.s.link_up = 1;
+		result.s.speed = 1000;
+		return result;
+	} else {
+		return __cvmx_helper_board_link_get(ipd_port);
+	}
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_gmii_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	if (index == 0) {
+		result = __cvmx_helper_rgmii_link_get(ipd_port);
+	} else {
+		/* Start from a zeroed union before setting bitfields */
+		result.u64 = 0;
+		result.s.full_duplex = 1;
+		result.s.link_up = 1;
+		result.s.speed = 1000;
+	}
+
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port  IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_rgmii_link_set(int ipd_port,
+				 cvmx_helper_link_info_t link_info)
+{
+	int result = 0;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	union cvmx_gmxx_prtx_cfg original_gmx_cfg;
+	union cvmx_gmxx_prtx_cfg new_gmx_cfg;
+	union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
+	union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
+	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
+	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
+	int i;
+
+	/* Read the current settings so we know the current enable state */
+	original_gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+	new_gmx_cfg = original_gmx_cfg;
+
+	/* Disable the lowest level RX */
+	csr_wr(CVMX_ASXX_RX_PRT_EN(interface),
+	       csr_rd(CVMX_ASXX_RX_PRT_EN(interface)) & ~(1 << index));
+
+	memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
+	/* Disable all queues so that TX should become idle */
+	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+
+		csr_wr(CVMX_PKO_REG_READ_IDX, queue);
+		pko_mem_queue_qos.u64 = csr_rd(CVMX_PKO_MEM_QUEUE_QOS);
+		pko_mem_queue_qos.s.pid = ipd_port;
+		pko_mem_queue_qos.s.qid = queue;
+		pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
+		pko_mem_queue_qos.s.qos_mask = 0;
+		csr_wr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
+	}
+
+	/* Disable backpressure */
+	gmx_tx_ovr_bp.u64 = csr_rd(CVMX_GMXX_TX_OVR_BP(interface));
+	gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
+	gmx_tx_ovr_bp.s.bp &= ~(1 << index);
+	gmx_tx_ovr_bp.s.en |= 1 << index;
+	csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
+	csr_rd(CVMX_GMXX_TX_OVR_BP(interface));
+
+	/*
+	 * Poll the GMX state machine waiting for it to become
+	 * idle. Preferably we should only change speed when it is
+	 * idle. If it doesn't become idle we will still do the speed
+	 * change, but there is a slight chance that GMX will
+	 * lockup.
+	 */
+	csr_wr(CVMX_NPI_DBG_SELECT, interface * 0x800 + index * 0x100 + 0x880);
+	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data & 7, ==, 0,
+			      10000);
+	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data & 0xf, ==, 0,
+			      10000);
+
+	/* Disable the port before we make any changes */
+	new_gmx_cfg.s.en = 0;
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+	csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+
+	/* Set full/half duplex */
+	if (!link_info.s.link_up)
+		/* Force full duplex on down links */
+		new_gmx_cfg.s.duplex = 1;
+	else
+		new_gmx_cfg.s.duplex = link_info.s.full_duplex;
+
+	/* Set the link speed. Anything unknown is set to 1Gbps */
+	if (link_info.s.speed == 10) {
+		new_gmx_cfg.s.slottime = 0;
+		new_gmx_cfg.s.speed = 0;
+	} else if (link_info.s.speed == 100) {
+		new_gmx_cfg.s.slottime = 0;
+		new_gmx_cfg.s.speed = 0;
+	} else {
+		new_gmx_cfg.s.slottime = 1;
+		new_gmx_cfg.s.speed = 1;
+	}
+
+	/* Adjust the clocks */
+	if (link_info.s.speed == 10) {
+		csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 50);
+		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+	} else if (link_info.s.speed == 100) {
+		csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 5);
+		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+	} else {
+		csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+	}
+
+	/* Do a read to make sure all setup is complete */
+	csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+
+	/* Save the new GMX setting without enabling the port */
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+	/* Enable the lowest level RX */
+	if (link_info.s.link_up)
+		csr_wr(CVMX_ASXX_RX_PRT_EN(interface),
+		       csr_rd(CVMX_ASXX_RX_PRT_EN(interface)) | (1 << index));
+
+	/* Re-enable the TX path */
+	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+		int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+
+		csr_wr(CVMX_PKO_REG_READ_IDX, queue);
+		csr_wr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos_save[i].u64);
+	}
+
+	/* Restore backpressure */
+	csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
+
+	/* Restore the GMX enable state. Port config is complete */
+	new_gmx_cfg.s.en = original_gmx_cfg.s.en;
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+	return result;
+}
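+
+/*
+ * Illustrative call sequence (an assumption, not from the original
+ * file): callers normally pair the get/set steps via
+ * cvmx_helper_link_autoconf() rather than invoking this helper directly:
+ *
+ *	cvmx_helper_link_info_t info;
+ *
+ *	info = __cvmx_helper_rgmii_link_get(ipd_port);
+ *	if (info.s.link_up)
+ *		__cvmx_helper_rgmii_link_set(ipd_port, info);
+ */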
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ *                 Non-zero to enable internal loopback
+ * @param enable_external
+ *                 Non-zero to enable external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
+					   int enable_external)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	int original_enable;
+	union cvmx_gmxx_prtx_cfg gmx_cfg;
+	union cvmx_asxx_prt_loop asxx_prt_loop;
+
+	/* Read the current enable state and save it */
+	gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+	original_enable = gmx_cfg.s.en;
+	/* Force port to be disabled */
+	gmx_cfg.s.en = 0;
+	if (enable_internal) {
+		/* Force speed if we're doing internal loopback */
+		gmx_cfg.s.duplex = 1;
+		gmx_cfg.s.slottime = 1;
+		gmx_cfg.s.speed = 1;
+		csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+	}
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+	/* Set the loopback bits */
+	asxx_prt_loop.u64 = csr_rd(CVMX_ASXX_PRT_LOOP(interface));
+	if (enable_internal)
+		asxx_prt_loop.s.int_loop |= 1 << index;
+	else
+		asxx_prt_loop.s.int_loop &= ~(1 << index);
+	if (enable_external)
+		asxx_prt_loop.s.ext_loop |= 1 << index;
+	else
+		asxx_prt_loop.s.ext_loop &= ~(1 << index);
+	csr_wr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
+
+	/* Force enables in internal loopback */
+	if (enable_internal) {
+		u64 tmp;
+
+		tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(interface));
+		csr_wr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
+		tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(interface));
+		csr_wr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
+		original_enable = 1;
+	}
+
+	/* Restore the enable state */
+	gmx_cfg.s.en = original_enable;
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+	return 0;
+}
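+
+/*
+ * Example (hypothetical values): put port 0 of interface 0 into internal
+ * loopback, so that packets transmitted by Octeon are received back by
+ * Octeon:
+ *
+ *	int ipd_port = cvmx_helper_get_ipd_port(0, 0);
+ *
+ *	__cvmx_helper_rgmii_configure_loopback(ipd_port, 1, 0);
+ */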
diff --git a/arch/mips/mach-octeon/cvmx-helper-sfp.c b/arch/mips/mach-octeon/cvmx-helper-sfp.c
new file mode 100644
index 0000000..a08a6cf
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-sfp.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <i2c.h>
+#include <log.h>
+#include <malloc.h>
+#include <linux/delay.h>
+#include <display_options.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-gpio.h>
+#include <mach/cvmx-helper-util.h>
+
+extern void octeon_i2c_unblock(int bus);
+
+static struct cvmx_fdt_sfp_info *sfp_list;
+
+/**
+ * Local allocator, shared by SE and U-Boot, that also zeroes out memory
+ *
+ * @param	size	number of bytes to allocate
+ *
+ * @return	pointer to allocated memory or NULL if out of memory.
+ *		Alignment is 8 bytes.
+ */
+static void *cvm_sfp_alloc(size_t size)
+{
+	return calloc(size, 1);
+}
+
+/**
+ * Free allocated memory.
+ *
+ * @param	ptr	pointer to memory to free
+ *
+ * NOTE: Memory is only truly freed in U-Boot, since SE has no freeing
+ *	 mechanism.  In SE the memory is zeroed out but not freed, so
+ *	 errors can leak memory.
+ */
+static inline void cvm_sfp_free(void *ptr, size_t size)
+{
+	free(ptr);
+}
+
+/**
+ * Select a QSFP device before accessing the EEPROM
+ *
+ * @param	sfp	handle for sfp/qsfp connector
+ * @param	enable	Set true to select, false to deselect
+ *
+ * @return	0 on success, or if the slot is an SFP or has no select
+ *		GPIO; -1 on GPIO error
+ */
+static int cvmx_qsfp_select(const struct cvmx_fdt_sfp_info *sfp, bool enable)
+{
+	/* Select is only needed for QSFP modules */
+	if (!sfp->is_qsfp) {
+		debug("%s(%s, %d): not QSFP\n", __func__, sfp->name, enable);
+		return 0;
+	}
+
+	if (dm_gpio_is_valid(&sfp->select)) {
+		/* Note that select is active low */
+		return dm_gpio_set_value(&sfp->select, !enable);
+	}
+
+	debug("%s: select GPIO unknown\n", __func__);
+	return 0;
+}
+
+static int cvmx_sfp_parse_sfp_buffer(struct cvmx_sfp_mod_info *sfp_info,
+				     const uint8_t *buffer)
+{
+	u8 csum = 0;
+	bool csum_good = false;
+	int i;
+
+	/* Validate the checksum */
+	for (i = 0; i < 0x3f; i++)
+		csum += buffer[i];
+	csum_good = csum == buffer[0x3f];
+	debug("%s: Lower checksum: 0x%02x, expected: 0x%02x\n", __func__, csum,
+	      buffer[0x3f]);
+	csum = 0;
+	for (i = 0x40; i < 0x5f; i++)
+		csum += buffer[i];
+	debug("%s: Upper checksum: 0x%02x, expected: 0x%02x\n", __func__, csum,
+	      buffer[0x5f]);
+	if (csum != buffer[0x5f] || !csum_good) {
+		debug("Error: SFP EEPROM checksum information is incorrect\n");
+		return -1;
+	}
+
+	sfp_info->conn_type = buffer[0];
+	if (buffer[1] < 1 || buffer[1] > 7) { /* Extended ID */
+		debug("Error: Unknown SFP extended identifier 0x%x\n",
+		      buffer[1]);
+		return -1;
+	}
+	if (buffer[1] != 4) {
+		debug("Module is not SFP/SFP+/SFP28/QSFP+\n");
+		return -1;
+	}
+	sfp_info->mod_type = buffer[2];
+	sfp_info->eth_comp = buffer[3] & 0xf0;
+	sfp_info->cable_comp = buffer[0x24];
+
+	/* There are several ways a cable can be marked as active or
+	 * passive.  8.[2-3] specify the SFP+ cable technology.  Some
+	 * modules also use 3.[0-1] for Infiniband, though it's
+	 * redundant.
+	 */
+	if ((buffer[8] & 0x0C) == 0x08) {
+		sfp_info->limiting = true;
+		sfp_info->active_cable = true;
+	} else if ((buffer[8] & 0xC) == 0x4) {
+		sfp_info->limiting = false;
+		sfp_info->active_cable = false;
+	}
+	if ((buffer[3] & 3) == 2) {
+		sfp_info->active_cable = true;
+		sfp_info->limiting = true;
+	}
+
+	switch (sfp_info->mod_type) {
+	case CVMX_SFP_MOD_OPTICAL_LC:
+	case CVMX_SFP_MOD_OPTICAL_PIGTAIL:
+		sfp_info->copper_cable = false;
+		break;
+	case CVMX_SFP_MOD_COPPER_PIGTAIL:
+		sfp_info->copper_cable = true;
+		break;
+	case CVMX_SFP_MOD_NO_SEP_CONN:
+		switch (sfp_info->cable_comp) {
+		case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_HIGH_BER:
+		case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_LOW_BER:
+		case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_LOW_BER:
+			sfp_info->copper_cable = false;
+			sfp_info->limiting = true;
+			sfp_info->active_cable = true;
+			break;
+
+		case CVMX_SFP_CABLE_100G_SR4_25G_SR:
+		case CVMX_SFP_CABLE_100G_LR4_25G_LR:
+		case CVMX_SFP_CABLE_100G_ER4_25G_ER:
+		case CVMX_SFP_CABLE_100G_SR10:
+		case CVMX_SFP_CABLE_100G_CWDM4_MSA:
+		case CVMX_SFP_CABLE_100G_PSM4:
+		case CVMX_SFP_CABLE_100G_CWDM4:
+		case CVMX_SFP_CABLE_40G_ER4:
+		case CVMX_SFP_CABLE_4X10G_SR:
+		case CVMX_SFP_CABLE_G959_1_P1I1_2D1:
+		case CVMX_SFP_CABLE_G959_1_P1S1_2D2:
+		case CVMX_SFP_CABLE_G959_1_P1L1_2D2:
+		case CVMX_SFP_CABLE_100G_CLR4:
+		case CVMX_SFP_CABLE_100G_2_LAMBDA_DWDM:
+		case CVMX_SFP_CABLE_40G_SWDM4:
+		case CVMX_SFP_CABLE_100G_SWDM4:
+		case CVMX_SFP_CABLE_100G_PAM4_BIDI:
+			sfp_info->copper_cable = false;
+			break;
+
+		case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_HIGH_BER:
+		case CVMX_SFP_CABLE_10GBASE_T:
+		case CVMX_SFP_CABLE_10GBASE_T_SR:
+		case CVMX_SFP_CABLE_5GBASE_T:
+		case CVMX_SFP_CABLE_2_5GBASE_T:
+			sfp_info->copper_cable = true;
+			sfp_info->limiting = true;
+			sfp_info->active_cable = true;
+			break;
+
+		case CVMX_SFP_CABLE_100G_CR4_25G_CR_CA_L:
+		case CVMX_SFP_CABLE_25G_CR_CA_S:
+		case CVMX_SFP_CABLE_25G_CR_CA_N:
+		case CVMX_SFP_CABLE_40G_PSM4:
+			sfp_info->copper_cable = true;
+			break;
+
+		default:
+			switch (sfp_info->eth_comp) {
+			case CVMX_SFP_CABLE_10GBASE_ER:
+			case CVMX_SFP_CABLE_10GBASE_LRM:
+			case CVMX_SFP_CABLE_10GBASE_LR:
+			case CVMX_SFP_CABLE_10GBASE_SR:
+				sfp_info->copper_cable = false;
+				break;
+			}
+			break;
+		}
+		break;
+
+	case CVMX_SFP_MOD_RJ45:
+		debug("%s: RJ45 adapter\n", __func__);
+		sfp_info->copper_cable = true;
+		sfp_info->active_cable = true;
+		sfp_info->limiting = true;
+		break;
+	case CVMX_SFP_MOD_UNKNOWN:
+		/* The Avago 1000Base-X to 1000Base-T module reports that it
+		 * is an unknown module type but the Ethernet compliance code
+		 * says it is 1000Base-T.  We'll change the reporting to RJ45.
+		 */
+		if (buffer[6] & 8) {
+			debug("RJ45 gigabit module detected\n");
+			sfp_info->mod_type = CVMX_SFP_MOD_RJ45;
+			sfp_info->copper_cable = false;
+			sfp_info->limiting = true;
+			sfp_info->active_cable = true;
+			sfp_info->max_copper_cable_len = buffer[0x12];
+			sfp_info->rate = CVMX_SFP_RATE_1G;
+		} else {
+			debug("Unknown module type 0x%x\n", sfp_info->mod_type);
+		}
+		sfp_info->limiting = true;
+		break;
+	case CVMX_SFP_MOD_MXC_2X16:
+		debug("%s: MXC 2X16\n", __func__);
+		break;
+	default:
+		sfp_info->limiting = true;
+		break;
+	}
+
+	if (sfp_info->copper_cable)
+		sfp_info->max_copper_cable_len = buffer[0x12];
+	else
+		sfp_info->max_50um_om4_cable_length = buffer[0x12] * 10;
+
+	if (buffer[0xe])
+		sfp_info->max_single_mode_cable_length = buffer[0xe] * 1000;
+	else
+		sfp_info->max_single_mode_cable_length = buffer[0xf] * 100000;
+
+	sfp_info->max_50um_om2_cable_length = buffer[0x10] * 10;
+	sfp_info->max_62_5um_om1_cable_length = buffer[0x11] * 10;
+	sfp_info->max_50um_om3_cable_length = buffer[0x13] * 10;
+
+	if (buffer[0xc] == 0xff) {
+		if (buffer[0x42] >= 255)
+			sfp_info->rate = CVMX_SFP_RATE_100G;
+		else if (buffer[0x42] >= 160)
+			sfp_info->rate = CVMX_SFP_RATE_40G;
+		else if (buffer[0x42] >= 100)
+			sfp_info->rate = CVMX_SFP_RATE_25G;
+		else
+			sfp_info->rate = CVMX_SFP_RATE_UNKNOWN;
+	} else if (buffer[0xc] >= 100) {
+		sfp_info->rate = CVMX_SFP_RATE_10G;
+	} else if (buffer[0xc] >= 10) {
+		sfp_info->rate = CVMX_SFP_RATE_1G;
+	} else {
+		sfp_info->rate = CVMX_SFP_RATE_UNKNOWN;
+	}
+
+	if (sfp_info->rate == CVMX_SFP_RATE_UNKNOWN) {
+		switch (sfp_info->cable_comp) {
+		case CVMX_SFP_CABLE_100G_SR10:
+		case CVMX_SFP_CABLE_100G_CWDM4_MSA:
+		case CVMX_SFP_CABLE_100G_PSM4:
+		case CVMX_SFP_CABLE_100G_CWDM4:
+		case CVMX_SFP_CABLE_100G_CLR4:
+		case CVMX_SFP_CABLE_100G_2_LAMBDA_DWDM:
+		case CVMX_SFP_CABLE_100G_SWDM4:
+		case CVMX_SFP_CABLE_100G_PAM4_BIDI:
+			sfp_info->rate = CVMX_SFP_RATE_100G;
+			break;
+		case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_HIGH_BER:
+		case CVMX_SFP_CABLE_100G_SR4_25G_SR:
+		case CVMX_SFP_CABLE_100G_LR4_25G_LR:
+		case CVMX_SFP_CABLE_100G_ER4_25G_ER:
+		case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_HIGH_BER:
+		case CVMX_SFP_CABLE_100G_CR4_25G_CR_CA_L:
+		case CVMX_SFP_CABLE_25G_CR_CA_S:
+		case CVMX_SFP_CABLE_25G_CR_CA_N:
+		case CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_LOW_BER:
+		case CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_LOW_BER:
+			sfp_info->rate = CVMX_SFP_RATE_25G;
+			break;
+		case CVMX_SFP_CABLE_40G_ER4:
+		case CVMX_SFP_CABLE_4X10G_SR:
+		case CVMX_SFP_CABLE_40G_PSM4:
+		case CVMX_SFP_CABLE_40G_SWDM4:
+			sfp_info->rate = CVMX_SFP_RATE_40G;
+			break;
+		case CVMX_SFP_CABLE_G959_1_P1I1_2D1:
+		case CVMX_SFP_CABLE_G959_1_P1S1_2D2:
+		case CVMX_SFP_CABLE_G959_1_P1L1_2D2:
+		case CVMX_SFP_CABLE_10GBASE_T:
+		case CVMX_SFP_CABLE_10GBASE_T_SR:
+		case CVMX_SFP_CABLE_5GBASE_T:
+		case CVMX_SFP_CABLE_2_5GBASE_T:
+			sfp_info->rate = CVMX_SFP_RATE_10G;
+			break;
+		default:
+			switch (sfp_info->eth_comp) {
+			case CVMX_SFP_CABLE_10GBASE_ER:
+			case CVMX_SFP_CABLE_10GBASE_LRM:
+			case CVMX_SFP_CABLE_10GBASE_LR:
+			case CVMX_SFP_CABLE_10GBASE_SR:
+				sfp_info->rate = CVMX_SFP_RATE_10G;
+				break;
+			default:
+				sfp_info->rate = CVMX_SFP_RATE_UNKNOWN;
+				break;
+			}
+			break;
+		}
+	}
+
+	if (buffer[0xc] < 0xff)
+		sfp_info->bitrate_max = buffer[0xc] * 100;
+	else
+		sfp_info->bitrate_max = buffer[0x42] * 250;
+
+	if ((buffer[8] & 0xc) == 8) {
+		if (buffer[0x3c] & 0x4)
+			sfp_info->limiting = true;
+	}
+
+	/* Currently we only set this for 25G.  FEC is required for CA-S cables
+	 * and for cable lengths >= 5M as of this writing.
+	 */
+	if ((sfp_info->rate == CVMX_SFP_RATE_25G &&
+	     sfp_info->copper_cable) &&
+	    (sfp_info->cable_comp == CVMX_SFP_CABLE_25G_CR_CA_S ||
+	     sfp_info->max_copper_cable_len >= 5))
+		sfp_info->fec_required = true;
+
+	/* copy strings and vendor info, strings will be automatically NUL
+	 * terminated.
+	 */
+	memcpy(sfp_info->vendor_name, &buffer[0x14], 16);
+	memcpy(sfp_info->vendor_oui, &buffer[0x25], 3);
+	memcpy(sfp_info->vendor_pn, &buffer[0x28], 16);
+	memcpy(sfp_info->vendor_rev, &buffer[0x38], 4);
+	memcpy(sfp_info->vendor_sn, &buffer[0x44], 16);
+	memcpy(sfp_info->date_code, &buffer[0x54], 8);
+
+	sfp_info->cooled_laser = !!(buffer[0x40] & 4);
+	sfp_info->internal_cdr = !!(buffer[0x40] & 8);
+
+	if (buffer[0x40] & 0x20)
+		sfp_info->power_level = 3;
+	else
+		sfp_info->power_level = (buffer[0x40] & 2) ? 2 : 1;
+
+	sfp_info->diag_paging = !!(buffer[0x40] & 0x10);
+	sfp_info->linear_rx_output = !(buffer[0x40] & 1);
+	sfp_info->los_implemented = !!(buffer[0x41] & 2);
+	sfp_info->los_inverted = !!(buffer[0x41] & 4);
+	sfp_info->tx_fault_implemented = !!(buffer[0x41] & 8);
+	sfp_info->tx_disable_implemented = !!(buffer[0x41] & 0x10);
+	sfp_info->rate_select_implemented = !!(buffer[0x41] & 0x20);
+	sfp_info->tuneable_transmitter = !!(buffer[0x41] & 0x40);
+	sfp_info->rx_decision_threshold_implemented = !!(buffer[0x41] & 0x80);
+
+	sfp_info->diag_monitoring = !!(buffer[0x5c] & 0x40);
+	sfp_info->diag_rx_power_averaged = !!(buffer[0x5c] & 0x8);
+	sfp_info->diag_externally_calibrated = !!(buffer[0x5c] & 0x10);
+	sfp_info->diag_internally_calibrated = !!(buffer[0x5c] & 0x20);
+	sfp_info->diag_addr_change_required = !!(buffer[0x5c] & 0x4);
+	sfp_info->diag_soft_rate_select_control = !!(buffer[0x5d] & 2);
+	sfp_info->diag_app_select_control = !!(buffer[0x5d] & 4);
+	sfp_info->diag_soft_rate_select_control = !!(buffer[0x5d] & 8);
+	sfp_info->diag_soft_rx_los_implemented = !!(buffer[0x5d] & 0x10);
+	sfp_info->diag_soft_tx_fault_implemented = !!(buffer[0x5d] & 0x20);
+	sfp_info->diag_soft_tx_disable_implemented = !!(buffer[0x5d] & 0x40);
+	sfp_info->diag_alarm_warning_flags_implemented =
+		!!(buffer[0x5d] & 0x80);
+	sfp_info->diag_rev = buffer[0x5e];
+
+	return 0;
+}
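+
+/*
+ * A worked example of the nominal-bitrate decode above (values are
+ * illustrative): byte 0x0c encodes the rate in units of 100 Mb/s, with
+ * 0xff redirecting to byte 0x42 in units of 250 Mb/s:
+ *
+ *	buffer[0x0c] = 103             -> 103 * 100 Mb/s = 10.3 Gb/s -> 10G
+ *	buffer[0x0c] = 0xff,
+ *	buffer[0x42] = 255             -> classified as 100G
+ */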
+
+static int cvmx_sfp_parse_qsfp_buffer(struct cvmx_sfp_mod_info *sfp_info,
+				      const uint8_t *buffer)
+{
+	u8 csum = 0;
+	bool csum_good = false;
+	int i;
+
+	/* Validate the checksum */
+	for (i = 0x80; i < 0xbf; i++)
+		csum += buffer[i];
+	csum_good = csum == buffer[0xbf];
+	debug("%s: Lower checksum: 0x%02x, expected: 0x%02x\n", __func__, csum,
+	      buffer[0xbf]);
+	csum = 0;
+	for (i = 0xc0; i < 0xdf; i++)
+		csum += buffer[i];
+	debug("%s: Upper checksum: 0x%02x, expected: 0x%02x\n", __func__, csum,
+	      buffer[0xdf]);
+	if (csum != buffer[0xdf] || !csum_good) {
+		debug("Error: SFP EEPROM checksum information is incorrect\n");
+		return -1;
+	}
+
+	sfp_info->conn_type = buffer[0x80];
+	sfp_info->mod_type = buffer[0x82];
+	sfp_info->eth_comp = buffer[0x83] & 0xf0;
+	sfp_info->cable_comp = buffer[0xa4];
+
+	switch (sfp_info->mod_type) {
+	case CVMX_SFP_MOD_COPPER_PIGTAIL:
+	case CVMX_SFP_MOD_NO_SEP_CONN:
+		debug("%s: copper pigtail or no separable cable\n", __func__);
+		/* There are several ways a cable can be marked as active or
+		 * passive.  8.[2-3] specify the SFP+ cable technology.  Some
+		 * modules also use 3.[0-1] for Infiniband, though it's
+		 * redundant.
+		 */
+		sfp_info->copper_cable = true;
+		if ((buffer[0x88] & 0x0C) == 0x08) {
+			sfp_info->limiting = true;
+			sfp_info->active_cable = true;
+		} else if ((buffer[0x88] & 0xC) == 0x4) {
+			sfp_info->limiting = false;
+			sfp_info->active_cable = false;
+		}
+		if ((buffer[0x83] & 3) == 2) {
+			sfp_info->active_cable = true;
+			sfp_info->limiting = true;
+		}
+		break;
+	case CVMX_SFP_MOD_RJ45:
+		debug("%s: RJ45 adapter\n", __func__);
+		sfp_info->copper_cable = true;
+		sfp_info->active_cable = true;
+		sfp_info->limiting = true;
+		break;
+	case CVMX_SFP_MOD_UNKNOWN:
+		debug("Unknown module type\n");
+		/* The Avago 1000Base-X to 1000Base-T module reports that it
+		 * is an unknown module type but the Ethernet compliance code
+		 * says it is 1000Base-T.  We'll change the reporting to RJ45.
+		 */
+		if (buffer[0x86] & 8) {
+			sfp_info->mod_type = CVMX_SFP_MOD_RJ45;
+			sfp_info->copper_cable = false;
+			sfp_info->limiting = true;
+			sfp_info->active_cable = true;
+			sfp_info->max_copper_cable_len = buffer[0x92];
+			sfp_info->rate = CVMX_SFP_RATE_1G;
+		}
+		fallthrough;
+	default:
+		sfp_info->limiting = true;
+		break;
+	}
+
+	if (sfp_info->copper_cable)
+		sfp_info->max_copper_cable_len = buffer[0x92];
+	else
+		sfp_info->max_50um_om4_cable_length = buffer[0x92] * 10;
+
+	debug("%s: copper cable: %d, max copper cable len: %d\n", __func__,
+	      sfp_info->copper_cable, sfp_info->max_copper_cable_len);
+	if (buffer[0x8e])
+		sfp_info->max_single_mode_cable_length = buffer[0x8e] * 1000;
+	else
+		sfp_info->max_single_mode_cable_length = buffer[0x8f] * 100000;
+
+	sfp_info->max_50um_om2_cable_length = buffer[0x90] * 10;
+	sfp_info->max_62_5um_om1_cable_length = buffer[0x91] * 10;
+	sfp_info->max_50um_om3_cable_length = buffer[0x93] * 10;
+
+	if (buffer[0x8c] == 12) {
+		sfp_info->rate = CVMX_SFP_RATE_1G;
+	} else if (buffer[0x8c] == 103) {
+		sfp_info->rate = CVMX_SFP_RATE_10G;
+	} else if (buffer[0x8c] == 0xff) {
+		if (buffer[0xc2] == 103)
+			sfp_info->rate = CVMX_SFP_RATE_100G;
+	}
+
+	if (buffer[0x8c] < 0xff)
+		sfp_info->bitrate_max = buffer[0x8c] * 100;
+	else
+		sfp_info->bitrate_max = buffer[0xc2] * 250;
+
+	if ((buffer[0x88] & 0xc) == 8) {
+		if (buffer[0xbc] & 0x4)
+			sfp_info->limiting = true;
+	}
+
+	/* copy strings and vendor info, strings will be automatically NUL
+	 * terminated.
+	 */
+	memcpy(sfp_info->vendor_name, &buffer[0x94], 16);
+	memcpy(sfp_info->vendor_oui, &buffer[0xa5], 3);
+	memcpy(sfp_info->vendor_pn, &buffer[0xa8], 16);
+	memcpy(sfp_info->vendor_rev, &buffer[0xb8], 4);
+	memcpy(sfp_info->vendor_sn, &buffer[0xc4], 16);
+	memcpy(sfp_info->date_code, &buffer[0xd4], 8);
+
+	sfp_info->linear_rx_output = !!(buffer[0xc0] & 1);
+	sfp_info->cooled_laser = !!(buffer[0xc0] & 4);
+	sfp_info->internal_cdr = !!(buffer[0xc0] & 8);
+
+	if (buffer[0xc0] & 0x20)
+		sfp_info->power_level = 3;
+	else
+		sfp_info->power_level = (buffer[0xc0] & 2) ? 2 : 1;
+
+	sfp_info->diag_paging = !!(buffer[0xc0] & 0x10);
+	sfp_info->los_implemented = !!(buffer[0xc1] & 2);
+	sfp_info->los_inverted = !!(buffer[0xc1] & 4);
+	sfp_info->tx_fault_implemented = !!(buffer[0xc1] & 8);
+	sfp_info->tx_disable_implemented = !!(buffer[0xc1] & 0x10);
+	sfp_info->rate_select_implemented = !!(buffer[0xc1] & 0x20);
+	sfp_info->tuneable_transmitter = !!(buffer[0xc1] & 0x40);
+	sfp_info->rx_decision_threshold_implemented = !!(buffer[0xc1] & 0x80);
+
+	sfp_info->diag_monitoring = !!(buffer[0xdc] & 0x40);
+	sfp_info->diag_rx_power_averaged = !!(buffer[0xdc] & 0x8);
+	sfp_info->diag_externally_calibrated = !!(buffer[0xdc] & 0x10);
+	sfp_info->diag_internally_calibrated = !!(buffer[0xdc] & 0x20);
+	sfp_info->diag_addr_change_required = !!(buffer[0xdc] & 0x4);
+	sfp_info->diag_soft_rate_select_control = !!(buffer[0xdd] & 2);
+	sfp_info->diag_app_select_control = !!(buffer[0xdd] & 4);
+	sfp_info->diag_soft_rate_select_control = !!(buffer[0xdd] & 8);
+	sfp_info->diag_soft_rx_los_implemented = !!(buffer[0xdd] & 0x10);
+	sfp_info->diag_soft_tx_fault_implemented = !!(buffer[0xdd] & 0x20);
+	sfp_info->diag_soft_tx_disable_implemented = !!(buffer[0xdd] & 0x40);
+	sfp_info->diag_alarm_warning_flags_implemented =
+		!!(buffer[0xdd] & 0x80);
+	sfp_info->diag_rev = buffer[0xde];
+
+	return 0;
+}
+
+static bool sfp_verify_checksum(const uint8_t *buffer)
+{
+	u8 csum = 0;
+	u8 offset;
+	bool csum_good = false;
+	int i;
+
+	switch (buffer[0]) {
+	case CVMX_SFP_CONN_QSFP:
+	case CVMX_SFP_CONN_QSFPP:
+	case CVMX_SFP_CONN_QSFP28:
+	case CVMX_SFP_CONN_MICRO_QSFP:
+	case CVMX_SFP_CONN_QSFP_DD:
+		offset = 0x80;
+		break;
+	default:
+		offset = 0;
+		break;
+	}
+	for (i = offset; i < offset + 0x3f; i++)
+		csum += buffer[i];
+	csum_good = csum == buffer[offset + 0x3f];
+	if (!csum_good) {
+		debug("%s: Lower checksum bad, got 0x%x, expected 0x%x\n",
+		      __func__, csum, buffer[offset + 0x3f]);
+		return false;
+	}
+	csum = 0;
+	for (i = offset + 0x40; i < offset + 0x5f; i++)
+		csum += buffer[i];
+	if (csum != buffer[offset + 0x5f]) {
+		debug("%s: Upper checksum bad, got 0x%x, expected 0x%x\n",
+		      __func__, csum, buffer[offset + 0x5f]);
+		return false;
+	}
+	return true;
+}
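+
+/*
+ * Layout assumed by the check above (per SFF-8472, with QSFP-style
+ * modules using the same layout shifted up by 0x80): each checksum byte
+ * is the low 8 bits of the sum over the preceding region:
+ *
+ *	SFP:  CC_BASE = sum(0x00..0x3e) at 0x3f,
+ *	      CC_EXT  = sum(0x40..0x5e) at 0x5f
+ *	QSFP: CC_BASE = sum(0x80..0xbe) at 0xbf,
+ *	      CC_EXT  = sum(0xc0..0xde) at 0xdf
+ */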
+
+/**
+ * Reads and parses SFP/QSFP EEPROM
+ *
+ * @param	sfp	sfp handle to read
+ *
+ * @return	0 for success, -1 on error.
+ */
+int cvmx_sfp_read_i2c_eeprom(struct cvmx_fdt_sfp_info *sfp)
+{
+	const struct cvmx_fdt_i2c_bus_info *bus = sfp->i2c_bus;
+	struct udevice *dev;
+	u8 buffer[256];
+	bool is_qsfp;
+	int oct_bus;
+	int retry;
+	int err;
+
+	if (!bus) {
+		debug("%s(%s): Error: i2c bus undefined for eeprom\n", __func__,
+		      sfp->name);
+		return -1;
+	}
+	/* Only resolve the root bus once we know the bus pointer is valid */
+	oct_bus = cvmx_fdt_i2c_get_root_bus(bus);
+
+	is_qsfp = (sfp->sfp_info.conn_type == CVMX_SFP_CONN_QSFP ||
+		   sfp->sfp_info.conn_type == CVMX_SFP_CONN_QSFPP ||
+		   sfp->sfp_info.conn_type == CVMX_SFP_CONN_QSFP28 ||
+		   sfp->sfp_info.conn_type == CVMX_SFP_CONN_MICRO_QSFP) ||
+		  sfp->is_qsfp;
+
+	err = cvmx_qsfp_select(sfp, true);
+	if (err) {
+		debug("%s: Error selecting SFP/QSFP slot\n", __func__);
+		return err;
+	}
+
+	debug("%s: Reading eeprom from i2c address %d:0x%x\n", __func__,
+	      oct_bus, sfp->i2c_eeprom_addr);
+	for (retry = 0; retry < 3; retry++) {
+		err = i2c_get_chip(bus->i2c_bus, sfp->i2c_eeprom_addr, 1, &dev);
+		if (err) {
+			debug("Cannot find I2C device: %d\n", err);
+			goto error;
+		}
+
+		err = dm_i2c_read(dev, 0, buffer, 256);
+		if (err || !sfp_verify_checksum(buffer)) {
+			debug("%s: Error %d reading eeprom at 0x%x, bus %d\n",
+			      __func__, err, sfp->i2c_eeprom_addr, oct_bus);
+			debug("%s: Retry %d\n", __func__, retry + 1);
+			mdelay(1000);
+		} else {
+			break;
+		}
+	}
+	if (err) {
+		debug("%s: Error reading eeprom from SFP %s\n", __func__,
+		      sfp->name);
+		/* Deselect the QSFP slot and mark the data invalid */
+		err = -1;
+		goto error;
+	}
+#ifdef DEBUG
+	print_buffer(0, buffer, 1, 256, 0);
+#endif
+	memset(&sfp->sfp_info, 0, sizeof(struct cvmx_sfp_mod_info));
+
+	switch (buffer[0]) {
+	case CVMX_SFP_CONN_SFP:
+		err = cvmx_sfp_parse_sfp_buffer(&sfp->sfp_info, buffer);
+		break;
+	case CVMX_SFP_CONN_QSFP:
+	case CVMX_SFP_CONN_QSFPP:
+	case CVMX_SFP_CONN_QSFP28:
+	case CVMX_SFP_CONN_MICRO_QSFP:
+		err = cvmx_sfp_parse_qsfp_buffer(&sfp->sfp_info, buffer);
+		break;
+	default:
+		debug("%s: Unknown SFP transceiver type 0x%x\n", __func__,
+		      buffer[0]);
+		err = -1;
+		break;
+	}
+
+error:
+	if (is_qsfp)
+		err |= cvmx_qsfp_select(sfp, false);
+
+	if (!err) {
+		sfp->valid = true;
+		sfp->sfp_info.valid = true;
+	} else {
+		sfp->valid = false;
+		sfp->sfp_info.valid = false;
+	}
+
+	return err;
+}
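+
+/*
+ * Usage sketch (assumed flow, not from the original file): slots are
+ * normally discovered by cvmx_sfp_parse_device_tree(), which reads every
+ * present module itself.  Re-reading a single slot by hand, given a node
+ * offset "off", looks like:
+ *
+ *	struct cvmx_fdt_sfp_info *sfp = cvmx_sfp_find_slot_by_fdt_node(off);
+ *
+ *	if (sfp && !cvmx_sfp_read_i2c_eeprom(sfp))
+ *		printf("module vendor: %.16s\n", sfp->sfp_info.vendor_name);
+ */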
+
+/**
+ * Function called to check and return the status of the mod_abs pin or
+ * mod_pres pin for QSFPs.
+ *
+ * @param	sfp	Handle to SFP information.
+ * @param	data	User-defined data passed to the function
+ *
+ * @return	value of the mod_abs pin (1 if the module is absent,
+ *		0 if present), or negative on error
+ */
+int cvmx_sfp_check_mod_abs(struct cvmx_fdt_sfp_info *sfp, void *data)
+{
+	int val;
+	int err = 0;
+	int mode;
+
+	if (!dm_gpio_is_valid(&sfp->mod_abs)) {
+		debug("%s: Error: mod_abs not set for %s\n", __func__,
+		      sfp->name);
+		return -1;
+	}
+	val = dm_gpio_get_value(&sfp->mod_abs);
+	debug("%s(%s, %p) mod_abs: %d\n", __func__, sfp->name, data, val);
+	if (val >= 0 && val != sfp->last_mod_abs && sfp->mod_abs_changed) {
+		err = 0;
+		if (!val) {
+			err = cvmx_sfp_read_i2c_eeprom(sfp);
+			if (err)
+				debug("%s: Error reading SFP %s EEPROM\n",
+				      __func__, sfp->name);
+		}
+		err = sfp->mod_abs_changed(sfp, val, sfp->mod_abs_changed_data);
+	}
+	debug("%s(%s (%p)): Last mod_abs: %d, current: %d, changed: %p, rc: %d, next: %p, caller: %p\n",
+	      __func__, sfp->name, sfp, sfp->last_mod_abs, val,
+	      sfp->mod_abs_changed, err, sfp->next_iface_sfp,
+	      __builtin_return_address(0));
+
+	if (err >= 0) {
+		sfp->last_mod_abs = val;
+		mode = cvmx_helper_interface_get_mode(sfp->xiface);
+		cvmx_sfp_validate_module(sfp, mode);
+	} else {
+		debug("%s: mod_abs_changed for %s returned error\n", __func__,
+		      sfp->name);
+	}
+
+	return err < 0 ? err : val;
+}
+
+/**
+ * Reads the EEPROMs of all SFP modules.
+ *
+ * @return 0 for success
+ */
+int cvmx_sfp_read_all_modules(void)
+{
+	struct cvmx_fdt_sfp_info *sfp;
+	int val;
+	bool error = false;
+	int rc;
+
+	for (sfp = sfp_list; sfp; sfp = sfp->next) {
+		if (dm_gpio_is_valid(&sfp->mod_abs)) {
+			/* Check if module absent */
+			val = dm_gpio_get_value(&sfp->mod_abs);
+			sfp->last_mod_abs = val;
+			if (val)
+				continue;
+		}
+		rc = cvmx_sfp_read_i2c_eeprom(sfp);
+		if (rc) {
+			debug("%s: Error reading eeprom from SFP %s\n",
+			      __func__, sfp->name);
+			error = true;
+		}
+	}
+
+	return error ? -1 : 0;
+}
+
+/**
+ * Registers a function to be called whenever the mod_abs/mod_pres signal
+ * changes.
+ *
+ * @param	sfp		Handle to SFP data structure
+ * @param	mod_abs_changed	Function called whenever mod_abs is changed
+ *				or NULL to remove.
+ * @param	mod_abs_changed_data	User-defined data passed to
+ *					mod_abs_changed
+ *
+ * @return	0 for success
+ *
+ * @NOTE: If multiple SFP slots are linked together, all subsequent slots
+ *	  will also be registered for the same handler.
+ */
+int cvmx_sfp_register_mod_abs_changed(struct cvmx_fdt_sfp_info *sfp,
+				      int (*mod_abs_changed)(struct cvmx_fdt_sfp_info *sfp,
+							     int val, void *data),
+				      void *mod_abs_changed_data)
+{
+	sfp->mod_abs_changed = mod_abs_changed;
+	sfp->mod_abs_changed_data = mod_abs_changed_data;
+
+	sfp->last_mod_abs = -2; /* undefined */
+
+	return 0;
+}
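+
+/*
+ * Example registration (the callback below is hypothetical): val carries
+ * the mod_abs pin state, so 1 means the module was removed and 0 means
+ * it was inserted:
+ *
+ *	static int my_mod_abs_cb(struct cvmx_fdt_sfp_info *sfp, int val,
+ *				 void *data)
+ *	{
+ *		printf("%s: module %s\n", sfp->name,
+ *		       val ? "removed" : "inserted");
+ *		return 0;
+ *	}
+ *
+ *	cvmx_sfp_register_mod_abs_changed(sfp, my_mod_abs_cb, NULL);
+ */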
+
+/**
+ * Parses a SFP slot from the device tree
+ *
+ * @param	sfp		SFP handle to store data in
+ * @param	node		ofnode of the SFP slot in the device tree
+ *
+ * @return	0 on success, -1 on error
+ */
+static int cvmx_sfp_parse_sfp(struct cvmx_fdt_sfp_info *sfp, ofnode node)
+{
+	struct ofnode_phandle_args phandle;
+	int err;
+
+	sfp->name = ofnode_get_name(node);
+	sfp->of_offset = ofnode_to_offset(node);
+
+	err = gpio_request_by_name_nodev(node, "tx_disable", 0,
+					 &sfp->tx_disable, GPIOD_IS_OUT);
+	if (err) {
+		printf("%s: tx_disable not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+	dm_gpio_set_value(&sfp->tx_disable, 0);
+
+	err = gpio_request_by_name_nodev(node, "mod_abs", 0,
+					 &sfp->mod_abs, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: mod_abs not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	err = gpio_request_by_name_nodev(node, "tx_error", 0,
+					 &sfp->tx_error, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: tx_error not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	err = gpio_request_by_name_nodev(node, "rx_los", 0,
+					 &sfp->rx_los, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: rx_los not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	err = ofnode_parse_phandle_with_args(node, "eeprom", NULL, 0, 0,
+					     &phandle);
+	if (!err) {
+		sfp->i2c_eeprom_addr = ofnode_get_addr(phandle.node);
+		debug("%s: eeprom address: 0x%x\n", __func__,
+		      sfp->i2c_eeprom_addr);
+
+		debug("%s: Getting eeprom i2c bus for %s\n", __func__,
+		      sfp->name);
+		sfp->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(phandle.node));
+	}
+
+	err = ofnode_parse_phandle_with_args(node, "diag", NULL, 0, 0,
+					     &phandle);
+	if (!err) {
+		sfp->i2c_diag_addr = ofnode_get_addr(phandle.node);
+		if (!sfp->i2c_bus)
+			sfp->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(phandle.node));
+	}
+
+	sfp->last_mod_abs = -2;
+	sfp->last_rx_los = -2;
+
+	if (!sfp->i2c_bus) {
+		debug("%s(%s): Error: could not get i2c bus from device tree\n",
+		      __func__, sfp->name);
+		err = -1;
+	}
+
+	if (err) {
+		dm_gpio_free(sfp->tx_disable.dev, &sfp->tx_disable);
+		dm_gpio_free(sfp->mod_abs.dev, &sfp->mod_abs);
+		dm_gpio_free(sfp->tx_error.dev, &sfp->tx_error);
+		dm_gpio_free(sfp->rx_los.dev, &sfp->rx_los);
+	} else {
+		sfp->valid = true;
+	}
+
+	return err;
+}
+
+/**
+ * Parses a QSFP slot from the device tree
+ *
+ * @param	sfp		SFP handle to store data in
+ * @param	node		ofnode of the QSFP slot in the device tree
+ *
+ * @return	0 on success, -1 on error
+ */
+static int cvmx_sfp_parse_qsfp(struct cvmx_fdt_sfp_info *sfp, ofnode node)
+{
+	struct ofnode_phandle_args phandle;
+	int err;
+
+	sfp->is_qsfp = true;
+	sfp->name = ofnode_get_name(node);
+	sfp->of_offset = ofnode_to_offset(node);
+
+	err = gpio_request_by_name_nodev(node, "lp_mode", 0,
+					 &sfp->lp_mode, GPIOD_IS_OUT);
+	if (err) {
+		printf("%s: lp_mode not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	err = gpio_request_by_name_nodev(node, "mod_prs", 0,
+					 &sfp->mod_abs, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: mod_prs not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	err = gpio_request_by_name_nodev(node, "select", 0,
+					 &sfp->select, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: select not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	err = gpio_request_by_name_nodev(node, "reset", 0,
+					 &sfp->reset, GPIOD_IS_OUT);
+	if (err) {
+		printf("%s: reset not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	err = gpio_request_by_name_nodev(node, "interrupt", 0,
+					 &sfp->interrupt, GPIOD_IS_IN);
+	if (err) {
+		printf("%s: interrupt not found in DT!\n", __func__);
+		return -ENODEV;
+	}
+
+	err = ofnode_parse_phandle_with_args(node, "eeprom", NULL, 0, 0,
+					     &phandle);
+	if (!err) {
+		sfp->i2c_eeprom_addr = ofnode_get_addr(phandle.node);
+		sfp->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(phandle.node));
+	}
+
+	err = ofnode_parse_phandle_with_args(node, "diag", NULL, 0, 0,
+					     &phandle);
+	if (!err) {
+		sfp->i2c_diag_addr = ofnode_get_addr(phandle.node);
+		if (!sfp->i2c_bus)
+			sfp->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(phandle.node));
+	}
+
+	sfp->last_mod_abs = -2;
+	sfp->last_rx_los = -2;
+
+	if (!sfp->i2c_bus) {
+		cvmx_printf("%s(%s): Error: could not get i2c bus from device tree\n",
+			    __func__, sfp->name);
+		err = -1;
+	}
+
+	if (err) {
+		dm_gpio_free(sfp->lp_mode.dev, &sfp->lp_mode);
+		dm_gpio_free(sfp->mod_abs.dev, &sfp->mod_abs);
+		dm_gpio_free(sfp->select.dev, &sfp->select);
+		dm_gpio_free(sfp->reset.dev, &sfp->reset);
+		dm_gpio_free(sfp->interrupt.dev, &sfp->interrupt);
+	} else {
+		sfp->valid = true;
+	}
+
+	return err;
+}
+
+/**
+ * Parses the device tree for SFP and QSFP slots
+ *
+ * @param	fdt_addr	Address of flat device-tree
+ *
+ * @return	0 for success, -1 on error
+ */
+int cvmx_sfp_parse_device_tree(const void *fdt_addr)
+{
+	struct cvmx_fdt_sfp_info *sfp, *first_sfp = NULL, *last_sfp = NULL;
+	ofnode node;
+	int err = 0;
+	int reg;
+	static bool parsed;
+
+	debug("%s(%p): Parsing...\n", __func__, fdt_addr);
+	if (parsed) {
+		debug("%s(%p): Already parsed\n", __func__, fdt_addr);
+		return 0;
+	}
+
+	ofnode_for_each_compatible_node(node, "ethernet,sfp-slot") {
+		if (!ofnode_valid(node))
+			continue;
+
+		sfp = cvm_sfp_alloc(sizeof(*sfp));
+		if (!sfp)
+			return -1;
+
+		err = cvmx_sfp_parse_sfp(sfp, node);
+		if (!err) {
+			if (!sfp_list)
+				sfp_list = sfp;
+			if (last_sfp)
+				last_sfp->next = sfp;
+			sfp->prev = last_sfp;
+			last_sfp = sfp;
+			debug("%s: parsed %s\n", __func__, sfp->name);
+		} else {
+			debug("%s: Error parsing SFP at node %s\n",
+			      __func__, ofnode_get_name(node));
+			return err;
+		}
+	}
+
+	ofnode_for_each_compatible_node(node, "ethernet,qsfp-slot") {
+		if (!ofnode_valid(node))
+			continue;
+
+		sfp = cvm_sfp_alloc(sizeof(*sfp));
+		if (!sfp)
+			return -1;
+
+		err = cvmx_sfp_parse_qsfp(sfp, node);
+		if (!err) {
+			if (!sfp_list)
+				sfp_list = sfp;
+			if (last_sfp)
+				last_sfp->next = sfp;
+			sfp->prev = last_sfp;
+			last_sfp = sfp;
+			debug("%s: parsed %s\n", __func__, sfp->name);
+		} else {
+			debug("%s: Error parsing QSFP at node %s\n",
+			      __func__, ofnode_get_name(node));
+			return err;
+		}
+	}
+
+	if (!octeon_has_feature(OCTEON_FEATURE_BGX))
+		return 0;
+
+	err = 0;
+	ofnode_for_each_compatible_node(node, "cavium,octeon-7890-bgx-port") {
+		ofnode sfp_ofnodes[4];
+		int num_sfp_nodes;
+		u64 reg_addr;
+		struct cvmx_xiface xi;
+		int xiface, index;
+		cvmx_helper_interface_mode_t mode;
+		int i;
+		int rc;
+
+		if (!ofnode_valid(node))
+			break;
+
+		num_sfp_nodes = ARRAY_SIZE(sfp_ofnodes);
+		rc = cvmx_ofnode_lookup_phandles(node, "sfp-slot",
+						 &num_sfp_nodes, sfp_ofnodes);
+		if (rc != 0 || num_sfp_nodes < 1)
+			rc = cvmx_ofnode_lookup_phandles(node, "qsfp-slot",
+							 &num_sfp_nodes,
+							 sfp_ofnodes);
+		/* If no SFP or QSFP slot found, go to next port */
+		if (rc < 0)
+			continue;
+
+		last_sfp = NULL;
+		for (i = 0; i < num_sfp_nodes; i++) {
+			sfp = cvmx_sfp_find_slot_by_fdt_node(ofnode_to_offset(sfp_ofnodes[i]));
+			debug("%s: Adding sfp %s (%p) to BGX port\n",
+			      __func__, sfp->name, sfp);
+			if (last_sfp)
+				last_sfp->next_iface_sfp = sfp;
+			else
+				first_sfp = sfp;
+			last_sfp = sfp;
+		}
+		if (!first_sfp) {
+			debug("%s: Error: could not find SFP slot for BGX port %s\n",
+			      __func__, ofnode_get_name(node));
+			err = -1;
+			break;
+		}
+
+		/* Get the port index */
+		reg = ofnode_get_addr(node);
+		if (reg < 0) {
+			debug("%s: Error: could not get BGX port reg value\n",
+			      __func__);
+			err = -1;
+			break;
+		}
+		index = reg;
+
+		/* Get BGX node and address */
+		reg_addr = ofnode_get_addr(ofnode_get_parent(node));
+		/* Extract node */
+		xi.node = cvmx_csr_addr_to_node(reg_addr);
+		/* Extract reg address */
+		reg_addr = cvmx_csr_addr_strip_node(reg_addr);
+		if ((reg_addr & 0xFFFFFFFFF0000000) !=
+		    0x00011800E0000000) {
+			debug("%s: Invalid BGX address 0x%llx\n",
+			      __func__, (unsigned long long)reg_addr);
+			xi.node = -1;
+			err = -1;
+			break;
+		}
+
+		/* Extract interface from address */
+		xi.interface = (reg_addr >> 24) & 0x0F;
+		/* Convert to xiface */
+		xiface = cvmx_helper_node_interface_to_xiface(xi.node,
+							      xi.interface);
+		debug("%s: Parsed %d SFP slots for interface 0x%x, index %d\n",
+		      __func__, num_sfp_nodes, xiface, index);
+
+		mode = cvmx_helper_interface_get_mode(xiface);
+		for (sfp = first_sfp; sfp; sfp = sfp->next_iface_sfp) {
+			sfp->xiface = xiface;
+			sfp->index = index;
+			/* Convert to IPD port */
+			sfp->ipd_port[0] =
+				cvmx_helper_get_ipd_port(xiface, index);
+			debug("%s: sfp %s (%p) xi: 0x%x, index: 0x%x, node: %d, mode: 0x%x, next: %p\n",
+			      __func__, sfp->name, sfp, sfp->xiface,
+			      sfp->index, xi.node, mode,
+			      sfp->next_iface_sfp);
+			if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI ||
+			    mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4)
+				for (i = 1; i < 4; i++)
+					sfp->ipd_port[i] = -1;
+			else
+				for (i = 1; i < 4; i++)
+					sfp->ipd_port[i] =
+						cvmx_helper_get_ipd_port(
+							xiface, i);
+		}
+		cvmx_helper_cfg_set_sfp_info(xiface, index, first_sfp);
+	}
+
+	if (!err) {
+		parsed = true;
+		cvmx_sfp_read_all_modules();
+	}
+
+	return err;
+}
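+
+/*
+ * Worked example of the BGX address decode above (illustrative): a port
+ * whose parent BGX node has reg address 0x00011800e1000000 passes the
+ * 0x00011800e0000000 range check, and
+ *
+ *	xi.interface = (0x00011800e1000000 >> 24) & 0xf = 1
+ *
+ * i.e. its SFP slots are attached to BGX1 on that node.
+ */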
+
+/**
+ * Given a fdt node offset find the corresponding SFP or QSFP slot
+ *
+ * @param	of_offset	flat device tree node offset
+ *
+ * @return	pointer to SFP data structure or NULL if not found
+ */
+struct cvmx_fdt_sfp_info *cvmx_sfp_find_slot_by_fdt_node(int of_offset)
+{
+	struct cvmx_fdt_sfp_info *sfp = sfp_list;
+
+	while (sfp) {
+		if (sfp->of_offset == of_offset)
+			return sfp;
+		sfp = sfp->next;
+	}
+	return NULL;
+}
+
+static bool cvmx_sfp_validate_quad(struct cvmx_fdt_sfp_info *sfp,
+				   struct cvmx_phy_gpio_leds *leds)
+{
+	bool multi_led = leds && (leds->next);
+	bool error = false;
+	int mod_abs;
+
+	do {
+		/* Skip missing modules */
+		if (dm_gpio_is_valid(&sfp->mod_abs))
+			mod_abs = dm_gpio_get_value(&sfp->mod_abs);
+		else
+			mod_abs = 0;
+		if (!mod_abs) {
+			if (cvmx_sfp_read_i2c_eeprom(sfp)) {
+				debug("%s: Error reading eeprom for %s\n",
+				      __func__, sfp->name);
+			}
+			if (sfp->sfp_info.rate < CVMX_SFP_RATE_10G) {
+				cvmx_helper_leds_show_error(leds, true);
+				error = true;
+			} else if (sfp->sfp_info.rate >= CVMX_SFP_RATE_10G) {
+				/* We don't support 10GBase-T modules in
+				 * this mode.
+				 */
+				switch (sfp->sfp_info.cable_comp) {
+				case CVMX_SFP_CABLE_10GBASE_T:
+				case CVMX_SFP_CABLE_10GBASE_T_SR:
+				case CVMX_SFP_CABLE_5GBASE_T:
+				case CVMX_SFP_CABLE_2_5GBASE_T:
+					cvmx_helper_leds_show_error(leds, true);
+					error = true;
+					break;
+				default:
+					break;
+				}
+			}
+		} else if (multi_led) {
+			cvmx_helper_leds_show_error(leds, false);
+		}
+
+		if (multi_led && leds->next)
+			leds = leds->next;
+		sfp = sfp->next_iface_sfp;
+	} while (sfp);
+
+	if (!multi_led)
+		cvmx_helper_leds_show_error(leds, error);
+
+	return error;
+}
+
+/**
+ * Validates if the module is correct for the specified port
+ *
+ * @param[in]	sfp	SFP port to check
+ * @param	mode	interface mode
+ *
+ * @return	true if module is valid, false if invalid
+ * NOTE: This will also toggle the error LED, if present
+ */
+bool cvmx_sfp_validate_module(struct cvmx_fdt_sfp_info *sfp, int mode)
+{
+	const struct cvmx_sfp_mod_info *mod_info;
+	struct cvmx_phy_gpio_leds *leds;
+	bool error = false;
+	bool quad_mode = false;
+	int xiface, index;
+
+	/* Check for NULL before any dereference of sfp */
+	if (!sfp) {
+		debug("%s: Error: sfp is NULL\n", __func__);
+		return false;
+	}
+	mod_info = &sfp->sfp_info;
+	xiface = sfp->xiface;
+	index = sfp->index;
+
+	debug("%s(%s, 0x%x, 0x%x, 0x%x)\n", __func__, sfp->name, xiface, index,
+	      mode);
+	/* A slot with no module present is still considered valid */
+	leds = cvmx_helper_get_port_phy_leds(xiface, index);
+	if (!leds)
+		debug("%s: No leds for 0x%x:0x%x\n", __func__, xiface, index);
+
+	if (mode != CVMX_HELPER_INTERFACE_MODE_XLAUI &&
+	    mode != CVMX_HELPER_INTERFACE_MODE_40G_KR4 && !sfp->is_qsfp &&
+	    sfp->last_mod_abs && leds) {
+		cvmx_helper_leds_show_error(leds, false);
+		debug("%s: %s: last_mod_abs: %d, no error\n", __func__,
+		      sfp->name, sfp->last_mod_abs);
+		return true;
+	}
+
+	switch (mode) {
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
+	case CVMX_HELPER_INTERFACE_MODE_AGL:
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		if ((mod_info->active_cable &&
+		     mod_info->rate != CVMX_SFP_RATE_1G) ||
+		    mod_info->rate < CVMX_SFP_RATE_1G)
+			error = true;
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+	case CVMX_HELPER_INTERFACE_MODE_XFI:
+		if ((mod_info->active_cable &&
+		     mod_info->rate != CVMX_SFP_RATE_10G) ||
+		    mod_info->rate < CVMX_SFP_RATE_10G)
+			error = true;
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+		if (!sfp->is_qsfp) {
+			quad_mode = true;
+			error = cvmx_sfp_validate_quad(sfp, leds);
+		} else {
+			if ((mod_info->active_cable &&
+			     mod_info->rate != CVMX_SFP_RATE_40G) ||
+			    mod_info->rate < CVMX_SFP_RATE_25G)
+				error = true;
+		}
+		break;
+	default:
+		debug("%s: Unsupported interface mode %d on xiface 0x%x\n",
+		      __func__, mode, xiface);
+		return false;
+	}
+	debug("%s: %s: error: %d\n", __func__, sfp->name, error);
+	if (leds && !quad_mode)
+		cvmx_helper_leds_show_error(leds, error);
+
+	return !error;
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-sgmii.c b/arch/mips/mach-octeon/cvmx-helper-sgmii.c
new file mode 100644
index 0000000..b789ad5
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-sgmii.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+/**
+ * @INTERNAL
+ * Perform initialization required only once for an SGMII port.
+ *
+ * @param interface Interface to initialize
+ * @param index     Index of the port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
+{
+	const u64 clock_mhz = 1200; /* todo: fixme */
+	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+	union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
+	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+	if (!cvmx_helper_is_port_valid(interface, index))
+		return 0;
+
+	/* Disable GMX */
+	gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+	gmxx_prtx_cfg.s.en = 0;
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+	/*
+	 * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
+	 * appropriate value. 1000BASE-X specifies a 10ms
+	 * interval. SGMII specifies a 1.6ms interval.
+	 */
+	pcsx_miscx_ctl_reg.u64 =
+		csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+	/* Adjust the MAC mode if requested by device tree */
+	pcsx_miscx_ctl_reg.s.mac_phy =
+		cvmx_helper_get_mac_phy_mode(interface, index);
+	pcsx_miscx_ctl_reg.s.mode =
+		cvmx_helper_get_1000x_mode(interface, index);
+	csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+	       pcsx_miscx_ctl_reg.u64);
+
+	pcsx_linkx_timer_count_reg.u64 =
+		csr_rd(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
+	if (pcsx_miscx_ctl_reg.s.mode)
+		/* 1000BASE-X */
+		pcsx_linkx_timer_count_reg.s.count =
+			(10000ull * clock_mhz) >> 10;
+	else
+		/* SGMII */
+		pcsx_linkx_timer_count_reg.s.count =
+			(1600ull * clock_mhz) >> 10;
+
+	csr_wr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
+	       pcsx_linkx_timer_count_reg.u64);
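+
+	/*
+	 * Worked example with the assumed 1200 MHz clock above: for the
+	 * SGMII 1.6 ms interval,
+	 * count = (1600 us * 1200 cycles/us) >> 10 = 1875,
+	 * i.e. the timer counts in blocks of 1024 coprocessor clocks.
+	 */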
+
+	/*
+	 * Write the advertisement register to be used as the
+	 * tx_Config_Reg<D15:D0> of the autonegotiation.  In
+	 * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
+	 * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
+	 * PCS*_SGM*_AN_ADV_REG.  In SGMII MAC mode,
+	 * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
+	 * step can be skipped.
+	 */
+	if (pcsx_miscx_ctl_reg.s.mode) {
+		/* 1000BASE-X */
+		union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
+
+		pcsx_anx_adv_reg.u64 =
+			csr_rd(CVMX_PCSX_ANX_ADV_REG(index, interface));
+		pcsx_anx_adv_reg.s.rem_flt = 0;
+		pcsx_anx_adv_reg.s.pause = 3;
+		pcsx_anx_adv_reg.s.hfd = 1;
+		pcsx_anx_adv_reg.s.fd = 1;
+		csr_wr(CVMX_PCSX_ANX_ADV_REG(index, interface),
+		       pcsx_anx_adv_reg.u64);
+	} else {
+		if (pcsx_miscx_ctl_reg.s.mac_phy) {
+			/* PHY Mode */
+			union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
+
+			pcsx_sgmx_an_adv_reg.u64 = csr_rd(
+				CVMX_PCSX_SGMX_AN_ADV_REG(index, interface));
+			pcsx_sgmx_an_adv_reg.s.dup = 1;
+			pcsx_sgmx_an_adv_reg.s.speed = 2;
+			csr_wr(CVMX_PCSX_SGMX_AN_ADV_REG(index, interface),
+			       pcsx_sgmx_an_adv_reg.u64);
+		} else {
+			/* MAC Mode - Nothing to do */
+		}
+	}
+	return 0;
+}
+
+static int __cvmx_helper_need_g15618(void)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+	    OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X) ||
+	    OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * @INTERNAL
+ * Initialize the SERTES link for the first time or after a loss
+ * of link.
+ *
+ * @param interface Interface to initialize
+ * @param index     Index of the port on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
+{
+	union cvmx_pcsx_mrx_control_reg control_reg;
+	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+	bool phy_mode;
+	bool an_disable; /* Disable autonegotiation */
+	bool mode_1000x; /* 1000Base-X mode */
+
+	if (!cvmx_helper_is_port_valid(interface, index))
+		return 0;
+
+	/*
+	 * Take PCS through a reset sequence.
+	 * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
+	 * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
+	 * value of the other PCS*_MR*_CONTROL_REG bits).  Read
+	 * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
+	 * zero.
+	 */
+	control_reg.u64 = csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+
+	/*
+	 * Errata G-15618 requires disabling PCS soft reset in CN63XX
+	 * pass up to 2.1.
+	 */
+	if (!__cvmx_helper_need_g15618()) {
+		control_reg.s.reset = 1;
+		csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+		       control_reg.u64);
+		if (CVMX_WAIT_FOR_FIELD64(
+			    CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+			    cvmx_pcsx_mrx_control_reg_t, reset, ==, 0, 10000)) {
+			debug("SGMII%x: Timeout waiting for port %d to finish reset\n",
+			      interface, index);
+			return -1;
+		}
+	}
+
+	/*
+	 * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
+	 * sgmii negotiation starts.
+	 */
+	phy_mode = cvmx_helper_get_mac_phy_mode(interface, index);
+	an_disable = (phy_mode ||
+		      !cvmx_helper_get_port_autonegotiation(interface, index));
+
+	control_reg.s.an_en = !an_disable;
+
+	/* Force a PCS reset by powering down the PCS interface
+	 * This is needed to deal with broken Qualcomm/Atheros PHYs and switches
+	 * which never recover if PCS is not power cycled.  The alternative
+	 * is to power cycle or hardware reset the Qualcomm devices whenever
+	 * SGMII is initialized.
+	 *
+	 * This is needed for the QCA8033 PHYs as well as the QCA833X switches
+	 * to work.  The QCA8337 switch has additional SGMII problems and is
+	 * best avoided if at all possible.  Failure to power cycle PCS prevents
+	 * any traffic from flowing between Octeon and Qualcomm devices if there
+	 * is a warm reset.  Even a software reset to the Qualcomm device will
+	 * not work.
+	 *
+	 * Note that this problem has been reported between Qualcomm and other
+	 * vendors' processors as well, so this problem is not unique to
+	 * Qualcomm and Octeon.
+	 *
+	 * Power cycling PCS doesn't hurt anything with non-Qualcomm devices
+	 * other than adding a 25ms delay during initialization.
+	 */
+	control_reg.s.pwr_dn = 1;
+	csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
+	csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+
+	/* 25ms should be enough, 10ms is too short */
+	mdelay(25);
+
+	control_reg.s.pwr_dn = 0;
+	csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
+
+	/* The Cortina PHY runs in 1000base-X mode */
+	mode_1000x = cvmx_helper_get_1000x_mode(interface, index);
+	pcsx_miscx_ctl_reg.u64 =
+		csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+	pcsx_miscx_ctl_reg.s.mode = mode_1000x;
+	pcsx_miscx_ctl_reg.s.mac_phy = phy_mode;
+	csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+	       pcsx_miscx_ctl_reg.u64);
+	if (an_disable)
+		/* In PHY mode we can't query the link status so we just
+		 * assume that the link is up.
+		 */
+		return 0;
+
+	/*
+	 * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
+	 * that sgmii autonegotiation is complete. In MAC mode this
+	 * isn't an ethernet link, but a link between Octeon and the
+	 * PHY.
+	 */
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
+				  union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
+				  10000)) {
+		debug("SGMII%x: Port %d link timeout\n", interface, index);
+		return -1;
+	}
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Configure an SGMII link to the specified speed after the SERDES
+ * link is up.
+ *
+ * @param interface to init
+ * @param index     Index of port on the interface
+ * @param link_info Link state to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+static int
+__cvmx_helper_sgmii_hardware_init_link_speed(int interface, int index,
+					     cvmx_helper_link_info_t link_info)
+{
+	int is_enabled;
+	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+	if (!cvmx_helper_is_port_valid(interface, index))
+		return 0;
+
+	/* Disable GMX before we make any changes. Remember the enable state */
+	gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+	is_enabled = gmxx_prtx_cfg.s.en;
+	gmxx_prtx_cfg.s.en = 0;
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+	/* Wait for GMX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
+				  cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1,
+				  10000) ||
+	    CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
+				  cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1,
+				  10000)) {
+		debug("SGMII%d: Timeout waiting for port %d to be idle\n",
+		      interface, index);
+		return -1;
+	}
+
+	/* Read GMX CFG again to make sure the disable completed */
+	gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+
+	/*
+	 * Get the misc control for PCS. We will need to set the
+	 * duplication amount.
+	 */
+	pcsx_miscx_ctl_reg.u64 =
+		csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+	/*
+	 * Use GMXENO to force the link down if the status we get says
+	 * it should be down.
+	 */
+	pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
+
+	/* Only change the duplex setting if the link is up */
+	if (link_info.s.link_up)
+		gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
+
+	/* Do speed-based setting for GMX */
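+	/*
+	 * At 10/100 Mbps SGMII repeats each byte on the wire (100x and
+	 * 10x respectively); SAMP_PT presumably selects the sampling
+	 * point within that repetition, hence the speed-dependent values.
+	 */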
+	switch (link_info.s.speed) {
+	case 10:
+		gmxx_prtx_cfg.s.speed = 0;
+		gmxx_prtx_cfg.s.speed_msb = 1;
+		gmxx_prtx_cfg.s.slottime = 0;
+		/* Setting from GMX-603 */
+		pcsx_miscx_ctl_reg.s.samp_pt = 25;
+		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+		break;
+	case 100:
+		gmxx_prtx_cfg.s.speed = 0;
+		gmxx_prtx_cfg.s.speed_msb = 0;
+		gmxx_prtx_cfg.s.slottime = 0;
+		pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
+		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+		break;
+	case 1000:
+		gmxx_prtx_cfg.s.speed = 1;
+		gmxx_prtx_cfg.s.speed_msb = 0;
+		gmxx_prtx_cfg.s.slottime = 1;
+		pcsx_miscx_ctl_reg.s.samp_pt = 1;
+		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
+		if (gmxx_prtx_cfg.s.duplex)
+			/* full duplex */
+			csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+		else
+			/* half duplex */
+			csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
+		break;
+	default:
+		break;
+	}
+
+	/* Write the new misc control for PCS */
+	csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+	       pcsx_miscx_ctl_reg.u64);
+
+	/* Write the new GMX settings with the port still disabled */
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+	/* Read GMX CFG again to make sure the config completed */
+	gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+
+	/* Restore the enabled / disabled state */
+	gmxx_prtx_cfg.s.en = is_enabled;
+	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bring up the SGMII interface to be ready for packet I/O but
+ * leave I/O disabled using the GMX override. This function
+ * follows the bringup documented in 10.6.3 of the manual.
+ *
+ * @param interface to bringup
+ * @param num_ports Number of ports on the interface
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
+{
+	int index;
+	int do_link_set = 1;
+
+	/*
+	 * CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis
+	 * be programmed.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
+		union cvmx_ciu_qlm2 ciu_qlm;
+
+		ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
+		ciu_qlm.s.txbypass = 1;
+		ciu_qlm.s.txdeemph = 0xf;
+		ciu_qlm.s.txmargin = 0xd;
+		csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
+	}
+
+	/*
+	 * CN63XX Pass 2.x errata G-15273 requires the QLM De-emphasis
+	 * be programmed when using a 156.25 MHz ref clock.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X)) {
+		/* Read the QLM speed pins */
+		union cvmx_mio_rst_boot mio_rst_boot;
+
+		mio_rst_boot.u64 = csr_rd(CVMX_MIO_RST_BOOT);
+
+		if (mio_rst_boot.cn63xx.qlm2_spd == 4) {
+			union cvmx_ciu_qlm2 ciu_qlm;
+
+			ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
+			ciu_qlm.s.txbypass = 1;
+			ciu_qlm.s.txdeemph = 0x0;
+			ciu_qlm.s.txmargin = 0xf;
+			csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
+		}
+	}
+
+	__cvmx_helper_setup_gmx(interface, num_ports);
+
+	for (index = 0; index < num_ports; index++) {
+		int ipd_port = cvmx_helper_get_ipd_port(interface, index);
+
+		if (!cvmx_helper_is_port_valid(interface, index))
+			continue;
+		__cvmx_helper_sgmii_hardware_init_one_time(interface, index);
+		if (do_link_set)
+			__cvmx_helper_sgmii_link_set(ipd_port,
+						     __cvmx_helper_sgmii_link_get(ipd_port));
+	}
+
+	return 0;
+}
+
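+/*
+ * Number of SGMII ports presented by an interface: CNF71XX has two,
+ * CN70XX depends on the DLM mode (one for SGMII, four for QSGMII,
+ * otherwise none), and all other models default to four.
+ */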
+int __cvmx_helper_sgmii_enumerate(int xiface)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
+		return 2;
+	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		struct cvmx_xiface xi =
+			cvmx_helper_xiface_to_node_interface(xiface);
+		enum cvmx_qlm_mode qlm_mode =
+			cvmx_qlm_get_dlm_mode(0, xi.interface);
+
+		if (qlm_mode == CVMX_QLM_MODE_SGMII)
+			return 1;
+		else if (qlm_mode == CVMX_QLM_MODE_QSGMII)
+			return 4;
+		return 0;
+	}
+	return 4;
+}
+
+/**
+ * @INTERNAL
+ * Probe an SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_sgmii_probe(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+	union cvmx_gmxx_inf_mode mode;
+	int ports;
+
+	/*
+	 * Check if the QLM is configured correctly for SGMII; verify the
+	 * speed as well as the mode.
+	 */
+	if (OCTEON_IS_OCTEON2()) {
+		int qlm = cvmx_qlm_interface(xiface);
+
+		if (cvmx_qlm_get_mode(qlm) != CVMX_QLM_MODE_SGMII)
+			return 0;
+	}
+
+	/* Do not enable the interface if it is not in SGMII mode */
+	ports = __cvmx_helper_sgmii_enumerate(xiface);
+
+	if (ports <= 0)
+		return 0;
+
+	/*
+	 * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+	 * interface needs to be enabled before IPD, otherwise per-port
+	 * backpressure may not work properly.
+	 */
+	mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
+	mode.s.en = 1;
+	csr_wr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+
+	return ports;
+}
+
+/**
+ * @INTERNAL
+ * Bring up and enable an SGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_enable(int xiface)
+{
+	int num_ports = cvmx_helper_ports_on_interface(xiface);
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+	int index;
+
+	/* Setup PKND and BPID */
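+	/*
+	 * PKND ("port kind") tags traffic for packet-input classification
+	 * and BPID is the backpressure channel ID on PKND-capable models.
+	 */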
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		for (index = 0; index < num_ports; index++) {
+			union cvmx_gmxx_bpid_msk bpid_msk;
+			union cvmx_gmxx_bpid_mapx bpid_map;
+			union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+			if (!cvmx_helper_is_port_valid(interface, index))
+				continue;
+			/* Setup PKIND */
+			gmxx_prtx_cfg.u64 =
+				csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+			gmxx_prtx_cfg.s.pknd =
+				cvmx_helper_get_pknd(interface, index);
+			csr_wr(CVMX_GMXX_PRTX_CFG(index, interface),
+			       gmxx_prtx_cfg.u64);
+
+			/* Setup BPID */
+			bpid_map.u64 =
+				csr_rd(CVMX_GMXX_BPID_MAPX(index, interface));
+			bpid_map.s.val = 1;
+			bpid_map.s.bpid =
+				cvmx_helper_get_bpid(interface, index);
+			csr_wr(CVMX_GMXX_BPID_MAPX(index, interface),
+			       bpid_map.u64);
+
+			bpid_msk.u64 = csr_rd(CVMX_GMXX_BPID_MSK(interface));
+			bpid_msk.s.msk_or |= (1 << index);
+			bpid_msk.s.msk_and &= ~(1 << index);
+			csr_wr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);
+		}
+	}
+
+	__cvmx_helper_sgmii_hardware_init(interface, num_ports);
+
+	/* CN68XX adds the padding and FCS in PKO, not GMX */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_gmxx_txx_append gmxx_txx_append_cfg;
+
+		for (index = 0; index < num_ports; index++) {
+			if (!cvmx_helper_is_port_valid(interface, index))
+				continue;
+			gmxx_txx_append_cfg.u64 =
+				csr_rd(CVMX_GMXX_TXX_APPEND(index, interface));
+			gmxx_txx_append_cfg.s.fcs = 0;
+			gmxx_txx_append_cfg.s.pad = 0;
+			csr_wr(CVMX_GMXX_TXX_APPEND(index, interface),
+			       gmxx_txx_append_cfg.u64);
+		}
+	}
+
+	/* Enable running disparity check for QSGMII interface */
+	if (OCTEON_IS_MODEL(OCTEON_CN70XX) && num_ports > 1) {
+		union cvmx_gmxx_qsgmii_ctl qsgmii_ctl;
+
+		qsgmii_ctl.u64 = 0;
+		qsgmii_ctl.s.disparity = 1;
+		csr_wr(CVMX_GMXX_QSGMII_CTL(interface), qsgmii_ctl.u64);
+	}
+
+	for (index = 0; index < num_ports; index++) {
+		union cvmx_gmxx_txx_append append_cfg;
+		union cvmx_gmxx_txx_sgmii_ctl sgmii_ctl;
+		union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+		if (!cvmx_helper_is_port_valid(interface, index))
+			continue;
+		/*
+		 * Clear the align bit if preamble is set to attain
+		 * maximum tx rate.
+		 */
+		append_cfg.u64 = csr_rd(CVMX_GMXX_TXX_APPEND(index, interface));
+		sgmii_ctl.u64 =
+			csr_rd(CVMX_GMXX_TXX_SGMII_CTL(index, interface));
+		sgmii_ctl.s.align = append_cfg.s.preamble ? 0 : 1;
+		csr_wr(CVMX_GMXX_TXX_SGMII_CTL(index, interface),
+		       sgmii_ctl.u64);
+
+		gmxx_prtx_cfg.u64 =
+			csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
+		gmxx_prtx_cfg.s.en = 1;
+		csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+	}
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+	int speed = 1000;
+	int qlm;
+
+	result.u64 = 0;
+
+	if (!cvmx_helper_is_port_valid(interface, index))
+		return result;
+
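+	/*
+	 * Derive the per-port speed from the QLM/DLM baud rate where
+	 * needed; the "* 8 / 10" factor strips the 8b/10b line-coding
+	 * overhead.
+	 */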
+	if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
+		union cvmx_gmxx_inf_mode inf_mode;
+
+		inf_mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
+		if (inf_mode.s.rate & (1 << index))
+			speed = 2500;
+		else
+			speed = 1000;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		qlm = cvmx_qlm_interface(interface);
+		speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
+	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
+		speed = cvmx_qlm_get_gbaud_mhz(0) * 8 / 10;
+	} else if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		speed = cvmx_qlm_get_gbaud_mhz(0) * 8 / 10;
+		if (cvmx_qlm_get_dlm_mode(0, interface) == CVMX_QLM_MODE_SGMII)
+			speed >>= 1;
+		else
+			speed >>= 2;
+	}
+
+	pcsx_mrx_control_reg.u64 =
+		csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+	if (pcsx_mrx_control_reg.s.loopbck1) {
+		/* Force 1Gbps full duplex link for internal loopback */
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = speed;
+		return result;
+	}
+
+	pcsx_miscx_ctl_reg.u64 =
+		csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+	if (pcsx_miscx_ctl_reg.s.mac_phy ||
+	    cvmx_helper_get_port_force_link_up(interface, index)) {
+		/* PHY Mode */
+		/* Note that this also works for 1000base-X mode */
+
+		result.s.speed = speed;
+		result.s.full_duplex = 1;
+		result.s.link_up = 1;
+		return result;
+	}
+
+	/* MAC Mode */
+	return __cvmx_helper_board_link_get(ipd_port);
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port  IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_link_set(int ipd_port,
+				 cvmx_helper_link_info_t link_info)
+{
+	union cvmx_pcsx_mrx_control_reg control_reg;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	if (!cvmx_helper_is_port_valid(interface, index))
+		return 0;
+
+	/* For some devices, e.g. the Qualcomm QCA8337 switch, we need to power
+	 * down the PCS interface when the link goes down and power it back
+	 * up when the link returns.
+	 */
+	if (link_info.s.link_up || !__cvmx_helper_need_g15618()) {
+		__cvmx_helper_sgmii_hardware_init_link(interface, index);
+	} else {
+		union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+		pcsx_miscx_ctl_reg.u64 =
+			csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+		/* Disable autonegotiation when MAC mode is enabled or
+		 * autonegotiation is disabled.
+		 */
+		control_reg.u64 =
+			csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+		if (pcsx_miscx_ctl_reg.s.mac_phy == 0 ||
+		    !cvmx_helper_get_port_autonegotiation(interface, index)) {
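+			/* Force 1000 Mbps (spdmsb:spdlsb = 1:0), full duplex */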
+			control_reg.s.an_en = 0;
+			control_reg.s.spdmsb = 1;
+			control_reg.s.spdlsb = 0;
+			control_reg.s.dup = 1;
+		}
+		csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+		       control_reg.u64);
+		csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+		/*
+		 * Use GMXENO to force the link down; it will get
+		 * re-enabled later...
+		 */
+		pcsx_miscx_ctl_reg.s.gmxeno = 1;
+		csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+		       pcsx_miscx_ctl_reg.u64);
+		csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+		return 0;
+	}
+	return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
+							    link_info);
+}
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ *                 Non zero if you want internal loopback
+ * @param enable_external
+ *                 Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
+					   int enable_external)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+	union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+	if (!cvmx_helper_is_port_valid(interface, index))
+		return 0;
+
+	pcsx_mrx_control_reg.u64 =
+		csr_rd(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+	pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
+	csr_wr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+	       pcsx_mrx_control_reg.u64);
+
+	pcsx_miscx_ctl_reg.u64 =
+		csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+	pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
+	csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+	       pcsx_miscx_ctl_reg.u64);
+
+	__cvmx_helper_sgmii_hardware_init_link(interface, index);
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper-util.c b/arch/mips/mach-octeon/cvmx-helper-util.c
index 92298fd..6d775ee 100644
--- a/arch/mips/mach-octeon/cvmx-helper-util.c
+++ b/arch/mips/mach-octeon/cvmx-helper-util.c
@@ -182,127 +182,6 @@
 }
 
 /**
- * Debug routine to dump the packet structure to the console
- *
- * @param work   Work queue entry containing the packet to dump
- * @return
- */
-int cvmx_helper_dump_packet(cvmx_wqe_t *work)
-{
-	u64 count;
-	u64 remaining_bytes;
-	union cvmx_buf_ptr buffer_ptr;
-	cvmx_buf_ptr_pki_t bptr;
-	cvmx_wqe_78xx_t *wqe = (void *)work;
-	u64 start_of_buffer;
-	u8 *data_address;
-	u8 *end_of_data;
-
-	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
-		cvmx_pki_dump_wqe(wqe);
-		cvmx_wqe_pki_errata_20776(work);
-	} else {
-		debug("WORD0 = %lx\n", (unsigned long)work->word0.u64);
-		debug("WORD1 = %lx\n", (unsigned long)work->word1.u64);
-		debug("WORD2 = %lx\n", (unsigned long)work->word2.u64);
-		debug("Packet Length:   %u\n", cvmx_wqe_get_len(work));
-		debug("    Input Port:  %u\n", cvmx_wqe_get_port(work));
-		debug("    QoS:         %u\n", cvmx_wqe_get_qos(work));
-		debug("    Buffers:     %u\n", cvmx_wqe_get_bufs(work));
-	}
-
-	if (cvmx_wqe_get_bufs(work) == 0) {
-		int wqe_pool;
-
-		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
-			debug("%s: ERROR: Unexpected bufs==0 in WQE\n", __func__);
-			return -1;
-		}
-		wqe_pool = (int)cvmx_fpa_get_wqe_pool();
-		buffer_ptr.u64 = 0;
-		buffer_ptr.s.pool = wqe_pool;
-
-		buffer_ptr.s.size = 128;
-		buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
-		if (cvmx_likely(!work->word2.s.not_IP)) {
-			union cvmx_pip_ip_offset pip_ip_offset;
-
-			pip_ip_offset.u64 = csr_rd(CVMX_PIP_IP_OFFSET);
-			buffer_ptr.s.addr +=
-				(pip_ip_offset.s.offset << 3) - work->word2.s.ip_offset;
-			buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
-		} else {
-			/*
-			 * WARNING: This code assume that the packet
-			 * is not RAW. If it was, we would use
-			 * PIP_GBL_CFG[RAW_SHF] instead of
-			 * PIP_GBL_CFG[NIP_SHF].
-			 */
-			union cvmx_pip_gbl_cfg pip_gbl_cfg;
-
-			pip_gbl_cfg.u64 = csr_rd(CVMX_PIP_GBL_CFG);
-			buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
-		}
-	} else {
-		buffer_ptr = work->packet_ptr;
-	}
-
-	remaining_bytes = cvmx_wqe_get_len(work);
-
-	while (remaining_bytes) {
-		/* native cn78xx buffer format, unless legacy-translated */
-		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) && !wqe->pki_wqe_translated) {
-			bptr.u64 = buffer_ptr.u64;
-			/* XXX- assumes cache-line aligned buffer */
-			start_of_buffer = (bptr.addr >> 7) << 7;
-			debug("    Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
-			debug("    Buffer Data: %llx\n", (unsigned long long)bptr.addr);
-			debug("    Buffer Size: %u\n", bptr.size);
-			data_address = (uint8_t *)cvmx_phys_to_ptr(bptr.addr);
-			end_of_data = data_address + bptr.size;
-		} else {
-			start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
-			debug("    Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
-			debug("    Buffer I   : %u\n", buffer_ptr.s.i);
-			debug("    Buffer Back: %u\n", buffer_ptr.s.back);
-			debug("    Buffer Pool: %u\n", buffer_ptr.s.pool);
-			debug("    Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr);
-			debug("    Buffer Size: %u\n", buffer_ptr.s.size);
-			data_address = (uint8_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr);
-			end_of_data = data_address + buffer_ptr.s.size;
-		}
-
-		debug("\t\t");
-		count = 0;
-		while (data_address < end_of_data) {
-			if (remaining_bytes == 0)
-				break;
-
-			remaining_bytes--;
-			debug("%02x", (unsigned int)*data_address);
-			data_address++;
-			if (remaining_bytes && count == 7) {
-				debug("\n\t\t");
-				count = 0;
-			} else {
-				count++;
-			}
-		}
-		debug("\n");
-
-		if (remaining_bytes) {
-			if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) &&
-			    !wqe->pki_wqe_translated)
-				buffer_ptr.u64 = *(uint64_t *)cvmx_phys_to_ptr(bptr.addr - 8);
-			else
-				buffer_ptr.u64 =
-					*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
-		}
-	}
-	return 0;
-}
-
-/**
  * @INTERNAL
  *
  * Extract NO_WPTR mode from PIP/IPD register
@@ -678,68 +557,6 @@
 	}
 }
 
-void cvmx_helper_setup_legacy_red(int pass_thresh, int drop_thresh)
-{
-	unsigned int node = cvmx_get_node_num();
-	int aura, bpid;
-	int buf_cnt;
-	bool ena_red = 0, ena_drop = 0, ena_bp = 0;
-
-#define FPA_RED_AVG_DLY 1
-#define FPA_RED_LVL_DLY 3
-#define FPA_QOS_AVRG	0
-	/* Trying to make it backward compatible with older chips */
-
-	/* Setting up avg_dly and prb_dly, enable bits */
-	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
-		cvmx_fpa3_config_red_params(node, FPA_QOS_AVRG,
-					    FPA_RED_LVL_DLY, FPA_RED_AVG_DLY);
-	}
-
-	/* Disable backpressure on queued buffers which is aura in 78xx*/
-	/*
-	 * Assumption is that all packets from all interface and ports goes
-	 * in same poolx/aurax for backward compatibility
-	 */
-	aura = cvmx_fpa_get_packet_pool();
-	buf_cnt = cvmx_fpa_get_packet_pool_buffer_count();
-	pass_thresh = buf_cnt - pass_thresh;
-	drop_thresh = buf_cnt - drop_thresh;
-	/* Map aura to bpid 0*/
-	bpid = 0;
-	cvmx_pki_write_aura_bpid(node, aura, bpid);
-	/* Don't enable back pressure */
-	ena_bp = 0;
-	/* enable RED */
-	ena_red = 1;
-	/*
-	 * This will enable RED on all interfaces since
-	 * they all have packet buffer coming from  same aura
-	 */
-	cvmx_helper_setup_aura_qos(node, aura, ena_red, ena_drop, pass_thresh,
-				   drop_thresh, ena_bp, 0);
-}
-
-/**
- * Setup Random Early Drop to automatically begin dropping packets.
- *
- * @param pass_thresh
- *               Packets will begin slowly dropping when there are less than
- *               this many packet buffers free in FPA 0.
- * @param drop_thresh
- *               All incoming packets will be dropped when there are less
- *               than this many free packet buffers in FPA 0.
- * Return: Zero on success. Negative on failure
- */
-int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
-{
-	if (octeon_has_feature(OCTEON_FEATURE_PKI))
-		cvmx_helper_setup_legacy_red(pass_thresh, drop_thresh);
-	else
-		cvmx_ipd_setup_red(pass_thresh, drop_thresh);
-	return 0;
-}
-
 /**
  * @INTERNAL
  * Setup the common GMX settings that determine the number of
@@ -980,35 +797,6 @@
 }
 
 /**
- * Display interface statistics.
- *
- * @param port IPD/PKO port number
- *
- * Return: none
- */
-void cvmx_helper_show_stats(int port)
-{
-	cvmx_pip_port_status_t status;
-	cvmx_pko_port_status_t pko_status;
-
-	/* ILK stats */
-	if (octeon_has_feature(OCTEON_FEATURE_ILK))
-		__cvmx_helper_ilk_show_stats();
-
-	/* PIP stats */
-	cvmx_pip_get_port_stats(port, 0, &status);
-	debug("port %d: the number of packets - ipd: %d\n", port,
-	      (int)status.packets);
-
-	/* PKO stats */
-	cvmx_pko_get_port_status(port, 0, &pko_status);
-	debug("port %d: the number of packets - pko: %d\n", port,
-	      (int)pko_status.packets);
-
-	/* TODO: other stats */
-}
-
-/**
  * Returns the interface number for an IPD/PKO port number.
  *
  * @param ipd_port IPD/PKO port number
@@ -1187,39 +975,3 @@
 
 	return -1;
 }
-
-/**
- * Prints out a buffer with the address, hex bytes, and ASCII
- *
- * @param	addr	Start address to print on the left
- * @param[in]	buffer	array of bytes to print
- * @param	count	Number of bytes to print
- */
-void cvmx_print_buffer_u8(unsigned int addr, const uint8_t *buffer,
-			  size_t count)
-{
-	uint i;
-
-	while (count) {
-		unsigned int linelen = count < 16 ? count : 16;
-
-		debug("%08x:", addr);
-
-		for (i = 0; i < linelen; i++)
-			debug(" %0*x", 2, buffer[i]);
-
-		while (i++ < 17)
-			debug("   ");
-
-		for (i = 0; i < linelen; i++) {
-			if (buffer[i] >= 0x20 && buffer[i] < 0x7f)
-				debug("%c", buffer[i]);
-			else
-				debug(".");
-		}
-		debug("\n");
-		addr += linelen;
-		buffer += linelen;
-		count -= linelen;
-	}
-}
diff --git a/arch/mips/mach-octeon/cvmx-helper-xaui.c b/arch/mips/mach-octeon/cvmx-helper-xaui.c
new file mode 100644
index 0000000..2eb3b7d
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-helper-xaui.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
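+/*
+ * Return the number of ports the XAUI interface presents: one physical
+ * port normally, or 16 logical ports when HiGig2 is enabled on it
+ * (CN70XX instead reports one RXAUI port or none).
+ */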
+int __cvmx_helper_xaui_enumerate(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+	union cvmx_gmxx_hg2_control gmx_hg2_control;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		enum cvmx_qlm_mode qlm_mode =
+			cvmx_qlm_get_dlm_mode(0, interface);
+
+		if (qlm_mode == CVMX_QLM_MODE_RXAUI)
+			return 1;
+		return 0;
+	}
+	/* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
+	gmx_hg2_control.u64 = csr_rd(CVMX_GMXX_HG2_CONTROL(interface));
+	if (gmx_hg2_control.s.hg2tx_en)
+		return 16;
+	else
+		return 1;
+}
+
+/**
+ * @INTERNAL
+ * Probe an XAUI interface and determine the number of ports
+ * connected to it. The XAUI interface should still be down
+ * after this call.
+ *
+ * @param xiface Interface to probe
+ *
+ * @return Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_xaui_probe(int xiface)
+{
+	int i, ports;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+	union cvmx_gmxx_inf_mode mode;
+
+	/*
+	 * CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis
+	 * be programmed.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
+		union cvmx_ciu_qlm2 ciu_qlm;
+
+		ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
+		ciu_qlm.s.txbypass = 1;
+		ciu_qlm.s.txdeemph = 0x5;
+		ciu_qlm.s.txmargin = 0x1a;
+		csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
+	}
+
+	/*
+	 * CN63XX Pass 2.x errata G-15273 requires the QLM De-emphasis
+	 * be programmed when using a 156.25 MHz ref clock.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X)) {
+		/* Read the QLM speed pins */
+		union cvmx_mio_rst_boot mio_rst_boot;
+
+		mio_rst_boot.u64 = csr_rd(CVMX_MIO_RST_BOOT);
+
+		if (mio_rst_boot.cn63xx.qlm2_spd == 0xb) {
+			union cvmx_ciu_qlm2 ciu_qlm;
+
+			ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
+			ciu_qlm.s.txbypass = 1;
+			ciu_qlm.s.txdeemph = 0xa;
+			ciu_qlm.s.txmargin = 0x1f;
+			csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
+		}
+	}
+
+	/*
+	 * Check if QLM is configured correct for XAUI/RXAUI, verify
+	 * the speed as well as mode.
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		int qlm = cvmx_qlm_interface(xiface);
+		enum cvmx_qlm_mode mode = cvmx_qlm_get_mode(qlm);
+
+		if (mode != CVMX_QLM_MODE_XAUI && mode != CVMX_QLM_MODE_RXAUI)
+			return 0;
+	}
+
+	ports = __cvmx_helper_xaui_enumerate(xiface);
+
+	if (ports <= 0)
+		return 0;
+
+	/*
+	 * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+	 * interface needs to be enabled before IPD, otherwise per-port
+	 * backpressure may not work properly.
+	 */
+	mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
+	mode.s.en = 1;
+	csr_wr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		/*
+		 * Setup PKO to support 16 ports for HiGig2 virtual
+		 * ports. We're pointing all of the PKO packet ports
+		 * for this interface to the XAUI. This allows us to
+		 * use HiGig2 backpressure per port.
+		 */
+		for (i = 0; i < 16; i++) {
+			union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
+
+			pko_mem_port_ptrs.u64 = 0;
+			/*
+			 * We set each PKO port to have equal priority
+			 * in a round robin fashion.
+			 */
+			pko_mem_port_ptrs.s.static_p = 0;
+			pko_mem_port_ptrs.s.qos_mask = 0xff;
+			/* All PKO ports map to the same XAUI hardware port */
+			pko_mem_port_ptrs.s.eid = interface * 4;
+			pko_mem_port_ptrs.s.pid = interface * 16 + i;
+			pko_mem_port_ptrs.s.bp_port = interface * 16 + i;
+			csr_wr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
+		}
+	}
+
+	return ports;
+}
+
+/**
+ * @INTERNAL
+ * Bring up the XAUI interface. After this call packet I/O should be
+ * fully functional.
+ *
+ * @param interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_link_init(int interface)
+{
+	union cvmx_gmxx_prtx_cfg gmx_cfg;
+	union cvmx_pcsxx_control1_reg xaui_ctl;
+	union cvmx_pcsxx_misc_ctl_reg misc_ctl;
+	union cvmx_gmxx_tx_xaui_ctl tx_ctl;
+
+	/* (1) Interface has already been enabled. */
+
+	/* (2) Disable GMX. */
+	misc_ctl.u64 = csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
+	misc_ctl.s.gmxeno = 1;
+	csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface), misc_ctl.u64);
+
+	/* (3) Disable GMX and PCSX interrupts. */
+	csr_wr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+	csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+	csr_wr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+
+	/* (4) Bring up the PCSX and GMX reconciliation layer. */
+	/* (4)a Set polarity and lane swapping. */
+	/* (4)b */
+	tx_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
+	/* Enable better IFG packing and improves performance */
+	tx_ctl.s.dic_en = 1;
+	tx_ctl.s.uni_en = 0;
+	csr_wr(CVMX_GMXX_TX_XAUI_CTL(interface), tx_ctl.u64);
+
+	/* (4)c Apply reset sequence */
+	xaui_ctl.u64 = csr_rd(CVMX_PCSXX_CONTROL1_REG(interface));
+	xaui_ctl.s.lo_pwr = 0;
+
+	/*
+	 * Errata G-15618 requires disabling PCS soft reset in some
+	 * OCTEON II models.
+	 */
+	if (!OCTEON_IS_MODEL(OCTEON_CN63XX) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN68XX))
+		xaui_ctl.s.reset = 1;
+	csr_wr(CVMX_PCSXX_CONTROL1_REG(interface), xaui_ctl.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X) && interface != 1) {
+		/*
+		 * Note that GMX 1 was skipped as GMX0 is on the same
+		 * QLM and will always be done first
+		 *
+		 * Workaround for Errata (G-16467).
+		 */
+		int qlm = interface;
+#ifdef CVMX_QLM_DUMP_STATE
+		debug("%s:%d: XAUI%d: Applying workaround for Errata G-16467\n",
+		      __func__, __LINE__, qlm);
+		cvmx_qlm_display_registers(qlm);
+		debug("\n");
+#endif
+		/*
+		 * This workaround only applies to QLMs running XAUI
+		 * at 6.25Ghz
+		 */
+		if ((cvmx_qlm_get_gbaud_mhz(qlm) == 6250) &&
+		    (cvmx_qlm_jtag_get(qlm, 0, "clkf_byp") != 20)) {
+			/* Wait 100us for links to stabilize */
+			udelay(100);
+			cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 20);
+			/* Allow the QLM to exit reset */
+			cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 0);
+			/* Wait 100us for links to stabilize */
+			udelay(100);
+			/* Allow TX on QLM */
+			cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set", 0);
+		}
+#ifdef CVMX_QLM_DUMP_STATE
+		debug("%s:%d: XAUI%d: Done applying workaround for Errata G-16467\n",
+		      __func__, __LINE__, qlm);
+		cvmx_qlm_display_registers(qlm);
+		debug("\n\n");
+#endif
+	}
+
+	/* Wait for PCS to come out of reset */
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_CONTROL1_REG(interface),
+				  cvmx_pcsxx_control1_reg_t, reset, ==, 0,
+				  10000))
+		return -1;
+	/* Wait for PCS to be aligned */
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_10GBX_STATUS_REG(interface),
+				  cvmx_pcsxx_10gbx_status_reg_t, alignd, ==, 1,
+				  10000))
+		return -1;
+	/* Wait for RX to be ready */
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_RX_XAUI_CTL(interface),
+				  cvmx_gmxx_rx_xaui_ctl_t, status, ==, 0,
+				  10000))
+		return -1;
+
+	/* (6) Configure GMX */
+
+	/* Wait for GMX RX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface),
+				  cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, 10000))
+		return -1;
+	/* Wait for GMX TX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface),
+				  cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, 10000))
+		return -1;
+
+	/* GMX configure */
+	gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
+	gmx_cfg.s.speed = 1;
+	gmx_cfg.s.speed_msb = 0;
+	gmx_cfg.s.slottime = 1;
+	csr_wr(CVMX_GMXX_TX_PRTS(interface), 1);
+	csr_wr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
+	csr_wr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
+	csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+	/* Wait for receive link */
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS1_REG(interface),
+				  cvmx_pcsxx_status1_reg_t, rcv_lnk, ==, 1,
+				  10000))
+		return -1;
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface),
+				  cvmx_pcsxx_status2_reg_t, xmtflt, ==, 0,
+				  10000))
+		return -1;
+	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface),
+				  cvmx_pcsxx_status2_reg_t, rcvflt, ==, 0,
+				  10000))
+		return -1;
+
+	/* (8) Enable packet reception */
+	misc_ctl.s.gmxeno = 0;
+	csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface), misc_ctl.u64);
+
+	/* Clear all error interrupts before enabling the interface. */
+	csr_wr(CVMX_GMXX_RXX_INT_REG(0, interface), ~0x0ull);
+	csr_wr(CVMX_GMXX_TX_INT_REG(interface), ~0x0ull);
+	csr_wr(CVMX_PCSXX_INT_REG(interface), ~0x0ull);
+
+	/* Enable GMX */
+	gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
+	gmx_cfg.s.en = 1;
+	csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Bring up and enable an XAUI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @param xiface Interface to bring up
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_enable(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface;
+
+	__cvmx_helper_setup_gmx(interface, 1);
+
+	/* Setup PKND and BPID */
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		union cvmx_gmxx_bpid_msk bpid_msk;
+		union cvmx_gmxx_bpid_mapx bpid_map;
+		union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+		union cvmx_gmxx_txx_append gmxx_txx_append_cfg;
+
+		/* Setup PKIND */
+		gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
+		gmxx_prtx_cfg.s.pknd = cvmx_helper_get_pknd(interface, 0);
+		csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmxx_prtx_cfg.u64);
+
+		/* Setup BPID */
+		bpid_map.u64 = csr_rd(CVMX_GMXX_BPID_MAPX(0, interface));
+		bpid_map.s.val = 1;
+		bpid_map.s.bpid = cvmx_helper_get_bpid(interface, 0);
+		csr_wr(CVMX_GMXX_BPID_MAPX(0, interface), bpid_map.u64);
+
+		bpid_msk.u64 = csr_rd(CVMX_GMXX_BPID_MSK(interface));
+		bpid_msk.s.msk_or |= 1;
+		bpid_msk.s.msk_and &= ~1;
+		csr_wr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);
+
+		/* CN68XX adds the padding and FCS in PKO, not GMX */
+		gmxx_txx_append_cfg.u64 =
+			csr_rd(CVMX_GMXX_TXX_APPEND(0, interface));
+		gmxx_txx_append_cfg.s.fcs = 0;
+		gmxx_txx_append_cfg.s.pad = 0;
+		csr_wr(CVMX_GMXX_TXX_APPEND(0, interface),
+		       gmxx_txx_append_cfg.u64);
+	}
+
+	/* 70XX eval boards use a Marvell PHY, set disparity accordingly. */
+	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		union cvmx_gmxx_rxaui_ctl rxaui_ctl;
+
+		rxaui_ctl.u64 = csr_rd(CVMX_GMXX_RXAUI_CTL(interface));
+		rxaui_ctl.s.disparity = 1;
+		csr_wr(CVMX_GMXX_RXAUI_CTL(interface), rxaui_ctl.u64);
+	}
+
+	__cvmx_helper_xaui_link_init(interface);
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @param ipd_port IPD/PKO port to query
+ *
+ * @return Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+	union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
+	cvmx_helper_link_info_t result;
+
+	gmxx_tx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
+	gmxx_rx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_RX_XAUI_CTL(interface));
+	pcsxx_status1_reg.u64 = csr_rd(CVMX_PCSXX_STATUS1_REG(interface));
+	result.u64 = 0;
+
+	/* Only return a link if both RX and TX are happy */
+	if (gmxx_tx_xaui_ctl.s.ls == 0 && gmxx_rx_xaui_ctl.s.status == 0 &&
+	    pcsxx_status1_reg.s.rcv_lnk == 1) {
+		union cvmx_pcsxx_misc_ctl_reg misc_ctl;
+
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
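+		/*
+		 * XAUI/RXAUI bundle several serdes lanes: usable speed is
+		 * the per-lane baud rate * 8 / 10 (8b/10b coding) times
+		 * the lane count (two for RXAUI, four for XAUI).
+		 */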
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+			union cvmx_mio_qlmx_cfg qlm_cfg;
+			int lanes;
+			int qlm = (interface == 1) ? 0 : interface;
+
+			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
+			result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
+			lanes = (qlm_cfg.s.qlm_cfg == 7) ? 2 : 4;
+			result.s.speed *= lanes;
+		} else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+			int qlm = cvmx_qlm_interface(interface);
+
+			result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
+			result.s.speed *= 4;
+		} else {
+			result.s.speed = 10000;
+		}
+		misc_ctl.u64 = csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
+		if (misc_ctl.s.gmxeno)
+			__cvmx_helper_xaui_link_init(interface);
+	} else {
+		/* Disable GMX and PCSX interrupts. */
+		csr_wr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+		csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+		csr_wr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+	}
+	return result;
+}
+
+/**
+ * @INTERNAL
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @param ipd_port  IPD/PKO port to configure
+ * @param link_info The new link state
+ *
+ * @return Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+
+	gmxx_tx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
+	gmxx_rx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_RX_XAUI_CTL(interface));
+
+	/* If the link shouldn't be up, then just return */
+	if (!link_info.s.link_up)
+		return 0;
+
+	/* Do nothing if both RX and TX are happy */
+	if (gmxx_tx_xaui_ctl.s.ls == 0 && gmxx_rx_xaui_ctl.s.status == 0)
+		return 0;
+
+	/* Bring the link up */
+	return __cvmx_helper_xaui_link_init(interface);
+}
+
+/**
+ * @INTERNAL
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @param ipd_port IPD/PKO port to loopback.
+ * @param enable_internal
+ *                 Non zero if you want internal loopback
+ * @param enable_external
+ *                 Non zero if you want external loopback
+ *
+ * @return Zero on success, negative on failure.
+ */
+int __cvmx_helper_xaui_configure_loopback(int ipd_port,
+					  int enable_internal,
+					  int enable_external)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
+	union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;
+
+	/* Set the internal loop */
+	pcsxx_control1_reg.u64 = csr_rd(CVMX_PCSXX_CONTROL1_REG(interface));
+	pcsxx_control1_reg.s.loopbck1 = enable_internal;
+	csr_wr(CVMX_PCSXX_CONTROL1_REG(interface), pcsxx_control1_reg.u64);
+
+	/* Set the external loop */
+	gmxx_xaui_ext_loopback.u64 =
+		csr_rd(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
+	gmxx_xaui_ext_loopback.s.en = enable_external;
+	csr_wr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
+	       gmxx_xaui_ext_loopback.u64);
+
+	/* Take the link through a reset */
+	return __cvmx_helper_xaui_link_init(interface);
+}
diff --git a/arch/mips/mach-octeon/cvmx-helper.c b/arch/mips/mach-octeon/cvmx-helper.c
index d0620d6..ccec57e 100644
--- a/arch/mips/mach-octeon/cvmx-helper.c
+++ b/arch/mips/mach-octeon/cvmx-helper.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
  *
  * Helper functions for common, but complicated tasks.
  */
@@ -305,20 +305,6 @@
 /**
  * @INTERNAL
  * This structure specifies the interface methods used by interfaces
- * configured as srio.
- */
-static const struct iface_ops iface_ops_srio = {
-	.mode = CVMX_HELPER_INTERFACE_MODE_SRIO,
-	.enumerate = __cvmx_helper_srio_probe,
-	.probe = __cvmx_helper_srio_probe,
-	.enable = __cvmx_helper_srio_enable,
-	.link_get = __cvmx_helper_srio_link_get,
-	.link_set = __cvmx_helper_srio_link_set,
-};
-
-/**
- * @INTERNAL
- * This structure specifies the interface methods used by interfaces
  * configured as agl.
  */
 static const struct iface_ops iface_ops_agl = {
@@ -442,35 +428,6 @@
 	return 0;
 }
 
-/*
- * Shut down the interfaces; free the resources.
- * @INTERNAL
- */
-void __cvmx_helper_shutdown_interfaces_node(unsigned int node)
-{
-	int i;
-	int nifaces; /* number of interfaces */
-	struct cvmx_iface *piface;
-
-	nifaces = cvmx_helper_get_number_of_interfaces();
-	for (i = 0; i < nifaces; i++) {
-		piface = &cvmx_interfaces[node][i];
-
-		/*
-		 * For SE apps, bootmem was meant to be allocated and never
-		 * freed.
-		 */
-		piface->cvif_ipd_port_link_info = 0;
-	}
-}
-
-void __cvmx_helper_shutdown_interfaces(void)
-{
-	unsigned int node = cvmx_get_node_num();
-
-	__cvmx_helper_shutdown_interfaces_node(node);
-}
-
 int __cvmx_helper_set_link_info(int xiface, int index, cvmx_helper_link_info_t link_info)
 {
 	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
@@ -525,11 +482,6 @@
 typedef int (*cvmx_export_config_t)(void);
 cvmx_export_config_t cvmx_export_app_config;
 
-void cvmx_rgmii_set_back_pressure(uint64_t backpressure_dis)
-{
-	cvmx_rgmii_backpressure_dis = backpressure_dis;
-}
-
 /*
  * internal functions that are not exported in the .h file but must be
  * declared to make gcc happy.
@@ -537,16 +489,6 @@
 extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port);
 
 /**
- * cvmx_override_iface_phy_mode(int interface, int index) is a function pointer.
- * It is meant to allow customization of interfaces which do not have a PHY.
- *
- * @returns 0 if MAC decides TX_CONFIG_REG or 1 if PHY decides  TX_CONFIG_REG.
- *
- * If this function pointer is NULL then it defaults to the MAC.
- */
-int (*cvmx_override_iface_phy_mode)(int interface, int index);
-
-/**
  * cvmx_override_ipd_port_setup(int ipd_port) is a function
  * pointer. It is meant to allow customization of the IPD
  * port/port kind setup before packet input/output comes online.
@@ -607,7 +549,7 @@
  * chip and configuration, this can be 1-16. A value of 0
  * specifies that the interface doesn't exist or isn't usable.
  *
- * @param xiface xiface to get the port count for
+ * @param xiface to get the port count for
  *
  * Return: Number of ports on interface. Can be Zero.
  */
@@ -919,15 +861,9 @@
 			break;
 		}
 	} else if ((interface < 3) && OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
-		cvmx_sriox_status_reg_t sriox_status_reg;
-		int srio_port = interface - 1;
-
-		sriox_status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(srio_port));
-
-		if (sriox_status_reg.s.srio)
-			iface_ops[interface] = &iface_ops_srio;
-		else
-			iface_ops[interface] = &iface_ops_dis;
+		/* SRIO is disabled for now */
+		printf("SRIO disabled for now!\n");
+		iface_ops[interface] = &iface_ops_dis;
 	} else if (interface == 3) { /* DPI */
 		iface_ops[interface] = &iface_ops_npi;
 	} else if (interface == 4) { /* LOOP */
@@ -1046,7 +982,6 @@
 		   (OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 &&
 		    interface <= 7)) {
 		/* Only present in CN63XX & CN66XX Octeon model */
-		union cvmx_sriox_status_reg sriox_status_reg;
 
 		/* cn66xx pass1.0 has only 2 SRIO interfaces. */
 		if ((interface == 5 || interface == 7) &&
@@ -1059,12 +994,9 @@
 			 */
 			iface_ops[interface] = &iface_ops_dis;
 		} else {
-			sriox_status_reg.u64 =
-				csr_rd(CVMX_SRIOX_STATUS_REG(interface - 4));
-			if (sriox_status_reg.s.srio)
-				iface_ops[interface] = &iface_ops_srio;
-			else
-				iface_ops[interface] = &iface_ops_dis;
+			/* SRIO is disabled for now */
+			printf("SRIO disabled for now!\n");
+			iface_ops[interface] = &iface_ops_dis;
 		}
 	} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
 		union cvmx_mio_qlmx_cfg mio_qlm_cfg;
@@ -1440,16 +1372,6 @@
 
 /**
  * @INTERNAL
- * Verify the per port IPD backpressure is aligned properly.
- * Return: Zero if working, non zero if misaligned
- */
-int __cvmx_helper_backpressure_is_misaligned(void)
-{
-	return 0;
-}
-
-/**
- * @INTERNAL
  * Enable packet input/output from the hardware. This function is
  * called after all internal setup is complete and IPD is enabled.
  * After this function completes, packets will be accepted from the
@@ -1467,7 +1389,7 @@
 
 	if (iface_node_ops[xi.node][xi.interface]->enable)
 		result = iface_node_ops[xi.node][xi.interface]->enable(xiface);
-	result |= __cvmx_helper_board_hardware_enable(xiface);
+
 	return result;
 }
 
@@ -1609,7 +1531,8 @@
 		/* Skip invalid/disabled interfaces */
 		if (cvmx_helper_ports_on_interface(xiface) <= 0)
 			continue;
-		printf("Node %d Interface %d has %d ports (%s)\n", node, interface,
+		debug("Node %d Interface %d has %d ports (%s)\n",
+		      node, interface,
 		       cvmx_helper_ports_on_interface(xiface),
 		       cvmx_helper_interface_mode_to_string(
 			       cvmx_helper_interface_get_mode(xiface)));
@@ -1731,445 +1654,6 @@
 }
 
 /**
- * Helper function for global packet IO shutdown
- */
-int cvmx_helper_shutdown_packet_io_global_cn78xx(int node)
-{
-	int num_interfaces = cvmx_helper_get_number_of_interfaces();
-	cvmx_wqe_t *work;
-	int interface;
-	int result = 0;
-
-	/* Shut down all interfaces and disable TX and RX on all ports */
-	for (interface = 0; interface < num_interfaces; interface++) {
-		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-		int index;
-		int num_ports = cvmx_helper_ports_on_interface(xiface);
-
-		if (num_ports > 4)
-			num_ports = 4;
-
-		cvmx_bgx_set_backpressure_override(xiface, 0);
-		for (index = 0; index < num_ports; index++) {
-			cvmx_helper_link_info_t link_info;
-
-			if (!cvmx_helper_is_port_valid(xiface, index))
-				continue;
-
-			cvmx_helper_bgx_shutdown_port(xiface, index);
-
-			/* Turn off link LEDs */
-			link_info.u64 = 0;
-			cvmx_helper_update_link_led(xiface, index, link_info);
-		}
-	}
-
-	/* Stop input first */
-	cvmx_helper_pki_shutdown(node);
-
-	/* Retrieve all packets from the SSO and free them */
-	result = 0;
-	while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
-		cvmx_helper_free_pki_pkt_data(work);
-		cvmx_wqe_pki_free(work);
-		result++;
-	}
-
-	if (result > 0)
-		debug("%s: Purged %d packets from SSO\n", __func__, result);
-
-	/*
-	 * No need to wait for PKO queues to drain,
-	 * dq_close() drains the queues to NULL.
-	 */
-
-	/* Shutdown PKO interfaces */
-	for (interface = 0; interface < num_interfaces; interface++) {
-		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-
-		cvmx_helper_pko3_shut_interface(xiface);
-	}
-
-	/* Disable MAC address filtering */
-	for (interface = 0; interface < num_interfaces; interface++) {
-		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-
-		switch (cvmx_helper_interface_get_mode(xiface)) {
-		case CVMX_HELPER_INTERFACE_MODE_XAUI:
-		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
-		case CVMX_HELPER_INTERFACE_MODE_XLAUI:
-		case CVMX_HELPER_INTERFACE_MODE_XFI:
-		case CVMX_HELPER_INTERFACE_MODE_10G_KR:
-		case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
-		case CVMX_HELPER_INTERFACE_MODE_SGMII:
-		case CVMX_HELPER_INTERFACE_MODE_MIXED: {
-			int index;
-			int num_ports = cvmx_helper_ports_on_interface(xiface);
-
-			for (index = 0; index < num_ports; index++) {
-				if (!cvmx_helper_is_port_valid(xiface, index))
-					continue;
-
-				/* Reset MAC filtering */
-				cvmx_helper_bgx_rx_adr_ctl(node, interface, index, 0, 0, 0);
-			}
-			break;
-		}
-		default:
-			break;
-		}
-	}
-
-	for (interface = 0; interface < num_interfaces; interface++) {
-		int index;
-		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-		int num_ports = cvmx_helper_ports_on_interface(xiface);
-
-		for (index = 0; index < num_ports; index++) {
-			/* Doing this twice should clear it since no packets
-			 * can be received.
-			 */
-			cvmx_update_rx_activity_led(xiface, index, false);
-			cvmx_update_rx_activity_led(xiface, index, false);
-		}
-	}
-
-	/* Shutdown the PKO unit */
-	result = cvmx_helper_pko3_shutdown(node);
-
-	/* Release interface structures */
-	__cvmx_helper_shutdown_interfaces();
-
-	return result;
-}
-
-/**
- * Undo the initialization performed in
- * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
- * local version on each core, packet IO for Octeon will be disabled and placed
- * in the initial reset state. It will then be safe to call the initialize
- * later on. Note that this routine does not empty the FPA pools. It frees all
- * buffers used by the packet IO hardware to the FPA so a function emptying the
- * FPA after shutdown should find all packet buffers in the FPA.
- *
- * Return: Zero on success, negative on failure.
- */
-int cvmx_helper_shutdown_packet_io_global(void)
-{
-	const int timeout = 5; /* Wait up to 5 seconds for timeouts */
-	int result = 0;
-	int num_interfaces = cvmx_helper_get_number_of_interfaces();
-	int interface;
-	int num_ports;
-	int index;
-	struct cvmx_buffer_list *pool0_buffers;
-	struct cvmx_buffer_list *pool0_buffers_tail;
-	cvmx_wqe_t *work;
-	union cvmx_ipd_ctl_status ipd_ctl_status;
-	int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
-	int node = cvmx_get_node_num();
-	cvmx_pcsx_mrx_control_reg_t control_reg;
-
-	if (octeon_has_feature(OCTEON_FEATURE_BGX))
-		return cvmx_helper_shutdown_packet_io_global_cn78xx(node);
-
-	/* Step 1: Disable all backpressure */
-	for (interface = 0; interface < num_interfaces; interface++) {
-		cvmx_helper_interface_mode_t mode =
-			cvmx_helper_interface_get_mode(interface);
-
-		if (mode == CVMX_HELPER_INTERFACE_MODE_AGL)
-			cvmx_agl_set_backpressure_override(interface, 0x1);
-		else if (mode != CVMX_HELPER_INTERFACE_MODE_DISABLED)
-			cvmx_gmx_set_backpressure_override(interface, 0xf);
-	}
-
-	/* Step 2: Wait for the PKO queues to drain */
-	result = __cvmx_helper_pko_drain();
-	if (result < 0) {
-		debug("WARNING: %s: Failed to drain some PKO queues\n",
-		      __func__);
-	}
-
-	/* Step 3: Disable TX and RX on all ports */
-	for (interface = 0; interface < num_interfaces; interface++) {
-		int xiface = cvmx_helper_node_interface_to_xiface(node,
-								  interface);
-
-		switch (cvmx_helper_interface_get_mode(interface)) {
-		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
-		case CVMX_HELPER_INTERFACE_MODE_PCIE:
-			/* Not a packet interface */
-			break;
-		case CVMX_HELPER_INTERFACE_MODE_NPI:
-		case CVMX_HELPER_INTERFACE_MODE_SRIO:
-		case CVMX_HELPER_INTERFACE_MODE_ILK:
-			/*
-			 * We don't handle the NPI/NPEI/SRIO packet
-			 * engines. The caller must know these are
-			 * idle.
-			 */
-			break;
-		case CVMX_HELPER_INTERFACE_MODE_LOOP:
-			/*
-			 * Nothing needed. Once PKO is idle, the
-			 * loopback devices must be idle.
-			 */
-			break;
-		case CVMX_HELPER_INTERFACE_MODE_SPI:
-			/*
-			 * SPI cannot be disabled from Octeon. It is
-			 * the responsibility of the caller to make
-			 * sure SPI is idle before doing shutdown.
-			 *
-			 * Fall through and do the same processing as
-			 * RGMII/GMII.
-			 */
-			fallthrough;
-		case CVMX_HELPER_INTERFACE_MODE_GMII:
-		case CVMX_HELPER_INTERFACE_MODE_RGMII:
-			/* Disable outermost RX at the ASX block */
-			csr_wr(CVMX_ASXX_RX_PRT_EN(interface), 0);
-			num_ports = cvmx_helper_ports_on_interface(xiface);
-			if (num_ports > 4)
-				num_ports = 4;
-			for (index = 0; index < num_ports; index++) {
-				union cvmx_gmxx_prtx_cfg gmx_cfg;
-
-				if (!cvmx_helper_is_port_valid(interface, index))
-					continue;
-				gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
-				gmx_cfg.s.en = 0;
-				csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-				/* Poll the GMX state machine waiting for it to become idle */
-				csr_wr(CVMX_NPI_DBG_SELECT,
-				       interface * 0x800 + index * 0x100 + 0x880);
-				if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
-							  data & 7, ==, 0, timeout * 1000000)) {
-					debug("GMX RX path timeout waiting for idle\n");
-					result = -1;
-				}
-				if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
-							  data & 0xf, ==, 0, timeout * 1000000)) {
-					debug("GMX TX path timeout waiting for idle\n");
-					result = -1;
-				}
-			}
-			/* Disable outermost TX at the ASX block */
-			csr_wr(CVMX_ASXX_TX_PRT_EN(interface), 0);
-			/* Disable interrupts for interface */
-			csr_wr(CVMX_ASXX_INT_EN(interface), 0);
-			csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0);
-			break;
-		case CVMX_HELPER_INTERFACE_MODE_XAUI:
-		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
-		case CVMX_HELPER_INTERFACE_MODE_SGMII:
-		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
-		case CVMX_HELPER_INTERFACE_MODE_PICMG:
-			num_ports = cvmx_helper_ports_on_interface(xiface);
-			if (num_ports > 4)
-				num_ports = 4;
-			for (index = 0; index < num_ports; index++) {
-				union cvmx_gmxx_prtx_cfg gmx_cfg;
-
-				if (!cvmx_helper_is_port_valid(interface, index))
-					continue;
-				gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
-				gmx_cfg.s.en = 0;
-				csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-				if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
-							  union cvmx_gmxx_prtx_cfg, rx_idle, ==, 1,
-							  timeout * 1000000)) {
-					debug("GMX RX path timeout waiting for idle\n");
-					result = -1;
-				}
-				if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
-							  union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
-							  timeout * 1000000)) {
-					debug("GMX TX path timeout waiting for idle\n");
-					result = -1;
-				}
-				/* For SGMII some PHYs require that the PCS
-				 * interface be powered down and reset (i.e.
-				 * Atheros/Qualcomm PHYs).
-				 */
-				if (cvmx_helper_interface_get_mode(interface) ==
-				    CVMX_HELPER_INTERFACE_MODE_SGMII) {
-					u64 reg;
-
-					reg = CVMX_PCSX_MRX_CONTROL_REG(index, interface);
-					/* Power down the interface */
-					control_reg.u64 = csr_rd(reg);
-					control_reg.s.pwr_dn = 1;
-					csr_wr(reg, control_reg.u64);
-					csr_rd(reg);
-				}
-			}
-			break;
-		case CVMX_HELPER_INTERFACE_MODE_AGL: {
-			int port = cvmx_helper_agl_get_port(interface);
-			union cvmx_agl_gmx_prtx_cfg agl_gmx_cfg;
-
-			agl_gmx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
-			agl_gmx_cfg.s.en = 0;
-			csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_cfg.u64);
-			if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
-						  union cvmx_agl_gmx_prtx_cfg, rx_idle, ==, 1,
-						  timeout * 1000000)) {
-				debug("AGL RX path timeout waiting for idle\n");
-				result = -1;
-			}
-			if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
-						  union cvmx_agl_gmx_prtx_cfg, tx_idle, ==, 1,
-						  timeout * 1000000)) {
-				debug("AGL TX path timeout waiting for idle\n");
-				result = -1;
-			}
-		} break;
-		default:
-			break;
-		}
-	}
-
-	/* Step 4: Retrieve all packets from the POW and free them */
-	while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
-		cvmx_helper_free_packet_data(work);
-		cvmx_fpa1_free(work, wqe_pool, 0);
-	}
-
-	/* Step 5 */
-	cvmx_ipd_disable();
-
-	/*
-	 * Step 6: Drain all prefetched buffers from IPD/PIP. Note that IPD/PIP
-	 * have not been reset yet
-	 */
-	__cvmx_ipd_free_ptr();
-
-	/* Step 7: Free the PKO command buffers and put PKO in reset */
-	cvmx_pko_shutdown();
-
-	/* Step 8: Disable MAC address filtering */
-	for (interface = 0; interface < num_interfaces; interface++) {
-		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-
-		switch (cvmx_helper_interface_get_mode(interface)) {
-		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
-		case CVMX_HELPER_INTERFACE_MODE_PCIE:
-		case CVMX_HELPER_INTERFACE_MODE_SRIO:
-		case CVMX_HELPER_INTERFACE_MODE_ILK:
-		case CVMX_HELPER_INTERFACE_MODE_NPI:
-		case CVMX_HELPER_INTERFACE_MODE_LOOP:
-			break;
-		case CVMX_HELPER_INTERFACE_MODE_XAUI:
-		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
-		case CVMX_HELPER_INTERFACE_MODE_GMII:
-		case CVMX_HELPER_INTERFACE_MODE_RGMII:
-		case CVMX_HELPER_INTERFACE_MODE_SPI:
-		case CVMX_HELPER_INTERFACE_MODE_SGMII:
-		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
-		case CVMX_HELPER_INTERFACE_MODE_PICMG:
-			num_ports = cvmx_helper_ports_on_interface(xiface);
-			if (num_ports > 4)
-				num_ports = 4;
-			for (index = 0; index < num_ports; index++) {
-				if (!cvmx_helper_is_port_valid(interface, index))
-					continue;
-				csr_wr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
-				csr_wr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
-				csr_wr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
-				csr_wr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
-				csr_wr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
-				csr_wr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
-				csr_wr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
-				csr_wr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
-			}
-			break;
-		case CVMX_HELPER_INTERFACE_MODE_AGL: {
-			int port = cvmx_helper_agl_get_port(interface);
-
-			csr_wr(CVMX_AGL_GMX_RXX_ADR_CTL(port), 1);
-			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
-			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), 0);
-			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), 0);
-			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), 0);
-			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), 0);
-			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), 0);
-			csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), 0);
-		} break;
-		default:
-			break;
-		}
-	}
-
-	/*
-	 * Step 9: Drain all FPA buffers out of pool 0 before we reset
-	 * IPD/PIP.  This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
-	 * sync. We temporarily keep the buffers in the pool0_buffers
-	 * list.
-	 */
-	pool0_buffers = NULL;
-	pool0_buffers_tail = NULL;
-	while (1) {
-		struct cvmx_buffer_list *buffer = cvmx_fpa1_alloc(0);
-
-		if (buffer) {
-			buffer->next = NULL;
-
-			if (!pool0_buffers)
-				pool0_buffers = buffer;
-			else
-				pool0_buffers_tail->next = buffer;
-
-			pool0_buffers_tail = buffer;
-		} else {
-			break;
-		}
-	}
-
-	/* Step 10: Reset IPD and PIP */
-	ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
-	ipd_ctl_status.s.reset = 1;
-	csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
-
-	/* Make sure IPD has finished reset. */
-	if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
-		if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, union cvmx_ipd_ctl_status, rst_done,
-					  ==, 0, 1000)) {
-			debug("IPD reset timeout waiting for idle\n");
-			result = -1;
-		}
-	}
-
-	/* Step 11: Restore the FPA buffers into pool 0 */
-	while (pool0_buffers) {
-		struct cvmx_buffer_list *n = pool0_buffers->next;
-
-		cvmx_fpa1_free(pool0_buffers, 0, 0);
-		pool0_buffers = n;
-	}
-
-	/* Step 12: Release interface structures */
-	__cvmx_helper_shutdown_interfaces();
-
-	return result;
-}
-
-/**
- * Does core local shutdown of packet io
- *
- * Return: Zero on success, non-zero on failure
- */
-int cvmx_helper_shutdown_packet_io_local(void)
-{
-	/*
-	 * Currently there is nothing to do per core. This may change
-	 * in the future.
-	 */
-	return 0;
-}
-
-/**
  * Auto configure an IPD/PKO port link state and speed. This
  * function basically does the equivalent of:
  * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
@@ -2290,50 +1774,6 @@
 	return result;
 }
 
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to sent out again.
- *
- * @param xipd_port IPD/PKO port to loopback.
- * @param enable_internal
- *                 Non zero if you want internal loopback
- * @param enable_external
- *                 Non zero if you want external loopback
- *
- * Return: Zero on success, negative on failure.
- */
-int cvmx_helper_configure_loopback(int xipd_port, int enable_internal, int enable_external)
-{
-	int result = -1;
-	int xiface = cvmx_helper_get_interface_num(xipd_port);
-	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-	int index = cvmx_helper_get_interface_index_num(xipd_port);
-
-	if (index >= cvmx_helper_ports_on_interface(xiface))
-		return -1;
-
-	cvmx_helper_interface_get_mode(xiface);
-	if (iface_node_ops[xi.node][xi.interface]->loopback)
-		result = iface_node_ops[xi.node][xi.interface]->loopback(xipd_port, enable_internal,
-									 enable_external);
-
-	return result;
-}
-
-void cvmx_helper_setup_simulator_io_buffer_counts(int node, int num_packet_buffers, int pko_buffers)
-{
-	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
-		cvmx_helper_pki_set_dflt_pool_buffer(node, num_packet_buffers);
-		cvmx_helper_pki_set_dflt_aura_buffer(node, num_packet_buffers);
-
-	} else {
-		cvmx_ipd_set_packet_pool_buffer_count(num_packet_buffers);
-		cvmx_ipd_set_wqe_pool_buffer_count(num_packet_buffers);
-		cvmx_pko_set_cmd_queue_pool_buffer_count(pko_buffers);
-	}
-}
-
 void *cvmx_helper_mem_alloc(int node, uint64_t alloc_size, uint64_t align)
 {
 	s64 paddr;
@@ -2346,266 +1786,3 @@
 	}
 	return cvmx_phys_to_ptr(paddr);
 }
-
-void cvmx_helper_mem_free(void *buffer, uint64_t size)
-{
-	__cvmx_bootmem_phy_free(cvmx_ptr_to_phys(buffer), size, 0);
-}
-
-int cvmx_helper_qos_config_init(cvmx_qos_proto_t qos_proto, cvmx_qos_config_t *qos_cfg)
-{
-	int i;
-
-	memset(qos_cfg, 0, sizeof(cvmx_qos_config_t));
-	qos_cfg->pkt_mode = CVMX_QOS_PKT_MODE_HWONLY; /* Process PAUSEs in hardware only.*/
-	qos_cfg->pool_mode = CVMX_QOS_POOL_PER_PORT;  /* One Pool per BGX:LMAC.*/
-	qos_cfg->pktbuf_size = 2048;		      /* Fit WQE + MTU in one buffer.*/
-	qos_cfg->aura_size = 1024;	/* 1K buffers typically enough for any application.*/
-	qos_cfg->pko_pfc_en = 1;	/* Enable PKO layout for PFC feature. */
-	qos_cfg->vlan_num = 1;		/* For Stacked VLAN, use 2nd VLAN in the QPG algorithm.*/
-	qos_cfg->qos_proto = qos_proto; /* Use PFC flow-control protocol.*/
-	qos_cfg->qpg_base = -1;		/* QPG Table index is undefined.*/
-	qos_cfg->p_time = 0x60;		/* PAUSE packets time window.*/
-	qos_cfg->p_interval = 0x10;	/* PAUSE packets interval.*/
-	for (i = 0; i < CVMX_QOS_NUM; i++) {
-		qos_cfg->groups[i] = i;	      /* SSO Groups = 0...7 */
-		qos_cfg->group_prio[i] = i;   /* SSO Group priority = QOS. */
-		qos_cfg->drop_thresh[i] = 99; /* 99% of the Aura size.*/
-		qos_cfg->red_thresh[i] = 90;  /* 90% of the Aura size.*/
-		qos_cfg->bp_thresh[i] = 70;   /* 70% of the Aura size.*/
-	}
-	return 0;
-}
-
-int cvmx_helper_qos_port_config_update(int xipdport, cvmx_qos_config_t *qos_cfg)
-{
-	cvmx_user_static_pko_queue_config_t pkocfg;
-	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
-	int xiface = cvmx_helper_get_interface_num(xipdport);
-	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
-
-	/* Configure PKO port for PFC SQ layout: */
-	cvmx_helper_pko_queue_config_get(xp.node, &pkocfg);
-	pkocfg.pknd.pko_cfg_iface[xi.interface].pfc_enable = 1;
-	cvmx_helper_pko_queue_config_set(xp.node, &pkocfg);
-	return 0;
-}
-
-int cvmx_helper_qos_port_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
-{
-	const int channles = CVMX_QOS_NUM;
-	int bufsize = qos_cfg->pktbuf_size;
-	int aura_size = qos_cfg->aura_size;
-	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
-	int node = xp.node;
-	int ipdport = xp.port;
-	int port = cvmx_helper_get_interface_index_num(xp.port);
-	int xiface = cvmx_helper_get_interface_num(xipdport);
-	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
-	cvmx_fpa3_pool_t gpool;
-	cvmx_fpa3_gaura_t gaura;
-	cvmx_bgxx_cmr_rx_ovr_bp_t ovrbp;
-	struct cvmx_pki_qpg_config qpgcfg;
-	struct cvmx_pki_style_config stcfg, stcfg_dflt;
-	struct cvmx_pki_pkind_config pkcfg;
-	int chan, bpid, group, qpg;
-	int bpen, reden, dropen, passthr, dropthr, bpthr;
-	int nbufs, pkind, style;
-	char name[32];
-
-	if (qos_cfg->pool_mode == CVMX_QOS_POOL_PER_PORT) {
-		/* Allocate and setup packet Pool: */
-		nbufs = aura_size * channles;
-		sprintf(name, "QOS.P%d", ipdport);
-		gpool = cvmx_fpa3_setup_fill_pool(node, -1 /*auto*/, name, bufsize, nbufs, NULL);
-		if (!__cvmx_fpa3_pool_valid(gpool)) {
-			printf("%s: Failed to setup FPA Pool\n", __func__);
-			return -1;
-		}
-		for (chan = 0; chan < channles; chan++)
-			qos_cfg->gpools[chan] = gpool;
-	} else {
-		printf("%s: Invalid pool_mode %d\n", __func__, qos_cfg->pool_mode);
-		return -1;
-	}
-	/* Allocate QPG entries: */
-	qos_cfg->qpg_base = cvmx_pki_qpg_entry_alloc(node, -1 /*auto*/, channles);
-	if (qos_cfg->qpg_base < 0) {
-		printf("%s: Failed to allocate QPG entry\n", __func__);
-		return -1;
-	}
-	for (chan = 0; chan < channles; chan++) {
-		/* Allocate and setup Aura, setup BP threshold: */
-		gpool = qos_cfg->gpools[chan];
-		sprintf(name, "QOS.A%d", ipdport + chan);
-		gaura = cvmx_fpa3_set_aura_for_pool(gpool, -1 /*auto*/, name, bufsize, aura_size);
-		if (!__cvmx_fpa3_aura_valid(gaura)) {
-			printf("%s: Failed to setup FPA Aura for Channel %d\n", __func__, chan);
-			return -1;
-		}
-		qos_cfg->gauras[chan] = gaura;
-		bpen = 1;
-		reden = 1;
-		dropen = 1;
-		dropthr = (qos_cfg->drop_thresh[chan] * 10 * aura_size) / 1000;
-		passthr = (qos_cfg->red_thresh[chan] * 10 * aura_size) / 1000;
-		bpthr = (qos_cfg->bp_thresh[chan] * 10 * aura_size) / 1000;
-		cvmx_fpa3_setup_aura_qos(gaura, reden, passthr, dropthr, bpen, bpthr);
-		cvmx_pki_enable_aura_qos(node, gaura.laura, reden, dropen, bpen);
-
-		/* Allocate BPID, link Aura and Channel using BPID: */
-		bpid = cvmx_pki_bpid_alloc(node, -1 /*auto*/);
-		if (bpid < 0) {
-			printf("%s: Failed to allocate BPID for channel %d\n",
-			       __func__, chan);
-			return -1;
-		}
-		qos_cfg->bpids[chan] = bpid;
-		cvmx_pki_write_aura_bpid(node, gaura.laura, bpid);
-		cvmx_pki_write_channel_bpid(node, ipdport + chan, bpid);
-
-		/* Setup QPG entries: */
-		group = qos_cfg->groups[chan];
-		qpg = qos_cfg->qpg_base + chan;
-		cvmx_pki_read_qpg_entry(node, qpg, &qpgcfg);
-		qpgcfg.port_add = chan;
-		qpgcfg.aura_num = gaura.laura;
-		qpgcfg.grp_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
-		qpgcfg.grp_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
-		qpgcfg.grptag_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
-		qpgcfg.grptag_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
-		cvmx_pki_write_qpg_entry(node, qpg, &qpgcfg);
-	}
-	/* Allocate and setup STYLE: */
-	cvmx_helper_pki_get_dflt_style(node, &stcfg_dflt);
-	style = cvmx_pki_style_alloc(node, -1 /*auto*/);
-	cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
-	stcfg.tag_cfg = stcfg_dflt.tag_cfg;
-	stcfg.parm_cfg.tag_type = CVMX_POW_TAG_TYPE_ORDERED;
-	stcfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_VLAN;
-	stcfg.parm_cfg.qpg_base = qos_cfg->qpg_base;
-	stcfg.parm_cfg.qpg_port_msb = 0;
-	stcfg.parm_cfg.qpg_port_sh = 0;
-	stcfg.parm_cfg.qpg_dis_grptag = 1;
-	stcfg.parm_cfg.fcs_strip = 1;
-	stcfg.parm_cfg.mbuff_size = bufsize - 64; /* Do not use 100% of the buffer. */
-	stcfg.parm_cfg.force_drop = 0;
-	stcfg.parm_cfg.nodrop = 0;
-	stcfg.parm_cfg.rawdrp = 0;
-	stcfg.parm_cfg.cache_mode = 2; /* 1st buffer in L2 */
-	stcfg.parm_cfg.wqe_vs = qos_cfg->vlan_num;
-	cvmx_pki_write_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
-
-	/* Setup PKIND: */
-	pkind = cvmx_helper_get_pknd(xiface, port);
-	cvmx_pki_read_pkind_config(node, pkind, &pkcfg);
-	pkcfg.cluster_grp = 0; /* OCTEON3 has only one cluster group = 0 */
-	pkcfg.initial_style = style;
-	pkcfg.initial_parse_mode = CVMX_PKI_PARSE_LA_TO_LG;
-	cvmx_pki_write_pkind_config(node, pkind, &pkcfg);
-
-	/* Setup parameters of the QOS packet and enable QOS flow-control: */
-	cvmx_bgx_set_pause_pkt_param(xipdport, 0, 0x0180c2000001, 0x8808, qos_cfg->p_time,
-				     qos_cfg->p_interval);
-	cvmx_bgx_set_flowctl_mode(xipdport, qos_cfg->qos_proto, qos_cfg->pkt_mode);
-
-	/* Enable PKI channel backpressure in the BGX: */
-	ovrbp.u64 = csr_rd_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface));
-	ovrbp.s.en &= ~(1 << port);
-	ovrbp.s.ign_fifo_bp &= ~(1 << port);
-	csr_wr_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), ovrbp.u64);
-	return 0;
-}
-
-int cvmx_helper_qos_sso_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
-{
-	const int channels = CVMX_QOS_NUM;
-	cvmx_sso_grpx_pri_t grppri;
-	int chan, qos, group;
-	cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
-	int node = xp.node;
-
-	for (chan = 0; chan < channels; chan++) {
-		qos = cvmx_helper_qos2prio(chan);
-		group = qos_cfg->groups[qos];
-		grppri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));
-		grppri.s.pri = qos_cfg->group_prio[chan];
-		csr_wr_node(node, CVMX_SSO_GRPX_PRI(group), grppri.u64);
-	}
-	return 0;
-}
-
-int cvmx_helper_get_chan_e_name(int chan, char *namebuf, int buflen)
-{
-	int n, dpichans;
-
-	if ((unsigned int)chan >= CVMX_PKO3_IPD_NUM_MAX) {
-		printf("%s: Channel %d is out of range (0..4095)\n", __func__, chan);
-		return -1;
-	}
-	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
-		dpichans = 64;
-	else
-		dpichans = 128;
-
-	if (chan >= 0 && chan < 64)
-		n = snprintf(namebuf, buflen, "LBK%d", chan);
-	else if (chan >= 0x100 && chan < (0x100 + dpichans))
-		n = snprintf(namebuf, buflen, "DPI%d", chan - 0x100);
-	else if (chan == 0x200)
-		n = snprintf(namebuf, buflen, "NQM");
-	else if (chan >= 0x240 && chan < (0x240 + (1 << 1) + 2))
-		n = snprintf(namebuf, buflen, "SRIO%d:%d", (chan - 0x240) >> 1,
-			     (chan - 0x240) & 0x1);
-	else if (chan >= 0x400 && chan < (0x400 + (1 << 8) + 256))
-		n = snprintf(namebuf, buflen, "ILK%d:%d", (chan - 0x400) >> 8,
-			     (chan - 0x400) & 0xFF);
-	else if (chan >= 0x800 && chan < (0x800 + (5 << 8) + (3 << 4) + 16))
-		n = snprintf(namebuf, buflen, "BGX%d:%d:%d", (chan - 0x800) >> 8,
-			     ((chan - 0x800) >> 4) & 0x3, (chan - 0x800) & 0xF);
-	else
-		n = snprintf(namebuf, buflen, "--");
-	return n;
-}
-
-#ifdef CVMX_DUMP_DIAGNOSTICS
-void cvmx_helper_dump_for_diagnostics(int node)
-{
-	if (!(OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))) {
-		printf("Diagnostics are not implemented for this model\n");
-		return;
-	}
-#ifdef CVMX_DUMP_GSER
-	{
-		int qlm, num_qlms;
-
-		num_qlms = cvmx_qlm_get_num();
-		for (qlm = 0; qlm < num_qlms; qlm++) {
-			cvmx_dump_gser_config_node(node, qlm);
-			cvmx_dump_gser_status_node(node, qlm);
-		}
-	}
-#endif
-#ifdef CVMX_DUMP_BGX
-	{
-		int bgx;
-
-		for (bgx = 0; bgx < CVMX_HELPER_MAX_GMX; bgx++) {
-			cvmx_dump_bgx_config_node(node, bgx);
-			cvmx_dump_bgx_status_node(node, bgx);
-		}
-	}
-#endif
-#ifdef CVMX_DUMP_PKI
-	cvmx_pki_config_dump(node);
-	cvmx_pki_stats_dump(node);
-#endif
-#ifdef CVMX_DUMP_PKO
-	cvmx_helper_pko3_config_dump(node);
-	cvmx_helper_pko3_stats_dump(node);
-#endif
-#ifdef CVMX_DUMO_SSO
-	cvmx_sso_config_dump(node);
-#endif
-}
-#endif
diff --git a/arch/mips/mach-octeon/cvmx-ilk.c b/arch/mips/mach-octeon/cvmx-ilk.c
new file mode 100644
index 0000000..6223ff2
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-ilk.c
@@ -0,0 +1,1199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Support library for the ILK
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+/*
+ * Global configurations.
+ *
+ * For CN68XX the default is { 0xf, 0xf0 }. To disable the 2nd ILK, set
+ * cvmx_ilk_lane_mask[node] = { 0xff, 0x0 } and
+ * cvmx_ilk_chans[node] = { 8, 0 }.
+ */
+unsigned short cvmx_ilk_lane_mask[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF] = {
+	[0 ... CVMX_MAX_NODES - 1] = { 0x000f, 0x00f0 }
+};
+
+int cvmx_ilk_chans[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF] = {
+	[0 ... CVMX_MAX_NODES - 1] = { 8, 8 }
+};
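
The comment above describes the intended way to reconfigure these tables. As a minimal sketch (node 0 assumed, values taken from that comment), a board file could disable the second ILK before interface bring-up:

	/* Give all 8 lanes to ILK0 and leave ILK1 unused (node 0) */
	static void board_ilk_single_intf(void)
	{
		cvmx_ilk_lane_mask[0][0] = 0xff;	/* ILK0: lanes 0-7 */
		cvmx_ilk_lane_mask[0][1] = 0x0;		/* ILK1: no lanes */
		cvmx_ilk_chans[0][0] = 8;
		cvmx_ilk_chans[0][1] = 0;
	}
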
+
+static cvmx_ilk_intf_t cvmx_ilk_intf_cfg[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF];
+
+cvmx_ilk_LA_mode_t cvmx_ilk_LA_mode[CVMX_NUM_ILK_INTF] = { { 0, 0 }, { 0, 0 } };
+
+/**
+ * User-overrideable callback function that returns whether or not an interface
+ * should use look-aside mode.
+ *
+ * @param interface - interface being checked
+ * @param channel - channel number, can be 0 or 1 or -1 to see if LA mode
+ *                  should be enabled for the interface.
+ * @return 0 to not use LA-mode, 1 to use LA-mode, -1 on an invalid interface.
+ */
+int cvmx_ilk_use_la_mode(int interface, int channel)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_0))
+		return 0;
+
+	if (interface >= CVMX_NUM_ILK_INTF) {
+		debug("ERROR: invalid interface=%d in %s\n",
+		      interface, __func__);
+		return -1;
+	}
+	return cvmx_ilk_LA_mode[interface].ilk_LA_mode;
+}
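
Since this callback simply reflects cvmx_ilk_LA_mode[], a board can "override" it by filling in that table. A sketch, with field names taken from the accesses in this file and the interface number assumed valid:

	/* Enable look-aside mode, including its RX calendar, on ILK1 */
	static void board_ilk1_enable_la(void)
	{
		cvmx_ilk_LA_mode[1].ilk_LA_mode = 1;
		cvmx_ilk_LA_mode[1].ilk_LA_mode_cal_ena = 1;
	}
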
+
+/**
+ * User-overrideable callback function that returns whether or not an interface
+ * in look-aside mode should enable the RX calendar.
+ *
+ * @param interface - interface to check
+ * @return 1 to enable the RX calendar, 0 to disable it, -1 on an invalid interface.
+ *
+ * NOTE: For the CN68XX pass 2.0 this will enable the RX calendar for interface
+ * 0 and not interface 1.  It is up to the customer to override this behavior.
+ */
+int cvmx_ilk_la_mode_enable_rx_calendar(int interface)
+{
+	/* Errata on CN68XX pass 2.0: when connected in a loopback
+	 * configuration or back to back, only one interface may have
+	 * the RX calendar enabled.
+	 */
+	if (interface >= CVMX_NUM_ILK_INTF) {
+		debug("ERROR: invalid interface=%d in %s\n",
+		      interface, __func__);
+		return -1;
+	}
+	return cvmx_ilk_LA_mode[interface].ilk_LA_mode_cal_ena;
+}
+
+/**
+ * Initialize and start the ILK interface.
+ *
+ * @param interface The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param lane_mask the lane group for this interface
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_start_interface(int interface, unsigned short lane_mask)
+{
+	int res = -1;
+	int other_intf, this_qlm, other_qlm;
+	unsigned short uni_mask;
+	cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+	cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+	cvmx_ilk_ser_cfg_t ilk_ser_cfg;
+	int node = (interface >> 4) & 0xf;
+
+	interface &= 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (lane_mask == 0)
+		return res;
+
+	/* check conflicts between 2 ilk interfaces. 1 lane can be assigned to 1
+	 * interface only
+	 */
+	other_intf = !interface;
+	if (cvmx_ilk_lane_mask[node][other_intf] & lane_mask) {
+		debug("ILK%d:%d: %s: lane assignment conflict\n", node,
+		      interface, __func__);
+		return res;
+	}
+
+	/* check the legality of the lane mask. interface 0 can have 8 lanes,
+	 * while interface 1 can have 4 lanes at most
+	 */
+	uni_mask = lane_mask >> (interface * 4);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		cvmx_mio_qlmx_cfg_t mio_qlmx_cfg, other_mio_qlmx_cfg;
+
+		if ((uni_mask != 0x1 && uni_mask != 0x3 && uni_mask != 0xf &&
+		     uni_mask != 0xff) ||
+		    (interface == 1 && lane_mask > 0xf0)) {
+			debug("ILK%d: %s: incorrect lane mask: 0x%x\n",
+			      interface, __func__, uni_mask);
+			return res;
+		}
+		/* check the availability of qlms. qlm_cfg = 001 means the chip
+		 * is fused to give this qlm to ilk
+		 */
+		this_qlm = interface + CVMX_ILK_QLM_BASE();
+		other_qlm = other_intf + CVMX_ILK_QLM_BASE();
+		mio_qlmx_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(this_qlm));
+		other_mio_qlmx_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(other_qlm));
+		if (mio_qlmx_cfg.s.qlm_cfg != 1 ||
+		    (uni_mask == 0xff && other_mio_qlmx_cfg.s.qlm_cfg != 1)) {
+			debug("ILK%d: %s: qlm unavailable\n", interface,
+			      __func__);
+			return res;
+		}
+		/* Has 8 lanes */
+		lane_mask &= 0xff;
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		int qlm;
+		unsigned short lane_mask_all = 0;
+
+		/* QLM 4 - QLM 7 can be configured for ILK. Get the lane mask
+		 * of all the qlms that are configured for ilk
+		 */
+		for (qlm = 4; qlm < 8; qlm++) {
+			cvmx_gserx_cfg_t gserx_cfg;
+			cvmx_gserx_phy_ctl_t phy_ctl;
+
+			/* Make sure QLM is powered and out of reset */
+			phy_ctl.u64 =
+				csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
+			if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
+				continue;
+
+			/* Make sure QLM is in ILK mode */
+			gserx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
+			if (gserx_cfg.s.ila)
+				lane_mask_all |= ((1 << 4) - 1)
+						 << (4 * (qlm - 4));
+		}
+
+		if ((lane_mask_all & lane_mask) != lane_mask) {
+			debug("ILK%d: %s: incorrect lane mask: 0x%x\n",
+			      interface, __func__, lane_mask);
+			return res;
+		}
+	}
+
+	/* power up the serdes */
+	ilk_ser_cfg.u64 = csr_rd_node(node, CVMX_ILK_SER_CFG);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		if (ilk_ser_cfg.cn68xx.ser_pwrup == 0) {
+			ilk_ser_cfg.cn68xx.ser_rxpol_auto = 1;
+			ilk_ser_cfg.cn68xx.ser_rxpol = 0;
+			ilk_ser_cfg.cn68xx.ser_txpol = 0;
+			ilk_ser_cfg.cn68xx.ser_reset_n = 0xff;
+			ilk_ser_cfg.cn68xx.ser_haul = 0;
+		}
+		ilk_ser_cfg.cn68xx.ser_pwrup |=
+		((interface == 0) && (lane_mask > 0xf)) ?
+			0x3 : (1 << interface);
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		ilk_ser_cfg.cn78xx.ser_rxpol_auto = 1;
+		ilk_ser_cfg.cn78xx.ser_rxpol = 0;
+		ilk_ser_cfg.cn78xx.ser_txpol = 0;
+		ilk_ser_cfg.cn78xx.ser_reset_n = 0xffff;
+	}
+	csr_wr_node(node, CVMX_ILK_SER_CFG, ilk_ser_cfg.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X)) {
+		/* Workaround for Errata (G-16467) */
+		int qlm = (interface) ? 2 : 1;
+		int start_qlm, end_qlm;
+
+		/* Apply the workaround to both QLMs if configured for x8 lanes */
+		if (cvmx_pop(lane_mask) > 4) {
+			start_qlm = 1;
+			end_qlm = 2;
+		} else {
+			start_qlm = qlm;
+			end_qlm = qlm;
+		}
+
+		for (qlm = start_qlm; qlm <= end_qlm; qlm++) {
+#ifdef CVMX_QLM_DUMP_STATE
+			debug("%s:%d: ILK%d: Applying workaround for Errata G-16467\n",
+			      __func__, __LINE__, qlm);
+			cvmx_qlm_display_registers(qlm);
+			debug("\n");
+#endif
+			/* This workaround only applies to QLMs running ILK at 6.25Ghz */
+			if ((cvmx_qlm_get_gbaud_mhz(qlm) == 6250) &&
+			    (cvmx_qlm_jtag_get(qlm, 0, "clkf_byp") != 20)) {
+				udelay(100); /* Wait 100us for links to stabilize */
+				cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 20);
+				/* Allow the QLM to exit reset */
+				cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 0);
+				udelay(100); /* Wait 100us for links to stabilize */
+				/* Allow TX on QLM */
+				cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set",
+						  0);
+			}
+#ifdef CVMX_QLM_DUMP_STATE
+			debug("%s:%d: ILK%d: Done applying workaround for Errata G-16467\n",
+			      __func__, __LINE__, qlm);
+			cvmx_qlm_display_registers(qlm);
+			debug("\n\n");
+#endif
+		}
+	}
+
+	/* Initialize all calendar entries to xoff state */
+	__cvmx_ilk_clear_cal((node << 4) | interface);
+
+	/* Enable ILK LA mode if configured. */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		if (cvmx_ilk_use_la_mode(interface, 0)) {
+			cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+			cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+
+			ilk_txx_cfg1.u64 = csr_rd(CVMX_ILK_TXX_CFG1(interface));
+			ilk_rxx_cfg1.u64 = csr_rd(CVMX_ILK_RXX_CFG1(interface));
+			ilk_txx_cfg1.s.la_mode = 1;
+			ilk_txx_cfg1.s.tx_link_fc_jam = 1;
+			ilk_txx_cfg1.s.rx_link_fc_ign = 1;
+			ilk_rxx_cfg1.s.la_mode = 1;
+			csr_wr(CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64);
+			csr_wr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+			cvmx_ilk_intf_cfg[node][interface].la_mode =
+				1; /* Enable look-aside mode */
+		} else {
+			cvmx_ilk_intf_cfg[node][interface].la_mode =
+				0; /* Disable look-aside mode */
+		}
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		cvmx_ilk_intf_cfg[node][interface].la_mode = 0;
+
+	/* configure the lane enable of the interface */
+	ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	ilk_rxx_cfg0.s.lane_ena = lane_mask;
+	ilk_txx_cfg0.s.lane_ena = lane_mask;
+	csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+	csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+	/* For 10.3125Gbs data rate, set SER_LIMIT to 0x3ff for x8 & x12 mode */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		cvmx_gserx_lane_mode_t lmode0, lmode1;
+
+		lmode0.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(5));
+		lmode1.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(7));
+		if ((lmode0.s.lmode == 5 || lmode1.s.lmode == 5) &&
+		    (lane_mask == 0xfff || lane_mask == 0xfff0 ||
+		     lane_mask == 0xff || lane_mask == 0xff00)) {
+			cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+
+			ilk_txx_cfg1.u64 =
+				csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+			ilk_txx_cfg1.s.ser_limit = 0x3ff;
+			csr_wr_node(node, CVMX_ILK_TXX_CFG1(interface),
+				    ilk_txx_cfg1.u64);
+		}
+	}
+
+	/* write to local cache. for lane speed, if interface 0 has 8 lanes,
+	 * assume both qlms have the same speed
+	 */
+	cvmx_ilk_intf_cfg[node][interface].intf_en = 1;
+	res = 0;
+
+	return res;
+}
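
As the node extraction at the top of this function shows, callers encode the node number in bits 7:4 of the interface argument. A usage sketch (node 0, ILK0, the default lane mask; the wrapper function itself is illustrative):

	static int board_start_ilk0(void)
	{
		int intf = (0 << 4) | 0;	/* node 0, interface 0 */

		return cvmx_ilk_start_interface(intf,
						cvmx_ilk_lane_mask[0][0]);
	}
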
+
+/**
+ * set pipe group base and length for the interface
+ *
+ * @param xiface    The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param pipe_base the base of the pipe group
+ * @param pipe_len  the length of the pipe group
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_set_pipe(int xiface, int pipe_base, unsigned int pipe_len)
+{
+	int res = -1;
+	cvmx_ilk_txx_pipe_t ilk_txx_pipe;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	/* set them in ilk tx section */
+	ilk_txx_pipe.u64 = csr_rd_node(xi.node, CVMX_ILK_TXX_PIPE(interface));
+	ilk_txx_pipe.s.base = pipe_base;
+	ilk_txx_pipe.s.nump = pipe_len;
+	csr_wr_node(xi.node, CVMX_ILK_TXX_PIPE(interface), ilk_txx_pipe.u64);
+	res = 0;
+
+	return res;
+}
+
+/**
+ * set logical channels for tx
+ *
+ * @param interface The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param pch     pointer to an array of pipe-channel pair
+ * @param num_chs the number of entries in the pipe-channel array
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_tx_set_channel(int interface, cvmx_ilk_pipe_chan_t *pch,
+			    unsigned int num_chs)
+{
+	int res = -1;
+	cvmx_ilk_txx_idx_pmap_t ilk_txx_idx_pmap;
+	cvmx_ilk_txx_mem_pmap_t ilk_txx_mem_pmap;
+	unsigned int i;
+
+	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (!pch || num_chs > CVMX_ILK_MAX_PIPES)
+		return res;
+
+	if (cvmx_ilk_use_la_mode(interface, 0)) {
+		ilk_txx_idx_pmap.u64 = 0;
+		ilk_txx_mem_pmap.u64 = 0;
+		for (i = 0; i < num_chs; i++) {
+			ilk_txx_idx_pmap.s.index = pch->pipe;
+			ilk_txx_mem_pmap.s.channel = pch->chan;
+			ilk_txx_mem_pmap.s.remap = 1;
+			csr_wr(CVMX_ILK_TXX_IDX_PMAP(interface),
+			       ilk_txx_idx_pmap.u64);
+			csr_wr(CVMX_ILK_TXX_MEM_PMAP(interface),
+			       ilk_txx_mem_pmap.u64);
+			pch++;
+		}
+	} else {
+		/* write the pair to ilk tx */
+		ilk_txx_mem_pmap.u64 = 0;
+		ilk_txx_idx_pmap.u64 = 0;
+		for (i = 0; i < num_chs; i++) {
+			ilk_txx_idx_pmap.s.index = pch->pipe;
+			ilk_txx_mem_pmap.s.channel = pch->chan;
+			csr_wr(CVMX_ILK_TXX_IDX_PMAP(interface),
+			       ilk_txx_idx_pmap.u64);
+			csr_wr(CVMX_ILK_TXX_MEM_PMAP(interface),
+			       ilk_txx_mem_pmap.u64);
+			pch++;
+		}
+	}
+	res = 0;
+
+	return res;
+}
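
A sketch of building the pipe-channel table for an identity mapping; the pipe/chan field names follow the pch->pipe and pch->chan accesses above, and the pipe base is assumed to match what was programmed via cvmx_ilk_set_pipe():

	static int board_ilk0_tx_channels(int pipe_base)
	{
		cvmx_ilk_pipe_chan_t pch[8];
		unsigned int i;

		for (i = 0; i < 8; i++) {
			pch[i].pipe = pipe_base + i;
			pch[i].chan = i;	/* pipe i -> channel i */
		}
		return cvmx_ilk_tx_set_channel(0, pch, 8);
	}
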
+
+/**
+ * set pkind for rx
+ *
+ * @param xiface    The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param chpknd    pointer to an array of channel-pkind pair
+ * @param num_pknd  the number of entries in the channel-pkind array
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_set_pknd(int xiface, cvmx_ilk_chan_pknd_t *chpknd,
+			 unsigned int num_pknd)
+{
+	int res = -1;
+	cvmx_ilk_rxf_idx_pmap_t ilk_rxf_idx_pmap;
+	unsigned int i;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (!chpknd || num_pknd > CVMX_ILK_MAX_PKNDS)
+		return res;
+
+	res = 0;
+
+	for (i = 0; i < num_pknd; i++) {
+		ilk_rxf_idx_pmap.u64 = 0;
+		/* write the pair to ilk rx. note the channels for different
+		 * interfaces are given in *chpknd and interface is not used
+		 * as a param
+		 */
+		if (chpknd->chan < 2 &&
+		    cvmx_ilk_use_la_mode(interface, chpknd->chan)) {
+			ilk_rxf_idx_pmap.s.index =
+				interface * 256 + 128 + chpknd->chan;
+			csr_wr(CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
+			csr_wr(CVMX_ILK_RXF_MEM_PMAP, chpknd->pknd);
+		}
+		ilk_rxf_idx_pmap.s.index = interface * 256 + chpknd->chan;
+		csr_wr(CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
+		csr_wr(CVMX_ILK_RXF_MEM_PMAP, chpknd->pknd);
+		chpknd++;
+	}
+
+	return res;
+}
+
+/**
+ * configure calendar for rx
+ *
+ * @param intf The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent      pointer to calendar entries
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_cal_conf(int intf, int cal_depth, cvmx_ilk_cal_entry_t *pent)
+{
+	int res = -1, i;
+	cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+	int num_entries;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (cal_depth < CVMX_ILK_RX_MIN_CAL || cal_depth > CVMX_ILK_MAX_CAL ||
+	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && !pent))
+		return res;
+
+	/* mandatory link-level fc as workarounds for ILK-15397 and
+	 * ILK-15479
+	 */
+	/* TODO: test effectiveness */
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		/* Update the calendar for each channel */
+		if ((cvmx_ilk_use_la_mode(interface, 0) == 0) ||
+		    (cvmx_ilk_use_la_mode(interface, 0) &&
+		     cvmx_ilk_la_mode_enable_rx_calendar(interface))) {
+			for (i = 0; i < cal_depth; i++) {
+				__cvmx_ilk_write_rx_cal_entry(
+					interface, i, pent[i].pipe_bpid);
+			}
+		}
+
+		/* Update the depth */
+		ilk_rxx_cfg0.u64 = csr_rd(CVMX_ILK_RXX_CFG0(interface));
+		num_entries = 1 + cal_depth + (cal_depth - 1) / 15;
+		ilk_rxx_cfg0.s.cal_depth = num_entries;
+		if (cvmx_ilk_use_la_mode(interface, 0)) {
+			ilk_rxx_cfg0.s.mproto_ign = 1;
+			ilk_rxx_cfg0.s.lnk_stats_ena = 1;
+			ilk_rxx_cfg0.s.lnk_stats_wrap = 1;
+		}
+		csr_wr(CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		ilk_rxx_cfg0.u64 =
+			csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+		/*
+		 * Make sure cal_ena is 0 for programming the calendar table,
+		 * as per Errata ILK-19398
+		 */
+		ilk_rxx_cfg0.s.cal_ena = 0;
+		csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface),
+			    ilk_rxx_cfg0.u64);
+
+		for (i = 0; i < cal_depth; i++)
+			__cvmx_ilk_write_rx_cal_entry(intf, i, 0);
+
+		ilk_rxx_cfg0.u64 =
+			csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+		num_entries = 1 + cal_depth + (cal_depth - 1) / 15;
+		ilk_rxx_cfg0.s.cal_depth = num_entries;
+		csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface),
+			    ilk_rxx_cfg0.u64);
+	}
+
+	return 0;
+}
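
For reference, the depth programmed above is num_entries = 1 + cal_depth + (cal_depth - 1) / 15, i.e. one extra slot plus one control-word slot per group of 15 calendar entries. With cal_depth = 30, for example, this gives 1 + 30 + 29 / 15 = 32 entries.
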
+
+/**
+ * set high water mark for rx
+ *
+ * @param intf      The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param hi_wm     high water mark for this interface
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_set_hwm(int intf, int hi_wm)
+{
+	int res = -1;
+	cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (hi_wm <= 0)
+		return res;
+
+	/* set the hwm */
+	ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+	ilk_rxx_cfg1.s.rx_fifo_hwm = hi_wm;
+	csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
+	res = 0;
+
+	return res;
+}
+
+/**
+ * enable calendar for rx
+ *
+ * @param intf      The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param cal_ena   enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_rx_cal_ena(int intf, unsigned char cal_ena)
+{
+	int res = -1;
+	cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (cvmx_ilk_use_la_mode(interface, 0) &&
+	    !cvmx_ilk_la_mode_enable_rx_calendar(interface))
+		return 0;
+
+	/* set the enable */
+	ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	ilk_rxx_cfg0.s.cal_ena = cal_ena;
+	csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+	csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	res = 0;
+
+	return res;
+}
+
+/**
+ * set up calendar for rx
+ *
+ * @param intf      The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent      pointer to calendar entries
+ * @param hi_wm     high water mark for this interface
+ * @param cal_ena   enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_cal_setup_rx(int intf, int cal_depth, cvmx_ilk_cal_entry_t *pent,
+			  int hi_wm, unsigned char cal_ena)
+{
+	int res = -1;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	res = cvmx_ilk_rx_cal_conf(intf, cal_depth, pent);
+	if (res < 0)
+		return res;
+
+	res = cvmx_ilk_rx_set_hwm(intf, hi_wm);
+	if (res < 0)
+		return res;
+
+	res = cvmx_ilk_rx_cal_ena(intf, cal_ena);
+	return res;
+}
+
+/**
+ * configure calendar for tx
+ *
+ * @param intf      The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent      pointer to calendar entries
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_tx_cal_conf(int intf, int cal_depth, cvmx_ilk_cal_entry_t *pent)
+{
+	int res = -1, i;
+	cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+	int num_entries;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	if (cal_depth < CVMX_ILK_TX_MIN_CAL || cal_depth > CVMX_ILK_MAX_CAL ||
+	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && !pent))
+		return res;
+
+	/* mandatory link-level fc as workarounds for ILK-15397 and
+	 * ILK-15479
+	 */
+	/* TODO: test effectiveness */
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		/* Update the calendar for each channel */
+		for (i = 0; i < cal_depth; i++) {
+			__cvmx_ilk_write_tx_cal_entry(interface, i,
+						      pent[i].pipe_bpid);
+		}
+
+		/* Set the depth (must be a multiple of 8) */
+		ilk_txx_cfg0.u64 = csr_rd(CVMX_ILK_TXX_CFG0(interface));
+		num_entries = 1 + cal_depth + (cal_depth - 1) / 15;
+		ilk_txx_cfg0.s.cal_depth = (num_entries + 7) & ~7;
+		csr_wr(CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+	}
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		ilk_txx_cfg0.u64 =
+			csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+		/*
+		 * Make sure cal_ena is 0 for programming the calendar table,
+		 * as per Errata ILK-19398
+		 */
+		ilk_txx_cfg0.s.cal_ena = 0;
+		csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface),
+			    ilk_txx_cfg0.u64);
+
+		for (i = 0; i < cal_depth; i++)
+			__cvmx_ilk_write_tx_cal_entry(intf, i, 0);
+
+		ilk_txx_cfg0.u64 =
+			csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+		num_entries = 1 + cal_depth + (cal_depth - 1) / 15;
+		/* cal_depth[2:0] needs to be zero, round up */
+		ilk_txx_cfg0.s.cal_depth = (num_entries + 7) & 0x1f8;
+		csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface),
+			    ilk_txx_cfg0.u64);
+	}
+
+	return 0;
+}
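
The same num_entries formula applies here, but the TX depth is then rounded up to a multiple of 8 (cal_depth[2:0] must be zero). With cal_depth = 16, num_entries = 1 + 16 + 1 = 18, and the programmed depth becomes (18 + 7) & ~7 = 24.
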
+
+/**
+ * enable calendar for tx
+ *
+ * @param intf	    The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param cal_ena   enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_tx_cal_ena(int intf, unsigned char cal_ena)
+{
+	int res = -1;
+	cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	/* set the enable */
+	ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	ilk_txx_cfg0.s.cal_ena = cal_ena;
+	csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+	csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	res = 0;
+
+	return res;
+}
+
+/**
+ * set up calendar for tx
+ *
+ * @param intf      The identifier of the packet interface to configure and
+ *                  use as an ILK interface. cn68xx has 2 interfaces: ilk0 and
+ *                  ilk1.
+ *
+ * @param cal_depth the number of calendar entries
+ * @param pent      pointer to calendar entries
+ * @param cal_ena   enable or disable calendar
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_cal_setup_tx(int intf, int cal_depth, cvmx_ilk_cal_entry_t *pent,
+			  unsigned char cal_ena)
+{
+	int res = -1;
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	res = cvmx_ilk_tx_cal_conf(intf, cal_depth, pent);
+	if (res < 0)
+		return res;
+
+	res = cvmx_ilk_tx_cal_ena(intf, cal_ena);
+	return res;
+}
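
A combined usage sketch for both directions on ILK0/node 0. The pipe_bpid field name follows the pent[i].pipe_bpid accesses above; the depth, mapping and watermark are illustrative placeholders, not values taken from this patch:

	static int board_ilk0_calendars(void)
	{
		cvmx_ilk_cal_entry_t ent[8] = { 0 };
		int i, intf = (0 << 4) | 0;	/* node 0, ILK0 */

		for (i = 0; i < 8; i++)
			ent[i].pipe_bpid = i;	/* 1:1 channel mapping */

		if (cvmx_ilk_cal_setup_rx(intf, 8, ent, 64 /* hwm */, 1) < 0)
			return -1;
		return cvmx_ilk_cal_setup_tx(intf, 8, ent, 1);
	}
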
+
+/* #define CVMX_ILK_STATS_ENA 1 */
+#ifdef CVMX_ILK_STATS_ENA
+static void cvmx_ilk_reg_dump_rx(int intf)
+{
+	int i;
+	cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+	cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+	cvmx_ilk_rxx_int_t ilk_rxx_int;
+	cvmx_ilk_rxx_jabber_t ilk_rxx_jabber;
+	cvmx_ilk_rx_lnex_cfg_t ilk_rx_lnex_cfg;
+	cvmx_ilk_rx_lnex_int_t ilk_rx_lnex_int;
+	cvmx_ilk_gbl_cfg_t ilk_gbl_cfg;
+	cvmx_ilk_ser_cfg_t ilk_ser_cfg;
+	cvmx_ilk_rxf_idx_pmap_t ilk_rxf_idx_pmap;
+	cvmx_ilk_rxf_mem_pmap_t ilk_rxf_mem_pmap;
+	cvmx_ilk_rxx_idx_cal_t ilk_rxx_idx_cal;
+	cvmx_ilk_rxx_mem_cal0_t ilk_rxx_mem_cal0;
+	cvmx_ilk_rxx_mem_cal1_t ilk_rxx_mem_cal1;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	debug("ilk rxx cfg0: 0x%16lx\n", ilk_rxx_cfg0.u64);
+
+	ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+	debug("ilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
+
+	ilk_rxx_int.u64 = csr_rd_node(node, CVMX_ILK_RXX_INT(interface));
+	debug("ilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
+	csr_wr_node(node, CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+	ilk_rxx_jabber.u64 = csr_rd_node(node, CVMX_ILK_RXX_JABBER(interface));
+	debug("ilk rxx jabber: 0x%16lx\n", ilk_rxx_jabber.u64);
+
+#define LNE_NUM_DBG 4
+	for (i = 0; i < LNE_NUM_DBG; i++) {
+		ilk_rx_lnex_cfg.u64 =
+			csr_rd_node(node, CVMX_ILK_RX_LNEX_CFG(i));
+		debug("ilk rx lnex cfg lane: %d  0x%16lx\n", i,
+		      ilk_rx_lnex_cfg.u64);
+	}
+
+	for (i = 0; i < LNE_NUM_DBG; i++) {
+		ilk_rx_lnex_int.u64 =
+			csr_rd_node(node, CVMX_ILK_RX_LNEX_INT(i));
+		debug("ilk rx lnex int lane: %d  0x%16lx\n", i,
+		      ilk_rx_lnex_int.u64);
+		csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i), ilk_rx_lnex_int.u64);
+	}
+
+	ilk_gbl_cfg.u64 = csr_rd_node(node, CVMX_ILK_GBL_CFG);
+	debug("ilk gbl cfg: 0x%16lx\n", ilk_gbl_cfg.u64);
+
+	ilk_ser_cfg.u64 = csr_rd_node(node, CVMX_ILK_SER_CFG);
+	debug("ilk ser cfg: 0x%16lx\n", ilk_ser_cfg.u64);
+
+#define CHAN_NUM_DBG 8
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		ilk_rxf_idx_pmap.u64 = 0;
+		ilk_rxf_idx_pmap.s.index = interface * 256;
+		ilk_rxf_idx_pmap.s.inc = 1;
+		csr_wr(CVMX_ILK_RXF_IDX_PMAP, ilk_rxf_idx_pmap.u64);
+		for (i = 0; i < CHAN_NUM_DBG; i++) {
+			ilk_rxf_mem_pmap.u64 = csr_rd(CVMX_ILK_RXF_MEM_PMAP);
+			debug("ilk rxf mem pmap chan: %3d  0x%16lx\n", i,
+			      ilk_rxf_mem_pmap.u64);
+		}
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		cvmx_ilk_rxx_chax_t rxx_chax;
+
+		for (i = 0; i < CHAN_NUM_DBG; i++) {
+			rxx_chax.u64 = csr_rd_node(
+				node, CVMX_ILK_RXX_CHAX(i, interface));
+			debug("ilk chan: %d  pki chan: 0x%x\n", i,
+			      rxx_chax.s.port_kind);
+		}
+	}
+
+#define CAL_NUM_DBG 2
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		ilk_rxx_idx_cal.u64 = 0;
+		ilk_rxx_idx_cal.s.inc = 1;
+		csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), ilk_rxx_idx_cal.u64);
+		for (i = 0; i < CAL_NUM_DBG; i++) {
+			ilk_rxx_idx_cal.u64 =
+				csr_rd(CVMX_ILK_RXX_IDX_CAL(interface));
+			debug("ilk rxx idx cal: 0x%16lx\n",
+			      ilk_rxx_idx_cal.u64);
+
+			ilk_rxx_mem_cal0.u64 =
+				csr_rd(CVMX_ILK_RXX_MEM_CAL0(interface));
+			debug("ilk rxx mem cal0: 0x%16lx\n",
+			      ilk_rxx_mem_cal0.u64);
+			ilk_rxx_mem_cal1.u64 =
+				csr_rd(CVMX_ILK_RXX_MEM_CAL1(interface));
+			debug("ilk rxx mem cal1: 0x%16lx\n",
+			      ilk_rxx_mem_cal1.u64);
+		}
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		cvmx_ilk_rxx_cal_entryx_t rxx_cal_entryx;
+
+		for (i = 0; i < CAL_NUM_DBG; i++) {
+			rxx_cal_entryx.u64 = csr_rd_node(
+				node, CVMX_ILK_RXX_CAL_ENTRYX(i, interface));
+			debug("ilk rxx cal idx: %d\n", i);
+			debug("ilk rxx cal ctl: 0x%x\n", rxx_cal_entryx.s.ctl);
+			debug("ilk rxx cal pko chan: 0x%x\n",
+			      rxx_cal_entryx.s.channel);
+		}
+	}
+}
+
+static void cvmx_ilk_reg_dump_tx(int intf)
+{
+	int i;
+	cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+	cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+	cvmx_ilk_txx_idx_pmap_t ilk_txx_idx_pmap;
+	cvmx_ilk_txx_mem_pmap_t ilk_txx_mem_pmap;
+	cvmx_ilk_txx_int_t ilk_txx_int;
+	cvmx_ilk_txx_pipe_t ilk_txx_pipe;
+	cvmx_ilk_txx_idx_cal_t ilk_txx_idx_cal;
+	cvmx_ilk_txx_mem_cal0_t ilk_txx_mem_cal0;
+	cvmx_ilk_txx_mem_cal1_t ilk_txx_mem_cal1;
+	int node = (intf >> 4) & 0xf;
+	int interface = intf & 0xf;
+
+	ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	debug("ilk txx cfg0: 0x%16lx\n", ilk_txx_cfg0.u64);
+
+	ilk_txx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+	debug("ilk txx cfg1: 0x%16lx\n", ilk_txx_cfg1.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		ilk_txx_pipe.u64 = csr_rd(CVMX_ILK_TXX_PIPE(interface));
+		debug("ilk txx pipe: 0x%16lx\n", ilk_txx_pipe.u64);
+
+		ilk_txx_idx_pmap.u64 = 0;
+		ilk_txx_idx_pmap.s.index = ilk_txx_pipe.s.base;
+		ilk_txx_idx_pmap.s.inc = 1;
+		csr_wr(CVMX_ILK_TXX_IDX_PMAP(interface), ilk_txx_idx_pmap.u64);
+		for (i = 0; i < CHAN_NUM_DBG; i++) {
+			ilk_txx_mem_pmap.u64 =
+				csr_rd(CVMX_ILK_TXX_MEM_PMAP(interface));
+			debug("ilk txx mem pmap pipe: %3d  0x%16lx\n",
+			      ilk_txx_pipe.s.base + i, ilk_txx_mem_pmap.u64);
+		}
+	}
+
+	ilk_txx_int.u64 = csr_rd_node(node, CVMX_ILK_TXX_INT(interface));
+	debug("ilk txx int: 0x%16lx\n", ilk_txx_int.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		ilk_txx_idx_cal.u64 = 0;
+		ilk_txx_idx_cal.s.inc = 1;
+		csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), ilk_txx_idx_cal.u64);
+		for (i = 0; i < CAL_NUM_DBG; i++) {
+			ilk_txx_idx_cal.u64 =
+				csr_rd(CVMX_ILK_TXX_IDX_CAL(interface));
+			debug("ilk txx idx cal: 0x%16lx\n",
+			      ilk_txx_idx_cal.u64);
+
+			ilk_txx_mem_cal0.u64 =
+				csr_rd(CVMX_ILK_TXX_MEM_CAL0(interface));
+			debug("ilk txx mem cal0: 0x%16lx\n",
+			      ilk_txx_mem_cal0.u64);
+			ilk_txx_mem_cal1.u64 =
+				csr_rd(CVMX_ILK_TXX_MEM_CAL1(interface));
+			debug("ilk txx mem cal1: 0x%16lx\n",
+			      ilk_txx_mem_cal1.u64);
+		}
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		cvmx_ilk_txx_cal_entryx_t txx_cal_entryx;
+
+		for (i = 0; i < CAL_NUM_DBG; i++) {
+			txx_cal_entryx.u64 = csr_rd_node(
+				node, CVMX_ILK_TXX_CAL_ENTRYX(i, interface));
+			debug("ilk txx cal idx: %d\n", i);
+			debug("ilk txx cal ctl: 0x%x\n", txx_cal_entryx.s.ctl);
+			debug("ilk txx cal pki chan: 0x%x\n",
+			      txx_cal_entryx.s.channel);
+		}
+	}
+}
+#endif
+
+/**
+ * show run time status
+ *
+ * @param interface The identifier of the packet interface to report on. cn68xx
+ *                  has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return nothing
+ */
+#ifdef CVMX_ILK_RUNTIME_DBG
+void cvmx_ilk_runtime_status(int interface)
+{
+	cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+	cvmx_ilk_txx_flow_ctl0_t ilk_txx_flow_ctl0;
+	cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+	cvmx_ilk_rxx_int_t ilk_rxx_int;
+	cvmx_ilk_rxx_flow_ctl0_t ilk_rxx_flow_ctl0;
+	cvmx_ilk_rxx_flow_ctl1_t ilk_rxx_flow_ctl1;
+	cvmx_ilk_gbl_int_t ilk_gbl_int;
+	int node = (interface >> 4) & 0xf;
+
+	interface &= 0xf;
+
+	debug("\nilk run-time status: interface: %d\n", interface);
+
+	ilk_txx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+	debug("\nilk txx cfg1: 0x%16lx\n", ilk_txx_cfg1.u64);
+	if (ilk_txx_cfg1.s.rx_link_fc)
+		debug("link flow control received\n");
+	if (ilk_txx_cfg1.s.tx_link_fc)
+		debug("link flow control sent\n");
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		ilk_txx_flow_ctl0.u64 =
+			csr_rd(CVMX_ILK_TXX_FLOW_CTL0(interface));
+		debug("\nilk txx flow ctl0: 0x%16lx\n", ilk_txx_flow_ctl0.u64);
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		int i;
+		cvmx_ilk_txx_cha_xonx_t txx_cha_xonx;
+
+		for (i = 0; i < 4; i++) {
+			txx_cha_xonx.u64 = csr_rd_node(
+				node, CVMX_ILK_TXX_CHA_XONX(i, interface));
+			debug("\nilk txx cha xon: 0x%16lx\n", txx_cha_xonx.u64);
+		}
+	}
+
+	ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+	debug("\nilk rxx cfg1: 0x%16lx\n", ilk_rxx_cfg1.u64);
+	debug("rx fifo count: %d\n", ilk_rxx_cfg1.s.rx_fifo_cnt);
+
+	ilk_rxx_int.u64 = csr_rd_node(node, CVMX_ILK_RXX_INT(interface));
+	debug("\nilk rxx int: 0x%16lx\n", ilk_rxx_int.u64);
+	if (ilk_rxx_int.s.pkt_drop_rxf)
+		debug("rx fifo packet drop\n");
+	if (ilk_rxx_int.u64)
+		csr_wr_node(node, CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		ilk_rxx_flow_ctl0.u64 =
+			csr_rd(CVMX_ILK_RXX_FLOW_CTL0(interface));
+		debug("\nilk rxx flow ctl0: 0x%16lx\n", ilk_rxx_flow_ctl0.u64);
+
+		ilk_rxx_flow_ctl1.u64 =
+			csr_rd(CVMX_ILK_RXX_FLOW_CTL1(interface));
+		debug("\nilk rxx flow ctl1: 0x%16lx\n", ilk_rxx_flow_ctl1.u64);
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		int i;
+		cvmx_ilk_rxx_cha_xonx_t rxx_cha_xonx;
+
+		for (i = 0; i < 4; i++) {
+			rxx_cha_xonx.u64 = csr_rd_node(
+				node, CVMX_ILK_RXX_CHA_XONX(i, interface));
+			debug("\nilk rxx cha xon: 0x%16lx\n", rxx_cha_xonx.u64);
+		}
+	}
+
+	ilk_gbl_int.u64 = csr_rd_node(node, CVMX_ILK_GBL_INT);
+	debug("\nilk gbl int: 0x%16lx\n", ilk_gbl_int.u64);
+	if (ilk_gbl_int.s.rxf_push_full)
+		debug("rx fifo overflow\n");
+	if (ilk_gbl_int.u64)
+		csr_wr_node(node, CVMX_ILK_GBL_INT, ilk_gbl_int.u64);
+}
+#endif
+
+/**
+ * enable interface
+ *
+ * @param xiface    The identifier of the packet interface to enable. cn68xx
+ *                  has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int cvmx_ilk_enable(int xiface)
+{
+	int res = -1;
+	int retry_count = 0;
+	cvmx_helper_link_info_t result;
+	cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
+	cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
+#ifdef CVMX_ILK_STATS_ENA
+	cvmx_ilk_rxx_cfg0_t ilk_rxx_cfg0;
+	cvmx_ilk_txx_cfg0_t ilk_txx_cfg0;
+#endif
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int node = xi.node;
+	int interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
+		return res;
+
+	if (interface >= CVMX_NUM_ILK_INTF)
+		return res;
+
+	result.u64 = 0;
+
+#ifdef CVMX_ILK_STATS_ENA
+	debug("\n");
+	debug("<<<< ILK%d: Before enabling ilk\n", interface);
+	cvmx_ilk_reg_dump_rx((node << 4) | interface);
+	cvmx_ilk_reg_dump_tx((node << 4) | interface);
+#endif
+
+	/* RX packet will be enabled only if link is up */
+
+	/* TX side */
+	ilk_txx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+	ilk_txx_cfg1.s.pkt_ena = 1;
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		if (cvmx_ilk_use_la_mode(interface, 0)) {
+			ilk_txx_cfg1.s.la_mode = 1;
+			ilk_txx_cfg1.s.tx_link_fc_jam = 1;
+		}
+	}
+	csr_wr_node(node, CVMX_ILK_TXX_CFG1(interface), ilk_txx_cfg1.u64);
+	csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
+
+#ifdef CVMX_ILK_STATS_ENA
+	/* RX side stats */
+	ilk_rxx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG0(interface));
+	ilk_rxx_cfg0.s.lnk_stats_ena = 1;
+	csr_wr_node(node, CVMX_ILK_RXX_CFG0(interface), ilk_rxx_cfg0.u64);
+
+	/* TX side stats */
+	ilk_txx_cfg0.u64 = csr_rd_node(node, CVMX_ILK_TXX_CFG0(interface));
+	ilk_txx_cfg0.s.lnk_stats_ena = 1;
+	csr_wr_node(node, CVMX_ILK_TXX_CFG0(interface), ilk_txx_cfg0.u64);
+#endif
+
+retry:
+	retry_count++;
+	if (retry_count > 10)
+		goto out;
+
+	/* Make sure the link is up, so that packets can be sent. */
+	result = __cvmx_helper_ilk_link_get(
+		cvmx_helper_get_ipd_port((interface + CVMX_ILK_GBL_BASE()), 0));
+
+	/* Small delay before another retry. */
+	udelay(100);
+
+	ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
+	if (ilk_rxx_cfg1.s.pkt_ena == 0)
+		goto retry;
+
+out:
+
+#ifdef CVMX_ILK_STATS_ENA
+	debug(">>>> ILK%d: After ILK is enabled\n", interface);
+	cvmx_ilk_reg_dump_rx((node << 4) | interface);
+	cvmx_ilk_reg_dump_tx((node << 4) | interface);
+#endif
+
+	if (result.s.link_up)
+		return 0;
+
+	return -1;
+}
+
+/**
+ * Provide interface enable status
+ *
+ * @param xiface The identifier of the packet xiface to query. cn68xx
+ *                  has 2 interfaces: ilk0 and ilk1.
+ *
+ * @return Zero, not enabled; One, enabled.
+ */
+int cvmx_ilk_get_intf_ena(int xiface)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	int interface = xi.interface - CVMX_ILK_GBL_BASE();
+
+	return cvmx_ilk_intf_cfg[xi.node][interface].intf_en;
+}
diff --git a/arch/mips/mach-octeon/cvmx-ipd.c b/arch/mips/mach-octeon/cvmx-ipd.c
new file mode 100644
index 0000000..c462b0f
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-ipd.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * IPD Support.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+cvmx_ipd_config_t cvmx_ipd_cfg = {
+	.first_mbuf_skip = 184,
+	.ipd_enable = 1,
+	.cache_mode = CVMX_IPD_OPC_MODE_STT,
+	.packet_pool = { 0, 2048, 0 },
+	.wqe_pool = { 1, 128, 0 },
+	.port_config = { CVMX_PIP_PORT_CFG_MODE_SKIPL2,
+			 CVMX_POW_TAG_TYPE_ORDERED, CVMX_PIP_TAG_MODE_TUPLE,
+			 .tag_fields = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } }
+};
+
+#define IPD_RED_AVG_DLY 1000
+#define IPD_RED_PRB_DLY 1000
+
+void cvmx_ipd_config(u64 mbuff_size, u64 first_mbuff_skip,
+		     u64 not_first_mbuff_skip, u64 first_back, u64 second_back,
+		     u64 wqe_fpa_pool, cvmx_ipd_mode_t cache_mode,
+		     u64 back_pres_enable_flag)
+{
+	cvmx_ipd_1st_mbuff_skip_t first_skip;
+	cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
+	cvmx_ipd_packet_mbuff_size_t size;
+	cvmx_ipd_1st_next_ptr_back_t first_back_struct;
+	cvmx_ipd_second_next_ptr_back_t second_back_struct;
+	cvmx_ipd_wqe_fpa_queue_t wqe_pool;
+	cvmx_ipd_ctl_status_t ipd_ctl_reg;
+
+	/* Enforce 1st skip minimum if WQE shares the buffer with packet */
+	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
+		union cvmx_ipd_ctl_status ctl_status;
+
+		ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+		if (ctl_status.s.no_wptr != 0 && first_mbuff_skip < 16)
+			first_mbuff_skip = 16;
+	}
+
+	first_skip.u64 = 0;
+	first_skip.s.skip_sz = first_mbuff_skip;
+	csr_wr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
+
+	not_first_skip.u64 = 0;
+	not_first_skip.s.skip_sz = not_first_mbuff_skip;
+	csr_wr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
+
+	size.u64 = 0;
+	size.s.mb_size = mbuff_size;
+	csr_wr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
+
+	first_back_struct.u64 = 0;
+	first_back_struct.s.back = first_back;
+	csr_wr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
+
+	second_back_struct.u64 = 0;
+	second_back_struct.s.back = second_back;
+	csr_wr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
+
+	wqe_pool.u64 = 0;
+	wqe_pool.s.wqe_pool = wqe_fpa_pool;
+	csr_wr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
+
+	ipd_ctl_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+	ipd_ctl_reg.s.opc_mode = cache_mode;
+	ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
+	csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
+
+	/* Note: RED (Random Early Discard) is configured separately */
+}
+
+/**
+ * Enable IPD
+ */
+void cvmx_ipd_enable(void)
+{
+	cvmx_ipd_ctl_status_t ipd_reg;
+
+	ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+
+	/*
+	 * Busy-wait until the IPD reset completes (rst_done clears)
+	 * on CN68XX (o68).
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		while (ipd_reg.s.rst_done != 0)
+			ipd_reg.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
+
+	if (ipd_reg.s.ipd_en)
+		debug("Warning: Enabling IPD when IPD already enabled.\n");
+
+	ipd_reg.s.ipd_en = 1;
+
+	if (cvmx_ipd_cfg.enable_len_M8_fix)
+		ipd_reg.s.len_m8 = 1;
+
+	csr_wr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
diff --git a/arch/mips/mach-octeon/cvmx-pki-resources.c b/arch/mips/mach-octeon/cvmx-pki-resources.c
new file mode 100644
index 0000000..ab84172
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pki-resources.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKI Support.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+static s32 cvmx_pki_style_refcnt[CVMX_MAX_NODES][CVMX_PKI_NUM_INTERNAL_STYLE];
+
+/**
+ * This function allocates/reserves a style from the pool of global styles
+ * per node.
+ * @param node	node to allocate style from.
+ * @param style	style to allocate; if -1, the first available style from
+ *		the style resource is allocated. If a non-negative number
+ *		in range, it will try to allocate the specified style.
+ * @return	style number on success,
+ *		-1 on alloc failure,
+ *		-2 if the resource is already reserved.
+ */
+int cvmx_pki_style_alloc(int node, int style)
+{
+	int rs;
+
+	if (cvmx_create_global_resource_range(CVMX_GR_TAG_STYLE(node),
+					      CVMX_PKI_NUM_INTERNAL_STYLE)) {
+		printf("ERROR: Failed to create styles global resource\n");
+		return -1;
+	}
+	if (style >= 0) {
+		/* Reserving specific style, use refcnt for sharing */
+		rs = cvmx_atomic_fetch_and_add32(
+			&cvmx_pki_style_refcnt[node][style], 1);
+		if (rs > 0)
+			return CVMX_RESOURCE_ALREADY_RESERVED;
+
+		rs = cvmx_reserve_global_resource_range(CVMX_GR_TAG_STYLE(node),
+							style, style, 1);
+		if (rs == -1) {
+			/* This means the style is taken by another app */
+			printf("ERROR: style %d is reserved by another app\n",
+			       style);
+			cvmx_atomic_fetch_and_add32(
+				&cvmx_pki_style_refcnt[node][style], -1);
+			return CVMX_RESOURCE_ALLOC_FAILED;
+		}
+	} else {
+		/* Allocate first available style */
+		rs = cvmx_allocate_global_resource_range(
+			CVMX_GR_TAG_STYLE(node), style, 1, 1);
+		if (rs < 0) {
+			printf("ERROR: Failed to allocate style, none available\n");
+			return CVMX_RESOURCE_ALLOC_FAILED;
+		}
+		style = rs;
+		/* Increment refcnt for newly created style */
+		cvmx_atomic_fetch_and_add32(&cvmx_pki_style_refcnt[node][style],
+					    1);
+	}
+	return style;
+}
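+
+/*
+ * Illustrative usage sketch (not part of this patch): the expected
+ * pairing of the alloc/free calls above, assuming node 0 on a
+ * single-node system.
+ *
+ *	int style = cvmx_pki_style_alloc(0, -1);
+ *
+ *	if (style < 0)
+ *		return style;
+ *	... program the style via cvmx_pki_write_style_config() ...
+ *	cvmx_pki_style_free(0, style);
+ */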
+
+/**
+ * This function frees a style from the pool of global styles per node.
+ * @param node	 node to free style from.
+ * @param style	 style to free
+ * @return	 0 on success, -1 on failure, or, if the style is shared,
+ *		 a positive count of the remaining users of this style.
+ */
+int cvmx_pki_style_free(int node, int style)
+{
+	int rs;
+
+	rs = cvmx_atomic_fetch_and_add32(&cvmx_pki_style_refcnt[node][style],
+					 -1);
+	if (rs > 1)
+		return rs - 1;
+
+	if (cvmx_free_global_resource_range_with_base(CVMX_GR_TAG_STYLE(node),
+						      style, 1) == -1) {
+		printf("ERROR Failed to release style %d\n", (int)style);
+		return -1;
+	}
+	return 0;
+}
+
+/**
+ * This function allocates/reserves a cluster group from the per-node
+ * cluster group resources.
+ * @param node		node to allocate cluster group from.
+ * @param cl_grp	cluster group to allocate/reserve; if -1,
+ *			allocate any available cluster group.
+ * @return		cluster group number on success,
+ *			-1 on alloc failure,
+ *			-2 if the resource is already reserved.
+ */
+int cvmx_pki_cluster_grp_alloc(int node, int cl_grp)
+{
+	int rs;
+
+	if (node >= CVMX_MAX_NODES) {
+		printf("ERROR: Invalid node number %d\n", node);
+		return -1;
+	}
+	if (cvmx_create_global_resource_range(CVMX_GR_TAG_CLUSTER_GRP(node),
+					      CVMX_PKI_NUM_CLUSTER_GROUP)) {
+		printf("ERROR: Failed to create Cluster group global resource\n");
+		return -1;
+	}
+	if (cl_grp >= 0) {
+		rs = cvmx_reserve_global_resource_range(
+			CVMX_GR_TAG_CLUSTER_GRP(node), 0, cl_grp, 1);
+		if (rs == -1) {
+			debug("INFO: cl_grp %d is already reserved\n",
+			      (int)cl_grp);
+			return CVMX_RESOURCE_ALREADY_RESERVED;
+		}
+	} else {
+		rs = cvmx_allocate_global_resource_range(
+			CVMX_GR_TAG_CLUSTER_GRP(node), 0, 1, 1);
+		if (rs == -1) {
+			debug("Warning: Failed to alloc cluster grp\n");
+			return CVMX_RESOURCE_ALLOC_FAILED;
+		}
+	}
+	cl_grp = rs;
+	return cl_grp;
+}
+
+/**
+ * This function allocates/reserves a pcam entry from a node.
+ * @param node		node to allocate pcam entry from.
+ * @param index		index of pcam entry (0-191); if -1,
+ *			allocate any available pcam entry.
+ * @param bank		pcam bank to allocate/reserve the pcam entry from.
+ * @param cluster_mask  mask of clusters from which pcam entry is needed.
+ * @return		pcam entry, or -1 on failure.
+ */
+int cvmx_pki_pcam_entry_alloc(int node, int index, int bank, u64 cluster_mask)
+{
+	int rs = 0;
+	unsigned int cluster;
+
+	for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
+		if ((cluster_mask & (1 << cluster)) == 0)
+			continue;
+		rs = cvmx_create_global_resource_range(
+			CVMX_GR_TAG_PCAM(node, cluster, bank),
+			CVMX_PKI_TOTAL_PCAM_ENTRY);
+		if (rs != 0) {
+			printf("ERROR: Failed to create pki pcam global resource\n");
+			return -1;
+		}
+		if (index >= 0)
+			rs = cvmx_reserve_global_resource_range(
+				CVMX_GR_TAG_PCAM(node, cluster, bank), cluster,
+				index, 1);
+		else
+			rs = cvmx_allocate_global_resource_range(
+				CVMX_GR_TAG_PCAM(node, cluster, bank), cluster,
+				1, 1);
+		if (rs == -1) {
+			printf("ERROR: PCAM :index %d not available in cluster %d bank %d",
+			       (int)index, (int)cluster, bank);
+			return -1;
+		}
+	} /* for cluster */
+	index = rs;
+	/*
+	 * Implement cluster handle for pass2; for now assume
+	 * all clusters will have the same base index.
+	 */
+	return index;
+}
+
+/**
+ * This function allocates/reserves QPG table entries per node.
+ * @param node		node number.
+ * @param base_offset	base offset in the qpg table. If -1, the first
+ *			available qpg base offset will be allocated. If a
+ *			non-negative number in range, it will try to allocate
+ *			the specified base offset.
+ * @param count		number of consecutive qpg entries to allocate,
+ *			starting from the base offset.
+ * @return		qpg table base offset on success,
+ *			-1 on alloc failure,
+ *			-2 if the resource is already reserved.
+ */
+int cvmx_pki_qpg_entry_alloc(int node, int base_offset, int count)
+{
+	int rs;
+
+	if (cvmx_create_global_resource_range(CVMX_GR_TAG_QPG_ENTRY(node),
+					      CVMX_PKI_NUM_QPG_ENTRY)) {
+		printf("ERROR: Failed to create qpg_entry global resource\n");
+		return -1;
+	}
+	if (base_offset >= 0) {
+		rs = cvmx_reserve_global_resource_range(
+			CVMX_GR_TAG_QPG_ENTRY(node), base_offset, base_offset,
+			count);
+		if (rs == -1) {
+			debug("INFO: qpg entry %d is already reserved\n",
+			      (int)base_offset);
+			return CVMX_RESOURCE_ALREADY_RESERVED;
+		}
+	} else {
+		rs = cvmx_allocate_global_resource_range(
+			CVMX_GR_TAG_QPG_ENTRY(node), base_offset, count, 1);
+		if (rs == -1) {
+			printf("ERROR: Failed to allocate qpg entry\n");
+			return CVMX_RESOURCE_ALLOC_FAILED;
+		}
+	}
+	base_offset = rs;
+	return base_offset;
+}
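+
+/*
+ * Illustrative sketch (not part of this patch): allocating a run of
+ * QPG entries and releasing them again; the count of 8 is an
+ * arbitrary example value.
+ *
+ *	int base = cvmx_pki_qpg_entry_alloc(node, -1, 8);
+ *
+ *	if (base < 0)
+ *		return base;
+ *	... fill entries base .. base + 7 via cvmx_pki_write_qpg_entry() ...
+ *	cvmx_pki_qpg_entry_free(node, base, 8);
+ */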
+
+/**
+ * This function frees QPG table entries per node.
+ * @param node		node number.
+ * @param base_offset	base offset in the qpg table of the first entry
+ *			to free.
+ * @param count		number of consecutive qpg entries to free,
+ *			starting from the base offset.
+ * @return		0 on success, -1 on failure.
+ */
+int cvmx_pki_qpg_entry_free(int node, int base_offset, int count)
+{
+	if (cvmx_free_global_resource_range_with_base(
+		    CVMX_GR_TAG_QPG_ENTRY(node), base_offset, count) == -1) {
+		printf("ERROR Failed to release qpg offset %d",
+		       (int)base_offset);
+		return -1;
+	}
+	return 0;
+}
+
+int cvmx_pki_mtag_idx_alloc(int node, int idx)
+{
+	int rs;
+
+	if (cvmx_create_global_resource_range(CVMX_GR_TAG_MTAG_IDX(node),
+					      CVMX_PKI_NUM_MTAG_IDX)) {
+		printf("ERROR: Failed to create MTAG-IDX global resource\n");
+		return -1;
+	}
+	if (idx >= 0) {
+		rs = cvmx_reserve_global_resource_range(
+			CVMX_GR_TAG_MTAG_IDX(node), idx, idx, 1);
+		if (rs == -1) {
+			debug("INFO: MTAG index %d is already reserved\n",
+			      (int)idx);
+			return CVMX_RESOURCE_ALREADY_RESERVED;
+		}
+	} else {
+		rs = cvmx_allocate_global_resource_range(
+			CVMX_GR_TAG_MTAG_IDX(node), idx, 1, 1);
+		if (rs == -1) {
+			printf("ERROR: Failed to allocate MTAG index\n");
+			return CVMX_RESOURCE_ALLOC_FAILED;
+		}
+	}
+	return rs;
+}
diff --git a/arch/mips/mach-octeon/cvmx-pki.c b/arch/mips/mach-octeon/cvmx-pki.c
new file mode 100644
index 0000000..4e2cf08
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pki.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKI Support.
+ */
+
+#include <time.h>
+#include <log.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pki-cluster.h>
+#include <mach/cvmx-pki-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+/**
+ * This function enables PKI
+ *
+ * @param node  Node to enable PKI.
+ */
+void cvmx_pki_enable(int node)
+{
+	cvmx_pki_sft_rst_t sft_rst;
+	cvmx_pki_buf_ctl_t buf_ctl;
+
+	sft_rst.u64 = csr_rd_node(node, CVMX_PKI_SFT_RST);
+	while (sft_rst.s.busy != 0)
+		sft_rst.u64 = csr_rd_node(node, CVMX_PKI_SFT_RST);
+
+	buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+	if (buf_ctl.s.pki_en)
+		debug("Warning: Enabling PKI when PKI already enabled.\n");
+
+	buf_ctl.s.pki_en = 1;
+	csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
+}
+
+/**
+ * This function loads the default parse-engine code into the PKI clusters.
+ *
+ * @param node  Node to set up the clusters on.
+ */
+int cvmx_pki_setup_clusters(int node)
+{
+	int i;
+
+	for (i = 0; i < cvmx_pki_cluster_code_length; i++)
+		csr_wr_node(node, CVMX_PKI_IMEMX(i),
+			    cvmx_pki_cluster_code_default[i]);
+
+	return 0;
+}
+
+/**
+ * This function reads global configuration of PKI block.
+ *
+ * @param node  Node number.
+ * @param gbl_cfg  Pointer to struct into which the global configuration is read.
+ */
+void cvmx_pki_read_global_config(int node,
+				 struct cvmx_pki_global_config *gbl_cfg)
+{
+	cvmx_pki_stat_ctl_t stat_ctl;
+	cvmx_pki_icgx_cfg_t icg_cfg;
+	cvmx_pki_gbl_pen_t gbl_pen;
+	cvmx_pki_tag_secret_t tag_secret;
+	cvmx_pki_frm_len_chkx_t frm_len_chk;
+	cvmx_pki_buf_ctl_t buf_ctl;
+	unsigned int cl_grp;
+	int id;
+
+	stat_ctl.u64 = csr_rd_node(node, CVMX_PKI_STAT_CTL);
+	gbl_cfg->stat_mode = stat_ctl.s.mode;
+
+	for (cl_grp = 0; cl_grp < CVMX_PKI_NUM_CLUSTER_GROUP; cl_grp++) {
+		icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(cl_grp));
+		gbl_cfg->cluster_mask[cl_grp] = icg_cfg.s.clusters;
+	}
+	gbl_pen.u64 = csr_rd_node(node, CVMX_PKI_GBL_PEN);
+	gbl_cfg->gbl_pen.virt_pen = gbl_pen.s.virt_pen;
+	gbl_cfg->gbl_pen.clg_pen = gbl_pen.s.clg_pen;
+	gbl_cfg->gbl_pen.cl2_pen = gbl_pen.s.cl2_pen;
+	gbl_cfg->gbl_pen.l4_pen = gbl_pen.s.l4_pen;
+	gbl_cfg->gbl_pen.il3_pen = gbl_pen.s.il3_pen;
+	gbl_cfg->gbl_pen.l3_pen = gbl_pen.s.l3_pen;
+	gbl_cfg->gbl_pen.mpls_pen = gbl_pen.s.mpls_pen;
+	gbl_cfg->gbl_pen.fulc_pen = gbl_pen.s.fulc_pen;
+	gbl_cfg->gbl_pen.dsa_pen = gbl_pen.s.dsa_pen;
+	gbl_cfg->gbl_pen.hg_pen = gbl_pen.s.hg_pen;
+
+	tag_secret.u64 = csr_rd_node(node, CVMX_PKI_TAG_SECRET);
+	gbl_cfg->tag_secret.dst6 = tag_secret.s.dst6;
+	gbl_cfg->tag_secret.src6 = tag_secret.s.src6;
+	gbl_cfg->tag_secret.dst = tag_secret.s.dst;
+	gbl_cfg->tag_secret.src = tag_secret.s.src;
+
+	for (id = 0; id < CVMX_PKI_NUM_FRAME_CHECK; id++) {
+		frm_len_chk.u64 = csr_rd_node(node, CVMX_PKI_FRM_LEN_CHKX(id));
+		gbl_cfg->frm_len[id].maxlen = frm_len_chk.s.maxlen;
+		gbl_cfg->frm_len[id].minlen = frm_len_chk.s.minlen;
+	}
+	buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+	gbl_cfg->fpa_wait = buf_ctl.s.fpa_wait;
+}
+
+/**
+ * This function writes max and min frame lengths to hardware, which can be
+ * used to check the size of an arriving frame. There are 2 possible
+ * combinations, indicated by the id field.
+ *
+ * @param node  Node number.
+ * @param id  Choose which frame length register to write to.
+ * @param len_chk  Struct containing byte counts for max/min frame checks.
+ */
+static void cvmx_pki_write_frame_len(int node, int id,
+				     struct cvmx_pki_frame_len len_chk)
+{
+	cvmx_pki_frm_len_chkx_t frm_len_chk;
+
+	frm_len_chk.u64 = csr_rd_node(node, CVMX_PKI_FRM_LEN_CHKX(id));
+	frm_len_chk.s.maxlen = len_chk.maxlen;
+	frm_len_chk.s.minlen = len_chk.minlen;
+	csr_wr_node(node, CVMX_PKI_FRM_LEN_CHKX(id), frm_len_chk.u64);
+}
+
+/**
+ * This function writes global configuration of PKI into hw.
+ *
+ * @param node  Node number.
+ * @param gbl_cfg  Pointer to struct containing the global configuration.
+ */
+void cvmx_pki_write_global_config(int node,
+				  struct cvmx_pki_global_config *gbl_cfg)
+{
+	cvmx_pki_stat_ctl_t stat_ctl;
+	cvmx_pki_buf_ctl_t buf_ctl;
+	unsigned int cl_grp;
+
+	for (cl_grp = 0; cl_grp < CVMX_PKI_NUM_CLUSTER_GROUP; cl_grp++)
+		cvmx_pki_attach_cluster_to_group(node, cl_grp,
+						 gbl_cfg->cluster_mask[cl_grp]);
+
+	stat_ctl.u64 = 0;
+	stat_ctl.s.mode = gbl_cfg->stat_mode;
+	csr_wr_node(node, CVMX_PKI_STAT_CTL, stat_ctl.u64);
+
+	buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
+	buf_ctl.s.fpa_wait = gbl_cfg->fpa_wait;
+	csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
+
+	cvmx_pki_write_global_parse(node, gbl_cfg->gbl_pen);
+	cvmx_pki_write_tag_secret(node, gbl_cfg->tag_secret);
+	cvmx_pki_write_frame_len(node, 0, gbl_cfg->frm_len[0]);
+	cvmx_pki_write_frame_len(node, 1, gbl_cfg->frm_len[1]);
+}
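+
+/*
+ * Illustrative read-modify-write sketch (not part of this patch):
+ * the intended way to change one global parameter without disturbing
+ * the rest; the 64/1536 min/max frame lengths are example values
+ * only.
+ *
+ *	struct cvmx_pki_global_config gbl;
+ *
+ *	cvmx_pki_read_global_config(node, &gbl);
+ *	gbl.frm_len[0].minlen = 64;
+ *	gbl.frm_len[0].maxlen = 1536;
+ *	cvmx_pki_write_global_config(node, &gbl);
+ */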
+
+/**
+ * This function reads per pkind parameters in hardware which defines how
+ * the incoming packet is processed.
+ *
+ * @param node  Node number.
+ * @param pkind  PKI supports a large number of incoming interfaces and packets
+ *     arriving on different interfaces or channels may want to be processed
+ *     differently. PKI uses the pkind to determine how the incoming packet
+ *     is processed.
+ * @param pkind_cfg  Pointer to struct containing pkind configuration read
+ *     from the hardware.
+ */
+int cvmx_pki_read_pkind_config(int node, int pkind,
+			       struct cvmx_pki_pkind_config *pkind_cfg)
+{
+	int cluster = 0;
+	u64 cl_mask;
+	cvmx_pki_pkindx_icgsel_t icgsel;
+	cvmx_pki_clx_pkindx_style_t pstyle;
+	cvmx_pki_icgx_cfg_t icg_cfg;
+	cvmx_pki_clx_pkindx_cfg_t pcfg;
+	cvmx_pki_clx_pkindx_skip_t skip;
+	cvmx_pki_clx_pkindx_l2_custom_t l2cust;
+	cvmx_pki_clx_pkindx_lg_custom_t lgcust;
+
+	icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
+	icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(icgsel.s.icg));
+	pkind_cfg->cluster_grp = (uint8_t)icgsel.s.icg;
+	cl_mask = (uint64_t)icg_cfg.s.clusters;
+	cluster = __builtin_ffsll(cl_mask) - 1;
+
+	pstyle.u64 =
+		csr_rd_node(node, CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
+	pkind_cfg->initial_parse_mode = pstyle.s.pm;
+	pkind_cfg->initial_style = pstyle.s.style;
+
+	pcfg.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster));
+	pkind_cfg->fcs_pres = pcfg.s.fcs_pres;
+	pkind_cfg->parse_en.inst_hdr = pcfg.s.inst_hdr;
+	pkind_cfg->parse_en.mpls_en = pcfg.s.mpls_en;
+	pkind_cfg->parse_en.lg_custom = pcfg.s.lg_custom;
+	pkind_cfg->parse_en.fulc_en = pcfg.s.fulc_en;
+	pkind_cfg->parse_en.dsa_en = pcfg.s.dsa_en;
+	pkind_cfg->parse_en.hg2_en = pcfg.s.hg2_en;
+	pkind_cfg->parse_en.hg_en = pcfg.s.hg_en;
+
+	skip.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster));
+	pkind_cfg->fcs_skip = skip.s.fcs_skip;
+	pkind_cfg->inst_skip = skip.s.inst_skip;
+
+	l2cust.u64 = csr_rd_node(node,
+				 CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind, cluster));
+	pkind_cfg->l2_scan_offset = l2cust.s.offset;
+
+	lgcust.u64 = csr_rd_node(node,
+				 CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind, cluster));
+	pkind_cfg->lg_scan_offset = lgcust.s.offset;
+	return 0;
+}
+
+/**
+ * This function writes per pkind parameters in hardware which defines how
+ * the incoming packet is processed.
+ *
+ * @param node  Node number.
+ * @param pkind  PKI supports a large number of incoming interfaces and packets
+ *     arriving on different interfaces or channels may want to be processed
+ *     differently. PKI uses the pkind to determine how the incoming
+ *     packet is processed.
+ * @param pkind_cfg  Pointer to struct containing the pkind configuration to
+ *     be written to the hardware.
+ */
+int cvmx_pki_write_pkind_config(int node, int pkind,
+				struct cvmx_pki_pkind_config *pkind_cfg)
+{
+	unsigned int cluster = 0;
+	u64 cluster_mask;
+	cvmx_pki_pkindx_icgsel_t icgsel;
+	cvmx_pki_clx_pkindx_style_t pstyle;
+	cvmx_pki_icgx_cfg_t icg_cfg;
+	cvmx_pki_clx_pkindx_cfg_t pcfg;
+	cvmx_pki_clx_pkindx_skip_t skip;
+	cvmx_pki_clx_pkindx_l2_custom_t l2cust;
+	cvmx_pki_clx_pkindx_lg_custom_t lgcust;
+
+	if (pkind >= CVMX_PKI_NUM_PKIND ||
+	    pkind_cfg->cluster_grp >= CVMX_PKI_NUM_CLUSTER_GROUP ||
+	    pkind_cfg->initial_style >= CVMX_PKI_NUM_FINAL_STYLE) {
+		debug("ERROR: Configuring PKIND pkind = %d cluster_group = %d style = %d\n",
+		      pkind, pkind_cfg->cluster_grp, pkind_cfg->initial_style);
+		return -1;
+	}
+	icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
+	icgsel.s.icg = pkind_cfg->cluster_grp;
+	csr_wr_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind), icgsel.u64);
+
+	icg_cfg.u64 =
+		csr_rd_node(node, CVMX_PKI_ICGX_CFG(pkind_cfg->cluster_grp));
+	cluster_mask = (uint64_t)icg_cfg.s.clusters;
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (cluster_mask & (0x01L << cluster)) {
+			pstyle.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
+			pstyle.s.pm = pkind_cfg->initial_parse_mode;
+			pstyle.s.style = pkind_cfg->initial_style;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster),
+				    pstyle.u64);
+
+			pcfg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster));
+			pcfg.s.fcs_pres = pkind_cfg->fcs_pres;
+			pcfg.s.inst_hdr = pkind_cfg->parse_en.inst_hdr;
+			pcfg.s.mpls_en = pkind_cfg->parse_en.mpls_en;
+			pcfg.s.lg_custom = pkind_cfg->parse_en.lg_custom;
+			pcfg.s.fulc_en = pkind_cfg->parse_en.fulc_en;
+			pcfg.s.dsa_en = pkind_cfg->parse_en.dsa_en;
+			pcfg.s.hg2_en = pkind_cfg->parse_en.hg2_en;
+			pcfg.s.hg_en = pkind_cfg->parse_en.hg_en;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster),
+				    pcfg.u64);
+
+			skip.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster));
+			skip.s.fcs_skip = pkind_cfg->fcs_skip;
+			skip.s.inst_skip = pkind_cfg->inst_skip;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster),
+				    skip.u64);
+
+			l2cust.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind, cluster));
+			l2cust.s.offset = pkind_cfg->l2_scan_offset;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind,
+								  cluster),
+				    l2cust.u64);
+
+			lgcust.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind, cluster));
+			lgcust.s.offset = pkind_cfg->lg_scan_offset;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind,
+								  cluster),
+				    lgcust.u64);
+		}
+		cluster++;
+	}
+	return 0;
+}
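+
+/*
+ * Illustrative sketch (not part of this patch): updating a single
+ * pkind field with the read/write pair above; enabling MPLS parsing
+ * is just an example modification.
+ *
+ *	struct cvmx_pki_pkind_config pcfg;
+ *
+ *	cvmx_pki_read_pkind_config(node, pkind, &pcfg);
+ *	pcfg.parse_en.mpls_en = 1;
+ *	cvmx_pki_write_pkind_config(node, pkind, &pcfg);
+ */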
+
+/**
+ * This function reads parameters associated with tag configuration in hardware.
+ * Only first cluster in the group is used.
+ *
+ * @param node  Node number.
+ * @param style  Style to configure tag for.
+ * @param cluster_mask	Mask of clusters the style belongs to.
+ * @param tag_cfg  Pointer to tag configuration struct.
+ */
+void cvmx_pki_read_tag_config(int node, int style, uint64_t cluster_mask,
+			      struct cvmx_pki_style_tag_cfg *tag_cfg)
+{
+	int mask, tag_idx, index;
+	cvmx_pki_clx_stylex_cfg2_t style_cfg2;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+	cvmx_pki_stylex_tag_sel_t tag_sel;
+	cvmx_pki_tag_incx_ctl_t tag_ctl;
+	cvmx_pki_tag_incx_mask_t tag_mask;
+	int cluster = __builtin_ffsll(cluster_mask) - 1;
+
+	style_cfg2.u64 =
+		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
+	style_alg.u64 =
+		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
+
+	/* 7-Tuple Tag: */
+	tag_cfg->tag_fields.layer_g_src = style_cfg2.s.tag_src_lg;
+	tag_cfg->tag_fields.layer_f_src = style_cfg2.s.tag_src_lf;
+	tag_cfg->tag_fields.layer_e_src = style_cfg2.s.tag_src_le;
+	tag_cfg->tag_fields.layer_d_src = style_cfg2.s.tag_src_ld;
+	tag_cfg->tag_fields.layer_c_src = style_cfg2.s.tag_src_lc;
+	tag_cfg->tag_fields.layer_b_src = style_cfg2.s.tag_src_lb;
+	tag_cfg->tag_fields.layer_g_dst = style_cfg2.s.tag_dst_lg;
+	tag_cfg->tag_fields.layer_f_dst = style_cfg2.s.tag_dst_lf;
+	tag_cfg->tag_fields.layer_e_dst = style_cfg2.s.tag_dst_le;
+	tag_cfg->tag_fields.layer_d_dst = style_cfg2.s.tag_dst_ld;
+	tag_cfg->tag_fields.layer_c_dst = style_cfg2.s.tag_dst_lc;
+	tag_cfg->tag_fields.layer_b_dst = style_cfg2.s.tag_dst_lb;
+	tag_cfg->tag_fields.tag_vni = style_alg.s.tag_vni;
+	tag_cfg->tag_fields.tag_gtp = style_alg.s.tag_gtp;
+	tag_cfg->tag_fields.tag_spi = style_alg.s.tag_spi;
+	tag_cfg->tag_fields.tag_sync = style_alg.s.tag_syn;
+	tag_cfg->tag_fields.ip_prot_nexthdr = style_alg.s.tag_pctl;
+	tag_cfg->tag_fields.second_vlan = style_alg.s.tag_vs1;
+	tag_cfg->tag_fields.first_vlan = style_alg.s.tag_vs0;
+	tag_cfg->tag_fields.mpls_label = style_alg.s.tag_mpls0;
+	tag_cfg->tag_fields.input_port = style_alg.s.tag_prt;
+
+	/* Custom-Mask Tag: */
+	tag_sel.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_TAG_SEL(style));
+	for (mask = 0; mask < 4; mask++) {
+		tag_cfg->mask_tag[mask].enable =
+			(style_cfg2.s.tag_inc & (1 << mask)) != 0;
+		switch (mask) {
+		case 0:
+			tag_idx = tag_sel.s.tag_idx0;
+			break;
+		case 1:
+			tag_idx = tag_sel.s.tag_idx1;
+			break;
+		case 2:
+			tag_idx = tag_sel.s.tag_idx2;
+			break;
+		case 3:
+			tag_idx = tag_sel.s.tag_idx3;
+			break;
+		}
+		index = tag_idx * 4 + mask;
+		tag_mask.u64 = csr_rd_node(node, CVMX_PKI_TAG_INCX_MASK(index));
+		tag_cfg->mask_tag[mask].val = tag_mask.s.en;
+		tag_ctl.u64 = csr_rd_node(node, CVMX_PKI_TAG_INCX_CTL(index));
+		tag_cfg->mask_tag[mask].base = tag_ctl.s.ptr_sel;
+		tag_cfg->mask_tag[mask].offset = tag_ctl.s.offset;
+	}
+}
+
+/**
+ * This function writes/configures parameters associated with tag configuration in
+ * hardware. In Custom-Mask Tagging, all four masks use the same base index
+ * to access Tag Control and Tag Mask registers.
+ *
+ * @param node  Node number.
+ * @param style  Style to configure tag for.
+ * @param cluster_mask  Mask of clusters to configure the style for.
+ * @param tag_cfg  Pointer to tag configuration struct.
+ */
+void cvmx_pki_write_tag_config(int node, int style, uint64_t cluster_mask,
+			       struct cvmx_pki_style_tag_cfg *tag_cfg)
+{
+	int mask, index, tag_idx, mtag_en = 0;
+	unsigned int cluster = 0;
+	cvmx_pki_clx_stylex_cfg2_t scfg2;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+	cvmx_pki_tag_incx_ctl_t tag_ctl;
+	cvmx_pki_tag_incx_mask_t tag_mask;
+	cvmx_pki_stylex_tag_sel_t tag_sel;
+
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (cluster_mask & (0x01L << cluster)) {
+			/* 7-Tuple Tag: */
+			scfg2.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
+			scfg2.s.tag_src_lg = tag_cfg->tag_fields.layer_g_src;
+			scfg2.s.tag_src_lf = tag_cfg->tag_fields.layer_f_src;
+			scfg2.s.tag_src_le = tag_cfg->tag_fields.layer_e_src;
+			scfg2.s.tag_src_ld = tag_cfg->tag_fields.layer_d_src;
+			scfg2.s.tag_src_lc = tag_cfg->tag_fields.layer_c_src;
+			scfg2.s.tag_src_lb = tag_cfg->tag_fields.layer_b_src;
+			scfg2.s.tag_dst_lg = tag_cfg->tag_fields.layer_g_dst;
+			scfg2.s.tag_dst_lf = tag_cfg->tag_fields.layer_f_dst;
+			scfg2.s.tag_dst_le = tag_cfg->tag_fields.layer_e_dst;
+			scfg2.s.tag_dst_ld = tag_cfg->tag_fields.layer_d_dst;
+			scfg2.s.tag_dst_lc = tag_cfg->tag_fields.layer_c_dst;
+			scfg2.s.tag_dst_lb = tag_cfg->tag_fields.layer_b_dst;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_CFG2(style, cluster),
+				    scfg2.u64);
+
+			style_alg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
+			style_alg.s.tag_vni = tag_cfg->tag_fields.tag_vni;
+			style_alg.s.tag_gtp = tag_cfg->tag_fields.tag_gtp;
+			style_alg.s.tag_spi = tag_cfg->tag_fields.tag_spi;
+			style_alg.s.tag_syn = tag_cfg->tag_fields.tag_sync;
+			style_alg.s.tag_pctl =
+				tag_cfg->tag_fields.ip_prot_nexthdr;
+			style_alg.s.tag_vs1 = tag_cfg->tag_fields.second_vlan;
+			style_alg.s.tag_vs0 = tag_cfg->tag_fields.first_vlan;
+			style_alg.s.tag_mpls0 = tag_cfg->tag_fields.mpls_label;
+			style_alg.s.tag_prt = tag_cfg->tag_fields.input_port;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_ALG(style, cluster),
+				    style_alg.u64);
+
+			/* Custom-Mask Tag (Part 1): */
+			for (mask = 0; mask < 4; mask++) {
+				if (tag_cfg->mask_tag[mask].enable)
+					mtag_en++;
+			}
+			if (mtag_en) {
+				scfg2.u64 = csr_rd_node(
+					node, CVMX_PKI_CLX_STYLEX_CFG2(
+						      style, cluster));
+				scfg2.s.tag_inc = 0;
+				for (mask = 0; mask < 4; mask++) {
+					if (tag_cfg->mask_tag[mask].enable)
+						scfg2.s.tag_inc |= 1 << mask;
+				}
+				csr_wr_node(node,
+					    CVMX_PKI_CLX_STYLEX_CFG2(style,
+								     cluster),
+					    scfg2.u64);
+			}
+		}
+		cluster++;
+	}
+	/* Custom-Mask Tag (Part 2): */
+	if (mtag_en) {
+		tag_idx = cvmx_pki_mtag_idx_alloc(node, -1);
+		if (tag_idx < 0)
+			return;
+
+		tag_sel.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_TAG_SEL(style));
+		for (mask = 0; mask < 4; mask++) {
+			if (tag_cfg->mask_tag[mask].enable) {
+				switch (mask) {
+				case 0:
+					tag_sel.s.tag_idx0 = tag_idx;
+					break;
+				case 1:
+					tag_sel.s.tag_idx1 = tag_idx;
+					break;
+				case 2:
+					tag_sel.s.tag_idx2 = tag_idx;
+					break;
+				case 3:
+					tag_sel.s.tag_idx3 = tag_idx;
+					break;
+				}
+				index = tag_idx * 4 + mask;
+				tag_mask.u64 = csr_rd_node(
+					node, CVMX_PKI_TAG_INCX_MASK(index));
+				tag_mask.s.en = tag_cfg->mask_tag[mask].val;
+				csr_wr_node(node, CVMX_PKI_TAG_INCX_MASK(index),
+					    tag_mask.u64);
+
+				tag_ctl.u64 = csr_rd_node(
+					node, CVMX_PKI_TAG_INCX_CTL(index));
+				tag_ctl.s.ptr_sel =
+					tag_cfg->mask_tag[mask].base;
+				tag_ctl.s.offset =
+					tag_cfg->mask_tag[mask].offset;
+				csr_wr_node(node, CVMX_PKI_TAG_INCX_CTL(index),
+					    tag_ctl.u64);
+			}
+		}
+		csr_wr_node(node, CVMX_PKI_STYLEX_TAG_SEL(style), tag_sel.u64);
+	}
+}
+
+/**
+ * This function reads parameters associated with style in hardware.
+ *
+ * @param node  Node number.
+ * @param style	Style to read from.
+ * @param cluster_mask	Mask of clusters style belongs to.
+ * @param style_cfg	 Pointer to style config struct.
+ */
+void cvmx_pki_read_style_config(int node, int style, uint64_t cluster_mask,
+				struct cvmx_pki_style_config *style_cfg)
+{
+	cvmx_pki_clx_stylex_cfg_t scfg;
+	cvmx_pki_clx_stylex_cfg2_t scfg2;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+	cvmx_pki_stylex_buf_t style_buf;
+	int cluster = __builtin_ffsll(cluster_mask) - 1;
+
+	scfg.u64 = csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
+	scfg2.u64 = csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
+	style_alg.u64 =
+		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
+	style_buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
+
+	style_cfg->parm_cfg.ip6_udp_opt = scfg.s.ip6_udp_opt;
+	style_cfg->parm_cfg.lenerr_en = scfg.s.lenerr_en;
+	style_cfg->parm_cfg.lenerr_eqpad = scfg.s.lenerr_eqpad;
+	style_cfg->parm_cfg.maxerr_en = scfg.s.maxerr_en;
+	style_cfg->parm_cfg.minerr_en = scfg.s.minerr_en;
+	style_cfg->parm_cfg.fcs_chk = scfg.s.fcs_chk;
+	style_cfg->parm_cfg.fcs_strip = scfg.s.fcs_strip;
+	style_cfg->parm_cfg.minmax_sel = scfg.s.minmax_sel;
+	style_cfg->parm_cfg.qpg_base = scfg.s.qpg_base;
+	style_cfg->parm_cfg.qpg_dis_padd = scfg.s.qpg_dis_padd;
+	style_cfg->parm_cfg.qpg_dis_aura = scfg.s.qpg_dis_aura;
+	style_cfg->parm_cfg.qpg_dis_grp = scfg.s.qpg_dis_grp;
+	style_cfg->parm_cfg.qpg_dis_grptag = scfg.s.qpg_dis_grptag;
+	style_cfg->parm_cfg.rawdrp = scfg.s.rawdrp;
+	style_cfg->parm_cfg.force_drop = scfg.s.drop;
+	style_cfg->parm_cfg.nodrop = scfg.s.nodrop;
+
+	style_cfg->parm_cfg.len_lg = scfg2.s.len_lg;
+	style_cfg->parm_cfg.len_lf = scfg2.s.len_lf;
+	style_cfg->parm_cfg.len_le = scfg2.s.len_le;
+	style_cfg->parm_cfg.len_ld = scfg2.s.len_ld;
+	style_cfg->parm_cfg.len_lc = scfg2.s.len_lc;
+	style_cfg->parm_cfg.len_lb = scfg2.s.len_lb;
+	style_cfg->parm_cfg.csum_lg = scfg2.s.csum_lg;
+	style_cfg->parm_cfg.csum_lf = scfg2.s.csum_lf;
+	style_cfg->parm_cfg.csum_le = scfg2.s.csum_le;
+	style_cfg->parm_cfg.csum_ld = scfg2.s.csum_ld;
+	style_cfg->parm_cfg.csum_lc = scfg2.s.csum_lc;
+	style_cfg->parm_cfg.csum_lb = scfg2.s.csum_lb;
+
+	style_cfg->parm_cfg.qpg_qos = style_alg.s.qpg_qos;
+	style_cfg->parm_cfg.tag_type = style_alg.s.tt;
+	style_cfg->parm_cfg.apad_nip = style_alg.s.apad_nip;
+	style_cfg->parm_cfg.qpg_port_sh = style_alg.s.qpg_port_sh;
+	style_cfg->parm_cfg.qpg_port_msb = style_alg.s.qpg_port_msb;
+	style_cfg->parm_cfg.wqe_vs = style_alg.s.wqe_vs;
+
+	style_cfg->parm_cfg.pkt_lend = style_buf.s.pkt_lend;
+	style_cfg->parm_cfg.wqe_hsz = style_buf.s.wqe_hsz;
+	style_cfg->parm_cfg.wqe_skip = style_buf.s.wqe_skip * 128;
+	style_cfg->parm_cfg.first_skip = style_buf.s.first_skip * 8;
+	style_cfg->parm_cfg.later_skip = style_buf.s.later_skip * 8;
+	style_cfg->parm_cfg.cache_mode = style_buf.s.opc_mode;
+	style_cfg->parm_cfg.mbuff_size = style_buf.s.mb_size * 8;
+	style_cfg->parm_cfg.dis_wq_dat = style_buf.s.dis_wq_dat;
+
+	cvmx_pki_read_tag_config(node, style, cluster_mask,
+				 &style_cfg->tag_cfg);
+}
+
+/**
+ * This function writes/configures parameters associated with style in hardware.
+ *
+ * @param node  Node number.
+ * @param style  Style to configure.
+ * @param cluster_mask  Mask of clusters to configure the style for.
+ * @param style_cfg	 Pointer to style config struct.
+ */
+void cvmx_pki_write_style_config(int node, uint64_t style, u64 cluster_mask,
+				 struct cvmx_pki_style_config *style_cfg)
+{
+	cvmx_pki_clx_stylex_cfg_t scfg;
+	cvmx_pki_clx_stylex_cfg2_t scfg2;
+	cvmx_pki_clx_stylex_alg_t style_alg;
+	cvmx_pki_stylex_buf_t style_buf;
+	unsigned int cluster = 0;
+
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (cluster_mask & (0x01L << cluster)) {
+			scfg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
+			scfg.s.ip6_udp_opt = style_cfg->parm_cfg.ip6_udp_opt;
+			scfg.s.lenerr_en = style_cfg->parm_cfg.lenerr_en;
+			scfg.s.lenerr_eqpad = style_cfg->parm_cfg.lenerr_eqpad;
+			scfg.s.maxerr_en = style_cfg->parm_cfg.maxerr_en;
+			scfg.s.minerr_en = style_cfg->parm_cfg.minerr_en;
+			scfg.s.fcs_chk = style_cfg->parm_cfg.fcs_chk;
+			scfg.s.fcs_strip = style_cfg->parm_cfg.fcs_strip;
+			scfg.s.minmax_sel = style_cfg->parm_cfg.minmax_sel;
+			scfg.s.qpg_base = style_cfg->parm_cfg.qpg_base;
+			scfg.s.qpg_dis_padd = style_cfg->parm_cfg.qpg_dis_padd;
+			scfg.s.qpg_dis_aura = style_cfg->parm_cfg.qpg_dis_aura;
+			scfg.s.qpg_dis_grp = style_cfg->parm_cfg.qpg_dis_grp;
+			scfg.s.qpg_dis_grptag =
+				style_cfg->parm_cfg.qpg_dis_grptag;
+			scfg.s.rawdrp = style_cfg->parm_cfg.rawdrp;
+			scfg.s.drop = style_cfg->parm_cfg.force_drop;
+			scfg.s.nodrop = style_cfg->parm_cfg.nodrop;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
+				    scfg.u64);
+
+			scfg2.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
+			scfg2.s.len_lg = style_cfg->parm_cfg.len_lg;
+			scfg2.s.len_lf = style_cfg->parm_cfg.len_lf;
+			scfg2.s.len_le = style_cfg->parm_cfg.len_le;
+			scfg2.s.len_ld = style_cfg->parm_cfg.len_ld;
+			scfg2.s.len_lc = style_cfg->parm_cfg.len_lc;
+			scfg2.s.len_lb = style_cfg->parm_cfg.len_lb;
+			scfg2.s.csum_lg = style_cfg->parm_cfg.csum_lg;
+			scfg2.s.csum_lf = style_cfg->parm_cfg.csum_lf;
+			scfg2.s.csum_le = style_cfg->parm_cfg.csum_le;
+			scfg2.s.csum_ld = style_cfg->parm_cfg.csum_ld;
+			scfg2.s.csum_lc = style_cfg->parm_cfg.csum_lc;
+			scfg2.s.csum_lb = style_cfg->parm_cfg.csum_lb;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_CFG2(style, cluster),
+				    scfg2.u64);
+
+			style_alg.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
+			style_alg.s.qpg_qos = style_cfg->parm_cfg.qpg_qos;
+			style_alg.s.tt = style_cfg->parm_cfg.tag_type;
+			style_alg.s.apad_nip = style_cfg->parm_cfg.apad_nip;
+			style_alg.s.qpg_port_sh =
+				style_cfg->parm_cfg.qpg_port_sh;
+			style_alg.s.qpg_port_msb =
+				style_cfg->parm_cfg.qpg_port_msb;
+			style_alg.s.wqe_vs = style_cfg->parm_cfg.wqe_vs;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_STYLEX_ALG(style, cluster),
+				    style_alg.u64);
+		}
+		cluster++;
+	}
+	style_buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
+	style_buf.s.pkt_lend = style_cfg->parm_cfg.pkt_lend;
+	style_buf.s.wqe_hsz = style_cfg->parm_cfg.wqe_hsz;
+	style_buf.s.wqe_skip = (style_cfg->parm_cfg.wqe_skip) / 128;
+	style_buf.s.first_skip = (style_cfg->parm_cfg.first_skip) / 8;
+	style_buf.s.later_skip = style_cfg->parm_cfg.later_skip / 8;
+	style_buf.s.opc_mode = style_cfg->parm_cfg.cache_mode;
+	style_buf.s.mb_size = (style_cfg->parm_cfg.mbuff_size) / 8;
+	style_buf.s.dis_wq_dat = style_cfg->parm_cfg.dis_wq_dat;
+	csr_wr_node(node, CVMX_PKI_STYLEX_BUF(style), style_buf.u64);
+
+	cvmx_pki_write_tag_config(node, style, cluster_mask,
+				  &style_cfg->tag_cfg);
+}
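+
+/*
+ * Illustrative sketch (not part of this patch): the style config is
+ * also meant to be updated read-modify-write; changing first_skip is
+ * just an example (a byte count, kept a multiple of 8 since the code
+ * above divides by 8).
+ *
+ *	struct cvmx_pki_style_config scfg;
+ *
+ *	cvmx_pki_read_style_config(node, style, cluster_mask, &scfg);
+ *	scfg.parm_cfg.first_skip = 40;
+ *	cvmx_pki_write_style_config(node, style, cluster_mask, &scfg);
+ */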
+
+/**
+ * This function reads qpg entry at specified offset from qpg table.
+ *
+ * @param node  Node number.
+ * @param offset  Offset in qpg table to read from.
+ * @param qpg_cfg  Pointer to structure containing qpg values.
+ */
+int cvmx_pki_read_qpg_entry(int node, int offset,
+			    struct cvmx_pki_qpg_config *qpg_cfg)
+{
+	cvmx_pki_qpg_tblx_t qpg_tbl;
+
+	if (offset >= CVMX_PKI_NUM_QPG_ENTRY) {
+		debug("ERROR: qpg offset %d is >= 2048\n", offset);
+		return -1;
+	}
+	qpg_tbl.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(offset));
+	qpg_cfg->aura_num = qpg_tbl.s.laura;
+	qpg_cfg->port_add = qpg_tbl.s.padd;
+	qpg_cfg->grp_ok = qpg_tbl.s.grp_ok;
+	qpg_cfg->grp_bad = qpg_tbl.s.grp_bad;
+	qpg_cfg->grptag_ok = qpg_tbl.s.grptag_ok;
+	qpg_cfg->grptag_bad = qpg_tbl.s.grptag_bad;
+	return 0;
+}
+
+/**
+ * This function writes qpg entry at specified offset in qpg table.
+ *
+ * @param node  Node number.
+ * @param offset  Offset in qpg table to write to.
+ * @param qpg_cfg  Pointer to structure containing qpg values.
+ */
+void cvmx_pki_write_qpg_entry(int node, int offset,
+			      struct cvmx_pki_qpg_config *qpg_cfg)
+{
+	cvmx_pki_qpg_tblx_t qpg_tbl;
+
+	qpg_tbl.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(offset));
+	qpg_tbl.s.padd = qpg_cfg->port_add;
+	qpg_tbl.s.laura = qpg_cfg->aura_num;
+	qpg_tbl.s.grp_ok = qpg_cfg->grp_ok;
+	qpg_tbl.s.grp_bad = qpg_cfg->grp_bad;
+	qpg_tbl.s.grptag_ok = qpg_cfg->grptag_ok;
+	qpg_tbl.s.grptag_bad = qpg_cfg->grptag_bad;
+	csr_wr_node(node, CVMX_PKI_QPG_TBLX(offset), qpg_tbl.u64);
+}
+
+/**
+ * This function writes pcam entry at given offset in pcam table in hardware
+ *
+ * @param node  Node number.
+ * @param index  Offset in pcam table.
+ * @param cluster_mask	Mask of clusters in which to write pcam entry.
+ * @param input  Input keys to pcam match passed as struct.
+ * @param action  PCAM match action passed as struct.
+ */
+int cvmx_pki_pcam_write_entry(int node, int index, uint64_t cluster_mask,
+			      struct cvmx_pki_pcam_input input,
+			      struct cvmx_pki_pcam_action action)
+{
+	int bank;
+	unsigned int cluster = 0;
+	cvmx_pki_clx_pcamx_termx_t term;
+	cvmx_pki_clx_pcamx_matchx_t match;
+	cvmx_pki_clx_pcamx_actionx_t act;
+
+	if (index >= CVMX_PKI_TOTAL_PCAM_ENTRY) {
+		debug("\nERROR: Invalid pcam entry %d\n", index);
+		return -1;
+	}
+	bank = (int)(input.field & 0x01);
+	while (cluster < CVMX_PKI_NUM_CLUSTER) {
+		if (cluster_mask & (0x01L << cluster)) {
+			term.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank, index));
+			term.s.valid = 0;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank,
+							     index),
+				    term.u64);
+			match.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PCAMX_MATCHX(cluster, bank,
+								index));
+			match.s.data1 = input.data & input.data_mask;
+			match.s.data0 = (~input.data) & input.data_mask;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PCAMX_MATCHX(cluster, bank,
+							      index),
+				    match.u64);
+
+			act.u64 = csr_rd_node(
+				node, CVMX_PKI_CLX_PCAMX_ACTIONX(cluster, bank,
+								 index));
+			act.s.pmc = action.parse_mode_chg;
+			act.s.style_add = action.style_add;
+			act.s.pf = action.parse_flag_set;
+			act.s.setty = action.layer_type_set;
+			act.s.advance = action.pointer_advance;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PCAMX_ACTIONX(cluster, bank,
+							       index),
+				    act.u64);
+
+			term.u64 = csr_rd_node(
+				node,
+				CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank, index));
+			term.s.term1 = input.field & input.field_mask;
+			term.s.term0 = (~input.field) & input.field_mask;
+			term.s.style1 = input.style & input.style_mask;
+			term.s.style0 = (~input.style) & input.style_mask;
+			term.s.valid = 1;
+			csr_wr_node(node,
+				    CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank,
+							     index),
+				    term.u64);
+		}
+		cluster++;
+	}
+	return 0;
+}
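+
+/*
+ * Illustrative sketch (not part of this patch): a pcam entry is
+ * normally allocated before it is written. The input/action structs
+ * below are placeholders, not a meaningful match rule; the bank is
+ * derived from the input field exactly as the code above does.
+ *
+ *	struct cvmx_pki_pcam_input in = { ... };
+ *	struct cvmx_pki_pcam_action act = { ... };
+ *	int bank = in.field & 0x01;
+ *	int idx = cvmx_pki_pcam_entry_alloc(node, -1, bank, cluster_mask);
+ *
+ *	if (idx >= 0)
+ *		cvmx_pki_pcam_write_entry(node, idx, cluster_mask, in, act);
+ */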
+
+/**
+ * Enables/Disables fcs check and fcs stripping on the pkind.
+ *
+ * @param node  Node number
+ * @param pknd  PKIND to apply settings on.
+ * @param fcs_chk  Enable/disable fcs check.
+ *    1 = enable fcs error check.
+ *    0 = disable fcs error check.
+ * @param fcs_strip	 Strip L2 FCS bytes from packet; decreases WQE[LEN] by 4 bytes.
+ *    1 = strip L2 FCS.
+ *    0 = Do not strip L2 FCS.
+ */
+void cvmx_pki_endis_fcs_check(int node, int pknd, bool fcs_chk, bool fcs_strip)
+{
+	int style;
+	unsigned int cluster;
+	cvmx_pki_clx_pkindx_style_t pstyle;
+	cvmx_pki_clx_stylex_cfg_t style_cfg;
+
+	/* Validate PKIND # */
+	if (pknd >= CVMX_PKI_NUM_PKIND) {
+		printf("%s: PKIND %d out of range\n", __func__, pknd);
+		return;
+	}
+
+	for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
+		pstyle.u64 = csr_rd_node(
+			node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cluster));
+		style = pstyle.s.style;
+		/* Validate STYLE # */
+		if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
+			continue;
+		style_cfg.u64 = csr_rd_node(
+			node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
+		style_cfg.s.fcs_chk = fcs_chk;
+		style_cfg.s.fcs_strip = fcs_strip;
+		csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
+			    style_cfg.u64);
+	}
+}
+
+/**
+ * Enables/Disables l2 length error check and max & min frame length checks
+ *
+ * @param node  Node number
+ * @param pknd  PKIND to enable/disable the checks for.
+ * @param l2len_err  L2 length error check enable.
+ * @param maxframe_err  Max frame error check enable.
+ * @param minframe_err  Min frame error check enable.
+ *    1 = Enable error checks
+ *    0 = Disable error checks
+ */
+void cvmx_pki_endis_l2_errs(int node, int pknd, bool l2len_err,
+			    bool maxframe_err, bool minframe_err)
+{
+	int style;
+	unsigned int cluster;
+	cvmx_pki_clx_pkindx_style_t pstyle;
+	cvmx_pki_clx_stylex_cfg_t style_cfg;
+
+	/* Validate PKIND # */
+	if (pknd >= CVMX_PKI_NUM_PKIND) {
+		printf("%s: PKIND %d out of range\n", __func__, pknd);
+		return;
+	}
+
+	for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
+		pstyle.u64 = csr_rd_node(
+			node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cluster));
+		style = pstyle.s.style;
+		/* Validate STYLE # */
+		if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
+			continue;
+		style_cfg.u64 = csr_rd_node(
+			node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
+		style_cfg.s.lenerr_en = l2len_err;
+		style_cfg.s.maxerr_en = maxframe_err;
+		style_cfg.s.minerr_en = minframe_err;
+		csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
+			    style_cfg.u64);
+	}
+}
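+
+/*
+ * Illustrative sketch (not part of this patch): enabling the full set
+ * of per-pkind L2 checks with the two helpers above.
+ *
+ *	cvmx_pki_endis_fcs_check(node, pknd, true, true);
+ *	cvmx_pki_endis_l2_errs(node, pknd, true, true, true);
+ */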
diff --git a/arch/mips/mach-octeon/cvmx-pko-internal-ports-range.c b/arch/mips/mach-octeon/cvmx-pko-internal-ports-range.c
new file mode 100644
index 0000000..694b4e3
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko-internal-ports-range.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-ipd.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+union interface_port {
+	struct {
+		int port;
+		int interface;
+	} s;
+	u64 u64;
+};
+
+static int dbg;
+
+static int port_range_init;
+
+int __cvmx_pko_internal_ports_range_init(void)
+{
+	int rv = 0;
+
+	if (port_range_init)
+		return 0;
+	port_range_init = 1;
+	rv = cvmx_create_global_resource_range(CVMX_GR_TAG_PKO_IPORTS,
+					       CVMX_HELPER_CFG_MAX_PKO_QUEUES);
+	if (rv != 0)
+		debug("ERROR : Failed to initialize pko internal port range\n");
+	return rv;
+}
+
+int cvmx_pko_internal_ports_alloc(int xiface, int port, u64 count)
+{
+	int ret_val = -1;
+	union interface_port inf_port;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	__cvmx_pko_internal_ports_range_init();
+	inf_port.s.interface = xi.interface;
+	inf_port.s.port = port;
+	ret_val = cvmx_allocate_global_resource_range(CVMX_GR_TAG_PKO_IPORTS,
+						      inf_port.u64, count, 1);
+	if (dbg)
+		debug("internal port alloc : port=%02d base=%02d count=%02d\n",
+		      (int)port, ret_val, (int)count);
+	if (ret_val == -1)
+		return ret_val;
+	cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_port_base = ret_val;
+	cvmx_cfg_port[xi.node][xi.interface][port].ccpp_pko_num_ports = count;
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-pko.c b/arch/mips/mach-octeon/cvmx-pko.c
new file mode 100644
index 0000000..8a91813
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Support library for the hardware Packet Output unit.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-iob-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+#include <mach/cvmx-helper-pko.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define CVMX_PKO_NQ_PER_PORT_MAX 32
+
+static cvmx_pko_return_value_t cvmx_pko2_config_port(short ipd_port,
+						     int base_queue,
+						     int num_queues,
+						     const u8 priority[]);
+
+static const int debug;
+
+/**
+ * Internal state of packet output
+ */
+
+/*
+ * PKO port iterator
+ * XXX this macro only works for 68XX
+ */
+
+#define pko_for_each_port(__p)                                                 \
+	for (__p = 0; __p < CVMX_HELPER_CFG_MAX_PKO_PORT; __p++)               \
+		if (__cvmx_helper_cfg_pko_queue_base(__p) !=                   \
+		    CVMX_HELPER_CFG_INVALID_VALUE)
+
+/*
+ * @INTERNAL
+ *
+ * Get INT for a port
+ *
+ * @param interface
+ * @param index
+ * @return the INT value on success and -1 on error
+ *
+ * This function is only for CN68XX.
+ */
+static int __cvmx_pko_int(int interface, int index)
+{
+	cvmx_helper_cfg_assert(interface < CVMX_HELPER_MAX_IFACE);
+	cvmx_helper_cfg_assert(index >= 0);
+
+	switch (interface) {
+	case 0:
+		cvmx_helper_cfg_assert(index < 4);
+		return index;
+	case 1:
+		cvmx_helper_cfg_assert(index == 0);
+		return 4;
+	case 2:
+		cvmx_helper_cfg_assert(index < 4);
+		return index + 8;
+	case 3:
+		cvmx_helper_cfg_assert(index < 4);
+		return index + 0xC;
+	case 4:
+		cvmx_helper_cfg_assert(index < 4);
+		return index + 0x10;
+	case 5:
+		cvmx_helper_cfg_assert(index < 256);
+		return 0x1C;
+	case 6:
+		cvmx_helper_cfg_assert(index < 256);
+		return 0x1D;
+	case 7:
+		cvmx_helper_cfg_assert(index < 32);
+		return 0x1E;
+	case 8:
+		cvmx_helper_cfg_assert(index < 8);
+		return 0x1F;
+	}
+
+	return -1;
+}
+
+int cvmx_pko_get_base_pko_port(int interface, int index)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
+		return cvmx_helper_get_ipd_port(interface, index);
+	else if (octeon_has_feature(OCTEON_FEATURE_PKND))
+		return __cvmx_helper_cfg_pko_port_base(interface, index);
+	else
+		return cvmx_helper_get_ipd_port(interface, index);
+}
+
+int cvmx_pko_get_base_queue(int port)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_get_queue_base(port);
+	} else if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		return __cvmx_helper_cfg_pko_queue_base(
+			cvmx_helper_cfg_ipd2pko_port_base(port));
+	} else {
+		if (port < 48)
+			return cvmx_pko_queue_table[port].ccppp_queue_base;
+		else
+			return CVMX_PKO_ILLEGAL_QUEUE;
+	}
+}
+
+int cvmx_pko_get_num_queues(int port)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_get_queue_num(port);
+	} else if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		return __cvmx_helper_cfg_pko_queue_num(
+			cvmx_helper_cfg_ipd2pko_port_base(port));
+	} else {
+		if (port < 48)
+			return cvmx_pko_queue_table[port].ccppp_num_queues;
+	}
+	return 0;
+}
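+
+/*
+ * Illustrative sketch (not part of this patch): iterating all queues
+ * of a port with the two accessors above.
+ *
+ *	int q, base = cvmx_pko_get_base_queue(port);
+ *	int num = cvmx_pko_get_num_queues(port);
+ *
+ *	if (base != CVMX_PKO_ILLEGAL_QUEUE)
+ *		for (q = base; q < base + num; q++)
+ *			...;
+ */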
+
+/*
+ * Allocate memory for PKO engines.
+ *
+ * @param engine is the PKO engine ID.
+ * @return # of 2KB-chunks allocated to this PKO engine.
+ */
+static int __cvmx_pko_memory_per_engine_o68(int engine)
+{
+	/* CN68XX has 40KB to divide between the engines in 2KB chunks */
+	int max_engine;
+	int size_per_engine;
+	int size;
+
+	max_engine = __cvmx_helper_cfg_pko_max_engine();
+	size_per_engine = 40 / 2 / max_engine;
+
+	if (engine >= max_engine)
+		/* Unused engines get no space */
+		size = 0;
+	else if (engine == max_engine - 1)
+		/*
+		 * The last engine gets all the space lost by rounding. This means
+		 * the ILK gets the most space
+		 */
+		size = 40 / 2 - engine * size_per_engine;
+	else
+		/* All other engines get the same space */
+		size = size_per_engine;
+
+	return size;
+}
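+
+/*
+ * Worked example for the sizing above (illustrative only): with 40KB
+ * in 2KB chunks there are 20 chunks. For max_engine = 8,
+ * size_per_engine = 20 / 8 = 2, so engines 0-6 get 2 chunks each
+ * (14 total) and the last engine gets 20 - 7 * 2 = 6 chunks,
+ * absorbing the rounding loss.
+ */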
+
+/*
+ * Setup one-to-one mapping between PKO2 iport and eport.
+ * @INTERNAL
+ */
+static void __cvmx_pko2_chip_init(void)
+{
+	int i;
+	int interface, index, port;
+	cvmx_helper_interface_mode_t mode;
+	union cvmx_pko_mem_iport_ptrs config;
+
+	/*
+	 * Initialize every iport with the invalid eid.
+	 */
+#define CVMX_O68_PKO2_INVALID_EID 31
+	config.u64 = 0;
+	config.s.eid = CVMX_O68_PKO2_INVALID_EID;
+	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
+		config.s.ipid = i;
+		csr_wr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+
+	/*
+	 * Set up PKO_MEM_IPORT_PTRS
+	 */
+	pko_for_each_port(port) {
+		interface = __cvmx_helper_cfg_pko_port_interface(port);
+		index = __cvmx_helper_cfg_pko_port_index(port);
+		mode = cvmx_helper_interface_get_mode(interface);
+
+		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+			continue;
+
+		config.s.ipid = port;
+		config.s.qos_mask = 0xff;
+		config.s.crc = __cvmx_helper_get_has_fcs(interface);
+		config.s.min_pkt = __cvmx_helper_get_pko_padding(interface);
+		config.s.intr = __cvmx_pko_int(interface, index);
+		config.s.eid = __cvmx_helper_cfg_pko_port_eid(port);
+		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
+					index : port;
+		csr_wr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+}
+
+int __cvmx_pko_get_pipe(int interface, int index)
+{
+	/* The loopback ports do not have pipes */
+	if (cvmx_helper_interface_get_mode(interface) ==
+	    CVMX_HELPER_INTERFACE_MODE_LOOP)
+		return -1;
+	/* We use pko_port as the pipe. See __cvmx_pko_port_map_o68(). */
+	return cvmx_helper_get_pko_port(interface, index);
+}
+
+static void __cvmx_pko1_chip_init(void)
+{
+	int queue;
+	union cvmx_pko_mem_queue_ptrs config;
+	union cvmx_pko_reg_queue_ptrs1 config1;
+	const int port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
+
+	/* Initialize all queues to connect to port 63 (ILLEGAL_PID) */
+	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
+		config1.u64 = 0;
+		config1.s.idx3 = 0;
+		config1.s.qid7 = queue >> 7;
+
+		config.u64 = 0;
+		config.s.tail = 1;
+		config.s.index = 0;
+		config.s.port = port;
+		config.s.queue = queue;
+		config.s.buf_ptr = 0;
+
+		csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+		csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+	}
+}
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system.  This does chip global config, and should only be
+ * done by one core.
+ */
+void cvmx_pko_hw_init(u8 pool, unsigned int bufsize)
+{
+	union cvmx_pko_reg_cmd_buf config;
+	union cvmx_iob_fau_timeout fau_to;
+	int i;
+
+	if (debug)
+		debug("%s: pool=%u bufsz=%u\n", __func__, pool, bufsize);
+
+	/* chip-specific setup. */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		__cvmx_pko2_chip_init();
+	else
+		__cvmx_pko1_chip_init();
+
+	/*
+	 * Set the size of the PKO command buffers to an odd number of
+	 * 64bit words. This allows the normal two word send to stay
+	 * aligned and never span a command word buffer.
+	 */
+	config.u64 = 0;
+	config.s.pool = pool;
+	config.s.size = bufsize / 8 - 1;
+	csr_wr(CVMX_PKO_REG_CMD_BUF, config.u64);
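+
+	/*
+	 * Worked example (illustrative): for bufsize = 1024 bytes,
+	 * size = 1024 / 8 - 1 = 127 words, which is odd as required,
+	 * so a two-word send command never straddles a buffer.
+	 */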
+
+	/*
+	 * Disable tagwait FAU timeout. This needs to be done before
+	 * anyone might start packet output using tags.
+	 */
+	fau_to.u64 = 0;
+	fau_to.s.tout_val = 0xfff;
+	fau_to.s.tout_enb = 0;
+	csr_wr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_pko_reg_min_pkt min_pkt;
+
+		min_pkt.u64 = 0;
+		min_pkt.s.size1 = 59;
+		min_pkt.s.size2 = 59;
+		min_pkt.s.size3 = 59;
+		min_pkt.s.size4 = 59;
+		min_pkt.s.size5 = 59;
+		min_pkt.s.size6 = 59;
+		min_pkt.s.size7 = 59;
+		csr_wr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+	}
+
+	/*
+	 * If we aren't using all of the queues optimize PKO's
+	 * internal memory.
+	 */
+	if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		int max_queues = __cvmx_helper_cfg_pko_max_queue();
+
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX) && max_queues <= 32)
+			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 3);
+		else if (max_queues <= 64)
+			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 2);
+		else if (max_queues <= 128)
+			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 1);
+		else
+			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 0);
+		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+			for (i = 0; i < 2; i++) {
+				union cvmx_pko_reg_engine_storagex
+					engine_storage;
+
+#define PKO_ASSIGN_ENGINE_STORAGE(index)                                       \
+	engine_storage.s.engine##index =                                       \
+		__cvmx_pko_memory_per_engine_o68(16 * i + (index))
+
+				engine_storage.u64 = 0;
+				PKO_ASSIGN_ENGINE_STORAGE(0);
+				PKO_ASSIGN_ENGINE_STORAGE(1);
+				PKO_ASSIGN_ENGINE_STORAGE(2);
+				PKO_ASSIGN_ENGINE_STORAGE(3);
+				PKO_ASSIGN_ENGINE_STORAGE(4);
+				PKO_ASSIGN_ENGINE_STORAGE(5);
+				PKO_ASSIGN_ENGINE_STORAGE(6);
+				PKO_ASSIGN_ENGINE_STORAGE(7);
+				PKO_ASSIGN_ENGINE_STORAGE(8);
+				PKO_ASSIGN_ENGINE_STORAGE(9);
+				PKO_ASSIGN_ENGINE_STORAGE(10);
+				PKO_ASSIGN_ENGINE_STORAGE(11);
+				PKO_ASSIGN_ENGINE_STORAGE(12);
+				PKO_ASSIGN_ENGINE_STORAGE(13);
+				PKO_ASSIGN_ENGINE_STORAGE(14);
+				PKO_ASSIGN_ENGINE_STORAGE(15);
+				csr_wr(CVMX_PKO_REG_ENGINE_STORAGEX(i),
+				       engine_storage.u64);
+			}
+		}
+	}
+}
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+void cvmx_pko_enable(void)
+{
+	union cvmx_pko_reg_flags flags;
+
+	flags.u64 = csr_rd(CVMX_PKO_REG_FLAGS);
+	if (flags.s.ena_pko)
+		debug("Warning: Enabling PKO when PKO already enabled.\n");
+
+	flags.s.ena_dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
+	flags.s.ena_pko = 1;
+	/*
+	 * Always enable big endian for the 3-word command. Does nothing
+	 * for 2-word.
+	 */
+	flags.s.store_be = 1;
+	csr_wr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @param port       Port to configure.
+ * @param base_queue First queue number to associate with this port.
+ * @param num_queues Number of queues to associate with this port
+ * @param priority   Array of priority levels for each queue. Values are
+ *                   allowed to be 0-8. A value of 8 gets 8 times the traffic
+ *                   of a value of 1. A value of 0 indicates that the queue
+ *                   participates in no rounds. These priorities can be changed
+ *                   on the fly while the PKO is enabled. A priority of 9
+ *                   indicates that static priority should be used. If static
+ *                   priority is used, all queues with static priority must be
+ *                   contiguous starting at the base_queue, and lower numbered
+ *                   queues have higher priority than higher numbered queues.
+ *                   There must be num_queues elements in the array.
+ */
+cvmx_pko_return_value_t cvmx_pko_config_port(int port, int base_queue,
+					     int num_queues,
+					     const u8 priority[])
+{
+	cvmx_pko_return_value_t result_code;
+	int queue;
+	union cvmx_pko_mem_queue_ptrs config;
+	union cvmx_pko_reg_queue_ptrs1 config1;
+	int static_priority_base = -1;
+	int static_priority_end = -1;
+	int outputbuffer_pool = (int)cvmx_fpa_get_pko_pool();
+	u64 outputbuffer_pool_size = cvmx_fpa_get_pko_pool_block_size();
+
+	/* This function is not used for CN68XX */
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return cvmx_pko2_config_port(port, base_queue, num_queues,
+					     priority);
+
+	if (debug)
+		debug("%s: port=%d queue=%d-%d pri %#x %#x %#x %#x\n", __func__,
+		      port, base_queue, (base_queue + num_queues - 1),
+		      priority[0], priority[1], priority[2], priority[3]);
+
+	/* The need to handle the ILLEGAL_PID port argument
+	 * is obsolete now; the code here could be simplified.
+	 */
+
+	if (port >= CVMX_PKO_NUM_OUTPUT_PORTS &&
+	    port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+		debug("ERROR: %s: Invalid port %llu\n", __func__,
+		      (unsigned long long)port);
+		return CVMX_PKO_INVALID_PORT;
+	}
+
+	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
+		debug("ERROR: %s: Invalid queue range port = %lld base=%llu numques=%lld\n",
+		      __func__, (unsigned long long)port,
+		      (unsigned long long)base_queue,
+		      (unsigned long long)num_queues);
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+
+	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+		/*
+		 * Validate the static queue priority setup and set
+		 * static_priority_base and static_priority_end
+		 * accordingly.
+		 */
+		for (queue = 0; queue < num_queues; queue++) {
+			/* Find first queue of static priority */
+			int p_queue = queue % 16;
+
+			if (static_priority_base == -1 &&
+			    priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
+				static_priority_base = queue;
+			/* Find last queue of static priority */
+			if (static_priority_base != -1 &&
+			    static_priority_end == -1 &&
+			    priority[p_queue] !=
+				    CVMX_PKO_QUEUE_STATIC_PRIORITY &&
+			    queue)
+				static_priority_end = queue - 1;
+			else if (static_priority_base != -1 &&
+				 static_priority_end == -1 &&
+				 queue == num_queues - 1)
+				/* all queues are static priority */
+				static_priority_end = queue;
+
+			/*
+			 * Check to make sure all static priority
+			 * queues are contiguous.  Also catches some
+			 * cases of static priorities not starting at
+			 * queue 0.
+			 */
+			if (static_priority_end != -1 &&
+			    (int)queue > static_priority_end &&
+			    priority[p_queue] ==
+				    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
+				debug("ERROR: %s: Static priority queues aren't contiguous or don't start at base queue. q: %d, eq: %d\n",
+				      __func__, (int)queue, static_priority_end);
+				return CVMX_PKO_INVALID_PRIORITY;
+			}
+		}
+		if (static_priority_base > 0) {
+			debug("ERROR: %s: Static priority queues don't start at base queue. sq: %d\n",
+			      __func__, static_priority_base);
+			return CVMX_PKO_INVALID_PRIORITY;
+		}
+	}
+
+	/*
+	 * At this point, static_priority_base and static_priority_end
+	 * are either both -1, or are valid start/end queue numbers
+	 */
+
+	result_code = CVMX_PKO_SUCCESS;
+
+	for (queue = 0; queue < num_queues; queue++) {
+		u64 *buf_ptr = NULL;
+		int p_queue = queue % 16;
+
+		config1.u64 = 0;
+		config1.s.idx3 = queue >> 3;
+		config1.s.qid7 = (base_queue + queue) >> 7;
+
+		config.u64 = 0;
+		config.s.tail = queue == (num_queues - 1);
+		config.s.index = queue;
+		config.s.port = port;
+		config.s.queue = base_queue + queue;
+
+		config.s.static_p = static_priority_base >= 0;
+		config.s.static_q = (int)queue <= static_priority_end;
+		config.s.s_tail = (int)queue == static_priority_end;
+		/*
+		 * Convert the priority into an enable bit field. Try
+		 * to space the bits out evenly so the packets don't
+		 * get grouped up. For example, priority 3 maps to the
+		 * mask 0x49 = 0b01001001: three enable bits spread
+		 * evenly across the eight scheduling rounds.
+		 */
+		switch ((int)priority[p_queue]) {
+		case 0:
+			config.s.qos_mask = 0x00;
+			break;
+		case 1:
+			config.s.qos_mask = 0x01;
+			break;
+		case 2:
+			config.s.qos_mask = 0x11;
+			break;
+		case 3:
+			config.s.qos_mask = 0x49;
+			break;
+		case 4:
+			config.s.qos_mask = 0x55;
+			break;
+		case 5:
+			config.s.qos_mask = 0x57;
+			break;
+		case 6:
+			config.s.qos_mask = 0x77;
+			break;
+		case 7:
+			config.s.qos_mask = 0x7f;
+			break;
+		case 8:
+			config.s.qos_mask = 0xff;
+			break;
+		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+			config.s.qos_mask = 0xff;
+			break;
+		default:
+			debug("ERROR: %s: Invalid priority %llu\n", __func__,
+			      (unsigned long long)priority[p_queue]);
+			config.s.qos_mask = 0xff;
+			result_code = CVMX_PKO_INVALID_PRIORITY;
+			break;
+		}
+
+		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+			cvmx_cmd_queue_result_t cmd_res;
+
+			cmd_res = cvmx_cmd_queue_initialize(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue),
+				CVMX_PKO_MAX_QUEUE_DEPTH, outputbuffer_pool,
+				outputbuffer_pool_size -
+					CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST *
+						8);
+			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
+				switch (cmd_res) {
+				case CVMX_CMD_QUEUE_NO_MEMORY:
+					debug("ERROR: %s: Unable to allocate output buffer\n",
+					      __func__);
+					return CVMX_PKO_NO_MEMORY;
+				case CVMX_CMD_QUEUE_ALREADY_SETUP:
+					debug("ERROR: %s: Port already setup. port=%d\n",
+					      __func__, (int)port);
+					return CVMX_PKO_PORT_ALREADY_SETUP;
+				case CVMX_CMD_QUEUE_INVALID_PARAM:
+				default:
+					debug("ERROR: %s: Command queue initialization failed.\n",
+					      __func__);
+					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+				}
+			}
+
+			buf_ptr = (u64 *)cvmx_cmd_queue_buffer(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue));
+			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
+		} else {
+			config.s.buf_ptr = 0;
+		}
+
+		CVMX_SYNCWS;
+
+		csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+		csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+	}
+
+	return result_code;
+}
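+
+/*
+ * Example (illustrative sketch; port and queue numbers are assumptions):
+ * give the first queue of a port static priority and share the remaining
+ * bandwidth 4:2:1 among three WRR queues.
+ *
+ *	const u8 prio[4] = { CVMX_PKO_QUEUE_STATIC_PRIORITY, 4, 2, 1 };
+ *
+ *	if (cvmx_pko_config_port(port, base_queue, 4, prio) !=
+ *	    CVMX_PKO_SUCCESS)
+ *		debug("PKO port configuration failed\n");
+ */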
+
+/*
+ * Configure queues for an internal port.
+ * @INTERNAL
+ * @param pko_port PKO internal port number
+ * @note this is the PKO2 equivalent to cvmx_pko_config_port()
+ */
+static cvmx_pko_return_value_t cvmx_pko2_config_port(short ipd_port,
+						     int base_queue,
+						     int num_queues,
+						     const u8 priority[])
+{
+	int queue, pko_port;
+	int static_priority_base;
+	int static_priority_end;
+	union cvmx_pko_mem_iqueue_ptrs config;
+	u64 *buf_ptr = NULL;
+	int outputbuffer_pool = (int)cvmx_fpa_get_pko_pool();
+	u64 outputbuffer_pool_size = cvmx_fpa_get_pko_pool_block_size();
+
+	pko_port = cvmx_helper_cfg_ipd2pko_port_base(ipd_port);
+
+	if (debug)
+		debug("%s: ipd_port %d pko_iport %d qbase %d qnum %d\n",
+		      __func__, ipd_port, pko_port, base_queue, num_queues);
+
+	static_priority_base = -1;
+	static_priority_end = -1;
+
+	/*
+	 * static queue priority validation
+	 */
+	for (queue = 0; queue < num_queues; queue++) {
+		int p_queue = queue % 16;
+
+		if (static_priority_base == -1 &&
+		    priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
+			static_priority_base = queue;
+
+		if (static_priority_base != -1 && static_priority_end == -1 &&
+		    priority[p_queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY &&
+		    queue)
+			static_priority_end = queue - 1;
+		else if (static_priority_base != -1 &&
+			 static_priority_end == -1 && queue == num_queues - 1)
+			static_priority_end =
+				queue; /* all queues are static priority */
+
+		/*
+		 * Check to make sure all static priority queues are contiguous.
+		 * Also catches some cases of static priorities not starting from
+		 * queue 0.
+		 */
+		if (static_priority_end != -1 &&
+		    (int)queue > static_priority_end &&
+		    priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY) {
+			debug("ERROR: %s: Static priority queues aren't contiguous or don't start at base queue. q: %d, eq: %d\n",
+			      __func__, (int)queue, static_priority_end);
+		}
+		if (static_priority_base > 0) {
+			debug("ERROR: %s: Static priority queues don't start at base queue. sq: %d\n",
+			      __func__, static_priority_base);
+		}
+	}
+
+	/*
+	 * main loop to set the fields of CVMX_PKO_MEM_IQUEUE_PTRS for
+	 * each queue
+	 */
+	for (queue = 0; queue < num_queues; queue++) {
+		int p_queue = queue % 8;
+
+		config.u64 = 0;
+		config.s.index = queue;
+		config.s.qid = base_queue + queue;
+		config.s.ipid = pko_port;
+		config.s.tail = (queue == (num_queues - 1));
+		config.s.s_tail = (queue == static_priority_end);
+		config.s.static_p = (static_priority_base >= 0);
+		config.s.static_q = (queue <= static_priority_end);
+
+		/*
+		 * Convert the priority into an enable bit field.
+		 * Try to space the bits out evenly so the packets
+		 * don't get grouped up.
+		 */
+		switch ((int)priority[p_queue]) {
+		case 0:
+			config.s.qos_mask = 0x00;
+			break;
+		case 1:
+			config.s.qos_mask = 0x01;
+			break;
+		case 2:
+			config.s.qos_mask = 0x11;
+			break;
+		case 3:
+			config.s.qos_mask = 0x49;
+			break;
+		case 4:
+			config.s.qos_mask = 0x55;
+			break;
+		case 5:
+			config.s.qos_mask = 0x57;
+			break;
+		case 6:
+			config.s.qos_mask = 0x77;
+			break;
+		case 7:
+			config.s.qos_mask = 0x7f;
+			break;
+		case 8:
+			config.s.qos_mask = 0xff;
+			break;
+		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+			config.s.qos_mask = 0xff;
+			break;
+		default:
+			debug("ERROR: %s: Invalid priority %llu\n", __func__,
+			      (unsigned long long)priority[p_queue]);
+			config.s.qos_mask = 0xff;
+			break;
+		}
+
+		/*
+		 * The command queues
+		 */
+		{
+			cvmx_cmd_queue_result_t cmd_res;
+
+			cmd_res = cvmx_cmd_queue_initialize(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue),
+				CVMX_PKO_MAX_QUEUE_DEPTH, outputbuffer_pool,
+				(outputbuffer_pool_size -
+				 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
+
+			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
+				switch (cmd_res) {
+				case CVMX_CMD_QUEUE_NO_MEMORY:
+					debug("ERROR: %s: Unable to allocate output buffer\n",
+					      __func__);
+					break;
+				case CVMX_CMD_QUEUE_ALREADY_SETUP:
+					debug("ERROR: %s: Port already setup\n",
+					      __func__);
+					break;
+				case CVMX_CMD_QUEUE_INVALID_PARAM:
+				default:
+					debug("ERROR: %s: Command queue initialization failed.",
+					      __func__);
+					break;
+				}
+				debug(" pko_port%d base_queue%d num_queues%d queue%d.\n",
+				      pko_port, base_queue, num_queues, queue);
+			}
+
+			buf_ptr = (u64 *)cvmx_cmd_queue_buffer(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue));
+			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
+		}
+
+		CVMX_SYNCWS;
+		csr_wr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+	}
+
+	/* Error detection is desirable here */
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-pko3-compat.c b/arch/mips/mach-octeon/cvmx-pko3-compat.c
new file mode 100644
index 0000000..3e14232
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko3-compat.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-scratch.h>
+#include <mach/cvmx-hwfau.h>
+#include <mach/cvmx-fau.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/* #undef CVMX_ENABLE_PARAMETER_CHECKING */
+/* #define CVMX_ENABLE_PARAMETER_CHECKING 1 */
+/* #define	__PKO3_NATIVE_PTR */
+
+static inline u64 cvmx_pko3_legacy_paddr(unsigned int node, u64 addr)
+{
+	u64 paddr;
+
+	paddr = node;
+	paddr = (addr & ((1ull << 40) - 1)) | (paddr << 40);
+	return paddr;
+}
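+
+/*
+ * Worked example (illustrative): for node 1 and addr 0x2000, the low
+ * 40 bits of the address are kept and the node number is placed above
+ * them: paddr = (1ull << 40) | 0x2000 = 0x10000002000.
+ */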
+
+#if CVMX_ENABLE_PARAMETER_CHECKING
+/**
+ * @INTERNAL
+ *
+ * Verify the integrity of a legacy buffer link pointer.
+ *
+ * Note that the IPD/PIP/PKO hardware would sometimes
+ * round up the buf_ptr->size field of the last buffer in a chain to the
+ * next cache line size, so the sum of buf_ptr->size
+ * fields for a packet may exceed total_bytes by up to 127 bytes.
+ *
+ * @returns 0 on success, a negative number on error.
+ */
+static int cvmx_pko3_legacy_bufptr_validate(cvmx_buf_ptr_t buf_ptr,
+					    unsigned int gather,
+					    unsigned int buffers,
+					    unsigned int total_bytes)
+{
+	unsigned int node = cvmx_get_node_num();
+	unsigned int segs = 0, bytes = 0;
+	unsigned int phys_addr;
+	cvmx_buf_ptr_t ptr;
+	int delta;
+
+	if (buffers == 0) {
+		return -1;
+	} else if (buffers == 1) {
+		delta = buf_ptr.s.size - total_bytes;
+		if (delta < 0 || delta > 127)
+			return -2;
+	} else if (gather) {
+		cvmx_buf_ptr_t *vptr;
+		/* Validate gather list */
+		if (buf_ptr.s.size < buffers)
+			return -3;
+		phys_addr = cvmx_pko3_legacy_paddr(node, buf_ptr.s.addr);
+		vptr = cvmx_phys_to_ptr(phys_addr);
+		for (segs = 0; segs < buffers; segs++)
+			bytes += vptr[segs].s.size;
+		delta = bytes - total_bytes;
+		if (delta < 0 || delta > 127)
+			return -4;
+	} else {
+		void *vptr;
+		/* Validate linked buffers */
+		ptr = buf_ptr;
+		for (segs = 0; segs < buffers; segs++) {
+			bytes += ptr.s.size;
+			phys_addr = cvmx_pko3_legacy_paddr(node, ptr.s.addr);
+			vptr = cvmx_phys_to_ptr(phys_addr);
+			memcpy(&ptr, vptr - sizeof(u64), sizeof(u64));
+		}
+		delta = bytes - total_bytes;
+		if (delta < 0 || delta > 127)
+			return -5;
+	}
+	return 0;
+}
+#endif /* CVMX_ENABLE_PARAMETER_CHECKING */
+
+/*
+ * @INTERNAL
+ *
+ * Implementation note:
+ * When the packet is sure not to need a jump_buf,
+ * it is written directly into cvmseg.
+ * When the packet might not fit into cvmseg with all
+ * of its descriptors, a jump_buf is allocated a priori,
+ * only the header is placed into cvmseg, all other
+ * descriptors are placed into jump_buf, and finally
+ * the PKO_SEND_JUMP_S is written to cvmseg.
+ * This is because if there are no EXT or TSO descriptors,
+ * then HDR must be first and JMP second, and that is all
+ * that should go into cvmseg.
+ */
+struct __cvmx_pko3_legacy_desc {
+	u64 *cmd_words;		/* command words in cvmseg */
+	u64 *jump_buf_base_ptr;	/* JUMP_S buffer, NULL when unused */
+	unsigned short word_count;	/* words used in cmd_words */
+	short last_pool;	/* pool of the last SEND_AURA_S, -1 if none */
+	u8 port_node;		/* destination (port) OCI node */
+	u8 aura_node;		/* node of the legacy pool's AURA */
+	u8 jump_buf_size;	/* words used in jump_buf */
+};
+
+/**
+ * @INTERNAL
+ *
+ * Add a subdescriptor into a command buffer,
+ * and handle command-buffer overflow by allocating a JUMP_s buffer
+ * from PKO3 internal AURA.
+ */
+static int __cvmx_pko3_cmd_subdc_add(struct __cvmx_pko3_legacy_desc *desc,
+				     u64 subdc)
+{
+	/* SEND_JUMP_S is missing on Pass1.X, so commands are capped at 15 words */
+	if (desc->word_count >= 15) {
+		printf("%s: ERROR: too many segments\n", __func__);
+		return -EBADF;
+	}
+
+	/* Handle small commands simply */
+	if (cvmx_likely(!desc->jump_buf_base_ptr)) {
+		desc->cmd_words[desc->word_count] = subdc;
+		(desc->word_count)++;
+		return desc->word_count;
+	}
+
+	if (cvmx_unlikely(desc->jump_buf_size >= 255))
+		return -ENOMEM;
+
+	desc->jump_buf_base_ptr[desc->jump_buf_size++] = subdc;
+
+	return desc->word_count + desc->jump_buf_size;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Finalize command buffer
+ *
+ * @returns: number of command words in command buffer and jump buffer
+ * or negative number on error.
+ */
+
+static int __cvmx_pko3_cmd_done(struct __cvmx_pko3_legacy_desc *desc)
+{
+	short pko_aura;
+	cvmx_pko_buf_ptr_t jump_s;
+	cvmx_pko_send_aura_t aura_s;
+
+	/* no jump buffer, nothing to do */
+	if (!desc->jump_buf_base_ptr)
+		return desc->word_count;
+
+	desc->word_count++;
+
+	/* With a jump buffer, cvmseg must hold exactly two words: HDR_S + JUMP_S */
+	if (desc->word_count != 2) {
+		printf("ERROR: %s: internal error, word_count=%d\n", __func__,
+		       desc->word_count);
+		return -EINVAL;
+	}
+
+	/* Add SEND_AURA_S at the end of jump_buf */
+	pko_aura = __cvmx_pko3_aura_get(desc->port_node);
+
+	aura_s.u64 = 0;
+	aura_s.s.aura = pko_aura;
+	aura_s.s.offset = 0;
+	aura_s.s.alg = AURAALG_NOP;
+	aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
+
+	desc->jump_buf_base_ptr[desc->jump_buf_size++] = aura_s.u64;
+
+	/* Add SEND_JUMPS to point to jump_buf */
+	jump_s.u64 = 0;
+	jump_s.s.subdc3 = CVMX_PKO_SENDSUBDC_JUMP;
+	jump_s.s.addr = cvmx_ptr_to_phys(desc->jump_buf_base_ptr);
+	jump_s.s.i = 1; /* i=1: free this buffer when done */
+	jump_s.s.size = desc->jump_buf_size;
+	desc->cmd_words[1] = jump_s.u64;
+
+	return desc->word_count + desc->jump_buf_size;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Handle buffer pools for PKO legacy transmit operation
+ */
+static inline int cvmx_pko3_legacy_pool(struct __cvmx_pko3_legacy_desc *desc,
+					int pool)
+{
+	cvmx_pko_send_aura_t aura_s;
+	unsigned int aura;
+
+	if (cvmx_unlikely(desc->last_pool == pool))
+		return 0;
+
+	aura = desc->aura_node << 10; /* LAURA=AURA[0..9] */
+	aura |= pool;
+
+	if (cvmx_likely(desc->last_pool < 0)) {
+		cvmx_pko_send_hdr_t *hdr_s;
+
+		hdr_s = (void *)&desc->cmd_words[0];
+		/* Create AURA from legacy pool (assume LAURA == POOL) */
+		hdr_s->s.aura = aura;
+		desc->last_pool = pool;
+		return 0;
+	}
+
+	aura_s.u64 = 0;
+	aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
+	aura_s.s.offset = 0;
+	aura_s.s.alg = AURAALG_NOP;
+	aura |= pool;
+	aura_s.s.aura = aura;
+	desc->last_pool = pool;
+	return __cvmx_pko3_cmd_subdc_add(desc, aura_s.u64);
+}
+
+/**
+ * @INTERNAL
+ *
+ * Backward compatibility for packet transmission using legacy PKO command.
+ *
+ * NOTE: Only supports output on node-local ports.
+ *
+ * TBD: Could embed destination node in extended DQ number.
+ */
+cvmx_pko_return_value_t
+cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command,
+		      cvmx_buf_ptr_t packet, u64 addr, bool tag_sw)
+{
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_send_hdr_t *hdr_s;
+	struct __cvmx_pko3_legacy_desc desc;
+	u8 *data_ptr;
+	unsigned int node, seg_cnt;
+	int res;
+	cvmx_buf_ptr_pki_t bptr;
+
+	seg_cnt = pko_command.s.segs;
+	desc.cmd_words = cvmx_pko3_cvmseg_addr();
+
+	/* Allocate from local aura, assume all old-pools are local */
+	node = cvmx_get_node_num();
+	desc.aura_node = node;
+
+	/* Derive destination node from dq */
+	desc.port_node = dq >> 10;
+	dq &= (1 << 10) - 1;
+
+	desc.word_count = 1;
+	desc.last_pool = -1;
+
+	/* For small packets, write descriptors directly to CVMSEG
+	 * but for longer packets use jump_buf
+	 */
+	if (seg_cnt < 7 || OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
+		desc.jump_buf_size = 0;
+		desc.jump_buf_base_ptr = NULL;
+	} else {
+		unsigned int pko_aura = __cvmx_pko3_aura_get(desc.port_node);
+
+		cvmx_fpa3_gaura_t aura =
+			__cvmx_fpa3_gaura(pko_aura >> 10, pko_aura & 0x3ff);
+
+		/* Allocate from internal AURA, size is 4KiB */
+		desc.jump_buf_base_ptr = cvmx_fpa3_alloc(aura);
+
+		if (!desc.jump_buf_base_ptr)
+			return -ENOMEM;
+		desc.jump_buf_size = 0;
+	}
+
+	/* Native buffer-pointer for error checking */
+	bptr.u64 = packet.u64;
+
+#if CVMX_ENABLE_PARAMETER_CHECKING
+	if (seg_cnt == 1 && bptr.size == pko_command.s.total_bytes) {
+		/*
+		 * Special case for native buffer pointer:
+		 * This is the only case where the native pointer-style can be
+		 * automatically identified, that is when an entire packet
+		 * fits into a single buffer by the PKI.
+		 * The use of the native buffers with this function
+		 * should be avoided.
+		 */
+		debug("%s: WARNING: Native buffer-pointer\n", __func__);
+	} else {
+		/* The buffer ptr is assumed to be received in legacy format */
+		res = cvmx_pko3_legacy_bufptr_validate(
+			packet, pko_command.s.gather, pko_command.s.segs,
+			pko_command.s.total_bytes);
+		if (res < 0) {
+			debug("%s: ERROR: Not a valid packet pointer <%d>\n",
+			      __func__, res);
+			return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+		}
+	}
+#endif /* CVMX_ENABLE_PARAMETER_CHECKING */
+
+	/* Squash warnings */
+	(void)bptr;
+
+	/*** Translate legacy PKO fields into PKO3 PKO_SEND_HDR_S ***/
+
+	/* PKO_SEND_HDR_S is always the first word in the command */
+	hdr_s = (void *)&desc.cmd_words[0];
+	hdr_s->u64 = 0;
+
+	/* Copy total packet size */
+	hdr_s->s.total = pko_command.s.total_bytes;
+
+	/* Endianness */
+	hdr_s->s.le = pko_command.s.le;
+
+	/* N2 is the same meaning */
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+		hdr_s->s.n2 = 0; /* L2 allocate everything */
+	else
+		hdr_s->s.n2 = pko_command.s.n2;
+
+	/* DF bit has the same meaning */
+	hdr_s->s.df = pko_command.s.dontfree;
+
+	/* II bit has the same meaning */
+	hdr_s->s.ii = pko_command.s.ignore_i;
+
+	/* non-zero IP header offset requires L3/L4 checksum calculation */
+	if (cvmx_unlikely(pko_command.s.ipoffp1 > 0)) {
+		u8 ipoff, ip0, l4_proto = 0;
+
+		/* Get data pointer for header inspection below */
+		if (pko_command.s.gather) {
+			cvmx_buf_ptr_t *p_ptr;
+			cvmx_buf_ptr_t blk;
+
+			p_ptr = cvmx_phys_to_ptr(
+				cvmx_pko3_legacy_paddr(node, packet.s.addr));
+			blk = p_ptr[0];
+			data_ptr = cvmx_phys_to_ptr(
+				cvmx_pko3_legacy_paddr(node, blk.s.addr));
+		} else {
+			data_ptr = cvmx_phys_to_ptr(
+				cvmx_pko3_legacy_paddr(node, packet.s.addr));
+		}
+
+		/* Get IP header offset */
+		ipoff = pko_command.s.ipoffp1 - 1;
+
+		/* Parse IP header, version, L4 protocol */
+		hdr_s->s.l3ptr = ipoff;
+		ip0 = data_ptr[ipoff];
+
+		/* IPv4 header length, checksum offload */
+		if ((ip0 >> 4) == 4) {
+			hdr_s->s.l4ptr = hdr_s->s.l3ptr + ((ip0 & 0xf) << 2);
+			l4_proto = data_ptr[ipoff + 9];
+			hdr_s->s.ckl3 = 1; /* Only valid for IPv4 */
+		}
+		/* IPv6 header length is fixed, no checksum */
+		if ((ip0 >> 4) == 6) {
+			hdr_s->s.l4ptr = hdr_s->s.l3ptr + 40;
+			l4_proto = data_ptr[ipoff + 6];
+		}
+		/* Set L4 checksum algo based on L4 protocol */
+		if (l4_proto == 6)
+			hdr_s->s.ckl4 = /* TCP */ 2;
+		else if (l4_proto == 17)
+			hdr_s->s.ckl4 = /* UDP */ 1;
+		else if (l4_proto == 132)
+			hdr_s->s.ckl4 = /* SCTP */ 3;
+		else
+			hdr_s->s.ckl4 = /* Unknown */ 0;
+	}
+
+	if (pko_command.s.gather) {
+		/* Process legacy gather list */
+		cvmx_pko_buf_ptr_t gather_s;
+		cvmx_buf_ptr_t *p_ptr;
+		cvmx_buf_ptr_t blk;
+		unsigned int i;
+
+		/* Get gather list pointer */
+		p_ptr = cvmx_phys_to_ptr(
+			cvmx_pko3_legacy_paddr(node, packet.s.addr));
+		blk = p_ptr[0];
+		/* setup data_ptr */
+		data_ptr = cvmx_phys_to_ptr(
+			cvmx_pko3_legacy_paddr(node, blk.s.addr));
+
+		for (i = 0; i < seg_cnt; i++) {
+			if (cvmx_unlikely(cvmx_pko3_legacy_pool(
+						  &desc, blk.s.pool) < 0))
+				return CVMX_PKO_NO_MEMORY;
+
+			/* Insert PKO_SEND_GATHER_S for the current buffer */
+			gather_s.u64 = 0;
+			gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
+			gather_s.s.size = blk.s.size;
+			gather_s.s.i = blk.s.i;
+			gather_s.s.addr =
+				cvmx_pko3_legacy_paddr(node, blk.s.addr);
+
+			res = __cvmx_pko3_cmd_subdc_add(&desc, gather_s.u64);
+			if (res < 0)
+				return CVMX_PKO_NO_MEMORY;
+
+			/* get next bufptr */
+			blk = p_ptr[i + 1];
+		} /* for i */
+
+		/* Free original gather-list buffer */
+		if ((pko_command.s.ignore_i && !pko_command.s.dontfree) ||
+		    packet.s.i == pko_command.s.dontfree)
+			cvmx_fpa_free_nosync(p_ptr, packet.s.pool,
+					     (i - 1) / 16 + 1);
+	} else {
+		/* Process legacy linked buffer list */
+		cvmx_pko_buf_ptr_t gather_s;
+		cvmx_buf_ptr_t blk;
+		void *vptr;
+
+		data_ptr = cvmx_phys_to_ptr(
+			cvmx_pko3_legacy_paddr(node, packet.s.addr));
+		blk = packet;
+
+		/*
+		 * Legacy linked-buffers converted into flat gather list
+		 * so that the AURA can optionally be changed to reflect
+		 * the POOL number in the legacy pointers
+		 */
+		do {
+			/* Insert PKO_SEND_AURA_S if pool changes */
+			if (cvmx_unlikely(cvmx_pko3_legacy_pool(
+						  &desc, blk.s.pool) < 0))
+				return CVMX_PKO_NO_MEMORY;
+
+			/* Insert PKO_SEND_GATHER_S for the current buffer */
+			gather_s.u64 = 0;
+			gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
+			gather_s.s.size = blk.s.size;
+			gather_s.s.i = blk.s.i;
+			gather_s.s.addr =
+				cvmx_pko3_legacy_paddr(node, blk.s.addr);
+
+			res = __cvmx_pko3_cmd_subdc_add(&desc, gather_s.u64);
+			if (res < 0)
+				return CVMX_PKO_NO_MEMORY;
+
+			/* Get the next buffer pointer */
+			vptr = cvmx_phys_to_ptr(
+				cvmx_pko3_legacy_paddr(node, blk.s.addr));
+			memcpy(&blk, vptr - sizeof(blk), sizeof(blk));
+
+			/* Decrement segment count */
+			seg_cnt--;
+
+		} while (seg_cnt > 0);
+	}
+
+	/* This field indicates the presence of 3rd legacy command word */
+	/* NOTE: legacy 3rd word may contain CN78XX native phys addr already */
+	if (cvmx_unlikely(pko_command.s.rsp)) {
+		/* PTP bit in word3 is not supported -
+		 * it cannot be distinguished from larger phys_addr[42..41]
+		 */
+		if (pko_command.s.wqp) {
+			/* <addr> is an SSO WQE */
+			cvmx_wqe_word1_t *wqe_p;
+			cvmx_pko_send_work_t work_s;
+
+			work_s.u64 = 0;
+			work_s.s.subdc4 = CVMX_PKO_SENDSUBDC_WORK;
+			work_s.s.addr = addr;
+			/* Assume WQE is legacy format too */
+			wqe_p = cvmx_phys_to_ptr(addr + sizeof(u64));
+			work_s.s.grp = wqe_p->cn38xx.grp;
+			work_s.s.tt = wqe_p->tag_type;
+
+			res = __cvmx_pko3_cmd_subdc_add(&desc, work_s.u64);
+		} else {
+			cvmx_pko_send_mem_t mem_s;
+			/* MEMALG_SET broken on Pass1 */
+			if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
+				debug("%s: ERROR: PKO byte-clear not supported\n",
+				      __func__);
+			}
+			/* <addr> is a physical address of byte clear */
+			mem_s.u64 = 0;
+			mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
+			mem_s.s.addr = addr;
+			mem_s.s.dsz = MEMDSZ_B8;
+			mem_s.s.alg = MEMALG_SET;
+			mem_s.s.offset = 0;
+
+			res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
+		}
+		if (res < 0)
+			return CVMX_PKO_NO_MEMORY;
+	}
+
+	/* FAU counter binding reg0 */
+	if (pko_command.s.reg0) {
+		cvmx_pko_send_mem_t mem_s;
+
+		debug("%s: Legacy FAU commands: reg0=%#x sz0=%#x\n", __func__,
+		      pko_command.s.reg0, pko_command.s.size0);
+		mem_s.u64 = 0;
+		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
+		mem_s.s.addr = cvmx_ptr_to_phys(
+			CASTPTR(void, __cvmx_fau_sw_addr(pko_command.s.reg0)));
+		if (cvmx_likely(pko_command.s.size0 == CVMX_FAU_OP_SIZE_64))
+			mem_s.s.dsz = MEMDSZ_B64;
+		else if (pko_command.s.size0 == CVMX_FAU_OP_SIZE_32)
+			mem_s.s.dsz = MEMDSZ_B32;
+		else if (pko_command.s.size0 == CVMX_FAU_OP_SIZE_16)
+			mem_s.s.dsz = MEMDSZ_B16;
+		else
+			mem_s.s.dsz = MEMDSZ_B8;
+
+		if (mem_s.s.dsz == MEMDSZ_B16 || mem_s.s.dsz == MEMDSZ_B8)
+			debug("%s: ERROR: 8/16 bit decrement unsupported",
+			      __func__);
+
+		mem_s.s.offset = pko_command.s.subone0;
+		if (mem_s.s.offset)
+			mem_s.s.alg = MEMALG_SUB;
+		else
+			mem_s.s.alg = MEMALG_SUBLEN;
+
+		res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
+		if (res < 0)
+			return CVMX_PKO_NO_MEMORY;
+	}
+
+	/* FAU counter binding reg1 */
+	if (cvmx_unlikely(pko_command.s.reg1)) {
+		cvmx_pko_send_mem_t mem_s;
+
+		debug("%s: Legacy FAU commands: reg1=%#x sz1=%#x\n", __func__,
+		      pko_command.s.reg1, pko_command.s.size1);
+		mem_s.u64 = 0;
+		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
+		mem_s.s.addr = cvmx_ptr_to_phys(
+			CASTPTR(void, __cvmx_fau_sw_addr(pko_command.s.reg1)));
+		if (cvmx_likely(pko_command.s.size1 == CVMX_FAU_OP_SIZE_64))
+			mem_s.s.dsz = MEMDSZ_B64;
+		else if (pko_command.s.size1 == CVMX_FAU_OP_SIZE_32)
+			mem_s.s.dsz = MEMDSZ_B32;
+		else if (pko_command.s.size1 == CVMX_FAU_OP_SIZE_16)
+			mem_s.s.dsz = MEMDSZ_B16;
+		else
+			mem_s.s.dsz = MEMDSZ_B8;
+
+		if (mem_s.s.dsz == MEMDSZ_B16 || mem_s.s.dsz == MEMDSZ_B8)
+			printf("%s: ERROR: 8/16 bit decrement unsupported",
+			       __func__);
+
+		mem_s.s.offset = pko_command.s.subone1;
+		if (mem_s.s.offset)
+			mem_s.s.alg = MEMALG_SUB;
+		else
+			mem_s.s.alg = MEMALG_SUBLEN;
+
+		res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
+		if (res < 0)
+			return CVMX_PKO_NO_MEMORY;
+	}
+
+	/* These PKO_HDR_S fields are not used: */
+	/* hdr_s->s.ds does not have legacy equivalent, remains 0 */
+	/* hdr_s->s.format has no legacy equivalent, remains 0 */
+
+	/*** Finalize command buffer ***/
+	res = __cvmx_pko3_cmd_done(&desc);
+	if (res < 0)
+		return CVMX_PKO_NO_MEMORY;
+
+	/*** Send the PKO3 command into the Descriptor Queue ***/
+	pko_status =
+		__cvmx_pko3_lmtdma(desc.port_node, dq, desc.word_count, tag_sw);
+
+	/*** Map PKO3 result codes to legacy return values ***/
+	if (cvmx_likely(pko_status.s.dqstatus == PKO_DQSTATUS_PASS))
+		return CVMX_PKO_SUCCESS;
+
+	debug("%s: ERROR: failed to enqueue: %s\n", __func__,
+	      pko_dqstatus_error(pko_status.s.dqstatus));
+
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_ALREADY)
+		return CVMX_PKO_PORT_ALREADY_SETUP;
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_NOFPABUF ||
+	    pko_status.s.dqstatus == PKO_DQSTATUS_NOPKOBUF)
+		return CVMX_PKO_NO_MEMORY;
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_NOTCREATED)
+		return CVMX_PKO_INVALID_QUEUE;
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_BADSTATE)
+		return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+	if (pko_status.s.dqstatus == PKO_DQSTATUS_SENDPKTDROP)
+		return CVMX_PKO_INVALID_PORT;
+
+	return CVMX_PKO_INVALID_PORT;
+}
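+
+/*
+ * Usage sketch (illustrative; field values are assumptions): transmit
+ * a single-segment packet held in a legacy buffer pointer through a
+ * PKO3 descriptor queue.
+ *
+ *	cvmx_pko_command_word0_t cmd;
+ *
+ *	cmd.u64 = 0;
+ *	cmd.s.segs = 1;
+ *	cmd.s.total_bytes = packet.s.size;
+ *	cvmx_pko3_legacy_xmit(dq, cmd, packet, 0, false);
+ */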
diff --git a/arch/mips/mach-octeon/cvmx-pko3-queue.c b/arch/mips/mach-octeon/cvmx-pko3-queue.c
new file mode 100644
index 0000000..e28afdf
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko3-queue.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+/* Smallest Round-Robin quantum to use +1 */
+#define CVMX_PKO3_RR_QUANTUM_MIN 0x10
+
+static int debug; /* 1 for basic, 2 for detailed trace */
+
+struct cvmx_pko3_dq {
+	unsigned dq_count : 6; /* Number of descriptor queues */
+	unsigned dq_base : 10; /* Descriptor queue start number */
+#define CVMX_PKO3_SWIZZLE_IPD 0x0
+};
+
+/*
+ * @INTERNAL
+ * Descriptor Queue to IPD port mapping table.
+ *
+ * This pointer is per-core, contains the virtual address
+ * of a global named block which has 2^12 entries per each
+ * possible node.
+ */
+struct cvmx_pko3_dq *__cvmx_pko3_dq_table;
+
+int cvmx_pko3_get_queue_base(int ipd_port)
+{
+	struct cvmx_pko3_dq *dq_table;
+	int ret = -1;
+	unsigned int i;
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
+
+	/* set up the global table on first use */
+	if (cvmx_unlikely(!__cvmx_pko3_dq_table))
+		__cvmx_pko3_dq_table_setup();
+
+	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
+
+	/* get per-node table */
+	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;
+
+	if (cvmx_likely(dq_table[i].dq_count > 0))
+		ret = xp.node << 10 | dq_table[i].dq_base;
+	else if (debug)
+		cvmx_printf("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
+			    ipd_port);
+
+	return ret;
+}
+
+int cvmx_pko3_get_queue_num(int ipd_port)
+{
+	struct cvmx_pko3_dq *dq_table;
+	int ret = -1;
+	unsigned int i;
+	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);
+
+	/* set up the global table on first use */
+	if (cvmx_unlikely(!__cvmx_pko3_dq_table))
+		__cvmx_pko3_dq_table_setup();
+
+	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
+
+	/* get per-node table */
+	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;
+
+	if (cvmx_likely(dq_table[i].dq_count > 0))
+		ret = dq_table[i].dq_count;
+	else if (debug)
+		debug("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
+		      ipd_port);
+
+	return ret;
+}
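+
+/*
+ * Usage sketch (illustrative): the DQs of a port are consecutive
+ * starting at the base, so the pair of lookups below selects the
+ * full range for an IPD port.
+ *
+ *	int dq_base = cvmx_pko3_get_queue_base(ipd_port);
+ *	int dq_count = cvmx_pko3_get_queue_num(ipd_port);
+ *
+ *	if (dq_base < 0 || dq_count <= 0)
+ *		return -1;	// no queues registered for this port
+ */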
+
+/**
+ * @INTERNAL
+ *
+ * Initialize port/dq table contents
+ */
+static void __cvmx_pko3_dq_table_init(void *ptr)
+{
+	unsigned int size = sizeof(struct cvmx_pko3_dq) *
+			    CVMX_PKO3_IPD_NUM_MAX * CVMX_MAX_NODES;
+
+	memset(ptr, 0, size);
+}
+
+/**
+ * @INTERNAL
+ *
+ * Find or allocate global port/dq map table
+ * which is a named table, contains entries for
+ * all possible OCI nodes.
+ *
+ * The table global pointer is stored in core-local variable
+ * so that every core will call this function once, on first use.
+ */
+int __cvmx_pko3_dq_table_setup(void)
+{
+	void *ptr;
+
+	ptr = cvmx_bootmem_alloc_named_range_once(
+		/* size */
+		sizeof(struct cvmx_pko3_dq) * CVMX_PKO3_IPD_NUM_MAX *
+			CVMX_MAX_NODES,
+		/* min_addr, max_addr, align */
+		0ull, 0ull, sizeof(struct cvmx_pko3_dq),
+		/* name */
+		"cvmx_pko3_global_dq_table", __cvmx_pko3_dq_table_init);
+
+	if (debug)
+		debug("%s: dq_table_ptr=%p\n", __func__, ptr);
+
+	if (!ptr)
+		return -1;
+
+	__cvmx_pko3_dq_table = ptr;
+	return 0;
+}
+
+/*
+ * @INTERNAL
+ * Register a range of Descriptor Queues with an interface port
+ *
+ * This function populates the DQ-to-IPD translation table
+ * used by the application to retrieve the DQ range (typically ordered
+ * by priority) for a given IPD-port, which is either a physical port,
+ * or a channel on a channelized interface (i.e. ILK).
+ *
+ * @param xiface is the physical interface number
+ * @param index is either a physical port on an interface
+ *        or a channel of an ILK interface
+ * @param dq_base is the first Descriptor Queue number in a consecutive range
+ * @param dq_count is the number of consecutive Descriptor Queues belonging
+ *        to the same channel or port.
+ *
+ * Only a consecutive range of Descriptor Queues can be associated with any
+ * given channel/port, and usually they are ordered from most to least
+ * in terms of scheduling priority.
+ *
+ * Note: this function only populates the node-local translation table.
+ * NOTE: This function would be cleaner if it had a single ipd_port argument
+ *
+ * @returns 0 on success, -1 on failure.
+ */
+int __cvmx_pko3_ipd_dq_register(int xiface, int index, unsigned int dq_base,
+				unsigned int dq_count)
+{
+	struct cvmx_pko3_dq *dq_table;
+	int ipd_port;
+	unsigned int i;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	struct cvmx_xport xp;
+
+	if (__cvmx_helper_xiface_is_null(xiface)) {
+		ipd_port = cvmx_helper_node_to_ipd_port(xi.node,
+							CVMX_PKO3_IPD_PORT_NULL);
+	} else {
+		int p;
+
+		p = cvmx_helper_get_ipd_port(xiface, index);
+		if (p < 0) {
+			cvmx_printf("ERROR: %s: xiface %#x has no IPD port\n",
+				    __func__, xiface);
+			return -1;
+		}
+		ipd_port = p;
+	}
+
+	xp = cvmx_helper_ipd_port_to_xport(ipd_port);
+
+	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;
+
+	/* set up the global table on first use */
+	if (!__cvmx_pko3_dq_table)
+		__cvmx_pko3_dq_table_setup();
+
+	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xi.node;
+
+	if (debug)
+		debug("%s: ipd_port=%#x ix=%#x dq %u cnt %u\n", __func__,
+		      ipd_port, i, dq_base, dq_count);
+
+	/* Check the IPD port has not already been configured */
+	if (dq_table[i].dq_count > 0) {
+		cvmx_printf("%s: ERROR: IPD %#x already registered\n", __func__,
+			    ipd_port);
+		return -1;
+	}
+
+	/* Store DQ# range in the queue lookup table */
+	dq_table[i].dq_base = dq_base;
+	dq_table[i].dq_count = dq_count;
+
+	return 0;
+}
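+
+/*
+ * Usage sketch (illustrative): register four consecutive DQs, ordered
+ * from highest to lowest priority, for one port of an interface.
+ *
+ *	if (__cvmx_pko3_ipd_dq_register(xiface, index, dq_base, 4) < 0)
+ *		return -1;
+ */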
+
+/*
+ * @INTERNAL
+ * Convert normal CHAN_E (i.e. IPD port) value to compressed channel form
+ * that is used to populate PKO_LUT.
+ *
+ * Note: This code may be model specific.
+ */
+static int cvmx_pko3_chan_2_xchan(uint16_t ipd_port)
+{
+	u16 xchan;
+	u8 off;
+	static const u8 *xchan_base;
+	static const u8 xchan_base_cn78xx[16] = {
+		/* IPD 0x000 */ 0x3c0 >> 4, /* LBK */
+		/* IPD 0x100 */ 0x380 >> 4, /* DPI */
+		/* IPD 0x200 */ 0xfff >> 4, /* not used */
+		/* IPD 0x300 */ 0xfff >> 4, /* not used */
+		/* IPD 0x400 */ 0x000 >> 4, /* ILK0 */
+		/* IPD 0x500 */ 0x100 >> 4, /* ILK1 */
+		/* IPD 0x600 */ 0xfff >> 4, /* not used */
+		/* IPD 0x700 */ 0xfff >> 4, /* not used */
+		/* IPD 0x800 */ 0x200 >> 4, /* BGX0 */
+		/* IPD 0x900 */ 0x240 >> 4, /* BGX1 */
+		/* IPD 0xa00 */ 0x280 >> 4, /* BGX2 */
+		/* IPD 0xb00 */ 0x2c0 >> 4, /* BGX3 */
+		/* IPD 0xc00 */ 0x300 >> 4, /* BGX4 */
+		/* IPD 0xd00 */ 0x340 >> 4, /* BGX5 */
+		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
+	};
+	static const u8 xchan_base_cn73xx[16] = {
+		/* IPD 0x000 */ 0x0c0 >> 4, /* LBK */
+		/* IPD 0x100 */ 0x100 >> 4, /* DPI */
+		/* IPD 0x200 */ 0xfff >> 4, /* not used */
+		/* IPD 0x300 */ 0xfff >> 4, /* not used */
+		/* IPD 0x400 */ 0xfff >> 4, /* not used */
+		/* IPD 0x500 */ 0xfff >> 4, /* not used */
+		/* IPD 0x600 */ 0xfff >> 4, /* not used */
+		/* IPD 0x700 */ 0xfff >> 4, /* not used */
+		/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
+		/* IPD 0x900 */ 0x040 >> 4, /* BGX1 */
+		/* IPD 0xa00 */ 0x080 >> 4, /* BGX2 */
+		/* IPD 0xb00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xc00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xd00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
+	};
+	static const u8 xchan_base_cn75xx[16] = {
+		/* IPD 0x000 */ 0x040 >> 4, /* LBK */
+		/* IPD 0x100 */ 0x080 >> 4, /* DPI */
+		/* IPD 0x200 */ 0xeee >> 4, /* SRIO0  noop */
+		/* IPD 0x300 */ 0xfff >> 4, /* not used */
+		/* IPD 0x400 */ 0xfff >> 4, /* not used */
+		/* IPD 0x500 */ 0xfff >> 4, /* not used */
+		/* IPD 0x600 */ 0xfff >> 4, /* not used */
+		/* IPD 0x700 */ 0xfff >> 4, /* not used */
+		/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
+		/* IPD 0x900 */ 0xfff >> 4, /* not used */
+		/* IPD 0xa00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xb00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xc00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xd00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
+		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
+	};
+
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		xchan_base = xchan_base_cn73xx;
+	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		xchan_base = xchan_base_cn75xx;
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		xchan_base = xchan_base_cn78xx;
+
+	if (!xchan_base)
+		return -1;
+
+	xchan = ipd_port >> 8;
+
+	/* ILKx and DPI have 8-bit logical channels, the others just 6 */
+	if (((xchan & 0xfe) == 0x04) || xchan == 0x01)
+		off = ipd_port & 0xff;
+	else
+		off = ipd_port & 0x3f;
+
+	xchan = xchan_base[xchan & 0xf];
+
+	if (xchan == 0xff)
+		return -1; /* Invalid IPD_PORT */
+	else if (xchan == 0xee)
+		return -2; /* LUT not used */
+	else
+		return (xchan << 4) | off;
+}
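+
+/*
+ * Worked example (illustrative): on CN78XX, ipd_port 0x842 indexes the
+ * BGX0 entry (base 0x200, so the table holds 0x20), the 6-bit channel
+ * offset is 0x42 & 0x3f = 0x02, and the compressed channel returned is
+ * (0x20 << 4) | 0x02 = 0x202.
+ */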
+
+/*
+ * Map channel number in PKO
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param pq_num specifies the Port Queue (i.e. L1) queue number.
+ * @param l2_l3_q_num  specifies L2/L3 queue number.
+ * @param channel specifies the channel number to map to the queue.
+ *
+ * The channel assignment applies to L2 or L3 Shaper Queues depending
+ * on the setting of channel credit level.
+ *
+ * @return none.
+ */
+void cvmx_pko3_map_channel(unsigned int node, unsigned int pq_num,
+			   unsigned int l2_l3_q_num, uint16_t channel)
+{
+	union cvmx_pko_l3_l2_sqx_channel sqx_channel;
+	cvmx_pko_lutx_t lutx;
+	int xchan;
+
+	sqx_channel.u64 =
+		csr_rd_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num));
+
+	sqx_channel.s.cc_channel = channel;
+
+	csr_wr_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num),
+		    sqx_channel.u64);
+
+	/* Convert CHAN_E into compressed channel */
+	xchan = cvmx_pko3_chan_2_xchan(channel);
+
+	if (debug)
+		debug("%s: ipd_port=%#x xchan=%#x\n", __func__, channel, xchan);
+
+	if (xchan < 0) {
+		if (xchan == -1)
+			cvmx_printf("%s: ERROR: channel %#x not recognized\n",
+				    __func__, channel);
+		return;
+	}
+
+	lutx.u64 = 0;
+	lutx.s.valid = 1;
+	lutx.s.pq_idx = pq_num;
+	lutx.s.queue_number = l2_l3_q_num;
+
+	csr_wr_node(node, CVMX_PKO_LUTX(xchan), lutx.u64);
+
+	if (debug)
+		debug("%s: channel %#x (compressed=%#x) mapped L2/L3 SQ=%u, PQ=%u\n",
+		      __func__, channel, xchan, l2_l3_q_num, pq_num);
+}
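+
+/*
+ * Usage sketch (illustrative): steer traffic for one CHAN_E channel
+ * (i.e. an IPD port) to a given L2/L3 SQ under port queue pq_num.
+ *
+ *	cvmx_pko3_map_channel(node, pq_num, l2_l3_q_num, ipd_port);
+ */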
+
+/*
+ * @INTERNAL
+ * This function configures port queue scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param port_queue is the port queue number to be configured.
+ * @param mac_num is the mac number of the mac that will be tied to this port_queue.
+ */
+static void cvmx_pko_configure_port_queue(int node, int port_queue, int mac_num)
+{
+	cvmx_pko_l1_sqx_topology_t pko_l1_topology;
+	cvmx_pko_l1_sqx_shape_t pko_l1_shape;
+	cvmx_pko_l1_sqx_link_t pko_l1_link;
+
+	pko_l1_topology.u64 = 0;
+	pko_l1_topology.s.link = mac_num;
+	csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(port_queue),
+		    pko_l1_topology.u64);
+
+	pko_l1_shape.u64 = 0;
+	pko_l1_shape.s.link = mac_num;
+	csr_wr_node(node, CVMX_PKO_L1_SQX_SHAPE(port_queue), pko_l1_shape.u64);
+
+	pko_l1_link.u64 = 0;
+	pko_l1_link.s.link = mac_num;
+	csr_wr_node(node, CVMX_PKO_L1_SQX_LINK(port_queue), pko_l1_link.u64);
+}
+
+/*
+ * @INTERNAL
+ * This function configures level 2 queues scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param queue is the level3 queue number to be configured.
+ * @param parent_queue is the parent queue at next level for this l3 queue.
+ * @param prio is this queue's priority in parent's scheduler.
+ * @param rr_quantum is this queue's round robin quantum value.
+ * @param child_base is the first child queue number in the static priority children.
+ * @param child_rr_prio is the round robin children priority.
+ */
+static void cvmx_pko_configure_l2_queue(int node, int queue, int parent_queue,
+					int prio, int rr_quantum,
+					int child_base, int child_rr_prio)
+{
+	cvmx_pko_l2_sqx_schedule_t pko_sq_sched;
+	cvmx_pko_l2_sqx_topology_t pko_child_topology;
+	cvmx_pko_l1_sqx_topology_t pko_parent_topology;
+
+	/* parent topology configuration */
+	pko_parent_topology.u64 =
+		csr_rd_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue));
+	pko_parent_topology.s.prio_anchor = child_base;
+	pko_parent_topology.s.rr_prio = child_rr_prio;
+	csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue),
+		    pko_parent_topology.u64);
+
+	if (debug > 1)
+		debug("CVMX_PKO_L1_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
+		      parent_queue, pko_parent_topology.s.prio_anchor,
+		      pko_parent_topology.s.link);
+
+	/* scheduler configuration for this sq in the parent queue */
+	pko_sq_sched.u64 = 0;
+	pko_sq_sched.s.prio = prio;
+	pko_sq_sched.s.rr_quantum = rr_quantum;
+	csr_wr_node(node, CVMX_PKO_L2_SQX_SCHEDULE(queue), pko_sq_sched.u64);
+
+	/* child topology configuration */
+	pko_child_topology.u64 = 0;
+	pko_child_topology.s.parent = parent_queue;
+	csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(queue),
+		    pko_child_topology.u64);
+}
+
+/*
+ * @INTERNAL
+ * This function configures level 3 queues scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param queue is the level3 queue number to be configured.
+ * @param parent_queue is the parent queue at next level for this l3 queue.
+ * @param prio is this queue's priority in parent's scheduler.
+ * @param rr_quantum is this queue's round robin quantum value.
+ * @param child_base is the first child queue number in the static priority children.
+ * @param child_rr_prio is the round robin children priority.
+ */
+static void cvmx_pko_configure_l3_queue(int node, int queue, int parent_queue,
+					int prio, int rr_quantum,
+					int child_base, int child_rr_prio)
+{
+	cvmx_pko_l3_sqx_schedule_t pko_sq_sched;
+	cvmx_pko_l3_sqx_topology_t pko_child_topology;
+	cvmx_pko_l2_sqx_topology_t pko_parent_topology;
+
+	/* parent topology configuration */
+	pko_parent_topology.u64 =
+		csr_rd_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue));
+	pko_parent_topology.s.prio_anchor = child_base;
+	pko_parent_topology.s.rr_prio = child_rr_prio;
+	csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue),
+		    pko_parent_topology.u64);
+
+	if (debug > 1)
+		debug("CVMX_PKO_L2_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
+		      parent_queue, pko_parent_topology.s.prio_anchor,
+		      pko_parent_topology.s.parent);
+
+	/* scheduler configuration for this sq in the parent queue */
+	pko_sq_sched.u64 = 0;
+	pko_sq_sched.s.prio = prio;
+	pko_sq_sched.s.rr_quantum = rr_quantum;
+	csr_wr_node(node, CVMX_PKO_L3_SQX_SCHEDULE(queue), pko_sq_sched.u64);
+
+	/* child topology configuration */
+	pko_child_topology.u64 = 0;
+	pko_child_topology.s.parent = parent_queue;
+	csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(queue),
+		    pko_child_topology.u64);
+}
+
+/*
+ * @INTERNAL
+ * This function configures level 4 queues scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param queue is the level4 queue number to be configured.
+ * @param parent_queue is the parent queue at next level for this l4 queue.
+ * @param prio is this queue's priority in parent's scheduler.
+ * @param rr_quantum is this queue's round robin quantum value.
+ * @param child_base is the first child queue number in the static priority children.
+ * @param child_rr_prio is the round robin children priority.
+ */
+static void cvmx_pko_configure_l4_queue(int node, int queue, int parent_queue,
+					int prio, int rr_quantum,
+					int child_base, int child_rr_prio)
+{
+	cvmx_pko_l4_sqx_schedule_t pko_sq_sched;
+	cvmx_pko_l4_sqx_topology_t pko_child_topology;
+	cvmx_pko_l3_sqx_topology_t pko_parent_topology;
+
+	/* parent topology configuration */
+	pko_parent_topology.u64 =
+		csr_rd_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue));
+	pko_parent_topology.s.prio_anchor = child_base;
+	pko_parent_topology.s.rr_prio = child_rr_prio;
+	csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue),
+		    pko_parent_topology.u64);
+
+	if (debug > 1)
+		debug("CVMX_PKO_L3_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
+		      parent_queue, pko_parent_topology.s.prio_anchor,
+		      pko_parent_topology.s.parent);
+
+	/* scheduler configuration for this sq in the parent queue */
+	pko_sq_sched.u64 = 0;
+	pko_sq_sched.s.prio = prio;
+	pko_sq_sched.s.rr_quantum = rr_quantum;
+	csr_wr_node(node, CVMX_PKO_L4_SQX_SCHEDULE(queue), pko_sq_sched.u64);
+
+	/* topology configuration */
+	pko_child_topology.u64 = 0;
+	pko_child_topology.s.parent = parent_queue;
+	csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(queue),
+		    pko_child_topology.u64);
+}
+
+/*
+ * @INTERNAL
+ * This function configures level 5 queues scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param queue is the level5 queue number to be configured.
+ * @param parent_queue is the parent queue at next level for this l5 queue.
+ * @param prio is this queue's priority in parent's scheduler.
+ * @param rr_quantum is this queue's round robin quantum value.
+ * @param child_base is the first child queue number in the static priority children.
+ * @param child_rr_prio is the round robin children priority.
+ */
+static void cvmx_pko_configure_l5_queue(int node, int queue, int parent_queue,
+					int prio, int rr_quantum,
+					int child_base, int child_rr_prio)
+{
+	cvmx_pko_l5_sqx_schedule_t pko_sq_sched;
+	cvmx_pko_l4_sqx_topology_t pko_parent_topology;
+	cvmx_pko_l5_sqx_topology_t pko_child_topology;
+
+	/* parent topology configuration */
+	pko_parent_topology.u64 =
+		csr_rd_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue));
+	pko_parent_topology.s.prio_anchor = child_base;
+	pko_parent_topology.s.rr_prio = child_rr_prio;
+	csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue),
+		    pko_parent_topology.u64);
+
+	if (debug > 1)
+		debug("CVMX_PKO_L4_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
+		      parent_queue, pko_parent_topology.s.prio_anchor,
+		      pko_parent_topology.s.parent);
+
+	/* scheduler configuration for this sq in the parent queue */
+	pko_sq_sched.u64 = 0;
+	pko_sq_sched.s.prio = prio;
+	pko_sq_sched.s.rr_quantum = rr_quantum;
+	csr_wr_node(node, CVMX_PKO_L5_SQX_SCHEDULE(queue), pko_sq_sched.u64);
+
+	/* topology configuration */
+	pko_child_topology.u64 = 0;
+	pko_child_topology.s.parent = parent_queue;
+	csr_wr_node(node, CVMX_PKO_L5_SQX_TOPOLOGY(queue),
+		    pko_child_topology.u64);
+}
+
+/*
+ * @INTERNAL
+ * This function configures descriptor queues scheduling and topology parameters
+ * in hardware.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be configured.
+ * @param parent_queue is the parent queue at next level for this dq.
+ * @param prio is this queue's priority in parent's scheduler.
+ * @param rr_quantum is this queue's round robin quantum value.
+ * @param child_base is the first child queue number in the static priority children.
+ * @param child_rr_prio is the round robin children priority.
+ */
+static void cvmx_pko_configure_dq(int node, int dq, int parent_queue, int prio,
+				  int rr_quantum, int child_base,
+				  int child_rr_prio)
+{
+	cvmx_pko_dqx_schedule_t pko_dq_sched;
+	cvmx_pko_dqx_topology_t pko_dq_topology;
+	cvmx_pko_l5_sqx_topology_t pko_parent_topology;
+	cvmx_pko_dqx_wm_ctl_t pko_dq_wm_ctl;
+	unsigned long long parent_topology_reg;
+	char lvl;
+
+	if (debug)
+		debug("%s: dq %u parent %u child_base %u\n", __func__, dq,
+		      parent_queue, child_base);
+
+	if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L5_QUEUES) {
+		parent_topology_reg = CVMX_PKO_L5_SQX_TOPOLOGY(parent_queue);
+		lvl = 5;
+	} else if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L3_QUEUES) {
+		parent_topology_reg = CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue);
+		lvl = 3;
+	} else {
+		return;
+	}
+
+	if (debug)
+		debug("%s: parent_topology_reg=%#llx\n", __func__,
+		      parent_topology_reg);
+
+	/* parent topology configuration */
+	pko_parent_topology.u64 = csr_rd_node(node, parent_topology_reg);
+	pko_parent_topology.s.prio_anchor = child_base;
+	pko_parent_topology.s.rr_prio = child_rr_prio;
+	csr_wr_node(node, parent_topology_reg, pko_parent_topology.u64);
+
+	if (debug > 1)
+		debug("CVMX_PKO_L%d_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
+		      lvl, parent_queue, pko_parent_topology.s.prio_anchor,
+		      pko_parent_topology.s.parent);
+
+	/* scheduler configuration for this dq in the parent queue */
+	pko_dq_sched.u64 = 0;
+	pko_dq_sched.s.prio = prio;
+	pko_dq_sched.s.rr_quantum = rr_quantum;
+	csr_wr_node(node, CVMX_PKO_DQX_SCHEDULE(dq), pko_dq_sched.u64);
+
+	/* topology configuration */
+	pko_dq_topology.u64 = 0;
+	pko_dq_topology.s.parent = parent_queue;
+	csr_wr_node(node, CVMX_PKO_DQX_TOPOLOGY(dq), pko_dq_topology.u64);
+
+	/* configure for counting packets, not bytes at this level */
+	pko_dq_wm_ctl.u64 = 0;
+	pko_dq_wm_ctl.s.kind = 1;
+	pko_dq_wm_ctl.s.enable = 0;
+	csr_wr_node(node, CVMX_PKO_DQX_WM_CTL(dq), pko_dq_wm_ctl.u64);
+
+	if (debug > 1) {
+		pko_dq_sched.u64 = csr_rd_node(node, CVMX_PKO_DQX_SCHEDULE(dq));
+		pko_dq_topology.u64 =
+			csr_rd_node(node, CVMX_PKO_DQX_TOPOLOGY(dq));
+		debug("CVMX_PKO_DQX_TOPOLOGY(%u)PARENT=%u CVMX_PKO_DQX_SCHEDULE(%u) PRIO=%u Q=%u\n",
+		      dq, pko_dq_topology.s.parent, dq, pko_dq_sched.s.prio,
+		      pko_dq_sched.s.rr_quantum);
+	}
+}
+
+/*
+ * @INTERNAL
+ * The following structure selects the Scheduling Queue configuration
+ * routine for each of the supported levels.
+ * The initial content of the table will be setup in accordance
+ * to the specific SoC model and its implemented resources
+ */
+struct pko3_cfg_tab_s {
+	/* function pointer to configure the given level, last=DQ */
+	struct {
+		u8 parent_level;
+		void (*cfg_sq_func)(int node, int queue, int parent_queue,
+				    int prio, int rr_quantum, int child_base,
+				    int child_rr_prio);
+		//XXX exaggerated size for debugging
+	} lvl[256];
+};
+
+static const struct pko3_cfg_tab_s pko3_cn78xx_cfg = {
+	{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
+				   cvmx_pko_configure_l2_queue },
+	  [CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
+				   cvmx_pko_configure_l3_queue },
+	  [CVMX_PKO_L4_QUEUES] = { CVMX_PKO_L3_QUEUES,
+				   cvmx_pko_configure_l4_queue },
+	  [CVMX_PKO_L5_QUEUES] = { CVMX_PKO_L4_QUEUES,
+				   cvmx_pko_configure_l5_queue },
+	  [CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L5_QUEUES,
+				      cvmx_pko_configure_dq } }
+};
+
+static const struct pko3_cfg_tab_s pko3_cn73xx_cfg = {
+	{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
+				   cvmx_pko_configure_l2_queue },
+	  [CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
+				   cvmx_pko_configure_l3_queue },
+	  [CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L3_QUEUES,
+				      cvmx_pko_configure_dq } }
+};
+
+/*
+ * Configure Port Queue and its children Scheduler Queue
+ *
+ * Port Queues (a.k.a L1) are assigned 1-to-1 to MACs.
+ * L2 Scheduler Queues are used for specifying channels, and thus there
+ * could be multiple L2 SQs attached to a single L1 PQ, either in a
+ * fair round-robin scheduling, or with static and/or round-robin priorities.
+ *
+ * @param node on which to operate
+ * @param mac_num is the LMAC number that is associated with the Port Queue
+ * @param pq_num is the number of the L1 PQ attached to the MAC
+ *
+ * @returns 0 on success, -1 on failure.
+ */
+int cvmx_pko3_pq_config(unsigned int node, unsigned int mac_num,
+			unsigned int pq_num)
+{
+	char b1[10];
+
+	if (debug)
+		debug("%s: MAC%u -> %s\n", __func__, mac_num,
+		      __cvmx_pko3_sq_str(b1, CVMX_PKO_PORT_QUEUES, pq_num));
+
+	cvmx_pko_configure_port_queue(node, pq_num, mac_num);
+
+	return 0;
+}
+
+/*
+ * Configure L3 through L5 Scheduler Queues and Descriptor Queues
+ *
+ * The Scheduler Queues in Levels 3 to 5 and Descriptor Queues are
+ * configured one-to-one or many-to-one to a single parent Scheduler
+ * Queue. The level of the parent SQ is specified in an argument,
+ * as well as the number of children to attach to the specific parent.
+ * The children can have fair round-robin or priority-based scheduling
+ * when multiple children are assigned a single parent.
+ *
+ * @param node on which to operate
+ * @param child_level  is the level of the child queue
+ * @param parent_queue is the number of the parent Scheduler Queue
+ * @param child_base is the number of the first child SQ or DQ to assign to
+ * @param child_count is the number of consecutive children to assign
+ * @param stat_prio_count is the priority setting for the children Ln SQs
+ *
+ * If <stat_prio_count> is -1, the Ln children will have equal Round-Robin
+ * relationship with each other. If <stat_prio_count> is 0, all Ln children
+ * will be arranged in Weighted-Round-Robin, with the first having the most
+ * precedence. If <stat_prio_count> is between 1 and 8, it indicates how
+ * many children will have static priority settings (with the first having
+ * the most precedence), with the remaining Ln children having WRR scheduling.
+ *
+ * @returns 0 on success, -1 on failure.
+ *
+ * Note: this function supports the configuration of the node-local unit.
+ */
+int cvmx_pko3_sq_config_children(unsigned int node,
+				 enum cvmx_pko3_level_e child_level,
+				 unsigned int parent_queue,
+				 unsigned int child_base,
+				 unsigned int child_count, int stat_prio_count)
+{
+	enum cvmx_pko3_level_e parent_level;
+	unsigned int num_elem = 0;
+	unsigned int rr_quantum, rr_count;
+	unsigned int child, prio, rr_prio;
+	const struct pko3_cfg_tab_s *cfg_tbl = NULL;
+	char b1[10], b2[10];
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		num_elem = NUM_ELEMENTS(pko3_cn78xx_cfg.lvl);
+		cfg_tbl = &pko3_cn78xx_cfg;
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+		num_elem = NUM_ELEMENTS(pko3_cn73xx_cfg.lvl);
+		cfg_tbl = &pko3_cn73xx_cfg;
+	}
+
+	if (!cfg_tbl || child_level >= num_elem) {
+		cvmx_printf("ERROR: %s: model or level %#x invalid\n", __func__,
+			    child_level);
+		return -1;
+	}
+
+	parent_level = cfg_tbl->lvl[child_level].parent_level;
+
+	if (!cfg_tbl->lvl[child_level].cfg_sq_func ||
+	    cfg_tbl->lvl[child_level].parent_level == 0) {
+		cvmx_printf("ERROR: %s: queue level %#x invalid\n", __func__,
+			    child_level);
+		return -1;
+	}
+
+	/* First static priority is 0 - top precedence */
+	prio = 0;
+
+	if (stat_prio_count > (signed int)child_count)
+		stat_prio_count = child_count;
+
+	/* Valid PRIO field is 0..9, limit maximum static priorities */
+	if (stat_prio_count > 9)
+		stat_prio_count = 9;
+
+	/* Special case of a single child */
+	if (child_count == 1) {
+		rr_count = 0;
+		rr_prio = 0xF;
+		/* Special case for Fair-RR */
+	} else if (stat_prio_count < 0) {
+		rr_count = child_count;
+		rr_prio = 0;
+	} else {
+		rr_count = child_count - stat_prio_count;
+		rr_prio = stat_prio_count;
+	}
+
+	/* Compute highest RR_QUANTUM */
+	if (stat_prio_count > 0)
+		rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN * rr_count;
+	else
+		rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN;
+
+	if (debug)
+		debug("%s: Parent %s child_base %u rr_pri %u\n", __func__,
+		      __cvmx_pko3_sq_str(b1, parent_level, parent_queue),
+		      child_base, rr_prio);
+
+	/* Parent is configured with child */
+
+	for (child = child_base; child < (child_base + child_count); child++) {
+		if (debug)
+			debug("%s: Child %s of %s prio %u rr_quantum %#x\n",
+			      __func__,
+			      __cvmx_pko3_sq_str(b1, child_level, child),
+			      __cvmx_pko3_sq_str(b2, parent_level,
+						 parent_queue),
+			      prio, rr_quantum);
+
+		cfg_tbl->lvl[child_level].cfg_sq_func(node, child, parent_queue,
+						      prio, rr_quantum,
+						      child_base, rr_prio);
+
+		if (prio < rr_prio)
+			prio++;
+		else if (stat_prio_count > 0)
+			rr_quantum -= CVMX_PKO3_RR_QUANTUM_MIN;
+	} /* for child */
+
+	return 0;
+}
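+
+/*
+ * Usage sketch (illustrative only, not part of this patch): building a
+ * minimal CN73XX-style hierarchy MAC -> PQ -> L2 -> L3 -> DQ with one
+ * queue per level; the queue numbers pq, l2q, l3q and dq are assumed
+ * to have been allocated beforehand (e.g. with cvmx_pko_alloc_queues()):
+ *
+ *	cvmx_pko3_pq_config(node, mac_num, pq);
+ *	cvmx_pko3_sq_config_children(node, CVMX_PKO_L2_QUEUES, pq, l2q, 1, 0);
+ *	cvmx_pko3_sq_config_children(node, CVMX_PKO_L3_QUEUES, l2q, l3q, 1, 0);
+ *	cvmx_pko3_sq_config_children(node, CVMX_PKO_DESCR_QUEUES, l3q, dq, 1, 0);
+ */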
diff --git a/arch/mips/mach-octeon/cvmx-pko3-resources.c b/arch/mips/mach-octeon/cvmx-pko3-resources.c
new file mode 100644
index 0000000..ab04e9a
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko3-resources.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * PKO resources.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+#include <mach/cvmx-range.h>
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+#define CVMX_GR_TAG_PKO_PORT_QUEUES(x)                                         \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'p', 'o', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_L2_QUEUES(x)                                           \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '2', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_L3_QUEUES(x)                                           \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '3', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_L4_QUEUES(x)                                           \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '4', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_L5_QUEUES(x)                                           \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'l', '5', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_DESCR_QUEUES(x)                                        \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'd', 'e', 'q', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+#define CVMX_GR_TAG_PKO_PORT_INDEX(x)                                          \
+	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', 'p', 'i', 'd', '_', \
+			((x) + '0'), '.', '.', '.', '.')
+
+/*
+ * @INTERNAL
+ * Per-DQ parameters, current and maximum queue depth counters
+ */
+cvmx_pko3_dq_params_t *__cvmx_pko3_dq_params[CVMX_MAX_NODES];
+
+static const short cvmx_pko_num_queues_78XX[256] = {
+	[CVMX_PKO_PORT_QUEUES] = 32, [CVMX_PKO_L2_QUEUES] = 512,
+	[CVMX_PKO_L3_QUEUES] = 512,  [CVMX_PKO_L4_QUEUES] = 1024,
+	[CVMX_PKO_L5_QUEUES] = 1024, [CVMX_PKO_DESCR_QUEUES] = 1024
+};
+
+static const short cvmx_pko_num_queues_73XX[256] = {
+	[CVMX_PKO_PORT_QUEUES] = 16, [CVMX_PKO_L2_QUEUES] = 256,
+	[CVMX_PKO_L3_QUEUES] = 256,  [CVMX_PKO_L4_QUEUES] = 0,
+	[CVMX_PKO_L5_QUEUES] = 0,    [CVMX_PKO_DESCR_QUEUES] = 256
+};
+
+int cvmx_pko3_num_level_queues(enum cvmx_pko3_level_e level)
+{
+	unsigned int nq = 0, ne = 0;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		ne = NUM_ELEMENTS(cvmx_pko_num_queues_78XX);
+		nq = cvmx_pko_num_queues_78XX[level];
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+		ne = NUM_ELEMENTS(cvmx_pko_num_queues_73XX);
+		nq = cvmx_pko_num_queues_73XX[level];
+	}
+
+	if (nq == 0 || level >= ne) {
+		printf("ERROR: %s: queue level %#x invalid\n", __func__, level);
+		return -1;
+	}
+
+	return nq;
+}
+
+static inline struct global_resource_tag
+__cvmx_pko_get_queues_resource_tag(int node, enum cvmx_pko3_level_e queue_level)
+{
+	if (cvmx_pko3_num_level_queues(queue_level) == 0) {
+		printf("ERROR: %s: queue level %#x invalid\n", __func__,
+		       queue_level);
+		return CVMX_GR_TAG_INVALID;
+	}
+
+	switch (queue_level) {
+	case CVMX_PKO_PORT_QUEUES:
+		return CVMX_GR_TAG_PKO_PORT_QUEUES(node);
+	case CVMX_PKO_L2_QUEUES:
+		return CVMX_GR_TAG_PKO_L2_QUEUES(node);
+	case CVMX_PKO_L3_QUEUES:
+		return CVMX_GR_TAG_PKO_L3_QUEUES(node);
+	case CVMX_PKO_L4_QUEUES:
+		return CVMX_GR_TAG_PKO_L4_QUEUES(node);
+	case CVMX_PKO_L5_QUEUES:
+		return CVMX_GR_TAG_PKO_L5_QUEUES(node);
+	case CVMX_PKO_DESCR_QUEUES:
+		return CVMX_GR_TAG_PKO_DESCR_QUEUES(node);
+	default:
+		printf("ERROR: %s: queue level %#x invalid\n", __func__,
+		       queue_level);
+		return CVMX_GR_TAG_INVALID;
+	}
+}
+
+/**
+ * Allocate or reserve a pko resource - called by wrapper functions
+ * @param tag processed global resource tag
+ * @param base_queue first queue to reserve; negative to allocate any free range
+ * @param owner to be specified for resource
+ * @param num_queues to allocate
+ * @param max_num_queues for global resource
+ */
+int cvmx_pko_alloc_global_resource(struct global_resource_tag tag,
+				   int base_queue, int owner, int num_queues,
+				   int max_num_queues)
+{
+	int res;
+
+	if (cvmx_create_global_resource_range(tag, max_num_queues)) {
+		debug("ERROR: Failed to create PKO3 resource: %lx-%lx\n",
+		      (unsigned long)tag.hi, (unsigned long)tag.lo);
+		return -1;
+	}
+	if (base_queue >= 0) {
+		res = cvmx_reserve_global_resource_range(tag, owner, base_queue,
+							 num_queues);
+	} else {
+		res = cvmx_allocate_global_resource_range(tag, owner,
+							  num_queues, 1);
+	}
+	if (res < 0) {
+		debug("ERROR: Failed to %s PKO3 tag %lx:%lx, %i %i %i %i.\n",
+		      ((base_queue < 0) ? "allocate" : "reserve"),
+		      (unsigned long)tag.hi, (unsigned long)tag.lo, base_queue,
+		      owner, num_queues, max_num_queues);
+		return -1;
+	}
+
+	return res;
+}
+
+/**
+ * Allocate or reserve PKO queues - wrapper for cvmx_pko_alloc_global_resource
+ *
+ * @param node on which to allocate/reserve PKO queues
+ * @param level of PKO queue
+ * @param owner of reserved/allocated resources
+ * @param base_queue to start reservation/allocation
+ * @param num_queues number of queues to be allocated
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_pko_alloc_queues(int node, int level, int owner, int base_queue,
+			  int num_queues)
+{
+	struct global_resource_tag tag =
+		__cvmx_pko_get_queues_resource_tag(node, level);
+	int max_num_queues = cvmx_pko3_num_level_queues(level);
+
+	return cvmx_pko_alloc_global_resource(tag, base_queue, owner,
+					      num_queues, max_num_queues);
+}
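+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a negative
+ * base_queue lets the allocator pick the queues, a non-negative one
+ * reserves a fixed range:
+ *
+ *	// reserve DQs 8..15 on node 0 for 'owner'
+ *	base = cvmx_pko_alloc_queues(0, CVMX_PKO_DESCR_QUEUES, owner, 8, 8);
+ *	// or let the allocator pick any 8 free DQs
+ *	base = cvmx_pko_alloc_queues(0, CVMX_PKO_DESCR_QUEUES, owner, -1, 8);
+ */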
+
+/**
+ * @INTERNAL
+ *
+ * Initialize the pointer to the descriptor queue parameter table.
+ * The table is one named block per node, and may be shared between
+ * applications.
+ */
+int __cvmx_pko3_dq_param_setup(unsigned int node)
+{
+	return 0;
+}
diff --git a/arch/mips/mach-octeon/cvmx-pko3.c b/arch/mips/mach-octeon/cvmx-pko3.c
new file mode 100644
index 0000000..d8ce02a
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-pko3.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-ciu-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ilk-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-ilk.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-pko3.h>
+#include <mach/cvmx-pko3-queue.h>
+#include <mach/cvmx-pko3-resources.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-util.h>
+#include <mach/cvmx-helper-pki.h>
+
+static const int debug;
+
+#define CVMX_DUMP_REGX(reg)						\
+	do {								\
+		if (debug)						\
+			debug("%s=%#llx\n", #reg,			\
+			      (long long)csr_rd_node(node, reg));	\
+	} while (0)
+
+static int cvmx_pko_setup_macs(int node);
+
+/*
+ * PKO descriptor queue operation error string
+ *
+ * @param dqstatus is the enumeration returned from hardware,
+ *	  PKO_QUERY_RTN_S[DQSTATUS].
+ *
+ * @return static constant string error description
+ */
+const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus)
+{
+	char *str = "PKO Undefined error";
+
+	switch (dqstatus) {
+	case PKO_DQSTATUS_PASS:
+		str = "No error";
+		break;
+	case PKO_DQSTATUS_BADSTATE:
+		str = "PKO queue not ready";
+		break;
+	case PKO_DQSTATUS_NOFPABUF:
+		str = "PKO failed to allocate buffer from FPA";
+		break;
+	case PKO_DQSTATUS_NOPKOBUF:
+		str = "PKO out of buffers";
+		break;
+	case PKO_DQSTATUS_FAILRTNPTR:
+		str = "PKO failed to return buffer to FPA";
+		break;
+	case PKO_DQSTATUS_ALREADY:
+		str = "PKO queue already opened";
+		break;
+	case PKO_DQSTATUS_NOTCREATED:
+		str = "PKO queue has not been created";
+		break;
+	case PKO_DQSTATUS_NOTEMPTY:
+		str = "PKO queue is not empty";
+		break;
+	case PKO_DQSTATUS_SENDPKTDROP:
+		str = "Illegal PKO command construct";
+		break;
+	}
+	return str;
+}
+
+/*
+ * PKO global initialization for 78XX.
+ *
+ * @param node is the node on which the PKO block is initialized.
+ * @param aura is the FPA aura number (including the node bits) that
+ *	  PKO will use to allocate its internal buffers.
+ * @return 0 on success, -1 on failure.
+ */
+int cvmx_pko3_hw_init_global(int node, uint16_t aura)
+{
+	cvmx_pko_dpfi_flush_t pko_flush;
+	cvmx_pko_dpfi_fpa_aura_t pko_aura;
+	cvmx_pko_dpfi_ena_t dpfi_enable;
+	cvmx_pko_ptf_iobp_cfg_t ptf_iobp_cfg;
+	cvmx_pko_pdm_cfg_t pko_pdm_cfg;
+	cvmx_pko_enable_t pko_enable;
+	cvmx_pko_dpfi_status_t dpfi_status;
+	cvmx_pko_status_t pko_status;
+	cvmx_pko_shaper_cfg_t shaper_cfg;
+	u64 cycles;
+	const unsigned int timeout = 100; /* 100 milliseconds */
+
+	if (node != (aura >> 10))
+		cvmx_printf("WARNING: AURA vs PKO node mismatch\n");
+
+	pko_enable.u64 = csr_rd_node(node, CVMX_PKO_ENABLE);
+	if (pko_enable.s.enable) {
+		cvmx_printf("WARNING: %s: PKO already enabled on node %u\n",
+			    __func__, node);
+		return 0;
+	}
+	/* Enable color awareness. */
+	shaper_cfg.u64 = csr_rd_node(node, CVMX_PKO_SHAPER_CFG);
+	shaper_cfg.s.color_aware = 1;
+	csr_wr_node(node, CVMX_PKO_SHAPER_CFG, shaper_cfg.u64);
+
+	/* Clear FLUSH command to be sure */
+	pko_flush.u64 = 0;
+	pko_flush.s.flush_en = 0;
+	csr_wr_node(node, CVMX_PKO_DPFI_FLUSH, pko_flush.u64);
+
+	/* set the aura number in pko, use aura node from parameter */
+	pko_aura.u64 = 0;
+	pko_aura.s.node = aura >> 10;
+	pko_aura.s.laura = aura;
+	csr_wr_node(node, CVMX_PKO_DPFI_FPA_AURA, pko_aura.u64);
+
+	CVMX_DUMP_REGX(CVMX_PKO_DPFI_FPA_AURA);
+
+	dpfi_enable.u64 = 0;
+	dpfi_enable.s.enable = 1;
+	csr_wr_node(node, CVMX_PKO_DPFI_ENA, dpfi_enable.u64);
+
+	/* Prepare timeout */
+	cycles = get_timer(0);
+
+	/* Wait until all pointers have been returned */
+	do {
+		pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
+		if (get_timer(cycles) > timeout)
+			break;
+	} while (!pko_status.s.pko_rdy);
+
+	if (!pko_status.s.pko_rdy) {
+		dpfi_status.u64 = csr_rd_node(node, CVMX_PKO_DPFI_STATUS);
+		cvmx_printf("ERROR: %s: PKO DFPI failed, PKO_STATUS=%#llx DPFI_STATUS=%#llx\n",
+			    __func__, (unsigned long long)pko_status.u64,
+			    (unsigned long long)dpfi_status.u64);
+		return -1;
+	}
+
+	/* Set max outstanding requests in IOBP for any FIFO.*/
+	ptf_iobp_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTF_IOBP_CFG);
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		ptf_iobp_cfg.s.max_read_size = 0x10; /* Recommended by HRM.*/
+	else
+		/* Reduce the value from recommended 0x10 to avoid
+		 * getting "underflow" condition in the BGX TX FIFO.
+		 */
+		ptf_iobp_cfg.s.max_read_size = 3;
+	csr_wr_node(node, CVMX_PKO_PTF_IOBP_CFG, ptf_iobp_cfg.u64);
+
+	/* Set minimum packet size per Ethernet standard */
+	pko_pdm_cfg.u64 = 0;
+	pko_pdm_cfg.s.pko_pad_minlen = 0x3c; /* 60 bytes before FCS */
+	csr_wr_node(node, CVMX_PKO_PDM_CFG, pko_pdm_cfg.u64);
+
+	/* Initialize MACs and FIFOs */
+	cvmx_pko_setup_macs(node);
+
+	/* enable PKO, although interfaces and queues are not up yet */
+	pko_enable.u64 = 0;
+	pko_enable.s.enable = 1;
+	csr_wr_node(node, CVMX_PKO_ENABLE, pko_enable.u64);
+
+	/* PKO_RDY set indicates successful initialization */
+	pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
+	if (pko_status.s.pko_rdy)
+		return 0;
+
+	cvmx_printf("ERROR: %s: failed, PKO_STATUS=%#llx\n", __func__,
+		    (unsigned long long)pko_status.u64);
+	return -1;
+}
+
+/*
+ * Configure Channel credit level in PKO.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param level specifies the level at which PKO channel queues will be configured.
+ * @return returns 0 if successful and -1 on failure.
+ */
+int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level)
+{
+	union cvmx_pko_channel_level channel_level;
+
+	channel_level.u64 = 0;
+
+	if (level == CVMX_PKO_L2_QUEUES)
+		channel_level.s.cc_level = 0;
+	else if (level == CVMX_PKO_L3_QUEUES)
+		channel_level.s.cc_level = 1;
+	else
+		return -1;
+
+	csr_wr_node(node, CVMX_PKO_CHANNEL_LEVEL, channel_level.u64);
+
+	return 0;
+}
+
+/** Open configured descriptor queues before queueing packets into them.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be opened.
+ * @return returns 0 on success or -1 on failure.
+ */
+int cvmx_pko_dq_open(int node, int dq)
+{
+	cvmx_pko_query_rtn_t pko_status;
+	pko_query_dqstatus_t dqstatus;
+	cvmx_pko3_dq_params_t *p_param;
+
+	if (debug)
+		debug("%s: DEBUG: dq %u\n", __func__, dq);
+
+	__cvmx_pko3_dq_param_setup(node);
+
+	pko_status = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_OPEN);
+
+	dqstatus = pko_status.s.dqstatus;
+
+	if (dqstatus == PKO_DQSTATUS_ALREADY)
+		return 0;
+	if (dqstatus != PKO_DQSTATUS_PASS) {
+		cvmx_printf("%s: ERROR: Failed to open dq :%u: %s\n", __func__,
+			    dq, pko_dqstatus_error(dqstatus));
+		return -1;
+	}
+
+	/* Setup the descriptor queue software parameters */
+	p_param = cvmx_pko3_dq_parameters(node, dq);
+	if (p_param) {
+		p_param->depth = pko_status.s.depth;
+		if (p_param->limit == 0)
+			p_param->limit = 1024; /* last-resort default */
+	}
+
+	return 0;
+}
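+
+/*
+ * Bring-up sketch (illustrative only, not part of this patch): the
+ * expected call order on a node, assuming 'aura' names an FPA aura
+ * that has already been set up for PKO use:
+ *
+ *	if (cvmx_pko3_hw_init_global(node, aura))
+ *		return -1;
+ *	cvmx_pko3_channel_credit_level(node, CVMX_PKO_L2_QUEUES);
+ *	// ... configure the queue hierarchy here ...
+ *	if (cvmx_pko_dq_open(node, dq))
+ *		return -1;
+ */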
+
+/*
+ * PKO initialization of MACs and FIFOs
+ *
+ * All MACs are configured and assigned a specific FIFO,
+ * and each FIFO is configured with a size for best utilization
+ * of available FIFO resources.
+ *
+ * @param node specifies which node's PKO block to set up.
+ * @return returns 0 if successful and -1 on failure.
+ *
+ * Note: This function contains model-specific code.
+ */
+static int cvmx_pko_setup_macs(int node)
+{
+	unsigned int interface;
+	unsigned int port, num_ports;
+	unsigned int mac_num, fifo, pri, cnt;
+	cvmx_helper_interface_mode_t mode;
+	const unsigned int num_interfaces =
+		cvmx_helper_get_number_of_interfaces();
+	u8 fifo_group_cfg[8];
+	u8 fifo_group_spd[8];
+	unsigned int fifo_count = 0;
+	unsigned int max_fifos = 0, fifo_groups = 0;
+	struct {
+		u8 fifo_cnt;
+		u8 fifo_id;
+		u8 pri;
+		u8 spd;
+		u8 mac_fifo_cnt;
+	} cvmx_pko3_mac_table[32];
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		max_fifos = 28;	 /* exclusive of NULL FIFO */
+		fifo_groups = 8; /* inclusive of NULL PTGF */
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
+		max_fifos = 16;
+		fifo_groups = 5;
+	}
+
+	/* Initialize FIFO allocation table */
+	memset(&fifo_group_cfg, 0, sizeof(fifo_group_cfg));
+	memset(&fifo_group_spd, 0, sizeof(fifo_group_spd));
+	memset(cvmx_pko3_mac_table, 0, sizeof(cvmx_pko3_mac_table));
+
+	/* Initialize all MACs as disabled */
+	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+		cvmx_pko3_mac_table[mac_num].pri = 0;
+		cvmx_pko3_mac_table[mac_num].fifo_cnt = 0;
+		cvmx_pko3_mac_table[mac_num].fifo_id = 0x1f;
+	}
+
+	for (interface = 0; interface < num_interfaces; interface++) {
+		int xiface =
+			cvmx_helper_node_interface_to_xiface(node, interface);
+		/* Interface type for ALL interfaces */
+		mode = cvmx_helper_interface_get_mode(xiface);
+		num_ports = cvmx_helper_interface_enumerate(xiface);
+
+		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+			continue;
+		/*
+		 * Non-BGX interfaces:
+		 * Each of these interfaces has a single MAC really.
+		 */
+		if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
+		    mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
+		    mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
+			num_ports = 1;
+
+		for (port = 0; port < num_ports; port++) {
+			int i;
+
+			/* Get the per-port mode for BGX-interfaces */
+			if (interface < CVMX_HELPER_MAX_GMX)
+				mode = cvmx_helper_bgx_get_mode(xiface, port);
+			/* In MIXED mode, LMACs can run different protocols */
+
+			/* convert interface/port to mac number */
+			i = __cvmx_pko3_get_mac_num(xiface, port);
+			if (i < 0 || i >= (int)__cvmx_pko3_num_macs()) {
+				cvmx_printf("%s: ERROR: interface %d:%u port %d has no MAC %d/%d\n",
+					    __func__, node, interface, port, i,
+					    __cvmx_pko3_num_macs());
+				continue;
+			}
+
+			if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) {
+				unsigned int bgx_fifo_size =
+					__cvmx_helper_bgx_fifo_size(xiface,
+								    port);
+
+				cvmx_pko3_mac_table[i].mac_fifo_cnt =
+					bgx_fifo_size /
+					(CVMX_BGX_TX_FIFO_SIZE / 4);
+				cvmx_pko3_mac_table[i].pri = 2;
+				cvmx_pko3_mac_table[i].spd = 10;
+				cvmx_pko3_mac_table[i].fifo_cnt = 2;
+			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI) {
+				unsigned int bgx_fifo_size =
+					__cvmx_helper_bgx_fifo_size(xiface,
+								    port);
+
+				cvmx_pko3_mac_table[i].mac_fifo_cnt =
+					bgx_fifo_size /
+					(CVMX_BGX_TX_FIFO_SIZE / 4);
+				cvmx_pko3_mac_table[i].pri = 4;
+				cvmx_pko3_mac_table[i].spd = 40;
+				cvmx_pko3_mac_table[i].fifo_cnt = 4;
+			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
+				unsigned int bgx_fifo_size =
+					__cvmx_helper_bgx_fifo_size(xiface,
+								    port);
+
+				cvmx_pko3_mac_table[i].mac_fifo_cnt =
+					bgx_fifo_size /
+					(CVMX_BGX_TX_FIFO_SIZE / 4);
+				cvmx_pko3_mac_table[i].pri = 3;
+				cvmx_pko3_mac_table[i].fifo_cnt = 4;
+				/* DXAUI at 20G, or XAUI at 10G */
+				cvmx_pko3_mac_table[i].spd = 20;
+			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XFI) {
+				unsigned int bgx_fifo_size =
+					__cvmx_helper_bgx_fifo_size(xiface,
+								    port);
+
+				cvmx_pko3_mac_table[i].mac_fifo_cnt =
+					bgx_fifo_size /
+					(CVMX_BGX_TX_FIFO_SIZE / 4);
+				cvmx_pko3_mac_table[i].pri = 3;
+				cvmx_pko3_mac_table[i].fifo_cnt = 4;
+				cvmx_pko3_mac_table[i].spd = 10;
+			} else if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
+				cvmx_pko3_mac_table[i].fifo_cnt = 1;
+				cvmx_pko3_mac_table[i].pri = 1;
+				cvmx_pko3_mac_table[i].spd = 1;
+				cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
+			} else if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
+				   mode == CVMX_HELPER_INTERFACE_MODE_SRIO) {
+				cvmx_pko3_mac_table[i].fifo_cnt = 4;
+				cvmx_pko3_mac_table[i].pri = 3;
+				/* ILK/SRIO: speed depends on lane count */
+				cvmx_pko3_mac_table[i].spd = 40;
+				cvmx_pko3_mac_table[i].mac_fifo_cnt = 4;
+			} else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
+				cvmx_pko3_mac_table[i].fifo_cnt = 4;
+				cvmx_pko3_mac_table[i].pri = 2;
+				/* Actual speed depends on PCIe lanes/mode */
+				cvmx_pko3_mac_table[i].spd = 50;
+				/* SLI Tx FIFO size to be revisited */
+				cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
+			} else {
+				/* Other BGX interface modes: SGMII/RGMII */
+				unsigned int bgx_fifo_size =
+					__cvmx_helper_bgx_fifo_size(xiface,
+								    port);
+
+				cvmx_pko3_mac_table[i].mac_fifo_cnt =
+					bgx_fifo_size /
+					(CVMX_BGX_TX_FIFO_SIZE / 4);
+				cvmx_pko3_mac_table[i].fifo_cnt = 1;
+				cvmx_pko3_mac_table[i].pri = 1;
+				cvmx_pko3_mac_table[i].spd = 1;
+			}
+
+			if (debug)
+				debug("%s: intf %d:%u port %u %s mac %02u cnt %u macfifo %uk spd %u\n",
+				      __func__, node, interface, port,
+				      cvmx_helper_interface_mode_to_string(mode),
+				      i, cvmx_pko3_mac_table[i].fifo_cnt,
+				      cvmx_pko3_mac_table[i].mac_fifo_cnt * 8,
+				      cvmx_pko3_mac_table[i].spd);
+
+		} /* for port */
+	}	  /* for interface */
+
+	/* Count the number of requested FIFOs */
+	for (fifo_count = mac_num = 0; mac_num < __cvmx_pko3_num_macs();
+	     mac_num++)
+		fifo_count += cvmx_pko3_mac_table[mac_num].fifo_cnt;
+
+	if (debug)
+		debug("%s: initially requested FIFO count %u\n", __func__,
+		      fifo_count);
+
+	/* Heuristically trim FIFO count to fit in available number */
+	pri = 1;
+	cnt = 4;
+	while (fifo_count > max_fifos) {
+		for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+			if (cvmx_pko3_mac_table[mac_num].fifo_cnt == cnt &&
+			    cvmx_pko3_mac_table[mac_num].pri <= pri) {
+				cvmx_pko3_mac_table[mac_num].fifo_cnt >>= 1;
+				fifo_count -=
+					cvmx_pko3_mac_table[mac_num].fifo_cnt;
+			}
+			if (fifo_count <= max_fifos)
+				break;
+		}
+		if (pri >= 4) {
+			pri = 1;
+			cnt >>= 1;
+		} else {
+			pri++;
+		}
+		if (cnt == 0)
+			break;
+	}
+
+	if (debug)
+		debug("%s: adjusted FIFO count %u\n", __func__, fifo_count);
+
+	/* Special case for NULL Virtual FIFO */
+	fifo_group_cfg[fifo_groups - 1] = 0;
+	/* there is no MAC connected to NULL FIFO */
+
+	/* Configure MAC units, and attach a FIFO to each */
+	for (fifo = 0, cnt = 4; cnt > 0; cnt >>= 1) {
+		unsigned int g;
+
+		for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+			if (cvmx_pko3_mac_table[mac_num].fifo_cnt < cnt ||
+			    cvmx_pko3_mac_table[mac_num].fifo_id != 0x1f)
+				continue;
+
+			/* Attach FIFO to MAC */
+			cvmx_pko3_mac_table[mac_num].fifo_id = fifo;
+			g = fifo >> 2;
+			/* Sum speed for FIFO group */
+			fifo_group_spd[g] += cvmx_pko3_mac_table[mac_num].spd;
+
+			if (cnt == 4)
+				fifo_group_cfg[g] = 4; /* 10k,0,0,0 */
+			else if (cnt == 2 && (fifo & 0x3) == 0)
+				fifo_group_cfg[g] = 3; /* 5k,0,5k,0 */
+			else if (cnt == 2 && fifo_group_cfg[g] == 3)
+				/* no change */;
+			else if (cnt == 1 && (fifo & 0x2) &&
+				 fifo_group_cfg[g] == 3)
+				fifo_group_cfg[g] = 1; /* 5k,0,2.5k 2.5k*/
+			else if (cnt == 1 && (fifo & 0x3) == 0x3)
+				/* no change */;
+			else if (cnt == 1)
+				fifo_group_cfg[g] = 0; /* 2.5k x 4 */
+			else
+				cvmx_printf("ERROR: %s: internal error\n",
+					    __func__);
+
+			fifo += cnt;
+		}
+	}
+
+	/* Check if there was no error in FIFO allocation */
+	if (fifo > max_fifos) {
+		cvmx_printf("ERROR: %s: Internal error FIFO %u\n", __func__,
+			    fifo);
+		return -1;
+	}
+
+	if (debug)
+		debug("%s: used %u of FIFOs\n", __func__, fifo);
+
+	/* Now configure all FIFO groups */
+	for (fifo = 0; fifo < fifo_groups; fifo++) {
+		cvmx_pko_ptgfx_cfg_t pko_ptgfx_cfg;
+
+		pko_ptgfx_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTGFX_CFG(fifo));
+		if (pko_ptgfx_cfg.s.size != fifo_group_cfg[fifo])
+			pko_ptgfx_cfg.s.reset = 1;
+		pko_ptgfx_cfg.s.size = fifo_group_cfg[fifo];
+		if (fifo_group_spd[fifo] >= 40) {
+			if (pko_ptgfx_cfg.s.size >= 3)
+				pko_ptgfx_cfg.s.rate = 3; /* 50 Gbps */
+			else
+				pko_ptgfx_cfg.s.rate = 2; /* 25 Gbps */
+		} else if (fifo_group_spd[fifo] >= 20)
+			pko_ptgfx_cfg.s.rate = 2; /* 25 Gbps */
+		else if (fifo_group_spd[fifo] >= 10)
+			pko_ptgfx_cfg.s.rate = 1; /* 12.5 Gbps */
+		else
+			pko_ptgfx_cfg.s.rate = 0; /* 6.25 Gbps */
+
+		if (debug)
+			debug("%s: FIFO %#x-%#x size=%u speed=%d rate=%d\n",
+			      __func__, fifo * 4, fifo * 4 + 3,
+			      pko_ptgfx_cfg.s.size, fifo_group_spd[fifo],
+			      pko_ptgfx_cfg.s.rate);
+
+		csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
+		pko_ptgfx_cfg.s.reset = 0;
+		csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
+	}
+
+	/* Configure all MACs assigned FIFO number */
+	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+		cvmx_pko_macx_cfg_t pko_mac_cfg;
+
+		if (debug)
+			debug("%s: mac#%02u: fifo=%#x cnt=%u speed=%d\n",
+			      __func__, mac_num,
+			      cvmx_pko3_mac_table[mac_num].fifo_id,
+			      cvmx_pko3_mac_table[mac_num].fifo_cnt,
+			      cvmx_pko3_mac_table[mac_num].spd);
+
+		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+		pko_mac_cfg.s.fifo_num = cvmx_pko3_mac_table[mac_num].fifo_id;
+		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+	}
+
+	/* Setup PKO MCI0/MCI1/SKID credits */
+	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
+		cvmx_pko_mci0_max_credx_t pko_mci0_max_cred;
+		cvmx_pko_mci1_max_credx_t pko_mci1_max_cred;
+		cvmx_pko_macx_cfg_t pko_mac_cfg;
+		unsigned int fifo_credit, mac_credit, skid_credit;
+		unsigned int pko_fifo_cnt, fifo_size;
+		unsigned int mac_fifo_cnt;
+		unsigned int tmp;
+		int saved_fifo_num;
+
+		pko_fifo_cnt = cvmx_pko3_mac_table[mac_num].fifo_cnt;
+		mac_fifo_cnt = cvmx_pko3_mac_table[mac_num].mac_fifo_cnt;
+
+		/* Skip unused MACs */
+		if (pko_fifo_cnt == 0)
+			continue;
+
+		/* Check for sanity */
+		if (pko_fifo_cnt > 4)
+			pko_fifo_cnt = 1;
+
+		fifo_size = (2 * 1024) + (1024 / 2); /* 2.5KiB */
+		fifo_credit = pko_fifo_cnt * fifo_size;
+
+		if (mac_num == 0) {
+			/* loopback */
+			mac_credit = 4096; /* From HRM Sec 13.0 */
+			skid_credit = 0;
+		} else if (mac_num == 1) {
+			/* DPI */
+			mac_credit = 2 * 1024;
+			skid_credit = 0;
+		} else if (octeon_has_feature(OCTEON_FEATURE_ILK) &&
+			   (mac_num & 0xfe) == 2) {
+			/* ILK0, ILK1: MAC 2,3 */
+			mac_credit = 4 * 1024; /* 4KB fifo */
+			skid_credit = 0;
+		} else if (octeon_has_feature(OCTEON_FEATURE_SRIO) &&
+			   (mac_num >= 6) && (mac_num <= 9)) {
+			/* SRIO0, SRIO1: MAC 6..9 */
+			mac_credit = 1024 / 2;
+			skid_credit = 0;
+		} else {
+			/* BGX */
+			mac_credit = mac_fifo_cnt * 8 * 1024;
+			skid_credit = mac_fifo_cnt * 256;
+		}
+
+		if (debug)
+			debug("%s: mac %u pko_fifo_credit=%u mac_credit=%u\n",
+			      __func__, mac_num, fifo_credit, mac_credit);
+
+		tmp = (fifo_credit + mac_credit) / 16;
+		pko_mci0_max_cred.u64 = 0;
+		pko_mci0_max_cred.s.max_cred_lim = tmp;
+
+		/* Check for overflow */
+		if (pko_mci0_max_cred.s.max_cred_lim != tmp) {
+			cvmx_printf("WARNING: %s: MCI0 credit overflow\n",
+				    __func__);
+			pko_mci0_max_cred.s.max_cred_lim = 0xfff;
+		}
+
+		/* Pass 2 PKO hardware does not use the MCI0 credits */
+		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+			csr_wr_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num),
+				    pko_mci0_max_cred.u64);
+
+		/* The original CSR formula is the correct one after all */
+		tmp = (mac_credit) / 16;
+		pko_mci1_max_cred.u64 = 0;
+		pko_mci1_max_cred.s.max_cred_lim = tmp;
+
+		/* Check for overflow */
+		if (pko_mci1_max_cred.s.max_cred_lim != tmp) {
+			cvmx_printf("WARNING: %s: MCI1 credit overflow\n",
+				    __func__);
+			pko_mci1_max_cred.s.max_cred_lim = 0xfff;
+		}
+
+		csr_wr_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num),
+			    pko_mci1_max_cred.u64);
+
+		tmp = (skid_credit / 256) >> 1; /* valid 0,1,2 */
+		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+
+		/*
+		 * The PKO_MACX_CFG bits cannot be changed unless
+		 * FIFO_NUM=0x1f (unused FIFO)
+		 */
+		saved_fifo_num = pko_mac_cfg.s.fifo_num;
+		pko_mac_cfg.s.fifo_num = 0x1f;
+		pko_mac_cfg.s.skid_max_cnt = tmp;
+		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+
+		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+		pko_mac_cfg.s.fifo_num = saved_fifo_num;
+		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+
+		if (debug) {
+			pko_mci0_max_cred.u64 =
+				csr_rd_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num));
+			pko_mci1_max_cred.u64 =
+				csr_rd_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num));
+			pko_mac_cfg.u64 =
+				csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
+			debug("%s: mac %u PKO_MCI0_MAX_CREDX=%u PKO_MCI1_MAX_CREDX=%u PKO_MACX_CFG[SKID_MAX_CNT]=%u\n",
+			      __func__, mac_num,
+			      pko_mci0_max_cred.s.max_cred_lim,
+			      pko_mci1_max_cred.s.max_cred_lim,
+			      pko_mac_cfg.s.skid_max_cnt);
+		}
+	} /* for mac_num */
+
+	return 0;
+}
+
+/** Set MAC options
+ *
+ * The options supported are the parameters below:
+ *
+ * @param xiface The physical interface number
+ * @param index The physical sub-interface port
+ * @param fcs_enable Enable FCS generation
+ * @param pad_enable Enable padding to minimum packet size
+ * @param fcs_sop_off Number of bytes at start of packet to exclude from FCS
+ *
+ * The typical use for `fcs_sop_off` is when the interface is configured
+ * to use a header such as HiGig to precede every Ethernet packet;
+ * such a header usually does not partake in the CRC32 computation stream,
+ * and its size must be set with this parameter.
+ *
+ * @return Returns 0 on success, -1 if interface/port is invalid.
+ */
+int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
+				bool pad_enable, unsigned int fcs_sop_off)
+{
+	int mac_num;
+	cvmx_pko_macx_cfg_t pko_mac_cfg;
+	unsigned int fifo_num;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	if (debug)
+		debug("%s: intf %u:%u/%u fcs=%d pad=%d\n", __func__, xi.node,
+		      xi.interface, index, fcs_enable, pad_enable);
+
+	mac_num = __cvmx_pko3_get_mac_num(xiface, index);
+	if (mac_num < 0) {
+		cvmx_printf("ERROR: %s: invalid interface %u:%u/%u\n", __func__,
+			    xi.node, xi.interface, index);
+		return -1;
+	}
+
+	pko_mac_cfg.u64 = csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num));
+
+	/* If MAC is not assigned, return an error */
+	if (pko_mac_cfg.s.fifo_num == 0x1f) {
+		cvmx_printf("ERROR: %s: unused interface %u:%u/%u\n", __func__,
+			    xi.node, xi.interface, index);
+		return -1;
+	}
+
+	if (pko_mac_cfg.s.min_pad_ena == pad_enable &&
+	    pko_mac_cfg.s.fcs_ena == fcs_enable) {
+		if (debug)
+			debug("%s: mac %#x unchanged\n", __func__, mac_num);
+		return 0;
+	}
+
+	/* WORKAROUND: Pass1 won't allow change any bits unless FIFO_NUM=0x1f */
+	fifo_num = pko_mac_cfg.s.fifo_num;
+	pko_mac_cfg.s.fifo_num = 0x1f;
+
+	pko_mac_cfg.s.min_pad_ena = pad_enable;
+	pko_mac_cfg.s.fcs_ena = fcs_enable;
+	pko_mac_cfg.s.fcs_sop_off = fcs_sop_off;
+
+	csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+
+	pko_mac_cfg.s.fifo_num = fifo_num;
+	csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
+
+	if (debug)
+		debug("%s: PKO_MAC[%u]CFG=%#llx\n", __func__, mac_num,
+		      (unsigned long long)csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num)));
+
+	return 0;
+}
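+
+/*
+ * Usage sketch (illustrative only, not part of this patch): enable FCS
+ * generation and minimum-size padding on a port that carries no
+ * prepended header, so no start-of-packet bytes are excluded from FCS:
+ *
+ *	cvmx_pko3_interface_options(xiface, index, true, true, 0);
+ */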
+
+/** Set Descriptor Queue options
+ *
+ * The `min_pad` parameter must be in agreement with the interface-level
+ * padding option for all descriptor queues assigned to that particular
+ * interface/port.
+ *
+ * @param node on which to operate
+ * @param dq descriptor queue to set
+ * @param min_pad minimum padding to set for dq
+ */
+void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad)
+{
+	cvmx_pko_pdm_dqx_minpad_t reg;
+
+	dq &= (1 << 10) - 1;
+	reg.u64 = csr_rd_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq));
+	reg.s.minpad = min_pad;
+	csr_wr_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq), reg.u64);
+}
diff --git a/arch/mips/mach-octeon/cvmx-qlm-tables.c b/arch/mips/mach-octeon/cvmx-qlm-tables.c
new file mode 100644
index 0000000..ca22892
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-qlm-tables.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <mach/cvmx-regs.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn63xx[] = {
+	{ "prbs_err_cnt", 299, 252 },	   // prbs_err_cnt[47..0]
+	{ "prbs_lock", 251, 251 },	   // prbs_lock
+	{ "jtg_prbs_rst_n", 250, 250 },	   // jtg_prbs_rst_n
+	{ "jtg_run_prbs31", 249, 249 },	   // jtg_run_prbs31
+	{ "jtg_run_prbs7", 248, 248 },	   // jtg_run_prbs7
+	{ "Unused1", 247, 245 },	   // 0
+	{ "cfg_pwrup_set", 244, 244 },	   // cfg_pwrup_set
+	{ "cfg_pwrup_clr", 243, 243 },	   // cfg_pwrup_clr
+	{ "cfg_rst_n_set", 242, 242 },	   // cfg_rst_n_set
+	{ "cfg_rst_n_clr", 241, 241 },	   // cfg_rst_n_clr
+	{ "cfg_tx_idle_set", 240, 240 },   // cfg_tx_idle_set
+	{ "cfg_tx_idle_clr", 239, 239 },   // cfg_tx_idle_clr
+	{ "cfg_tx_byp", 238, 238 },	   // cfg_tx_byp
+	{ "cfg_tx_byp_inv", 237, 237 },	   // cfg_tx_byp_inv
+	{ "cfg_tx_byp_val", 236, 227 },	   // cfg_tx_byp_val[9..0]
+	{ "cfg_loopback", 226, 226 },	   // cfg_loopback
+	{ "shlpbck", 225, 224 },	   // shlpbck[1..0]
+	{ "sl_enable", 223, 223 },	   // sl_enable
+	{ "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
+	{ "trimen", 221, 220 },		   // trimen[1..0]
+	{ "serdes_tx_byp", 219, 219 },	   // serdes_tx_byp
+	{ "serdes_pll_byp", 218, 218 },	   // serdes_pll_byp
+	{ "lowf_byp", 217, 217 },	   // lowf_byp
+	{ "spdsel_byp", 216, 216 },	   // spdsel_byp
+	{ "div4_byp", 215, 215 },	   // div4_byp
+	{ "clkf_byp", 214, 208 },	   // clkf_byp[6..0]
+	{ "Unused2", 207, 206 },	   // 0
+	{ "biasdrv_hs_ls_byp", 205, 201 }, // biasdrv_hs_ls_byp[4..0]
+	{ "tcoeff_hf_ls_byp", 200, 197 },  // tcoeff_hf_ls_byp[3..0]
+	{ "biasdrv_hf_byp", 196, 192 },	   // biasdrv_hf_byp[4..0]
+	{ "tcoeff_hf_byp", 191, 188 },	   // tcoeff_hf_byp[3..0]
+	{ "Unused3", 187, 186 },	   // 0
+	{ "biasdrv_lf_ls_byp", 185, 181 }, // biasdrv_lf_ls_byp[4..0]
+	{ "tcoeff_lf_ls_byp", 180, 177 },  // tcoeff_lf_ls_byp[3..0]
+	{ "biasdrv_lf_byp", 176, 172 },	   // biasdrv_lf_byp[4..0]
+	{ "tcoeff_lf_byp", 171, 168 },	   // tcoeff_lf_byp[3..0]
+	{ "Unused4", 167, 167 },	   // 0
+	{ "interpbw", 166, 162 },	   // interpbw[4..0]
+	{ "pll_cpb", 161, 159 },	   // pll_cpb[2..0]
+	{ "pll_cps", 158, 156 },	   // pll_cps[2..0]
+	{ "pll_diffamp", 155, 152 },	   // pll_diffamp[3..0]
+	{ "Unused5", 151, 150 },	   // 0
+	{ "cfg_rx_idle_set", 149, 149 },   // cfg_rx_idle_set
+	{ "cfg_rx_idle_clr", 148, 148 },   // cfg_rx_idle_clr
+	{ "cfg_rx_idle_thr", 147, 144 },   // cfg_rx_idle_thr[3..0]
+	{ "cfg_com_thr", 143, 140 },	   // cfg_com_thr[3..0]
+	{ "cfg_rx_offset", 139, 136 },	   // cfg_rx_offset[3..0]
+	{ "cfg_skp_max", 135, 132 },	   // cfg_skp_max[3..0]
+	{ "cfg_skp_min", 131, 128 },	   // cfg_skp_min[3..0]
+	{ "cfg_fast_pwrup", 127, 127 },	   // cfg_fast_pwrup
+	{ "Unused6", 126, 100 },	   // 0
+	{ "detected_n", 99, 99 },	   // detected_n
+	{ "detected_p", 98, 98 },	   // detected_p
+	{ "dbg_res_rx", 97, 94 },	   // dbg_res_rx[3..0]
+	{ "dbg_res_tx", 93, 90 },	   // dbg_res_tx[3..0]
+	{ "cfg_tx_pol_set", 89, 89 },	   // cfg_tx_pol_set
+	{ "cfg_tx_pol_clr", 88, 88 },	   // cfg_tx_pol_clr
+	{ "cfg_rx_pol_set", 87, 87 },	   // cfg_rx_pol_set
+	{ "cfg_rx_pol_clr", 86, 86 },	   // cfg_rx_pol_clr
+	{ "cfg_rxd_set", 85, 85 },	   // cfg_rxd_set
+	{ "cfg_rxd_clr", 84, 84 },	   // cfg_rxd_clr
+	{ "cfg_rxd_wait", 83, 80 },	   // cfg_rxd_wait[3..0]
+	{ "cfg_cdr_limit", 79, 79 },	   // cfg_cdr_limit
+	{ "cfg_cdr_rotate", 78, 78 },	   // cfg_cdr_rotate
+	{ "cfg_cdr_bw_ctl", 77, 76 },	   // cfg_cdr_bw_ctl[1..0]
+	{ "cfg_cdr_trunc", 75, 74 },	   // cfg_cdr_trunc[1..0]
+	{ "cfg_cdr_rqoffs", 73, 64 },	   // cfg_cdr_rqoffs[9..0]
+	{ "cfg_cdr_inc2", 63, 58 },	   // cfg_cdr_inc2[5..0]
+	{ "cfg_cdr_inc1", 57, 52 },	   // cfg_cdr_inc1[5..0]
+	{ "fusopt_voter_sync", 51, 51 },   // fusopt_voter_sync
+	{ "rndt", 50, 50 },		   // rndt
+	{ "hcya", 49, 49 },		   // hcya
+	{ "hyst", 48, 48 },		   // hyst
+	{ "idle_dac", 47, 45 },		   // idle_dac[2..0]
+	{ "bg_ref_sel", 44, 44 },	   // bg_ref_sel
+	{ "ic50dac", 43, 39 },		   // ic50dac[4..0]
+	{ "ir50dac", 38, 34 },		   // ir50dac[4..0]
+	{ "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
+	{ "tx_rout_comp_value", 32, 29 },  // tx_rout_comp_value[3..0]
+	{ "tx_res_offset", 28, 25 },	   // tx_res_offset[3..0]
+	{ "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
+	{ "rx_rout_comp_value", 23, 20 },  // rx_rout_comp_value[3..0]
+	{ "rx_res_offset", 19, 16 },	   // rx_res_offset[3..0]
+	{ "rx_cap_gen2", 15, 12 },	   // rx_cap_gen2[3..0]
+	{ "rx_eq_gen2", 11, 8 },	   // rx_eq_gen2[3..0]
+	{ "rx_cap_gen1", 7, 4 },	   // rx_cap_gen1[3..0]
+	{ "rx_eq_gen1", 3, 0 },		   // rx_eq_gen1[3..0]
+	{ NULL, -1, -1 }
+};
+
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn66xx[] = {
+	{ "prbs_err_cnt", 303, 256 },	   // prbs_err_cnt[47..0]
+	{ "prbs_lock", 255, 255 },	   // prbs_lock
+	{ "jtg_prbs_rx_rst_n", 254, 254 }, // jtg_prbs_rx_rst_n
+	{ "jtg_prbs_tx_rst_n", 253, 253 }, // jtg_prbs_tx_rst_n
+	{ "jtg_prbs_mode", 252, 251 },	   // jtg_prbs_mode[252:251]
+	{ "jtg_prbs_rst_n", 250, 250 },	   // jtg_prbs_rst_n
+	{ "jtg_run_prbs31", 249,
+	  249 }, // jtg_run_prbs31 - Use jtg_prbs_mode instead
+	{ "jtg_run_prbs7", 248,
+	  248 },		  // jtg_run_prbs7 - Use jtg_prbs_mode instead
+	{ "Unused1", 247, 246 },  // 0
+	{ "div5_byp", 245, 245 }, // div5_byp
+	{ "cfg_pwrup_set", 244, 244 },	   // cfg_pwrup_set
+	{ "cfg_pwrup_clr", 243, 243 },	   // cfg_pwrup_clr
+	{ "cfg_rst_n_set", 242, 242 },	   // cfg_rst_n_set
+	{ "cfg_rst_n_clr", 241, 241 },	   // cfg_rst_n_clr
+	{ "cfg_tx_idle_set", 240, 240 },   // cfg_tx_idle_set
+	{ "cfg_tx_idle_clr", 239, 239 },   // cfg_tx_idle_clr
+	{ "cfg_tx_byp", 238, 238 },	   // cfg_tx_byp
+	{ "cfg_tx_byp_inv", 237, 237 },	   // cfg_tx_byp_inv
+	{ "cfg_tx_byp_val", 236, 227 },	   // cfg_tx_byp_val[9..0]
+	{ "cfg_loopback", 226, 226 },	   // cfg_loopback
+	{ "shlpbck", 225, 224 },	   // shlpbck[1..0]
+	{ "sl_enable", 223, 223 },	   // sl_enable
+	{ "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
+	{ "trimen", 221, 220 },		   // trimen[1..0]
+	{ "serdes_tx_byp", 219, 219 },	   // serdes_tx_byp
+	{ "serdes_pll_byp", 218, 218 },	   // serdes_pll_byp
+	{ "lowf_byp", 217, 217 },	   // lowf_byp
+	{ "spdsel_byp", 216, 216 },	   // spdsel_byp
+	{ "div4_byp", 215, 215 },	   // div4_byp
+	{ "clkf_byp", 214, 208 },	   // clkf_byp[6..0]
+	{ "biasdrv_hs_ls_byp", 207, 203 }, // biasdrv_hs_ls_byp[4..0]
+	{ "tcoeff_hf_ls_byp", 202, 198 },  // tcoeff_hf_ls_byp[4..0]
+	{ "biasdrv_hf_byp", 197, 193 },	   // biasdrv_hf_byp[4..0]
+	{ "tcoeff_hf_byp", 192, 188 },	   // tcoeff_hf_byp[4..0]
+	{ "biasdrv_lf_ls_byp", 187, 183 }, // biasdrv_lf_ls_byp[4..0]
+	{ "tcoeff_lf_ls_byp", 182, 178 },  // tcoeff_lf_ls_byp[4..0]
+	{ "biasdrv_lf_byp", 177, 173 },	   // biasdrv_lf_byp[4..0]
+	{ "tcoeff_lf_byp", 172, 168 },	   // tcoeff_lf_byp[4..0]
+	{ "Unused4", 167, 167 },	   // 0
+	{ "interpbw", 166, 162 },	   // interpbw[4..0]
+	{ "pll_cpb", 161, 159 },	   // pll_cpb[2..0]
+	{ "pll_cps", 158, 156 },	   // pll_cps[2..0]
+	{ "pll_diffamp", 155, 152 },	   // pll_diffamp[3..0]
+	{ "cfg_err_thr", 151, 150 },	   // cfg_err_thr
+	{ "cfg_rx_idle_set", 149, 149 },   // cfg_rx_idle_set
+	{ "cfg_rx_idle_clr", 148, 148 },   // cfg_rx_idle_clr
+	{ "cfg_rx_idle_thr", 147, 144 },   // cfg_rx_idle_thr[3..0]
+	{ "cfg_com_thr", 143, 140 },	   // cfg_com_thr[3..0]
+	{ "cfg_rx_offset", 139, 136 },	   // cfg_rx_offset[3..0]
+	{ "cfg_skp_max", 135, 132 },	   // cfg_skp_max[3..0]
+	{ "cfg_skp_min", 131, 128 },	   // cfg_skp_min[3..0]
+	{ "cfg_fast_pwrup", 127, 127 },	   // cfg_fast_pwrup
+	{ "Unused6", 126, 101 },	   // 0
+	{ "cfg_indep_dis", 100, 100 },	   // cfg_indep_dis
+	{ "detected_n", 99, 99 },	   // detected_n
+	{ "detected_p", 98, 98 },	   // detected_p
+	{ "dbg_res_rx", 97, 94 },	   // dbg_res_rx[3..0]
+	{ "dbg_res_tx", 93, 90 },	   // dbg_res_tx[3..0]
+	{ "cfg_tx_pol_set", 89, 89 },	   // cfg_tx_pol_set
+	{ "cfg_tx_pol_clr", 88, 88 },	   // cfg_tx_pol_clr
+	{ "cfg_rx_pol_set", 87, 87 },	   // cfg_rx_pol_set
+	{ "cfg_rx_pol_clr", 86, 86 },	   // cfg_rx_pol_clr
+	{ "cfg_rxd_set", 85, 85 },	   // cfg_rxd_set
+	{ "cfg_rxd_clr", 84, 84 },	   // cfg_rxd_clr
+	{ "cfg_rxd_wait", 83, 80 },	   // cfg_rxd_wait[3..0]
+	{ "cfg_cdr_limit", 79, 79 },	   // cfg_cdr_limit
+	{ "cfg_cdr_rotate", 78, 78 },	   // cfg_cdr_rotate
+	{ "cfg_cdr_bw_ctl", 77, 76 },	   // cfg_cdr_bw_ctl[1..0]
+	{ "cfg_cdr_trunc", 75, 74 },	   // cfg_cdr_trunc[1..0]
+	{ "cfg_cdr_rqoffs", 73, 64 },	   // cfg_cdr_rqoffs[9..0]
+	{ "cfg_cdr_inc2", 63, 58 },	   // cfg_cdr_inc2[5..0]
+	{ "cfg_cdr_inc1", 57, 52 },	   // cfg_cdr_inc1[5..0]
+	{ "fusopt_voter_sync", 51, 51 },   // fusopt_voter_sync
+	{ "rndt", 50, 50 },		   // rndt
+	{ "hcya", 49, 49 },		   // hcya
+	{ "hyst", 48, 48 },		   // hyst
+	{ "idle_dac", 47, 45 },		   // idle_dac[2..0]
+	{ "bg_ref_sel", 44, 44 },	   // bg_ref_sel
+	{ "ic50dac", 43, 39 },		   // ic50dac[4..0]
+	{ "ir50dac", 38, 34 },		   // ir50dac[4..0]
+	{ "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
+	{ "tx_rout_comp_value", 32, 29 },  // tx_rout_comp_value[3..0]
+	{ "tx_res_offset", 28, 25 },	   // tx_res_offset[3..0]
+	{ "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
+	{ "rx_rout_comp_value", 23, 20 },  // rx_rout_comp_value[3..0]
+	{ "rx_res_offset", 19, 16 },	   // rx_res_offset[3..0]
+	{ "rx_cap_gen2", 15, 12 },	   // rx_cap_gen2[3..0]
+	{ "rx_eq_gen2", 11, 8 },	   // rx_eq_gen2[3..0]
+	{ "rx_cap_gen1", 7, 4 },	   // rx_cap_gen1[3..0]
+	{ "rx_eq_gen1", 3, 0 },		   // rx_eq_gen1[3..0]
+	{ NULL, -1, -1 }
+};
+
+const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn68xx[] = {
+	{ "prbs_err_cnt", 303, 256 },	   // prbs_err_cnt[47..0]
+	{ "prbs_lock", 255, 255 },	   // prbs_lock
+	{ "jtg_prbs_rx_rst_n", 254, 254 }, // jtg_prbs_rx_rst_n
+	{ "jtg_prbs_tx_rst_n", 253, 253 }, // jtg_prbs_tx_rst_n
+	{ "jtg_prbs_mode", 252, 251 },	   // jtg_prbs_mode[252:251]
+	{ "jtg_prbs_rst_n", 250, 250 },	   // jtg_prbs_rst_n
+	{ "jtg_run_prbs31", 249,
+	  249 }, // jtg_run_prbs31 - Use jtg_prbs_mode instead
+	{ "jtg_run_prbs7", 248,
+	  248 },		 // jtg_run_prbs7 - Use jtg_prbs_mode instead
+	{ "Unused1", 247, 245 }, // 0
+	{ "cfg_pwrup_set", 244, 244 },	   // cfg_pwrup_set
+	{ "cfg_pwrup_clr", 243, 243 },	   // cfg_pwrup_clr
+	{ "cfg_rst_n_set", 242, 242 },	   // cfg_rst_n_set
+	{ "cfg_rst_n_clr", 241, 241 },	   // cfg_rst_n_clr
+	{ "cfg_tx_idle_set", 240, 240 },   // cfg_tx_idle_set
+	{ "cfg_tx_idle_clr", 239, 239 },   // cfg_tx_idle_clr
+	{ "cfg_tx_byp", 238, 238 },	   // cfg_tx_byp
+	{ "cfg_tx_byp_inv", 237, 237 },	   // cfg_tx_byp_inv
+	{ "cfg_tx_byp_val", 236, 227 },	   // cfg_tx_byp_val[9..0]
+	{ "cfg_loopback", 226, 226 },	   // cfg_loopback
+	{ "shlpbck", 225, 224 },	   // shlpbck[1..0]
+	{ "sl_enable", 223, 223 },	   // sl_enable
+	{ "sl_posedge_sample", 222, 222 }, // sl_posedge_sample
+	{ "trimen", 221, 220 },		   // trimen[1..0]
+	{ "serdes_tx_byp", 219, 219 },	   // serdes_tx_byp
+	{ "serdes_pll_byp", 218, 218 },	   // serdes_pll_byp
+	{ "lowf_byp", 217, 217 },	   // lowf_byp
+	{ "spdsel_byp", 216, 216 },	   // spdsel_byp
+	{ "div4_byp", 215, 215 },	   // div4_byp
+	{ "clkf_byp", 214, 208 },	   // clkf_byp[6..0]
+	{ "biasdrv_hs_ls_byp", 207, 203 }, // biasdrv_hs_ls_byp[4..0]
+	{ "tcoeff_hf_ls_byp", 202, 198 },  // tcoeff_hf_ls_byp[4..0]
+	{ "biasdrv_hf_byp", 197, 193 },	   // biasdrv_hf_byp[4..0]
+	{ "tcoeff_hf_byp", 192, 188 },	   // tcoeff_hf_byp[4..0]
+	{ "biasdrv_lf_ls_byp", 187, 183 }, // biasdrv_lf_ls_byp[4..0]
+	{ "tcoeff_lf_ls_byp", 182, 178 },  // tcoeff_lf_ls_byp[4..0]
+	{ "biasdrv_lf_byp", 177, 173 },	   // biasdrv_lf_byp[4..0]
+	{ "tcoeff_lf_byp", 172, 168 },	   // tcoeff_lf_byp[4..0]
+	{ "Unused4", 167, 167 },	   // 0
+	{ "interpbw", 166, 162 },	   // interpbw[4..0]
+	{ "pll_cpb", 161, 159 },	   // pll_cpb[2..0]
+	{ "pll_cps", 158, 156 },	   // pll_cps[2..0]
+	{ "pll_diffamp", 155, 152 },	   // pll_diffamp[3..0]
+	{ "cfg_err_thr", 151, 150 },	   // cfg_err_thr
+	{ "cfg_rx_idle_set", 149, 149 },   // cfg_rx_idle_set
+	{ "cfg_rx_idle_clr", 148, 148 },   // cfg_rx_idle_clr
+	{ "cfg_rx_idle_thr", 147, 144 },   // cfg_rx_idle_thr[3..0]
+	{ "cfg_com_thr", 143, 140 },	   // cfg_com_thr[3..0]
+	{ "cfg_rx_offset", 139, 136 },	   // cfg_rx_offset[3..0]
+	{ "cfg_skp_max", 135, 132 },	   // cfg_skp_max[3..0]
+	{ "cfg_skp_min", 131, 128 },	   // cfg_skp_min[3..0]
+	{ "cfg_fast_pwrup", 127, 127 },	   // cfg_fast_pwrup
+	{ "Unused6", 126, 100 },	   // 0
+	{ "detected_n", 99, 99 },	   // detected_n
+	{ "detected_p", 98, 98 },	   // detected_p
+	{ "dbg_res_rx", 97, 94 },	   // dbg_res_rx[3..0]
+	{ "dbg_res_tx", 93, 90 },	   // dbg_res_tx[3..0]
+	{ "cfg_tx_pol_set", 89, 89 },	   // cfg_tx_pol_set
+	{ "cfg_tx_pol_clr", 88, 88 },	   // cfg_tx_pol_clr
+	{ "cfg_rx_pol_set", 87, 87 },	   // cfg_rx_pol_set
+	{ "cfg_rx_pol_clr", 86, 86 },	   // cfg_rx_pol_clr
+	{ "cfg_rxd_set", 85, 85 },	   // cfg_rxd_set
+	{ "cfg_rxd_clr", 84, 84 },	   // cfg_rxd_clr
+	{ "cfg_rxd_wait", 83, 80 },	   // cfg_rxd_wait[3..0]
+	{ "cfg_cdr_limit", 79, 79 },	   // cfg_cdr_limit
+	{ "cfg_cdr_rotate", 78, 78 },	   // cfg_cdr_rotate
+	{ "cfg_cdr_bw_ctl", 77, 76 },	   // cfg_cdr_bw_ctl[1..0]
+	{ "cfg_cdr_trunc", 75, 74 },	   // cfg_cdr_trunc[1..0]
+	{ "cfg_cdr_rqoffs", 73, 64 },	   // cfg_cdr_rqoffs[9..0]
+	{ "cfg_cdr_inc2", 63, 58 },	   // cfg_cdr_inc2[5..0]
+	{ "cfg_cdr_inc1", 57, 52 },	   // cfg_cdr_inc1[5..0]
+	{ "fusopt_voter_sync", 51, 51 },   // fusopt_voter_sync
+	{ "rndt", 50, 50 },		   // rndt
+	{ "hcya", 49, 49 },		   // hcya
+	{ "hyst", 48, 48 },		   // hyst
+	{ "idle_dac", 47, 45 },		   // idle_dac[2..0]
+	{ "bg_ref_sel", 44, 44 },	   // bg_ref_sel
+	{ "ic50dac", 43, 39 },		   // ic50dac[4..0]
+	{ "ir50dac", 38, 34 },		   // ir50dac[4..0]
+	{ "tx_rout_comp_bypass", 33, 33 }, // tx_rout_comp_bypass
+	{ "tx_rout_comp_value", 32, 29 },  // tx_rout_comp_value[3..0]
+	{ "tx_res_offset", 28, 25 },	   // tx_res_offset[3..0]
+	{ "rx_rout_comp_bypass", 24, 24 }, // rx_rout_comp_bypass
+	{ "rx_rout_comp_value", 23, 20 },  // rx_rout_comp_value[3..0]
+	{ "rx_res_offset", 19, 16 },	   // rx_res_offset[3..0]
+	{ "rx_cap_gen2", 15, 12 },	   // rx_cap_gen2[3..0]
+	{ "rx_eq_gen2", 11, 8 },	   // rx_eq_gen2[3..0]
+	{ "rx_cap_gen1", 7, 4 },	   // rx_cap_gen1[3..0]
+	{ "rx_eq_gen1", 3, 0 },		   // rx_eq_gen1[3..0]
+	{ NULL, -1, -1 }
+};
diff --git a/arch/mips/mach-octeon/cvmx-range.c b/arch/mips/mach-octeon/cvmx-range.c
new file mode 100644
index 0000000..2ac20ea
--- /dev/null
+++ b/arch/mips/mach-octeon/cvmx-range.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <log.h>
+#include <time.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-global-resources.h>
+
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-range.h>
+
+#define CVMX_RANGE_AVAILABLE ((u64)-88)
+#define addr_of_element(base, index)					\
+	(1ull << 63 | ((base) + sizeof(u64) + (index) * sizeof(u64)))
+#define addr_of_size(base) (1ull << 63 | (base))
+
+static const int debug;
+
+int cvmx_range_memory_size(int nelements)
+{
+	return sizeof(u64) * (nelements + 1);
+}
+
+int cvmx_range_init(u64 range_addr, int size)
+{
+	u64 lsize = size;
+	u64 i;
+
+	cvmx_write64_uint64(addr_of_size(range_addr), lsize);
+	for (i = 0; i < lsize; i++) {
+		cvmx_write64_uint64(addr_of_element(range_addr, i),
+				    CVMX_RANGE_AVAILABLE);
+	}
+	return 0;
+}
+
+static int64_t cvmx_range_find_next_available(u64 range_addr, u64 index,
+					      int align)
+{
+	u64 size = cvmx_read64_uint64(addr_of_size(range_addr));
+	u64 i;
+
+	while ((index % align) != 0)
+		index++;
+
+	for (i = index; i < size; i += align) {
+		u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+
+		if (debug)
+			debug("%s: index=%d owner=%llx\n", __func__, (int)i,
+			      (unsigned long long)r_owner);
+		if (r_owner == CVMX_RANGE_AVAILABLE)
+			return i;
+	}
+	return -1;
+}
+
+static int64_t cvmx_range_find_last_available(u64 range_addr, u64 index,
+					      u64 align)
+{
+	u64 size = cvmx_read64_uint64(addr_of_size(range_addr));
+	u64 i;
+
+	if (index == 0)
+		index = size - 1;
+
+	/* Round down to alignment so the scan stays within the range */
+	while ((index % align) != 0)
+		index--;
+
+	for (i = index; i > align; i -= align) {
+		u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+
+		if (debug)
+			debug("%s: index=%d owner=%llx\n", __func__, (int)i,
+			      (unsigned long long)r_owner);
+		if (r_owner == CVMX_RANGE_AVAILABLE)
+			return i;
+	}
+	return -1;
+}
+
+int cvmx_range_alloc_ordered(u64 range_addr, u64 owner, u64 cnt,
+			     int align, int reverse)
+{
+	u64 i = 0, size;
+	s64 first_available;
+
+	if (debug)
+		debug("%s: range_addr=%llx  owner=%llx cnt=%d\n", __func__,
+		      (unsigned long long)range_addr,
+		      (unsigned long long)owner, (int)cnt);
+
+	size = cvmx_read64_uint64(addr_of_size(range_addr));
+	while (i < size) {
+		u64 available_cnt = 0;
+
+		if (reverse)
+			first_available = cvmx_range_find_last_available(range_addr, i, align);
+		else
+			first_available = cvmx_range_find_next_available(range_addr, i, align);
+		if (first_available == -1)
+			return -1;
+		i = first_available;
+
+		if (debug)
+			debug("%s: first_available=%d\n", __func__, (int)first_available);
+		while ((available_cnt != cnt) && (i < size)) {
+			u64 r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+
+			if (r_owner == CVMX_RANGE_AVAILABLE)
+				available_cnt++;
+			i++;
+		}
+		if (available_cnt == cnt) {
+			u64 j;
+
+			if (debug)
+				debug("%s: first_available=%d available=%d\n",
+				      __func__,
+				      (int)first_available, (int)available_cnt);
+
+			for (j = first_available; j < first_available + cnt;
+			     j++) {
+				u64 a = addr_of_element(range_addr, j);
+
+				cvmx_write64_uint64(a, owner);
+			}
+			return first_available;
+		}
+	}
+
+	if (debug) {
+		debug("ERROR: %s: failed to allocate range cnt=%d\n",
+		      __func__, (int)cnt);
+		cvmx_range_show(range_addr);
+	}
+
+	return -1;
+}
+
+int cvmx_range_alloc(u64 range_addr, u64 owner, u64 cnt, int align)
+{
+	return cvmx_range_alloc_ordered(range_addr, owner, cnt, align, 0);
+}
+
+int cvmx_range_reserve(u64 range_addr, u64 owner, u64 base,
+		       u64 cnt)
+{
+	u64 i, size, r_owner;
+	u64 up = base + cnt;
+
+	size = cvmx_read64_uint64(addr_of_size(range_addr));
+	if (up > size) {
+		debug("ERROR: %s: invalid base or cnt. range_addr=0x%llx, owner=0x%llx, size=%d base+cnt=%d\n",
+		      __func__, (unsigned long long)range_addr,
+		      (unsigned long long)owner,
+		      (int)size, (int)up);
+		return -1;
+	}
+	for (i = base; i < up; i++) {
+		r_owner = cvmx_read64_uint64(addr_of_element(range_addr, i));
+		if (debug)
+			debug("%s: %d: %llx\n",
+			      __func__, (int)i, (unsigned long long)r_owner);
+		if (r_owner != CVMX_RANGE_AVAILABLE) {
+			if (debug) {
+				debug("%s: resource already reserved base+cnt=%d %llu %llu %llx %llx %llx\n",
+				      __func__, (int)i, (unsigned long long)cnt,
+				      (unsigned long long)base,
+				      (unsigned long long)r_owner,
+				      (unsigned long long)range_addr,
+				      (unsigned long long)owner);
+			}
+			return -1;
+		}
+	}
+	for (i = base; i < up; i++)
+		cvmx_write64_uint64(addr_of_element(range_addr, i), owner);
+	return base;
+}
+
+int __cvmx_range_is_allocated(u64 range_addr, int bases[], int count)
+{
+	u64 i, cnt, size;
+	u64 r_owner;
+
+	cnt = count;
+	size = cvmx_read64_uint64(addr_of_size(range_addr));
+	for (i = 0; i < cnt; i++) {
+		u64 base = bases[i];
+
+		if (base >= size) {
+			debug("ERROR: %s: invalid base or cnt size=%d base=%d\n",
+			      __func__, (int)size, (int)base);
+			return 0;
+		}
+		r_owner = cvmx_read64_uint64(addr_of_element(range_addr, base));
+		if (r_owner == CVMX_RANGE_AVAILABLE) {
+			if (debug) {
+				debug("%s: i=%d:base=%d is available\n",
+				      __func__, (int)i, (int)base);
+			}
+			return 0;
+		}
+	}
+	return 1;
+}
+
+int cvmx_range_free_mutiple(u64 range_addr, int bases[], int count)
+{
+	u64 i, cnt;
+
+	cnt = count;
+	if (__cvmx_range_is_allocated(range_addr, bases, count) != 1)
+		return -1;
+	for (i = 0; i < cnt; i++) {
+		u64 base = bases[i];
+
+		cvmx_write64_uint64(addr_of_element(range_addr, base),
+				    CVMX_RANGE_AVAILABLE);
+	}
+	return 0;
+}
+
+int cvmx_range_free_with_base(u64 range_addr, int base, int cnt)
+{
+	u64 i, size;
+	u64 up = base + cnt;
+
+	size = cvmx_read64_uint64(addr_of_size(range_addr));
+	if (up > size) {
+		debug("ERROR: %s: invalid base or cnt size=%d base+cnt=%d\n",
+		      __func__, (int)size, (int)up);
+		return -1;
+	}
+	for (i = base; i < up; i++) {
+		cvmx_write64_uint64(addr_of_element(range_addr, i),
+				    CVMX_RANGE_AVAILABLE);
+	}
+	return 0;
+}
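+
+/*
+ * Illustrative usage sketch (not part of the API): 'range_addr' and
+ * 'owner' are caller-supplied values, as in the functions above.
+ *
+ *	int base = cvmx_range_alloc(range_addr, owner, 4, 1);
+ *
+ *	if (base >= 0)
+ *		cvmx_range_free_with_base(range_addr, base, 4);
+ */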
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-agl.h b/arch/mips/mach-octeon/include/mach/cvmx-agl.h
new file mode 100644
index 0000000..4afb3a4
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-agl.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Functions for AGL (RGMII) common initialization and configuration.
+ */
+
+#ifndef __CVMX_AGL_H__
+#define __CVMX_AGL_H__
+
+/**
+ * Enable an AGL (RGMII) port.
+ *
+ * @param port   Port to enable
+ *
+ * @return Zero on success, negative on failure
+ */
+int cvmx_agl_enable(int port);
+
+/**
+ * Get the current link state of an AGL (RGMII) port.
+ *
+ * @param port   Port to query
+ *
+ * @return Link state information
+ */
+cvmx_helper_link_info_t cvmx_agl_link_get(int port);
+
+/**
+ * Set the MII/RGMII link based on mode.
+ *
+ * @param port       Interface port on which to set the link
+ * @param link_info  Link status
+ *
+ * @return       0 on success and 1 on failure
+ */
+int cvmx_agl_link_set(int port, cvmx_helper_link_info_t link_info);
+
+/**
+ * Disables the sending of flow control (pause) frames on the specified
+ * AGL (RGMII) port(s).
+ *
+ * @param interface Which interface (0 or 1)
+ * @param port_mask Mask (4 bits) of which ports on the interface to disable
+ *                  backpressure on.
+ *                  1 => disable backpressure
+ *                  0 => enable backpressure
+ *
+ * @return 0 on success
+ *         -1 on error
+ */
+int cvmx_agl_set_backpressure_override(u32 interface, u32 port_mask);
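+
+/*
+ * Illustrative bring-up sketch for one AGL port ('port' is a
+ * hypothetical index); cvmx_helper_link_info_t is assumed to carry a
+ * link_up flag as elsewhere in the cvmx helpers:
+ *
+ *	cvmx_helper_link_info_t li;
+ *
+ *	if (!cvmx_agl_enable(port)) {
+ *		li = cvmx_agl_link_get(port);
+ *		if (li.s.link_up)
+ *			cvmx_agl_link_set(port, li);
+ *	}
+ */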
+
+#endif /* __CVMX_AGL_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h b/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h
index 283ac5c..d5c004d 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-bootmem.h
@@ -26,7 +26,8 @@
 
 /* Real physical addresses of memory regions */
 #define OCTEON_DDR0_BASE    (0x0ULL)
-#define OCTEON_DDR0_SIZE    (0x010000000ULL)
+/* Use 16MiB here, as 256MiB leads to overwriting the U-Boot reloc space */
+#define OCTEON_DDR0_SIZE    (0x001000000ULL)
 #define OCTEON_DDR1_BASE    ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
 			     ? 0x20000000ULL : 0x410000000ULL)
 #define OCTEON_DDR1_SIZE    (0x010000000ULL)
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-config.h b/arch/mips/mach-octeon/include/mach/cvmx-config.h
new file mode 100644
index 0000000..4f66a3c
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-config.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#ifndef __CVMX_CONFIG_H__
+#define __CVMX_CONFIG_H__
+
+/************************* Config Specific Defines ************************/
+#define CVMX_LLM_NUM_PORTS 1
+
+/**< PKO queues per port for interface 0 (ports 0-15) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
+
+/**< PKO queues per port for interface 1 (ports 16-31) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
+
+/**< PKO queues per port for interface 4 (AGL) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE4 1
+
+/**< Limit on the number of PKO ports enabled for interface 0 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+
+/**< Limit on the number of PKO ports enabled for interface 1 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+
+/**< PKO queues per port for PCI (ports 32-35) */
+#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
+
+/**< PKO queues per port for Loop devices (ports 36-39) */
+#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
+
+/**< PKO queues per port for SRIO0 devices (ports 40-41) */
+#define CVMX_PKO_QUEUES_PER_PORT_SRIO0 1
+
+/**< PKO queues per port for SRIO1 devices (ports 42-43) */
+#define CVMX_PKO_QUEUES_PER_PORT_SRIO1 1
+
+/************************* FPA allocation *********************************/
+/* Pool sizes in bytes, must be multiple of a cache line */
+#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_3_SIZE (2 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_6_SIZE (8 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+
+/* Pools in use */
+/**< Packet buffers */
+#define CVMX_FPA_PACKET_POOL (0)
+#ifndef CVMX_FPA_PACKET_POOL_SIZE
+#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
+#endif
+
+/**< Work queue entries */
+#define CVMX_FPA_WQE_POOL      (1)
+#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
+
+/**< PKO queue command buffers */
+#define CVMX_FPA_OUTPUT_BUFFER_POOL	 (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
+
+/**< BCH queue command buffers */
+#define CVMX_FPA_BCH_POOL      (6)
+#define CVMX_FPA_BCH_POOL_SIZE CVMX_FPA_POOL_6_SIZE
+
+/*************************  FAU allocation ********************************/
+/* The fetch and add registers are allocated here.  They are arranged
+ * in order of descending size so that all alignment constraints are
+ * automatically met.
+ * The enums are linked so that the following enum continues allocating
+ * where the previous one left off, so the numbering within each
+ * enum always starts with zero.  The macros take care of the address
+ * increment size, so the values entered always increase by 1.
+ * FAU registers are accessed with byte addresses.
+ */
+
+#define CVMX_FAU_REG_64_ADDR(x) (((x) << 3) + CVMX_FAU_REG_64_START)
+typedef enum {
+	CVMX_FAU_REG_64_START = 0,
+	/**< FAU registers for the position in PKO command buffers */
+	CVMX_FAU_REG_OQ_ADDR_INDEX = CVMX_FAU_REG_64_ADDR(0),
+	/* Array of 36 */
+	CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(36),
+} cvmx_fau_reg_64_t;
+
+#define CVMX_FAU_REG_32_ADDR(x) (((x) << 2) + CVMX_FAU_REG_32_START)
+typedef enum {
+	CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
+	CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
+} cvmx_fau_reg_32_t;
+
+#define CVMX_FAU_REG_16_ADDR(x) (((x) << 1) + CVMX_FAU_REG_16_START)
+typedef enum {
+	CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
+	CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
+} cvmx_fau_reg_16_t;
+
+#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
+typedef enum {
+	CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
+	CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
+} cvmx_fau_reg_8_t;
+
+/* The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first available
+ * FAU address that is not allocated in cvmx-config.h. This is 64 bit aligned.
+ */
+#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
+#define CVMX_FAU_REG_END	(2048)
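+
+/*
+ * Illustrative sketch: because the enums above chain into each other,
+ * FAU byte addresses can be computed directly.  'port' is a
+ * hypothetical index into the 36-entry array:
+ *
+ *	int fau_reg = CVMX_FAU_REG_OQ_ADDR_INDEX + 8 * port;
+ */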
+
+/********************** scratch memory allocation *************************/
+/* Scratchpad memory allocation.  Note that these are byte memory addresses.
+ * Some uses of scratchpad (IOBDMA for example) require the use of 8-byte
+ * aligned addresses, so proper alignment needs to be taken into account.
+ */
+
+/**< Pre-allocation for PKO queue command buffers */
+#define CVMX_SCR_OQ_BUF_PRE_ALLOC (0)
+
+/**< Generic scratch iobdma area */
+#define CVMX_SCR_SCRATCH (8)
+
+/**< First location available after cvmx-config.h allocated region. */
+#define CVMX_SCR_REG_AVAIL_BASE (16)
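+
+/*
+ * Illustrative sketch: applications carve their own scratch slots after
+ * the region reserved here; 'MY_APP_SCR' is a hypothetical name and
+ * must stay 8-byte aligned for IOBDMA use:
+ *
+ *	#define MY_APP_SCR (CVMX_SCR_REG_AVAIL_BASE + 0)
+ */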
+
+#endif /* __CVMX_CONFIG_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-fau.h b/arch/mips/mach-octeon/include/mach/cvmx-fau.h
new file mode 100644
index 0000000..d795ff6
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-fau.h
@@ -0,0 +1,581 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Interface to the hardware Fetch and Add Unit.
+ */
+
+#ifndef __CVMX_FAU_H__
+#define __CVMX_FAU_H__
+
+extern u8 *cvmx_fau_regs_ptr;
+
+/**
+ * Initializes the FAU; on devices with FAU hardware this is a no-op.
+ */
+int cvmx_fau_init(void);
+
+/**
+ * Return the location of the emulated FAU register.
+ */
+static inline u8 *__cvmx_fau_sw_addr(int reg)
+{
+	if (cvmx_unlikely(!cvmx_fau_regs_ptr))
+		cvmx_fau_init();
+	return (cvmx_fau_regs_ptr + reg);
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 8 for 64 bit access.
+ * @param value   Signed value to add.
+ *                Note: Only the low 22 bits are available.
+ * @return Value of the register before the update
+ */
+static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg64_t reg,
+					       int64_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU))
+		return cvmx_hwfau_fetch_and_add64(reg, value);
+
+	return __atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
+				  value, __ATOMIC_SEQ_CST);
+}
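+
+/*
+ * Illustrative sketch: bump the per-port PKO command-buffer index from
+ * cvmx-config.h and fetch its previous value ('port' is a hypothetical
+ * index):
+ *
+ *	int64_t old = cvmx_fau_fetch_and_add64(
+ *		CVMX_FAU_REG_OQ_ADDR_INDEX + 8 * port, 1);
+ */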
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 4 for 32 bit access.
+ * @param value   Signed value to add.
+ *                Note: Only the low 22 bits are available.
+ * @return Value of the register before the update
+ */
+static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg32_t reg,
+					       int32_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU))
+		return cvmx_hwfau_fetch_and_add32(reg, value);
+
+	reg ^= SWIZZLE_32;
+	return __atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
+				  value, __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 2 for 16 bit access.
+ * @param value   Signed value to add.
+ * @return Value of the register before the update
+ */
+static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg16_t reg,
+					       int16_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU))
+		return cvmx_hwfau_fetch_and_add16(reg, value);
+
+	reg ^= SWIZZLE_16;
+	return __atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
+				  value, __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ * @param value   Signed value to add.
+ * @return Value of the register before the update
+ */
+static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU))
+		return cvmx_hwfau_fetch_and_add8(reg, value);
+
+	reg ^= SWIZZLE_8;
+	return __atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
+				  value, __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 64 bit add after the current tag switch
+ * completes
+ *
+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.
+ *               - Step by 8 for 64 bit access.
+ * @param value  Signed value to add.
+ *               Note: Only the low 22 bits are available.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ *         the value of the register before the update will be
+ *         returned
+ */
+static inline cvmx_fau_tagwait64_t
+cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg64_t reg, int64_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU))
+		return cvmx_hwfau_tagwait_fetch_and_add64(reg, value);
+
+	/* Not implemented yet. */
+	return (cvmx_fau_tagwait64_t){ 1, 0 };
+}
+
+/**
+ * Perform an atomic 32 bit add after the current tag switch
+ * completes
+ *
+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.
+ *               - Step by 4 for 32 bit access.
+ * @param value  Signed value to add.
+ *               Note: Only the low 22 bits are available.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ *         the value of the register before the update will be
+ *         returned
+ */
+static inline cvmx_fau_tagwait32_t
+cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg32_t reg, int32_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU))
+		return cvmx_hwfau_tagwait_fetch_and_add32(reg, value);
+
+	/* Not implemented yet. */
+	return (cvmx_fau_tagwait32_t){ 1, 0 };
+}
+
+/**
+ * Perform an atomic 16 bit add after the current tag switch
+ * completes
+ *
+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.
+ *               - Step by 2 for 16 bit access.
+ * @param value  Signed value to add.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ *         the value of the register before the update will be
+ *         returned
+ */
+static inline cvmx_fau_tagwait16_t
+cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg16_t reg, int16_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU))
+		return cvmx_hwfau_tagwait_fetch_and_add16(reg, value);
+
+	/* Not implemented yet. */
+	return (cvmx_fau_tagwait16_t){ 1, 0 };
+}
+
+/**
+ * Perform an atomic 8 bit add after the current tag switch
+ * completes
+ *
+ * @param reg    FAU atomic register to access. 0 <= reg < 2048.
+ * @param value  Signed value to add.
+ * @return If a timeout occurs, the error bit will be set. Otherwise
+ *         the value of the register before the update will be
+ *         returned
+ */
+static inline cvmx_fau_tagwait8_t
+cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU))
+		return cvmx_hwfau_tagwait_fetch_and_add8(reg, value);
+
+	/* Not implemented yet. */
+	return (cvmx_fau_tagwait8_t){ 1, 0 };
+}
+
+/**
+ * Perform an async atomic 64 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ *                Must be 8 byte aligned.
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 8 for 64 bit access.
+ * @param value   Signed value to add.
+ *                Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void
+cvmx_fau_async_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, int64_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_async_fetch_and_add64(scraddr, reg, value);
+		return;
+	}
+	cvmx_scratch_write64(
+		scraddr,
+		__atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
+				   value, __ATOMIC_SEQ_CST));
+}
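+
+/*
+ * Illustrative async usage sketch: issue the add, synchronize, then
+ * pick up the old value from scratch.  CVMX_SYNCIOBDMA and
+ * cvmx_scratch_read64() are the usual IOBDMA helpers, assumed
+ * available here:
+ *
+ *	cvmx_fau_async_fetch_and_add64(CVMX_SCR_SCRATCH, reg, 1);
+ *	CVMX_SYNCIOBDMA;
+ *	old = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ */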
+
+/**
+ * Perform an async atomic 32 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ *                Must be 8 byte aligned.
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 4 for 32 bit access.
+ * @param value   Signed value to add.
+ *                Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void
+cvmx_fau_async_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, int32_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_async_fetch_and_add32(scraddr, reg, value);
+		return;
+	}
+	cvmx_scratch_write64(
+		scraddr,
+		__atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
+				   value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 16 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ *                Must be 8 byte aligned.
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 2 for 16 bit access.
+ * @param value   Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void
+cvmx_fau_async_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, int16_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_async_fetch_and_add16(scraddr, reg, value);
+		return;
+	}
+	cvmx_scratch_write64(
+		scraddr,
+		__atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
+				   value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 8 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ *                Must be 8 byte aligned.
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ * @param value   Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void
+cvmx_fau_async_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, int8_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_async_fetch_and_add8(scraddr, reg, value);
+		return;
+	}
+	cvmx_scratch_write64(
+		scraddr,
+		__atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
+				   value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 64 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ *                Must be 8 byte aligned.
+ *                If a timeout occurs, the error bit (63) will be set. Otherwise
+ *                the value of the register before the update will be
+ *                returned
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 8 for 64 bit access.
+ * @param value   Signed value to add.
+ *                Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add64(u64 scraddr,
+							  cvmx_fau_reg64_t reg,
+							  int64_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_async_tagwait_fetch_and_add64(scraddr, reg, value);
+		return;
+	}
+
+	/* Broken.  Where is the tag wait? */
+	cvmx_scratch_write64(
+		scraddr,
+		__atomic_fetch_add(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)),
+				   value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 32 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ *                Must be 8 byte aligned.
+ *                If a timeout occurs, the error bit (63) will be set. Otherwise
+ *                the value of the register before the update will be
+ *                returned
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 4 for 32 bit access.
+ * @param value   Signed value to add.
+ *                Note: Only the low 22 bits are available.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add32(u64 scraddr,
+							  cvmx_fau_reg32_t reg,
+							  int32_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_async_tagwait_fetch_and_add32(scraddr, reg, value);
+		return;
+	}
+	/* Broken.  Where is the tag wait? */
+	cvmx_scratch_write64(
+		scraddr,
+		__atomic_fetch_add(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)),
+				   value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 16 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ *                Must be 8 byte aligned.
+ *                If a timeout occurs, the error bit (63) will be set. Otherwise
+ *                the value of the register before the update will be
+ *                returned
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 2 for 16 bit access.
+ * @param value   Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add16(u64 scraddr,
+							  cvmx_fau_reg16_t reg,
+							  int16_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_async_tagwait_fetch_and_add16(scraddr, reg, value);
+		return;
+	}
+	/* Broken.  Where is the tag wait? */
+	cvmx_scratch_write64(
+		scraddr,
+		__atomic_fetch_add(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)),
+				   value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an async atomic 8 bit add after the current tag
+ * switch completes.
+ *
+ * @param scraddr Scratch memory byte address to put response in.
+ *                Must be 8 byte aligned.
+ *                If a timeout occurs, the error bit (63) will be set. Otherwise
+ *                the value of the register before the update will be
+ *                returned
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ * @param value   Signed value to add.
+ * @return Placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add8(u64 scraddr,
+							 cvmx_fau_reg8_t reg,
+							 int8_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_async_tagwait_fetch_and_add8(scraddr, reg, value);
+		return;
+	}
+	/* Broken.  Where is the tag wait? */
+	cvmx_scratch_write64(
+		scraddr,
+		__atomic_fetch_add(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)),
+				   value, __ATOMIC_SEQ_CST));
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 8 for 64 bit access.
+ * @param value   Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add64(cvmx_fau_reg64_t reg, int64_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_atomic_add64(reg, value);
+		return;
+	}
+	/* Ignored fetch values should be optimized away */
+	__atomic_add_fetch(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)), value,
+			   __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 4 for 32 bit access.
+ * @param value   Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add32(cvmx_fau_reg32_t reg, int32_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_atomic_add32(reg, value);
+		return;
+	}
+	reg ^= SWIZZLE_32;
+	/* Ignored fetch values should be optimized away */
+	__atomic_add_fetch(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)), value,
+			   __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 2 for 16 bit access.
+ * @param value   Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add16(cvmx_fau_reg16_t reg, int16_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_atomic_add16(reg, value);
+		return;
+	}
+	reg ^= SWIZZLE_16;
+	/* Ignored fetch values should be optimized away */
+	__atomic_add_fetch(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)), value,
+			   __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ * @param value   Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add8(cvmx_fau_reg8_t reg, int8_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_atomic_add8(reg, value);
+		return;
+	}
+	reg ^= SWIZZLE_8;
+	/* Ignored fetch values should be optimized away */
+	__atomic_add_fetch(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)), value,
+			   __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 64 bit write
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 8 for 64 bit access.
+ * @param value   Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write64(cvmx_fau_reg64_t reg, int64_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_atomic_write64(reg, value);
+		return;
+	}
+	__atomic_store_n(CASTPTR(int64_t, __cvmx_fau_sw_addr(reg)), value,
+			 __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 32 bit write
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 4 for 32 bit access.
+ * @param value   Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write32(cvmx_fau_reg32_t reg, int32_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_atomic_write32(reg, value);
+		return;
+	}
+	reg ^= SWIZZLE_32;
+	__atomic_store_n(CASTPTR(int32_t, __cvmx_fau_sw_addr(reg)), value,
+			 __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 16 bit write
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ *                - Step by 2 for 16 bit access.
+ * @param value   Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write16(cvmx_fau_reg16_t reg, int16_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_atomic_write16(reg, value);
+		return;
+	}
+	reg ^= SWIZZLE_16;
+	__atomic_store_n(CASTPTR(int16_t, __cvmx_fau_sw_addr(reg)), value,
+			 __ATOMIC_SEQ_CST);
+}
+
+/**
+ * Perform an atomic 8 bit write
+ *
+ * @param reg     FAU atomic register to access. 0 <= reg < 2048.
+ * @param value   Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write8(cvmx_fau_reg8_t reg, int8_t value)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		cvmx_hwfau_atomic_write8(reg, value);
+		return;
+	}
+	reg ^= SWIZZLE_8;
+	__atomic_store_n(CASTPTR(int8_t, __cvmx_fau_sw_addr(reg)), value,
+			 __ATOMIC_SEQ_CST);
+}
+
+/** Allocates a 64-bit FAU register.
+ *  @param reserve Base address to reserve
+ *  @return Base address of the allocated FAU register
+ */
+int cvmx_fau64_alloc(int reserve);
+
+/** Allocates a 32-bit FAU register.
+ *  @param reserve Base address to reserve
+ *  @return Base address of the allocated FAU register
+ */
+int cvmx_fau32_alloc(int reserve);
+
+/** Allocates a 16-bit FAU register.
+ *  @param reserve Base address to reserve
+ *  @return Base address of the allocated FAU register
+ */
+int cvmx_fau16_alloc(int reserve);
+
+/** Allocates an 8-bit FAU register.
+ *  @param reserve Base address to reserve
+ *  @return Base address of the allocated FAU register
+ */
+int cvmx_fau8_alloc(int reserve);
+
+/** Frees the specified FAU register.
+ *  @param address Base address of the register to release
+ *  @return 0 on success; -1 on failure
+ */
+int cvmx_fau_free(int address);
+
+/** Displays the FAU registers array.
+ */
+void cvmx_fau_show(void);
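+
+/*
+ * Illustrative allocation sketch; the 'reserve' semantics follow the
+ * comments above:
+ *
+ *	int r = cvmx_fau64_alloc(0);
+ *
+ *	if (r >= 0)
+ *		cvmx_fau_free(r);
+ */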
+
+#endif /* __CVMX_FAU_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-fpa.h b/arch/mips/mach-octeon/include/mach/cvmx-fpa.h
index aa238a8..0660c31 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-fpa.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-fpa.h
@@ -104,8 +104,9 @@
 	/* FPA3 is handled differently */
 	if ((octeon_has_feature(OCTEON_FEATURE_FPA3))) {
 		return cvmx_fpa3_alloc(cvmx_fpa1_pool_to_fpa3_aura(pool));
-	} else
+	} else {
 		return cvmx_fpa1_alloc(pool);
+	}
 }
 
 /**
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h b/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h
index b3e04d7..9bab03f 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h
@@ -526,41 +526,4 @@
 int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool);
 const char *cvmx_fpa3_get_aura_name(cvmx_fpa3_gaura_t aura);
 
-/* FIXME: Need a different macro for stage2 of u-boot */
-
-static inline void cvmx_fpa3_stage2_init(int aura, int pool, u64 stack_paddr, int stacklen,
-					 int buffer_sz, int buf_cnt)
-{
-	cvmx_fpa_poolx_cfg_t pool_cfg;
-
-	/* Configure pool stack */
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), stack_paddr);
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), stack_paddr);
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), stack_paddr + stacklen);
-
-	/* Configure pool with buffer size */
-	pool_cfg.u64 = 0;
-	pool_cfg.cn78xx.nat_align = 1;
-	pool_cfg.cn78xx.buf_size = buffer_sz >> 7;
-	pool_cfg.cn78xx.l_type = 0x2;
-	pool_cfg.cn78xx.ena = 0;
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
-	/* Reset pool before starting */
-	pool_cfg.cn78xx.ena = 1;
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
-
-	cvmx_write_csr_node(0, CVMX_FPA_AURAX_CFG(aura), 0);
-	cvmx_write_csr_node(0, CVMX_FPA_AURAX_CNT_ADD(aura), buf_cnt);
-	cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), (u64)pool);
-}
-
-static inline void cvmx_fpa3_stage2_disable(int aura, int pool)
-{
-	cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), 0);
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), 0);
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 0);
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), 0);
-	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), 0);
-}
-
 #endif /* __CVMX_FPA3_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-helper-board.h b/arch/mips/mach-octeon/include/mach/cvmx-helper-board.h
index 5837592..9cc61b1 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-helper-board.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-helper-board.h
@@ -9,6 +9,8 @@
 #ifndef __CVMX_HELPER_BOARD_H__
 #define __CVMX_HELPER_BOARD_H__
 
+#include <asm-generic/gpio.h>
+
 #define CVMX_VSC7224_NAME_LEN 16
 
 typedef enum {
@@ -185,8 +187,8 @@
 	struct cvmx_fdt_i2c_bus_info *i2c_bus;
 	/** Address of VSC7224 on i2c bus */
 	int i2c_addr;
-	struct cvmx_fdt_gpio_info *los_gpio;   /** LoS GPIO pin */
-	struct cvmx_fdt_gpio_info *reset_gpio; /** Reset GPIO pin */
+	struct gpio_desc los_gpio;		/** LoS GPIO pin */
+	struct gpio_desc reset_gpio;		/** Reset GPIO pin */
 	int of_offset;			       /** Offset in device tree */
 };
 
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-helper-fdt.h b/arch/mips/mach-octeon/include/mach/cvmx-helper-fdt.h
index 3328845..c3ce359 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-helper-fdt.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-helper-fdt.h
@@ -14,10 +14,13 @@
 #include <fdtdec.h>
 #include <time.h>
 #include <asm/global_data.h>
+#include <asm-generic/gpio.h>
+#include <dm/device.h>
 #include <linux/libfdt.h>
 
 #include <mach/cvmx-helper-sfp.h>
 
+/* TODO: This is deprecated and some of it can be removed at some point */
 enum cvmx_i2c_bus_type {
 	CVMX_I2C_BUS_OCTEON,
 	CVMX_I2C_MUX_PCA9540,
@@ -55,6 +58,8 @@
 	u8 enable_bit;
 	/** True if mux, false if switch */
 	bool is_mux;
+
+	struct udevice *i2c_bus;
 };
 
 /**
@@ -85,22 +90,24 @@
 	bool is_qsfp;
 	/** True if EEPROM data is valid */
 	bool valid;
+
 	/** SFP tx_disable GPIO descriptor */
-	struct cvmx_fdt_gpio_info *tx_disable;
+	struct gpio_desc tx_disable;
 	/** SFP mod_abs/QSFP mod_prs GPIO descriptor */
-	struct cvmx_fdt_gpio_info *mod_abs;
+	struct gpio_desc mod_abs;
 	/** SFP tx_error GPIO descriptor */
-	struct cvmx_fdt_gpio_info *tx_error;
+	struct gpio_desc tx_error;
 	/** SFP rx_los GPIO descriptor */
-	struct cvmx_fdt_gpio_info *rx_los;
+	struct gpio_desc rx_los;
 	/** QSFP select GPIO descriptor */
-	struct cvmx_fdt_gpio_info *select;
+	struct gpio_desc select;
 	/** QSFP reset GPIO descriptor */
-	struct cvmx_fdt_gpio_info *reset;
+	struct gpio_desc reset;
 	/** QSFP interrupt GPIO descriptor */
-	struct cvmx_fdt_gpio_info *interrupt;
+	struct gpio_desc interrupt;
 	/** QSFP lp_mode GPIO descriptor */
-	struct cvmx_fdt_gpio_info *lp_mode;
+	struct gpio_desc lp_mode;
+
 	/** Last mod_abs value */
 	int last_mod_abs;
 	/** Last rx_los value */
@@ -146,6 +153,9 @@
 int cvmx_fdt_lookup_phandles(const void *fdt_addr, int node, const char *prop_name, int *lenp,
 			     int *nodes);
 
+int cvmx_ofnode_lookup_phandles(ofnode node, const char *prop_name,
+				int *lenp, ofnode *nodes);
+
 /**
  * Helper to return the address property
  *
@@ -341,8 +351,7 @@
  * Given the parent offset of an i2c device build up a list describing the bus
  * which can contain i2c muxes and switches.
  *
- * @param[in]	fdt_addr	address of device tree
- * @param	of_offset	Offset of the parent node of a GPIO device in
+ * @param[in]	node		ofnode of the parent node of a GPIO device in
  *				the device tree.
  *
  * Return:	pointer to list of i2c devices starting from the root which
@@ -351,7 +360,7 @@
  *
  * @see cvmx_fdt_free_i2c_bus()
  */
-struct cvmx_fdt_i2c_bus_info *cvmx_fdt_get_i2c_bus(const void *fdt_addr, int of_offset);
+struct cvmx_fdt_i2c_bus_info *cvmx_ofnode_get_i2c_bus(ofnode node);
 
 /**
  * Return the Octeon bus number for a bus descriptor
@@ -497,15 +506,6 @@
 int __cvmx_fdt_parse_avsp5410(const void *fdt_addr);
 
 /**
- * Parse SFP information from device tree
- *
- * @param[in]	fdt_addr	Address of flat device tree
- *
- * Return: pointer to sfp info or NULL if error
- */
-struct cvmx_fdt_sfp_info *cvmx_helper_fdt_parse_sfp_info(const void *fdt_addr, int of_offset);
-
-/**
  * @INTERNAL
  * Parses either a CS4343 phy or a slice of the phy from the device tree
  * @param[in]	fdt_addr	Address of FDT
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-helper-pko.h b/arch/mips/mach-octeon/include/mach/cvmx-helper-pko.h
index 806102d..e1eb824 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-helper-pko.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-helper-pko.h
@@ -17,7 +17,7 @@
  * number. Users should set this pointer to a function before
  * calling any cvmx-helper operations.
  */
-void (*cvmx_override_pko_queue_priority)(int ipd_port, u8 *priorities);
+extern void (*cvmx_override_pko_queue_priority)(int ipd_port, u8 *priorities);
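+
+/*
+ * Illustrative sketch of installing an override before packet I/O
+ * setup; 'my_prio' is a hypothetical callback and the priorities array
+ * is assumed to cover the queues of one port:
+ *
+ *	static void my_prio(int ipd_port, u8 *priorities)
+ *	{
+ *		priorities[0] = 8;
+ *	}
+ *	...
+ *	cvmx_override_pko_queue_priority = my_prio;
+ */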
 
 /**
  * Gets the fpa pool number of pko pool
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-helper.h b/arch/mips/mach-octeon/include/mach/cvmx-helper.h
index caa0c69..2a7b133 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-helper.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-helper.h
@@ -128,6 +128,26 @@
 };
 
 /**
+ * cvmx_override_iface_phy_mode(int interface, int index) is a function
+ * pointer. It is meant to allow customization of interfaces which do
+ * not have a PHY.
+ *
+ * @return 0 if the MAC decides TX_CONFIG_REG, or 1 if the PHY decides
+ * TX_CONFIG_REG.
+ *
+ * If this function pointer is NULL then it defaults to the MAC.
+ */
+extern int (*cvmx_override_iface_phy_mode)(int interface, int index);
+
+/**
+ * cvmx_override_ipd_port_setup(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the IPD port/port kind
+ * setup before packet input/output comes online. It is called
+ * after cvmx-helper does the default IPD configuration, but
+ * before IPD is enabled. Users should set this pointer to a
+ * function before calling any cvmx-helper operations.
+ */
+extern void (*cvmx_override_ipd_port_setup)(int ipd_port);
+
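+/*
+ * Illustrative sketch ('my_port_setup' is a hypothetical callback):
+ *
+ *	static void my_port_setup(int ipd_port)
+ *	{
+ *		// adjust port/port-kind settings for ipd_port here
+ *	}
+ *	...
+ *	cvmx_override_ipd_port_setup = my_port_setup;
+ */
+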
+/**
  * This function enables the IPD and also enables the packet interfaces.
  * The packet interfaces (RGMII and SPI) must be enabled after the
  * IPD.  This should be called by the user program after any additional
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-ilk-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-ilk-defs.h
new file mode 100644
index 0000000..d54f9ca
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-ilk-defs.h
@@ -0,0 +1,2269 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon ILK (Interlaken).
+ */
+
+#ifndef __CVMX_ILK_DEFS_H__
+#define __CVMX_ILK_DEFS_H__
+
+#define CVMX_ILK_BIST_SUM    (0x0001180014000038ull)
+#define CVMX_ILK_GBL_CFG     (0x0001180014000000ull)
+#define CVMX_ILK_GBL_ERR_CFG (0x0001180014000058ull)
+#define CVMX_ILK_GBL_INT     (0x0001180014000008ull)
+#define CVMX_ILK_GBL_INT_EN  (0x0001180014000010ull)
+#define CVMX_ILK_INT_SUM     (0x0001180014000030ull)
+#define CVMX_ILK_LNEX_TRN_CTL(offset)                                          \
+	(0x00011800140380F0ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_LNEX_TRN_LD(offset)                                           \
+	(0x00011800140380E0ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_LNEX_TRN_LP(offset)                                           \
+	(0x00011800140380E8ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_LNE_DBG      (0x0001180014030008ull)
+#define CVMX_ILK_LNE_STS_MSG  (0x0001180014030000ull)
+#define CVMX_ILK_RID_CFG      (0x0001180014000050ull)
+#define CVMX_ILK_RXF_IDX_PMAP (0x0001180014000020ull)
+#define CVMX_ILK_RXF_MEM_PMAP (0x0001180014000028ull)
+#define CVMX_ILK_RXX_BYTE_CNTX(offset, block_id)                               \
+	(0x0001180014023000ull +                                               \
+	 (((offset) & 255) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_RXX_CAL_ENTRYX(offset, block_id)                              \
+	(0x0001180014021000ull +                                               \
+	 (((offset) & 511) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_RXX_CFG0(offset) (0x0001180014020000ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_CFG1(offset) (0x0001180014020008ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_CHAX(offset, block_id)                                    \
+	(0x0001180014002000ull +                                               \
+	 (((offset) & 255) + ((block_id) & 1) * 0x200ull) * 8)
+#define CVMX_ILK_RXX_CHA_XONX(offset, block_id)                                \
+	(0x0001180014020400ull + (((offset) & 3) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_RXX_ERR_CFG(offset)                                           \
+	(0x00011800140200E0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_FLOW_CTL0(offset)                                         \
+	(0x0001180014020090ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_FLOW_CTL1(offset)                                         \
+	(0x0001180014020098ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_IDX_CAL(offset)                                           \
+	(0x00011800140200A0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_IDX_STAT0(offset)                                         \
+	(0x0001180014020070ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_IDX_STAT1(offset)                                         \
+	(0x0001180014020078ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_INT(offset) (0x0001180014020010ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_INT_EN(offset)                                            \
+	(0x0001180014020018ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_JABBER(offset)                                            \
+	(0x00011800140200B8ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_MEM_CAL0(offset)                                          \
+	(0x00011800140200A8ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_MEM_CAL1(offset)                                          \
+	(0x00011800140200B0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_MEM_STAT0(offset)                                         \
+	(0x0001180014020080ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_MEM_STAT1(offset)                                         \
+	(0x0001180014020088ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_PKT_CNTX(offset, block_id)                                \
+	(0x0001180014022000ull +                                               \
+	 (((offset) & 255) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_RXX_RID(offset) (0x00011800140200C0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT0(offset)                                             \
+	(0x0001180014020020ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT1(offset)                                             \
+	(0x0001180014020028ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT2(offset)                                             \
+	(0x0001180014020030ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT3(offset)                                             \
+	(0x0001180014020038ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT4(offset)                                             \
+	(0x0001180014020040ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT5(offset)                                             \
+	(0x0001180014020048ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT6(offset)                                             \
+	(0x0001180014020050ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT7(offset)                                             \
+	(0x0001180014020058ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT8(offset)                                             \
+	(0x0001180014020060ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RXX_STAT9(offset)                                             \
+	(0x0001180014020068ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_RX_LNEX_CFG(offset)                                           \
+	(0x0001180014038000ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_INT(offset)                                           \
+	(0x0001180014038008ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_INT_EN(offset)                                        \
+	(0x0001180014038010ull + ((offset) & 7) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT0(offset)                                         \
+	(0x0001180014038018ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT1(offset)                                         \
+	(0x0001180014038020ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT10(offset)                                        \
+	(0x0001180014038068ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT2(offset)                                         \
+	(0x0001180014038028ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT3(offset)                                         \
+	(0x0001180014038030ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT4(offset)                                         \
+	(0x0001180014038038ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT5(offset)                                         \
+	(0x0001180014038040ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT6(offset)                                         \
+	(0x0001180014038048ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT7(offset)                                         \
+	(0x0001180014038050ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT8(offset)                                         \
+	(0x0001180014038058ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_RX_LNEX_STAT9(offset)                                         \
+	(0x0001180014038060ull + ((offset) & 15) * 1024)
+#define CVMX_ILK_SER_CFG (0x0001180014000018ull)
+#define CVMX_ILK_TXX_BYTE_CNTX(offset, block_id)                               \
+	(0x0001180014013000ull +                                               \
+	 (((offset) & 255) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_TXX_CAL_ENTRYX(offset, block_id)                              \
+	(0x0001180014011000ull +                                               \
+	 (((offset) & 511) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_TXX_CFG0(offset) (0x0001180014010000ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_CFG1(offset) (0x0001180014010008ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_CHA_XONX(offset, block_id)                                \
+	(0x0001180014010400ull + (((offset) & 3) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_TXX_DBG(offset) (0x0001180014010070ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_ERR_CFG(offset)                                           \
+	(0x00011800140100B0ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_FLOW_CTL0(offset)                                         \
+	(0x0001180014010048ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_FLOW_CTL1(offset)                                         \
+	(0x0001180014010050ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_IDX_CAL(offset)                                           \
+	(0x0001180014010058ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_IDX_PMAP(offset)                                          \
+	(0x0001180014010010ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_IDX_STAT0(offset)                                         \
+	(0x0001180014010020ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_IDX_STAT1(offset)                                         \
+	(0x0001180014010028ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_INT(offset) (0x0001180014010078ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_INT_EN(offset)                                            \
+	(0x0001180014010080ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_CAL0(offset)                                          \
+	(0x0001180014010060ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_CAL1(offset)                                          \
+	(0x0001180014010068ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_PMAP(offset)                                          \
+	(0x0001180014010018ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_STAT0(offset)                                         \
+	(0x0001180014010030ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_MEM_STAT1(offset)                                         \
+	(0x0001180014010038ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_PIPE(offset) (0x0001180014010088ull + ((offset) & 1) * 16384)
+#define CVMX_ILK_TXX_PKT_CNTX(offset, block_id)                                \
+	(0x0001180014012000ull +                                               \
+	 (((offset) & 255) + ((block_id) & 1) * 0x800ull) * 8)
+#define CVMX_ILK_TXX_RMATCH(offset)                                            \
+	(0x0001180014010040ull + ((offset) & 1) * 16384)
+
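+/*
+ * Illustrative sketch: CSRs are read and written through the typed
+ * unions below, using the generic cvmx_read_csr()/cvmx_write_csr()
+ * accessors:
+ *
+ *	cvmx_ilk_gbl_cfg_t cfg;
+ *
+ *	cfg.u64 = cvmx_read_csr(CVMX_ILK_GBL_CFG);
+ *	cfg.s.reset = 1;
+ *	cvmx_write_csr(CVMX_ILK_GBL_CFG, cfg.u64);
+ */
+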
+/**
+ * cvmx_ilk_bist_sum
+ */
+union cvmx_ilk_bist_sum {
+	u64 u64;
+	struct cvmx_ilk_bist_sum_s {
+		u64 rxf_x2p : 1;
+		u64 rxf_mem19 : 1;
+		u64 rxf_mem18 : 1;
+		u64 rxf_mem17 : 1;
+		u64 rxf_mem16 : 1;
+		u64 rxf_mem15 : 1;
+		u64 reserved_52_57 : 6;
+		u64 rxf_mem8 : 1;
+		u64 rxf_mem7 : 1;
+		u64 rxf_mem6 : 1;
+		u64 rxf_mem5 : 1;
+		u64 rxf_mem4 : 1;
+		u64 rxf_mem3 : 1;
+		u64 reserved_36_45 : 10;
+		u64 rle7_dsk1 : 1;
+		u64 rle7_dsk0 : 1;
+		u64 rle6_dsk1 : 1;
+		u64 rle6_dsk0 : 1;
+		u64 rle5_dsk1 : 1;
+		u64 rle5_dsk0 : 1;
+		u64 rle4_dsk1 : 1;
+		u64 rle4_dsk0 : 1;
+		u64 rle3_dsk1 : 1;
+		u64 rle3_dsk0 : 1;
+		u64 rle2_dsk1 : 1;
+		u64 rle2_dsk0 : 1;
+		u64 rle1_dsk1 : 1;
+		u64 rle1_dsk0 : 1;
+		u64 rle0_dsk1 : 1;
+		u64 rle0_dsk0 : 1;
+		u64 rlk1_pmap : 1;
+		u64 reserved_18_18 : 1;
+		u64 rlk1_fwc : 1;
+		u64 reserved_16_16 : 1;
+		u64 rlk0_pmap : 1;
+		u64 rlk0_stat1 : 1;
+		u64 rlk0_fwc : 1;
+		u64 rlk0_stat : 1;
+		u64 tlk1_stat1 : 1;
+		u64 tlk1_fwc : 1;
+		u64 reserved_9_9 : 1;
+		u64 tlk1_txf2 : 1;
+		u64 tlk1_txf1 : 1;
+		u64 tlk1_txf0 : 1;
+		u64 tlk0_stat1 : 1;
+		u64 tlk0_fwc : 1;
+		u64 reserved_3_3 : 1;
+		u64 tlk0_txf2 : 1;
+		u64 tlk0_txf1 : 1;
+		u64 tlk0_txf0 : 1;
+	} s;
+	struct cvmx_ilk_bist_sum_cn68xx {
+		u64 reserved_58_63 : 6;
+		u64 rxf_x2p1 : 1;
+		u64 rxf_x2p0 : 1;
+		u64 rxf_pmap : 1;
+		u64 rxf_mem2 : 1;
+		u64 rxf_mem1 : 1;
+		u64 rxf_mem0 : 1;
+		u64 reserved_36_51 : 16;
+		u64 rle7_dsk1 : 1;
+		u64 rle7_dsk0 : 1;
+		u64 rle6_dsk1 : 1;
+		u64 rle6_dsk0 : 1;
+		u64 rle5_dsk1 : 1;
+		u64 rle5_dsk0 : 1;
+		u64 rle4_dsk1 : 1;
+		u64 rle4_dsk0 : 1;
+		u64 rle3_dsk1 : 1;
+		u64 rle3_dsk0 : 1;
+		u64 rle2_dsk1 : 1;
+		u64 rle2_dsk0 : 1;
+		u64 rle1_dsk1 : 1;
+		u64 rle1_dsk0 : 1;
+		u64 rle0_dsk1 : 1;
+		u64 rle0_dsk0 : 1;
+		u64 reserved_19_19 : 1;
+		u64 rlk1_stat1 : 1;
+		u64 rlk1_fwc : 1;
+		u64 rlk1_stat : 1;
+		u64 reserved_15_15 : 1;
+		u64 rlk0_stat1 : 1;
+		u64 rlk0_fwc : 1;
+		u64 rlk0_stat : 1;
+		u64 tlk1_stat1 : 1;
+		u64 tlk1_fwc : 1;
+		u64 tlk1_stat0 : 1;
+		u64 tlk1_txf2 : 1;
+		u64 tlk1_txf1 : 1;
+		u64 tlk1_txf0 : 1;
+		u64 tlk0_stat1 : 1;
+		u64 tlk0_fwc : 1;
+		u64 tlk0_stat0 : 1;
+		u64 tlk0_txf2 : 1;
+		u64 tlk0_txf1 : 1;
+		u64 tlk0_txf0 : 1;
+	} cn68xx;
+	struct cvmx_ilk_bist_sum_cn68xxp1 {
+		u64 reserved_58_63 : 6;
+		u64 rxf_x2p1 : 1;
+		u64 rxf_x2p0 : 1;
+		u64 rxf_pmap : 1;
+		u64 rxf_mem2 : 1;
+		u64 rxf_mem1 : 1;
+		u64 rxf_mem0 : 1;
+		u64 reserved_36_51 : 16;
+		u64 rle7_dsk1 : 1;
+		u64 rle7_dsk0 : 1;
+		u64 rle6_dsk1 : 1;
+		u64 rle6_dsk0 : 1;
+		u64 rle5_dsk1 : 1;
+		u64 rle5_dsk0 : 1;
+		u64 rle4_dsk1 : 1;
+		u64 rle4_dsk0 : 1;
+		u64 rle3_dsk1 : 1;
+		u64 rle3_dsk0 : 1;
+		u64 rle2_dsk1 : 1;
+		u64 rle2_dsk0 : 1;
+		u64 rle1_dsk1 : 1;
+		u64 rle1_dsk0 : 1;
+		u64 rle0_dsk1 : 1;
+		u64 rle0_dsk0 : 1;
+		u64 reserved_18_19 : 2;
+		u64 rlk1_fwc : 1;
+		u64 rlk1_stat : 1;
+		u64 reserved_14_15 : 2;
+		u64 rlk0_fwc : 1;
+		u64 rlk0_stat : 1;
+		u64 reserved_11_11 : 1;
+		u64 tlk1_fwc : 1;
+		u64 tlk1_stat : 1;
+		u64 tlk1_txf2 : 1;
+		u64 tlk1_txf1 : 1;
+		u64 tlk1_txf0 : 1;
+		u64 reserved_5_5 : 1;
+		u64 tlk0_fwc : 1;
+		u64 tlk0_stat : 1;
+		u64 tlk0_txf2 : 1;
+		u64 tlk0_txf1 : 1;
+		u64 tlk0_txf0 : 1;
+	} cn68xxp1;
+	struct cvmx_ilk_bist_sum_cn78xx {
+		u64 rxf_x2p : 1;
+		u64 rxf_mem19 : 1;
+		u64 rxf_mem18 : 1;
+		u64 rxf_mem17 : 1;
+		u64 rxf_mem16 : 1;
+		u64 rxf_mem15 : 1;
+		u64 rxf_mem14 : 1;
+		u64 rxf_mem13 : 1;
+		u64 rxf_mem12 : 1;
+		u64 rxf_mem11 : 1;
+		u64 rxf_mem10 : 1;
+		u64 rxf_mem9 : 1;
+		u64 rxf_mem8 : 1;
+		u64 rxf_mem7 : 1;
+		u64 rxf_mem6 : 1;
+		u64 rxf_mem5 : 1;
+		u64 rxf_mem4 : 1;
+		u64 rxf_mem3 : 1;
+		u64 rxf_mem2 : 1;
+		u64 rxf_mem1 : 1;
+		u64 rxf_mem0 : 1;
+		u64 reserved_36_42 : 7;
+		u64 rle7_dsk1 : 1;
+		u64 rle7_dsk0 : 1;
+		u64 rle6_dsk1 : 1;
+		u64 rle6_dsk0 : 1;
+		u64 rle5_dsk1 : 1;
+		u64 rle5_dsk0 : 1;
+		u64 rle4_dsk1 : 1;
+		u64 rle4_dsk0 : 1;
+		u64 rle3_dsk1 : 1;
+		u64 rle3_dsk0 : 1;
+		u64 rle2_dsk1 : 1;
+		u64 rle2_dsk0 : 1;
+		u64 rle1_dsk1 : 1;
+		u64 rle1_dsk0 : 1;
+		u64 rle0_dsk1 : 1;
+		u64 rle0_dsk0 : 1;
+		u64 rlk1_pmap : 1;
+		u64 rlk1_stat : 1;
+		u64 rlk1_fwc : 1;
+		u64 rlk1_stat1 : 1;
+		u64 rlk0_pmap : 1;
+		u64 rlk0_stat1 : 1;
+		u64 rlk0_fwc : 1;
+		u64 rlk0_stat : 1;
+		u64 tlk1_stat1 : 1;
+		u64 tlk1_fwc : 1;
+		u64 tlk1_stat0 : 1;
+		u64 tlk1_txf2 : 1;
+		u64 tlk1_txf1 : 1;
+		u64 tlk1_txf0 : 1;
+		u64 tlk0_stat1 : 1;
+		u64 tlk0_fwc : 1;
+		u64 tlk0_stat0 : 1;
+		u64 tlk0_txf2 : 1;
+		u64 tlk0_txf1 : 1;
+		u64 tlk0_txf0 : 1;
+	} cn78xx;
+	struct cvmx_ilk_bist_sum_cn78xx cn78xxp1;
+};
+
+typedef union cvmx_ilk_bist_sum cvmx_ilk_bist_sum_t;
+
+/**
+ * cvmx_ilk_gbl_cfg
+ */
+union cvmx_ilk_gbl_cfg {
+	u64 u64;
+	struct cvmx_ilk_gbl_cfg_s {
+		u64 reserved_4_63 : 60;
+		u64 rid_rstdis : 1;
+		u64 reset : 1;
+		u64 cclk_dis : 1;
+		u64 rxf_xlink : 1;
+	} s;
+	struct cvmx_ilk_gbl_cfg_s cn68xx;
+	struct cvmx_ilk_gbl_cfg_cn68xxp1 {
+		u64 reserved_2_63 : 62;
+		u64 cclk_dis : 1;
+		u64 rxf_xlink : 1;
+	} cn68xxp1;
+	struct cvmx_ilk_gbl_cfg_s cn78xx;
+	struct cvmx_ilk_gbl_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_gbl_cfg cvmx_ilk_gbl_cfg_t;
+
+/**
+ * cvmx_ilk_gbl_err_cfg
+ */
+union cvmx_ilk_gbl_err_cfg {
+	u64 u64;
+	struct cvmx_ilk_gbl_err_cfg_s {
+		u64 reserved_20_63 : 44;
+		u64 rxf_flip : 2;
+		u64 x2p_flip : 2;
+		u64 reserved_2_15 : 14;
+		u64 rxf_cor_dis : 1;
+		u64 x2p_cor_dis : 1;
+	} s;
+	struct cvmx_ilk_gbl_err_cfg_s cn78xx;
+	struct cvmx_ilk_gbl_err_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_gbl_err_cfg cvmx_ilk_gbl_err_cfg_t;
+
+/**
+ * cvmx_ilk_gbl_int
+ */
+union cvmx_ilk_gbl_int {
+	u64 u64;
+	struct cvmx_ilk_gbl_int_s {
+		u64 reserved_9_63 : 55;
+		u64 x2p_dbe : 1;
+		u64 x2p_sbe : 1;
+		u64 rxf_dbe : 1;
+		u64 rxf_sbe : 1;
+		u64 rxf_push_full : 1;
+		u64 rxf_pop_empty : 1;
+		u64 rxf_ctl_perr : 1;
+		u64 rxf_lnk1_perr : 1;
+		u64 rxf_lnk0_perr : 1;
+	} s;
+	struct cvmx_ilk_gbl_int_cn68xx {
+		u64 reserved_5_63 : 59;
+		u64 rxf_push_full : 1;
+		u64 rxf_pop_empty : 1;
+		u64 rxf_ctl_perr : 1;
+		u64 rxf_lnk1_perr : 1;
+		u64 rxf_lnk0_perr : 1;
+	} cn68xx;
+	struct cvmx_ilk_gbl_int_cn68xx cn68xxp1;
+	struct cvmx_ilk_gbl_int_s cn78xx;
+	struct cvmx_ilk_gbl_int_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_gbl_int cvmx_ilk_gbl_int_t;
+
+/**
+ * cvmx_ilk_gbl_int_en
+ */
+union cvmx_ilk_gbl_int_en {
+	u64 u64;
+	struct cvmx_ilk_gbl_int_en_s {
+		u64 reserved_5_63 : 59;
+		u64 rxf_push_full : 1;
+		u64 rxf_pop_empty : 1;
+		u64 rxf_ctl_perr : 1;
+		u64 rxf_lnk1_perr : 1;
+		u64 rxf_lnk0_perr : 1;
+	} s;
+	struct cvmx_ilk_gbl_int_en_s cn68xx;
+	struct cvmx_ilk_gbl_int_en_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_gbl_int_en cvmx_ilk_gbl_int_en_t;
+
+/**
+ * cvmx_ilk_int_sum
+ */
+union cvmx_ilk_int_sum {
+	u64 u64;
+	struct cvmx_ilk_int_sum_s {
+		u64 reserved_13_63 : 51;
+		u64 rle7_int : 1;
+		u64 rle6_int : 1;
+		u64 rle5_int : 1;
+		u64 rle4_int : 1;
+		u64 rle3_int : 1;
+		u64 rle2_int : 1;
+		u64 rle1_int : 1;
+		u64 rle0_int : 1;
+		u64 rlk1_int : 1;
+		u64 rlk0_int : 1;
+		u64 tlk1_int : 1;
+		u64 tlk0_int : 1;
+		u64 gbl_int : 1;
+	} s;
+	struct cvmx_ilk_int_sum_s cn68xx;
+	struct cvmx_ilk_int_sum_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_int_sum cvmx_ilk_int_sum_t;
+
+/**
+ * cvmx_ilk_lne#_trn_ctl
+ */
+union cvmx_ilk_lnex_trn_ctl {
+	u64 u64;
+	struct cvmx_ilk_lnex_trn_ctl_s {
+		u64 reserved_4_63 : 60;
+		u64 trn_lock : 1;
+		u64 trn_done : 1;
+		u64 trn_ena : 1;
+		u64 eie_det : 1;
+	} s;
+	struct cvmx_ilk_lnex_trn_ctl_s cn78xx;
+	struct cvmx_ilk_lnex_trn_ctl_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lnex_trn_ctl cvmx_ilk_lnex_trn_ctl_t;
+
+/**
+ * cvmx_ilk_lne#_trn_ld
+ */
+union cvmx_ilk_lnex_trn_ld {
+	u64 u64;
+	struct cvmx_ilk_lnex_trn_ld_s {
+		u64 lp_manual : 1;
+		u64 reserved_49_62 : 14;
+		u64 ld_cu_val : 1;
+		u64 ld_cu_dat : 16;
+		u64 reserved_17_31 : 15;
+		u64 ld_sr_val : 1;
+		u64 ld_sr_dat : 16;
+	} s;
+	struct cvmx_ilk_lnex_trn_ld_s cn78xx;
+	struct cvmx_ilk_lnex_trn_ld_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lnex_trn_ld cvmx_ilk_lnex_trn_ld_t;
+
+/**
+ * cvmx_ilk_lne#_trn_lp
+ */
+union cvmx_ilk_lnex_trn_lp {
+	u64 u64;
+	struct cvmx_ilk_lnex_trn_lp_s {
+		u64 reserved_49_63 : 15;
+		u64 lp_cu_val : 1;
+		u64 lp_cu_dat : 16;
+		u64 reserved_17_31 : 15;
+		u64 lp_sr_val : 1;
+		u64 lp_sr_dat : 16;
+	} s;
+	struct cvmx_ilk_lnex_trn_lp_s cn78xx;
+	struct cvmx_ilk_lnex_trn_lp_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lnex_trn_lp cvmx_ilk_lnex_trn_lp_t;
+
+/**
+ * cvmx_ilk_lne_dbg
+ */
+union cvmx_ilk_lne_dbg {
+	u64 u64;
+	struct cvmx_ilk_lne_dbg_s {
+		u64 reserved_60_63 : 4;
+		u64 tx_bad_crc32 : 1;
+		u64 tx_bad_6467_cnt : 5;
+		u64 tx_bad_sync_cnt : 3;
+		u64 tx_bad_scram_cnt : 3;
+		u64 tx_bad_lane_sel : 16;
+		u64 tx_dis_dispr : 16;
+		u64 tx_dis_scram : 16;
+	} s;
+	struct cvmx_ilk_lne_dbg_cn68xx {
+		u64 reserved_60_63 : 4;
+		u64 tx_bad_crc32 : 1;
+		u64 tx_bad_6467_cnt : 5;
+		u64 tx_bad_sync_cnt : 3;
+		u64 tx_bad_scram_cnt : 3;
+		u64 reserved_40_47 : 8;
+		u64 tx_bad_lane_sel : 8;
+		u64 reserved_24_31 : 8;
+		u64 tx_dis_dispr : 8;
+		u64 reserved_8_15 : 8;
+		u64 tx_dis_scram : 8;
+	} cn68xx;
+	struct cvmx_ilk_lne_dbg_cn68xx cn68xxp1;
+	struct cvmx_ilk_lne_dbg_s cn78xx;
+	struct cvmx_ilk_lne_dbg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lne_dbg cvmx_ilk_lne_dbg_t;
+
+/**
+ * cvmx_ilk_lne_sts_msg
+ */
+union cvmx_ilk_lne_sts_msg {
+	u64 u64;
+	struct cvmx_ilk_lne_sts_msg_s {
+		u64 rx_lnk_stat : 16;
+		u64 rx_lne_stat : 16;
+		u64 tx_lnk_stat : 16;
+		u64 tx_lne_stat : 16;
+	} s;
+	struct cvmx_ilk_lne_sts_msg_cn68xx {
+		u64 reserved_56_63 : 8;
+		u64 rx_lnk_stat : 8;
+		u64 reserved_40_47 : 8;
+		u64 rx_lne_stat : 8;
+		u64 reserved_24_31 : 8;
+		u64 tx_lnk_stat : 8;
+		u64 reserved_8_15 : 8;
+		u64 tx_lne_stat : 8;
+	} cn68xx;
+	struct cvmx_ilk_lne_sts_msg_cn68xx cn68xxp1;
+	struct cvmx_ilk_lne_sts_msg_s cn78xx;
+	struct cvmx_ilk_lne_sts_msg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_lne_sts_msg cvmx_ilk_lne_sts_msg_t;
+
+/**
+ * cvmx_ilk_rid_cfg
+ */
+union cvmx_ilk_rid_cfg {
+	u64 u64;
+	struct cvmx_ilk_rid_cfg_s {
+		u64 reserved_39_63 : 25;
+		u64 max_cnt : 7;
+		u64 reserved_7_31 : 25;
+		u64 base : 7;
+	} s;
+	struct cvmx_ilk_rid_cfg_s cn78xx;
+	struct cvmx_ilk_rid_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rid_cfg cvmx_ilk_rid_cfg_t;
+
+/**
+ * cvmx_ilk_rx#_byte_cnt#
+ */
+union cvmx_ilk_rxx_byte_cntx {
+	u64 u64;
+	struct cvmx_ilk_rxx_byte_cntx_s {
+		u64 reserved_40_63 : 24;
+		u64 rx_bytes : 40;
+	} s;
+	struct cvmx_ilk_rxx_byte_cntx_s cn78xx;
+	struct cvmx_ilk_rxx_byte_cntx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_byte_cntx cvmx_ilk_rxx_byte_cntx_t;
+
+/**
+ * cvmx_ilk_rx#_cal_entry#
+ */
+union cvmx_ilk_rxx_cal_entryx {
+	u64 u64;
+	struct cvmx_ilk_rxx_cal_entryx_s {
+		u64 reserved_34_63 : 30;
+		u64 ctl : 2;
+		u64 reserved_8_31 : 24;
+		u64 channel : 8;
+	} s;
+	struct cvmx_ilk_rxx_cal_entryx_s cn78xx;
+	struct cvmx_ilk_rxx_cal_entryx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_cal_entryx cvmx_ilk_rxx_cal_entryx_t;
+
+/**
+ * cvmx_ilk_rx#_cfg0
+ */
+union cvmx_ilk_rxx_cfg0 {
+	u64 u64;
+	struct cvmx_ilk_rxx_cfg0_s {
+		u64 ext_lpbk_fc : 1;
+		u64 ext_lpbk : 1;
+		u64 reserved_60_61 : 2;
+		u64 lnk_stats_wrap : 1;
+		u64 bcw_push : 1;
+		u64 mproto_ign : 1;
+		u64 ptrn_mode : 1;
+		u64 lnk_stats_rdclr : 1;
+		u64 lnk_stats_ena : 1;
+		u64 mltuse_fc_ena : 1;
+		u64 cal_ena : 1;
+		u64 mfrm_len : 13;
+		u64 brst_shrt : 7;
+		u64 lane_rev : 1;
+		u64 brst_max : 5;
+		u64 reserved_25_25 : 1;
+		u64 cal_depth : 9;
+		u64 lane_ena : 16;
+	} s;
+	struct cvmx_ilk_rxx_cfg0_cn68xx {
+		u64 ext_lpbk_fc : 1;
+		u64 ext_lpbk : 1;
+		u64 reserved_60_61 : 2;
+		u64 lnk_stats_wrap : 1;
+		u64 bcw_push : 1;
+		u64 mproto_ign : 1;
+		u64 ptrn_mode : 1;
+		u64 lnk_stats_rdclr : 1;
+		u64 lnk_stats_ena : 1;
+		u64 mltuse_fc_ena : 1;
+		u64 cal_ena : 1;
+		u64 mfrm_len : 13;
+		u64 brst_shrt : 7;
+		u64 lane_rev : 1;
+		u64 brst_max : 5;
+		u64 reserved_25_25 : 1;
+		u64 cal_depth : 9;
+		u64 reserved_8_15 : 8;
+		u64 lane_ena : 8;
+	} cn68xx;
+	struct cvmx_ilk_rxx_cfg0_cn68xxp1 {
+		u64 ext_lpbk_fc : 1;
+		u64 ext_lpbk : 1;
+		u64 reserved_57_61 : 5;
+		u64 ptrn_mode : 1;
+		u64 lnk_stats_rdclr : 1;
+		u64 lnk_stats_ena : 1;
+		u64 mltuse_fc_ena : 1;
+		u64 cal_ena : 1;
+		u64 mfrm_len : 13;
+		u64 brst_shrt : 7;
+		u64 lane_rev : 1;
+		u64 brst_max : 5;
+		u64 reserved_25_25 : 1;
+		u64 cal_depth : 9;
+		u64 reserved_8_15 : 8;
+		u64 lane_ena : 8;
+	} cn68xxp1;
+	struct cvmx_ilk_rxx_cfg0_s cn78xx;
+	struct cvmx_ilk_rxx_cfg0_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_cfg0 cvmx_ilk_rxx_cfg0_t;
+
+/**
+ * cvmx_ilk_rx#_cfg1
+ */
+union cvmx_ilk_rxx_cfg1 {
+	u64 u64;
+	struct cvmx_ilk_rxx_cfg1_s {
+		u64 reserved_62_63 : 2;
+		u64 rx_fifo_cnt : 12;
+		u64 reserved_49_49 : 1;
+		u64 rx_fifo_hwm : 13;
+		u64 reserved_35_35 : 1;
+		u64 rx_fifo_max : 13;
+		u64 pkt_flush : 1;
+		u64 pkt_ena : 1;
+		u64 la_mode : 1;
+		u64 tx_link_fc : 1;
+		u64 rx_link_fc : 1;
+		u64 rx_align_ena : 1;
+		u64 rx_bdry_lock_ena : 16;
+	} s;
+	struct cvmx_ilk_rxx_cfg1_cn68xx {
+		u64 reserved_62_63 : 2;
+		u64 rx_fifo_cnt : 12;
+		u64 reserved_48_49 : 2;
+		u64 rx_fifo_hwm : 12;
+		u64 reserved_34_35 : 2;
+		u64 rx_fifo_max : 12;
+		u64 pkt_flush : 1;
+		u64 pkt_ena : 1;
+		u64 la_mode : 1;
+		u64 tx_link_fc : 1;
+		u64 rx_link_fc : 1;
+		u64 rx_align_ena : 1;
+		u64 reserved_8_15 : 8;
+		u64 rx_bdry_lock_ena : 8;
+	} cn68xx;
+	struct cvmx_ilk_rxx_cfg1_cn68xx cn68xxp1;
+	struct cvmx_ilk_rxx_cfg1_s cn78xx;
+	struct cvmx_ilk_rxx_cfg1_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_cfg1 cvmx_ilk_rxx_cfg1_t;
+
+/**
+ * cvmx_ilk_rx#_cha#
+ */
+union cvmx_ilk_rxx_chax {
+	u64 u64;
+	struct cvmx_ilk_rxx_chax_s {
+		u64 reserved_6_63 : 58;
+		u64 port_kind : 6;
+	} s;
+	struct cvmx_ilk_rxx_chax_s cn78xx;
+	struct cvmx_ilk_rxx_chax_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_chax cvmx_ilk_rxx_chax_t;
+
+/**
+ * cvmx_ilk_rx#_cha_xon#
+ */
+union cvmx_ilk_rxx_cha_xonx {
+	u64 u64;
+	struct cvmx_ilk_rxx_cha_xonx_s {
+		u64 xon : 64;
+	} s;
+	struct cvmx_ilk_rxx_cha_xonx_s cn78xx;
+	struct cvmx_ilk_rxx_cha_xonx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_cha_xonx cvmx_ilk_rxx_cha_xonx_t;
+
+/**
+ * cvmx_ilk_rx#_err_cfg
+ */
+union cvmx_ilk_rxx_err_cfg {
+	u64 u64;
+	struct cvmx_ilk_rxx_err_cfg_s {
+		u64 reserved_20_63 : 44;
+		u64 fwc_flip : 2;
+		u64 pmap_flip : 2;
+		u64 reserved_2_15 : 14;
+		u64 fwc_cor_dis : 1;
+		u64 pmap_cor_dis : 1;
+	} s;
+	struct cvmx_ilk_rxx_err_cfg_s cn78xx;
+	struct cvmx_ilk_rxx_err_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_err_cfg cvmx_ilk_rxx_err_cfg_t;
+
+/**
+ * cvmx_ilk_rx#_flow_ctl0
+ */
+union cvmx_ilk_rxx_flow_ctl0 {
+	u64 u64;
+	struct cvmx_ilk_rxx_flow_ctl0_s {
+		u64 status : 64;
+	} s;
+	struct cvmx_ilk_rxx_flow_ctl0_s cn68xx;
+	struct cvmx_ilk_rxx_flow_ctl0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_flow_ctl0 cvmx_ilk_rxx_flow_ctl0_t;
+
+/**
+ * cvmx_ilk_rx#_flow_ctl1
+ */
+union cvmx_ilk_rxx_flow_ctl1 {
+	u64 u64;
+	struct cvmx_ilk_rxx_flow_ctl1_s {
+		u64 status : 64;
+	} s;
+	struct cvmx_ilk_rxx_flow_ctl1_s cn68xx;
+	struct cvmx_ilk_rxx_flow_ctl1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_flow_ctl1 cvmx_ilk_rxx_flow_ctl1_t;
+
+/**
+ * cvmx_ilk_rx#_idx_cal
+ */
+union cvmx_ilk_rxx_idx_cal {
+	u64 u64;
+	struct cvmx_ilk_rxx_idx_cal_s {
+		u64 reserved_14_63 : 50;
+		u64 inc : 6;
+		u64 reserved_6_7 : 2;
+		u64 index : 6;
+	} s;
+	struct cvmx_ilk_rxx_idx_cal_s cn68xx;
+	struct cvmx_ilk_rxx_idx_cal_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_idx_cal cvmx_ilk_rxx_idx_cal_t;
+
+/**
+ * cvmx_ilk_rx#_idx_stat0
+ */
+union cvmx_ilk_rxx_idx_stat0 {
+	u64 u64;
+	struct cvmx_ilk_rxx_idx_stat0_s {
+		u64 reserved_32_63 : 32;
+		u64 clr : 1;
+		u64 reserved_24_30 : 7;
+		u64 inc : 8;
+		u64 reserved_8_15 : 8;
+		u64 index : 8;
+	} s;
+	struct cvmx_ilk_rxx_idx_stat0_s cn68xx;
+	struct cvmx_ilk_rxx_idx_stat0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_idx_stat0 cvmx_ilk_rxx_idx_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_idx_stat1
+ */
+union cvmx_ilk_rxx_idx_stat1 {
+	u64 u64;
+	struct cvmx_ilk_rxx_idx_stat1_s {
+		u64 reserved_32_63 : 32;
+		u64 clr : 1;
+		u64 reserved_24_30 : 7;
+		u64 inc : 8;
+		u64 reserved_8_15 : 8;
+		u64 index : 8;
+	} s;
+	struct cvmx_ilk_rxx_idx_stat1_s cn68xx;
+	struct cvmx_ilk_rxx_idx_stat1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_idx_stat1 cvmx_ilk_rxx_idx_stat1_t;
+
+/**
+ * cvmx_ilk_rx#_int
+ */
+union cvmx_ilk_rxx_int {
+	u64 u64;
+	struct cvmx_ilk_rxx_int_s {
+		u64 reserved_13_63 : 51;
+		u64 pmap_dbe : 1;
+		u64 pmap_sbe : 1;
+		u64 fwc_dbe : 1;
+		u64 fwc_sbe : 1;
+		u64 pkt_drop_sop : 1;
+		u64 pkt_drop_rid : 1;
+		u64 pkt_drop_rxf : 1;
+		u64 lane_bad_word : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 lane_align_done : 1;
+		u64 word_sync_done : 1;
+		u64 crc24_err : 1;
+		u64 lane_align_fail : 1;
+	} s;
+	struct cvmx_ilk_rxx_int_cn68xx {
+		u64 reserved_9_63 : 55;
+		u64 pkt_drop_sop : 1;
+		u64 pkt_drop_rid : 1;
+		u64 pkt_drop_rxf : 1;
+		u64 lane_bad_word : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 lane_align_done : 1;
+		u64 word_sync_done : 1;
+		u64 crc24_err : 1;
+		u64 lane_align_fail : 1;
+	} cn68xx;
+	struct cvmx_ilk_rxx_int_cn68xxp1 {
+		u64 reserved_8_63 : 56;
+		u64 pkt_drop_rid : 1;
+		u64 pkt_drop_rxf : 1;
+		u64 lane_bad_word : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 lane_align_done : 1;
+		u64 word_sync_done : 1;
+		u64 crc24_err : 1;
+		u64 lane_align_fail : 1;
+	} cn68xxp1;
+	struct cvmx_ilk_rxx_int_s cn78xx;
+	struct cvmx_ilk_rxx_int_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_int cvmx_ilk_rxx_int_t;
+
+/**
+ * cvmx_ilk_rx#_int_en
+ */
+union cvmx_ilk_rxx_int_en {
+	u64 u64;
+	struct cvmx_ilk_rxx_int_en_s {
+		u64 reserved_9_63 : 55;
+		u64 pkt_drop_sop : 1;
+		u64 pkt_drop_rid : 1;
+		u64 pkt_drop_rxf : 1;
+		u64 lane_bad_word : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 lane_align_done : 1;
+		u64 word_sync_done : 1;
+		u64 crc24_err : 1;
+		u64 lane_align_fail : 1;
+	} s;
+	struct cvmx_ilk_rxx_int_en_s cn68xx;
+	struct cvmx_ilk_rxx_int_en_cn68xxp1 {
+		u64 reserved_8_63 : 56;
+		u64 pkt_drop_rid : 1;
+		u64 pkt_drop_rxf : 1;
+		u64 lane_bad_word : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 lane_align_done : 1;
+		u64 word_sync_done : 1;
+		u64 crc24_err : 1;
+		u64 lane_align_fail : 1;
+	} cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_int_en cvmx_ilk_rxx_int_en_t;
+
+/**
+ * cvmx_ilk_rx#_jabber
+ */
+union cvmx_ilk_rxx_jabber {
+	u64 u64;
+	struct cvmx_ilk_rxx_jabber_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt : 16;
+	} s;
+	struct cvmx_ilk_rxx_jabber_s cn68xx;
+	struct cvmx_ilk_rxx_jabber_s cn68xxp1;
+	struct cvmx_ilk_rxx_jabber_s cn78xx;
+	struct cvmx_ilk_rxx_jabber_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_jabber cvmx_ilk_rxx_jabber_t;
+
+/**
+ * cvmx_ilk_rx#_mem_cal0
+ *
+ * Notes:
+ * Software must program the calendar table prior to enabling the
+ * link.
+ *
+ * Software must always write ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ *
+ * A given calendar table entry has no effect on PKO pipe
+ * backpressure when either:
+ *  - ENTRY_CTLx=Link (1), or
+ *  - ENTRY_CTLx=XON (3) and PORT_PIPEx is outside the range of ILK_TXx_PIPE[BASE/NUMP].
+ *
+ * Within the 8 calendar table entries of one IDX value, if more
+ * than one affects the same PKO pipe, XOFF always wins over XON,
+ * regardless of the calendar table order.
+ *
+ * Software must always read ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1.  Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ */
+union cvmx_ilk_rxx_mem_cal0 {
+	u64 u64;
+	struct cvmx_ilk_rxx_mem_cal0_s {
+		u64 reserved_36_63 : 28;
+		u64 entry_ctl3 : 2;
+		u64 port_pipe3 : 7;
+		u64 entry_ctl2 : 2;
+		u64 port_pipe2 : 7;
+		u64 entry_ctl1 : 2;
+		u64 port_pipe1 : 7;
+		u64 entry_ctl0 : 2;
+		u64 port_pipe0 : 7;
+	} s;
+	struct cvmx_ilk_rxx_mem_cal0_s cn68xx;
+	struct cvmx_ilk_rxx_mem_cal0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_mem_cal0 cvmx_ilk_rxx_mem_cal0_t;
+
+/**
+ * cvmx_ilk_rx#_mem_cal1
+ *
+ * Notes:
+ * Software must program the calendar table prior to enabling the
+ * link.
+ *
+ * Software must always write ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ *
+ * A given calendar table entry has no effect on PKO pipe
+ * backpressure when either:
+ *  - ENTRY_CTLx=Link (1), or
+ *  - ENTRY_CTLx=XON (3) and PORT_PIPEx is outside the range of ILK_TXx_PIPE[BASE/NUMP].
+ *
+ * Within the 8 calendar table entries of one IDX value, if more
+ * than one affects the same PKO pipe, XOFF always wins over XON,
+ * regardless of the calendar table order.
+ *
+ * Software must always read ILK_RXx_MEM_CAL0 then ILK_RXx_MEM_CAL1.  Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ */
+union cvmx_ilk_rxx_mem_cal1 {
+	u64 u64;
+	struct cvmx_ilk_rxx_mem_cal1_s {
+		u64 reserved_36_63 : 28;
+		u64 entry_ctl7 : 2;
+		u64 port_pipe7 : 7;
+		u64 entry_ctl6 : 2;
+		u64 port_pipe6 : 7;
+		u64 entry_ctl5 : 2;
+		u64 port_pipe5 : 7;
+		u64 entry_ctl4 : 2;
+		u64 port_pipe4 : 7;
+	} s;
+	struct cvmx_ilk_rxx_mem_cal1_s cn68xx;
+	struct cvmx_ilk_rxx_mem_cal1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_mem_cal1 cvmx_ilk_rxx_mem_cal1_t;
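+
+/*
+ * Editorial sketch, not part of the generated definitions: honouring the
+ * ordering rule in the notes above, a calendar update always writes CAL0
+ * first and CAL1 second, and never one without the other. csr_wr() is
+ * assumed to be the usual U-Boot Octeon CSR write helper, and the
+ * CVMX_ILK_RXX_MEM_CAL0/CAL1(interface) address macros are assumed from
+ * earlier in this header:
+ *
+ *	cvmx_ilk_rxx_mem_cal0_t cal0;
+ *	cvmx_ilk_rxx_mem_cal1_t cal1;
+ *
+ *	cal0.u64 = 0;
+ *	cal0.s.entry_ctl0 = 3;		(XON)
+ *	cal0.s.port_pipe0 = pipe;
+ *	cal1.u64 = 0;			(entries 4-7 unused)
+ *	csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), cal0.u64);
+ *	csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), cal1.u64);
+ */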
+
+/**
+ * cvmx_ilk_rx#_mem_stat0
+ */
+union cvmx_ilk_rxx_mem_stat0 {
+	u64 u64;
+	struct cvmx_ilk_rxx_mem_stat0_s {
+		u64 reserved_28_63 : 36;
+		u64 rx_pkt : 28;
+	} s;
+	struct cvmx_ilk_rxx_mem_stat0_s cn68xx;
+	struct cvmx_ilk_rxx_mem_stat0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_mem_stat0 cvmx_ilk_rxx_mem_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_mem_stat1
+ */
+union cvmx_ilk_rxx_mem_stat1 {
+	u64 u64;
+	struct cvmx_ilk_rxx_mem_stat1_s {
+		u64 reserved_36_63 : 28;
+		u64 rx_bytes : 36;
+	} s;
+	struct cvmx_ilk_rxx_mem_stat1_s cn68xx;
+	struct cvmx_ilk_rxx_mem_stat1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxx_mem_stat1 cvmx_ilk_rxx_mem_stat1_t;
+
+/**
+ * cvmx_ilk_rx#_pkt_cnt#
+ */
+union cvmx_ilk_rxx_pkt_cntx {
+	u64 u64;
+	struct cvmx_ilk_rxx_pkt_cntx_s {
+		u64 reserved_34_63 : 30;
+		u64 rx_pkt : 34;
+	} s;
+	struct cvmx_ilk_rxx_pkt_cntx_s cn78xx;
+	struct cvmx_ilk_rxx_pkt_cntx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_pkt_cntx cvmx_ilk_rxx_pkt_cntx_t;
+
+/**
+ * cvmx_ilk_rx#_rid
+ */
+union cvmx_ilk_rxx_rid {
+	u64 u64;
+	struct cvmx_ilk_rxx_rid_s {
+		u64 reserved_7_63 : 57;
+		u64 max_cnt : 7;
+	} s;
+	struct cvmx_ilk_rxx_rid_cn68xx {
+		u64 reserved_6_63 : 58;
+		u64 max_cnt : 6;
+	} cn68xx;
+	struct cvmx_ilk_rxx_rid_s cn78xx;
+	struct cvmx_ilk_rxx_rid_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_rid cvmx_ilk_rxx_rid_t;
+
+/**
+ * cvmx_ilk_rx#_stat0
+ */
+union cvmx_ilk_rxx_stat0 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat0_s {
+		u64 reserved_35_63 : 29;
+		u64 crc24_match_cnt : 35;
+	} s;
+	struct cvmx_ilk_rxx_stat0_cn68xx {
+		u64 reserved_33_63 : 31;
+		u64 crc24_match_cnt : 33;
+	} cn68xx;
+	struct cvmx_ilk_rxx_stat0_cn68xxp1 {
+		u64 reserved_27_63 : 37;
+		u64 crc24_match_cnt : 27;
+	} cn68xxp1;
+	struct cvmx_ilk_rxx_stat0_s cn78xx;
+	struct cvmx_ilk_rxx_stat0_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat0 cvmx_ilk_rxx_stat0_t;
+
+/**
+ * cvmx_ilk_rx#_stat1
+ */
+union cvmx_ilk_rxx_stat1 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat1_s {
+		u64 reserved_20_63 : 44;
+		u64 crc24_err_cnt : 20;
+	} s;
+	struct cvmx_ilk_rxx_stat1_cn68xx {
+		u64 reserved_18_63 : 46;
+		u64 crc24_err_cnt : 18;
+	} cn68xx;
+	struct cvmx_ilk_rxx_stat1_cn68xx cn68xxp1;
+	struct cvmx_ilk_rxx_stat1_s cn78xx;
+	struct cvmx_ilk_rxx_stat1_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat1 cvmx_ilk_rxx_stat1_t;
+
+/**
+ * cvmx_ilk_rx#_stat2
+ */
+union cvmx_ilk_rxx_stat2 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat2_s {
+		u64 reserved_50_63 : 14;
+		u64 brst_not_full_cnt : 18;
+		u64 reserved_30_31 : 2;
+		u64 brst_cnt : 30;
+	} s;
+	struct cvmx_ilk_rxx_stat2_cn68xx {
+		u64 reserved_48_63 : 16;
+		u64 brst_not_full_cnt : 16;
+		u64 reserved_28_31 : 4;
+		u64 brst_cnt : 28;
+	} cn68xx;
+	struct cvmx_ilk_rxx_stat2_cn68xxp1 {
+		u64 reserved_48_63 : 16;
+		u64 brst_not_full_cnt : 16;
+		u64 reserved_16_31 : 16;
+		u64 brst_cnt : 16;
+	} cn68xxp1;
+	struct cvmx_ilk_rxx_stat2_s cn78xx;
+	struct cvmx_ilk_rxx_stat2_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat2 cvmx_ilk_rxx_stat2_t;
+
+/**
+ * cvmx_ilk_rx#_stat3
+ */
+union cvmx_ilk_rxx_stat3 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat3_s {
+		u64 reserved_18_63 : 46;
+		u64 brst_max_err_cnt : 18;
+	} s;
+	struct cvmx_ilk_rxx_stat3_cn68xx {
+		u64 reserved_16_63 : 48;
+		u64 brst_max_err_cnt : 16;
+	} cn68xx;
+	struct cvmx_ilk_rxx_stat3_cn68xx cn68xxp1;
+	struct cvmx_ilk_rxx_stat3_s cn78xx;
+	struct cvmx_ilk_rxx_stat3_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat3 cvmx_ilk_rxx_stat3_t;
+
+/**
+ * cvmx_ilk_rx#_stat4
+ */
+union cvmx_ilk_rxx_stat4 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat4_s {
+		u64 reserved_18_63 : 46;
+		u64 brst_shrt_err_cnt : 18;
+	} s;
+	struct cvmx_ilk_rxx_stat4_cn68xx {
+		u64 reserved_16_63 : 48;
+		u64 brst_shrt_err_cnt : 16;
+	} cn68xx;
+	struct cvmx_ilk_rxx_stat4_cn68xx cn68xxp1;
+	struct cvmx_ilk_rxx_stat4_s cn78xx;
+	struct cvmx_ilk_rxx_stat4_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat4 cvmx_ilk_rxx_stat4_t;
+
+/**
+ * cvmx_ilk_rx#_stat5
+ */
+union cvmx_ilk_rxx_stat5 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat5_s {
+		u64 reserved_25_63 : 39;
+		u64 align_cnt : 25;
+	} s;
+	struct cvmx_ilk_rxx_stat5_cn68xx {
+		u64 reserved_23_63 : 41;
+		u64 align_cnt : 23;
+	} cn68xx;
+	struct cvmx_ilk_rxx_stat5_cn68xxp1 {
+		u64 reserved_16_63 : 48;
+		u64 align_cnt : 16;
+	} cn68xxp1;
+	struct cvmx_ilk_rxx_stat5_s cn78xx;
+	struct cvmx_ilk_rxx_stat5_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat5 cvmx_ilk_rxx_stat5_t;
+
+/**
+ * cvmx_ilk_rx#_stat6
+ */
+union cvmx_ilk_rxx_stat6 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat6_s {
+		u64 reserved_18_63 : 46;
+		u64 align_err_cnt : 18;
+	} s;
+	struct cvmx_ilk_rxx_stat6_cn68xx {
+		u64 reserved_16_63 : 48;
+		u64 align_err_cnt : 16;
+	} cn68xx;
+	struct cvmx_ilk_rxx_stat6_cn68xx cn68xxp1;
+	struct cvmx_ilk_rxx_stat6_s cn78xx;
+	struct cvmx_ilk_rxx_stat6_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat6 cvmx_ilk_rxx_stat6_t;
+
+/**
+ * cvmx_ilk_rx#_stat7
+ */
+union cvmx_ilk_rxx_stat7 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat7_s {
+		u64 reserved_18_63 : 46;
+		u64 bad_64b67b_cnt : 18;
+	} s;
+	struct cvmx_ilk_rxx_stat7_cn68xx {
+		u64 reserved_16_63 : 48;
+		u64 bad_64b67b_cnt : 16;
+	} cn68xx;
+	struct cvmx_ilk_rxx_stat7_cn68xx cn68xxp1;
+	struct cvmx_ilk_rxx_stat7_s cn78xx;
+	struct cvmx_ilk_rxx_stat7_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat7 cvmx_ilk_rxx_stat7_t;
+
+/**
+ * cvmx_ilk_rx#_stat8
+ */
+union cvmx_ilk_rxx_stat8 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat8_s {
+		u64 reserved_32_63 : 32;
+		u64 pkt_drop_rid_cnt : 16;
+		u64 pkt_drop_rxf_cnt : 16;
+	} s;
+	struct cvmx_ilk_rxx_stat8_s cn68xx;
+	struct cvmx_ilk_rxx_stat8_s cn68xxp1;
+	struct cvmx_ilk_rxx_stat8_s cn78xx;
+	struct cvmx_ilk_rxx_stat8_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat8 cvmx_ilk_rxx_stat8_t;
+
+/**
+ * cvmx_ilk_rx#_stat9
+ *
+ * This register is reserved.
+ *
+ */
+union cvmx_ilk_rxx_stat9 {
+	u64 u64;
+	struct cvmx_ilk_rxx_stat9_s {
+		u64 reserved_0_63 : 64;
+	} s;
+	struct cvmx_ilk_rxx_stat9_s cn68xx;
+	struct cvmx_ilk_rxx_stat9_s cn68xxp1;
+	struct cvmx_ilk_rxx_stat9_s cn78xx;
+	struct cvmx_ilk_rxx_stat9_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rxx_stat9 cvmx_ilk_rxx_stat9_t;
+
+/**
+ * cvmx_ilk_rx_lne#_cfg
+ */
+union cvmx_ilk_rx_lnex_cfg {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_cfg_s {
+		u64 reserved_9_63 : 55;
+		u64 rx_dis_psh_skip : 1;
+		u64 reserved_7_7 : 1;
+		u64 rx_dis_disp_chk : 1;
+		u64 rx_scrm_sync : 1;
+		u64 rx_bdry_sync : 1;
+		u64 rx_dis_ukwn : 1;
+		u64 rx_dis_scram : 1;
+		u64 stat_rdclr : 1;
+		u64 stat_ena : 1;
+	} s;
+	struct cvmx_ilk_rx_lnex_cfg_cn68xx {
+		u64 reserved_9_63 : 55;
+		u64 rx_dis_psh_skip : 1;
+		u64 reserved_6_7 : 2;
+		u64 rx_scrm_sync : 1;
+		u64 rx_bdry_sync : 1;
+		u64 rx_dis_ukwn : 1;
+		u64 rx_dis_scram : 1;
+		u64 stat_rdclr : 1;
+		u64 stat_ena : 1;
+	} cn68xx;
+	struct cvmx_ilk_rx_lnex_cfg_cn68xxp1 {
+		u64 reserved_5_63 : 59;
+		u64 rx_bdry_sync : 1;
+		u64 rx_dis_ukwn : 1;
+		u64 rx_dis_scram : 1;
+		u64 stat_rdclr : 1;
+		u64 stat_ena : 1;
+	} cn68xxp1;
+	struct cvmx_ilk_rx_lnex_cfg_s cn78xx;
+	struct cvmx_ilk_rx_lnex_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_cfg cvmx_ilk_rx_lnex_cfg_t;
+
+/**
+ * cvmx_ilk_rx_lne#_int
+ */
+union cvmx_ilk_rx_lnex_int {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_int_s {
+		u64 reserved_10_63 : 54;
+		u64 disp_err : 1;
+		u64 bad_64b67b : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 stat_msg : 1;
+		u64 dskew_fifo_ovfl : 1;
+		u64 scrm_sync_loss : 1;
+		u64 ukwn_cntl_word : 1;
+		u64 crc32_err : 1;
+		u64 bdry_sync_loss : 1;
+		u64 serdes_lock_loss : 1;
+	} s;
+	struct cvmx_ilk_rx_lnex_int_cn68xx {
+		u64 reserved_9_63 : 55;
+		u64 bad_64b67b : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 stat_msg : 1;
+		u64 dskew_fifo_ovfl : 1;
+		u64 scrm_sync_loss : 1;
+		u64 ukwn_cntl_word : 1;
+		u64 crc32_err : 1;
+		u64 bdry_sync_loss : 1;
+		u64 serdes_lock_loss : 1;
+	} cn68xx;
+	struct cvmx_ilk_rx_lnex_int_cn68xx cn68xxp1;
+	struct cvmx_ilk_rx_lnex_int_s cn78xx;
+	struct cvmx_ilk_rx_lnex_int_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_int cvmx_ilk_rx_lnex_int_t;
+
+/**
+ * cvmx_ilk_rx_lne#_int_en
+ */
+union cvmx_ilk_rx_lnex_int_en {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_int_en_s {
+		u64 reserved_9_63 : 55;
+		u64 bad_64b67b : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 stat_msg : 1;
+		u64 dskew_fifo_ovfl : 1;
+		u64 scrm_sync_loss : 1;
+		u64 ukwn_cntl_word : 1;
+		u64 crc32_err : 1;
+		u64 bdry_sync_loss : 1;
+		u64 serdes_lock_loss : 1;
+	} s;
+	struct cvmx_ilk_rx_lnex_int_en_s cn68xx;
+	struct cvmx_ilk_rx_lnex_int_en_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_int_en cvmx_ilk_rx_lnex_int_en_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat0
+ */
+union cvmx_ilk_rx_lnex_stat0 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat0_s {
+		u64 reserved_18_63 : 46;
+		u64 ser_lock_loss_cnt : 18;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat0_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat0_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat0_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat0_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat0 cvmx_ilk_rx_lnex_stat0_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat1
+ */
+union cvmx_ilk_rx_lnex_stat1 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat1_s {
+		u64 reserved_18_63 : 46;
+		u64 bdry_sync_loss_cnt : 18;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat1_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat1_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat1_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat1_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat1 cvmx_ilk_rx_lnex_stat1_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat10
+ */
+union cvmx_ilk_rx_lnex_stat10 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat10_s {
+		u64 reserved_43_63 : 21;
+		u64 prbs_bad : 11;
+		u64 reserved_11_31 : 21;
+		u64 prbs_good : 11;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat10_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat10_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat10 cvmx_ilk_rx_lnex_stat10_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat2
+ */
+union cvmx_ilk_rx_lnex_stat2 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat2_s {
+		u64 reserved_50_63 : 14;
+		u64 syncw_good_cnt : 18;
+		u64 reserved_18_31 : 14;
+		u64 syncw_bad_cnt : 18;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat2_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat2_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat2_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat2_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat2 cvmx_ilk_rx_lnex_stat2_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat3
+ */
+union cvmx_ilk_rx_lnex_stat3 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat3_s {
+		u64 reserved_18_63 : 46;
+		u64 bad_64b67b_cnt : 18;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat3_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat3_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat3_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat3_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat3 cvmx_ilk_rx_lnex_stat3_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat4
+ */
+union cvmx_ilk_rx_lnex_stat4 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat4_s {
+		u64 reserved_59_63 : 5;
+		u64 cntl_word_cnt : 27;
+		u64 reserved_27_31 : 5;
+		u64 data_word_cnt : 27;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat4_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat4_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat4_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat4_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat4 cvmx_ilk_rx_lnex_stat4_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat5
+ */
+union cvmx_ilk_rx_lnex_stat5 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat5_s {
+		u64 reserved_18_63 : 46;
+		u64 unkwn_word_cnt : 18;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat5_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat5_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat5_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat5_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat5 cvmx_ilk_rx_lnex_stat5_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat6
+ */
+union cvmx_ilk_rx_lnex_stat6 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat6_s {
+		u64 reserved_18_63 : 46;
+		u64 scrm_sync_loss_cnt : 18;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat6_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat6_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat6_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat6_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat6 cvmx_ilk_rx_lnex_stat6_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat7
+ */
+union cvmx_ilk_rx_lnex_stat7 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat7_s {
+		u64 reserved_18_63 : 46;
+		u64 scrm_match_cnt : 18;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat7_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat7_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat7_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat7_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat7 cvmx_ilk_rx_lnex_stat7_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat8
+ */
+union cvmx_ilk_rx_lnex_stat8 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat8_s {
+		u64 reserved_18_63 : 46;
+		u64 skipw_good_cnt : 18;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat8_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat8_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat8_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat8_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat8 cvmx_ilk_rx_lnex_stat8_t;
+
+/**
+ * cvmx_ilk_rx_lne#_stat9
+ */
+union cvmx_ilk_rx_lnex_stat9 {
+	u64 u64;
+	struct cvmx_ilk_rx_lnex_stat9_s {
+		u64 reserved_50_63 : 14;
+		u64 crc32_err_cnt : 18;
+		u64 reserved_27_31 : 5;
+		u64 crc32_match_cnt : 27;
+	} s;
+	struct cvmx_ilk_rx_lnex_stat9_s cn68xx;
+	struct cvmx_ilk_rx_lnex_stat9_s cn68xxp1;
+	struct cvmx_ilk_rx_lnex_stat9_s cn78xx;
+	struct cvmx_ilk_rx_lnex_stat9_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_rx_lnex_stat9 cvmx_ilk_rx_lnex_stat9_t;
+
+/**
+ * cvmx_ilk_rxf_idx_pmap
+ */
+union cvmx_ilk_rxf_idx_pmap {
+	u64 u64;
+	struct cvmx_ilk_rxf_idx_pmap_s {
+		u64 reserved_25_63 : 39;
+		u64 inc : 9;
+		u64 reserved_9_15 : 7;
+		u64 index : 9;
+	} s;
+	struct cvmx_ilk_rxf_idx_pmap_s cn68xx;
+	struct cvmx_ilk_rxf_idx_pmap_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxf_idx_pmap cvmx_ilk_rxf_idx_pmap_t;
+
+/**
+ * cvmx_ilk_rxf_mem_pmap
+ */
+union cvmx_ilk_rxf_mem_pmap {
+	u64 u64;
+	struct cvmx_ilk_rxf_mem_pmap_s {
+		u64 reserved_6_63 : 58;
+		u64 port_kind : 6;
+	} s;
+	struct cvmx_ilk_rxf_mem_pmap_s cn68xx;
+	struct cvmx_ilk_rxf_mem_pmap_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_rxf_mem_pmap cvmx_ilk_rxf_mem_pmap_t;
+
+/**
+ * cvmx_ilk_ser_cfg
+ */
+union cvmx_ilk_ser_cfg {
+	u64 u64;
+	struct cvmx_ilk_ser_cfg_s {
+		u64 reserved_57_63 : 7;
+		u64 ser_rxpol_auto : 1;
+		u64 ser_rxpol : 16;
+		u64 ser_txpol : 16;
+		u64 ser_reset_n : 16;
+		u64 ser_pwrup : 4;
+		u64 ser_haul : 4;
+	} s;
+	struct cvmx_ilk_ser_cfg_cn68xx {
+		u64 reserved_57_63 : 7;
+		u64 ser_rxpol_auto : 1;
+		u64 reserved_48_55 : 8;
+		u64 ser_rxpol : 8;
+		u64 reserved_32_39 : 8;
+		u64 ser_txpol : 8;
+		u64 reserved_16_23 : 8;
+		u64 ser_reset_n : 8;
+		u64 reserved_6_7 : 2;
+		u64 ser_pwrup : 2;
+		u64 reserved_2_3 : 2;
+		u64 ser_haul : 2;
+	} cn68xx;
+	struct cvmx_ilk_ser_cfg_cn68xx cn68xxp1;
+	struct cvmx_ilk_ser_cfg_s cn78xx;
+	struct cvmx_ilk_ser_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_ser_cfg cvmx_ilk_ser_cfg_t;
+
+/**
+ * cvmx_ilk_tx#_byte_cnt#
+ */
+union cvmx_ilk_txx_byte_cntx {
+	u64 u64;
+	struct cvmx_ilk_txx_byte_cntx_s {
+		u64 reserved_40_63 : 24;
+		u64 tx_bytes : 40;
+	} s;
+	struct cvmx_ilk_txx_byte_cntx_s cn78xx;
+	struct cvmx_ilk_txx_byte_cntx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_byte_cntx cvmx_ilk_txx_byte_cntx_t;
+
+/**
+ * cvmx_ilk_tx#_cal_entry#
+ */
+union cvmx_ilk_txx_cal_entryx {
+	u64 u64;
+	struct cvmx_ilk_txx_cal_entryx_s {
+		u64 reserved_34_63 : 30;
+		u64 ctl : 2;
+		u64 reserved_8_31 : 24;
+		u64 channel : 8;
+	} s;
+	struct cvmx_ilk_txx_cal_entryx_s cn78xx;
+	struct cvmx_ilk_txx_cal_entryx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_cal_entryx cvmx_ilk_txx_cal_entryx_t;
+
+/**
+ * cvmx_ilk_tx#_cfg0
+ */
+union cvmx_ilk_txx_cfg0 {
+	u64 u64;
+	struct cvmx_ilk_txx_cfg0_s {
+		u64 ext_lpbk_fc : 1;
+		u64 ext_lpbk : 1;
+		u64 int_lpbk : 1;
+		u64 txf_byp_dis : 1;
+		u64 reserved_57_59 : 3;
+		u64 ptrn_mode : 1;
+		u64 lnk_stats_rdclr : 1;
+		u64 lnk_stats_ena : 1;
+		u64 mltuse_fc_ena : 1;
+		u64 cal_ena : 1;
+		u64 mfrm_len : 13;
+		u64 brst_shrt : 7;
+		u64 lane_rev : 1;
+		u64 brst_max : 5;
+		u64 reserved_25_25 : 1;
+		u64 cal_depth : 9;
+		u64 lane_ena : 16;
+	} s;
+	struct cvmx_ilk_txx_cfg0_cn68xx {
+		u64 ext_lpbk_fc : 1;
+		u64 ext_lpbk : 1;
+		u64 int_lpbk : 1;
+		u64 reserved_57_60 : 4;
+		u64 ptrn_mode : 1;
+		u64 reserved_55_55 : 1;
+		u64 lnk_stats_ena : 1;
+		u64 mltuse_fc_ena : 1;
+		u64 cal_ena : 1;
+		u64 mfrm_len : 13;
+		u64 brst_shrt : 7;
+		u64 lane_rev : 1;
+		u64 brst_max : 5;
+		u64 reserved_25_25 : 1;
+		u64 cal_depth : 9;
+		u64 reserved_8_15 : 8;
+		u64 lane_ena : 8;
+	} cn68xx;
+	struct cvmx_ilk_txx_cfg0_cn68xx cn68xxp1;
+	struct cvmx_ilk_txx_cfg0_s cn78xx;
+	struct cvmx_ilk_txx_cfg0_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_cfg0 cvmx_ilk_txx_cfg0_t;
+
+/**
+ * cvmx_ilk_tx#_cfg1
+ */
+union cvmx_ilk_txx_cfg1 {
+	u64 u64;
+	struct cvmx_ilk_txx_cfg1_s {
+		u64 ser_low : 4;
+		u64 reserved_53_59 : 7;
+		u64 brst_min : 5;
+		u64 reserved_43_47 : 5;
+		u64 ser_limit : 10;
+		u64 pkt_busy : 1;
+		u64 pipe_crd_dis : 1;
+		u64 ptp_delay : 5;
+		u64 skip_cnt : 4;
+		u64 pkt_flush : 1;
+		u64 pkt_ena : 1;
+		u64 la_mode : 1;
+		u64 tx_link_fc : 1;
+		u64 rx_link_fc : 1;
+		u64 reserved_12_16 : 5;
+		u64 tx_link_fc_jam : 1;
+		u64 rx_link_fc_pkt : 1;
+		u64 rx_link_fc_ign : 1;
+		u64 rmatch : 1;
+		u64 tx_mltuse : 8;
+	} s;
+	struct cvmx_ilk_txx_cfg1_cn68xx {
+		u64 reserved_33_63 : 31;
+		u64 pkt_busy : 1;
+		u64 pipe_crd_dis : 1;
+		u64 ptp_delay : 5;
+		u64 skip_cnt : 4;
+		u64 pkt_flush : 1;
+		u64 pkt_ena : 1;
+		u64 la_mode : 1;
+		u64 tx_link_fc : 1;
+		u64 rx_link_fc : 1;
+		u64 reserved_12_16 : 5;
+		u64 tx_link_fc_jam : 1;
+		u64 rx_link_fc_pkt : 1;
+		u64 rx_link_fc_ign : 1;
+		u64 rmatch : 1;
+		u64 tx_mltuse : 8;
+	} cn68xx;
+	struct cvmx_ilk_txx_cfg1_cn68xxp1 {
+		u64 reserved_32_63 : 32;
+		u64 pipe_crd_dis : 1;
+		u64 ptp_delay : 5;
+		u64 skip_cnt : 4;
+		u64 pkt_flush : 1;
+		u64 pkt_ena : 1;
+		u64 la_mode : 1;
+		u64 tx_link_fc : 1;
+		u64 rx_link_fc : 1;
+		u64 reserved_12_16 : 5;
+		u64 tx_link_fc_jam : 1;
+		u64 rx_link_fc_pkt : 1;
+		u64 rx_link_fc_ign : 1;
+		u64 rmatch : 1;
+		u64 tx_mltuse : 8;
+	} cn68xxp1;
+	struct cvmx_ilk_txx_cfg1_s cn78xx;
+	struct cvmx_ilk_txx_cfg1_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_cfg1 cvmx_ilk_txx_cfg1_t;
+
+/**
+ * cvmx_ilk_tx#_cha_xon#
+ */
+union cvmx_ilk_txx_cha_xonx {
+	u64 u64;
+	struct cvmx_ilk_txx_cha_xonx_s {
+		u64 status : 64;
+	} s;
+	struct cvmx_ilk_txx_cha_xonx_s cn78xx;
+	struct cvmx_ilk_txx_cha_xonx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_cha_xonx cvmx_ilk_txx_cha_xonx_t;
+
+/**
+ * cvmx_ilk_tx#_dbg
+ */
+union cvmx_ilk_txx_dbg {
+	u64 u64;
+	struct cvmx_ilk_txx_dbg_s {
+		u64 reserved_29_63 : 35;
+		u64 data_rate : 13;
+		u64 low_delay : 6;
+		u64 reserved_3_9 : 7;
+		u64 tx_bad_crc24 : 1;
+		u64 tx_bad_ctlw2 : 1;
+		u64 tx_bad_ctlw1 : 1;
+	} s;
+	struct cvmx_ilk_txx_dbg_cn68xx {
+		u64 reserved_3_63 : 61;
+		u64 tx_bad_crc24 : 1;
+		u64 tx_bad_ctlw2 : 1;
+		u64 tx_bad_ctlw1 : 1;
+	} cn68xx;
+	struct cvmx_ilk_txx_dbg_cn68xx cn68xxp1;
+	struct cvmx_ilk_txx_dbg_s cn78xx;
+	struct cvmx_ilk_txx_dbg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_dbg cvmx_ilk_txx_dbg_t;
+
+/**
+ * cvmx_ilk_tx#_err_cfg
+ */
+union cvmx_ilk_txx_err_cfg {
+	u64 u64;
+	struct cvmx_ilk_txx_err_cfg_s {
+		u64 reserved_20_63 : 44;
+		u64 fwc_flip : 2;
+		u64 txf_flip : 2;
+		u64 reserved_2_15 : 14;
+		u64 fwc_cor_dis : 1;
+		u64 txf_cor_dis : 1;
+	} s;
+	struct cvmx_ilk_txx_err_cfg_s cn78xx;
+	struct cvmx_ilk_txx_err_cfg_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_err_cfg cvmx_ilk_txx_err_cfg_t;
+
+/**
+ * cvmx_ilk_tx#_flow_ctl0
+ */
+union cvmx_ilk_txx_flow_ctl0 {
+	u64 u64;
+	struct cvmx_ilk_txx_flow_ctl0_s {
+		u64 status : 64;
+	} s;
+	struct cvmx_ilk_txx_flow_ctl0_s cn68xx;
+	struct cvmx_ilk_txx_flow_ctl0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_flow_ctl0 cvmx_ilk_txx_flow_ctl0_t;
+
+/**
+ * cvmx_ilk_tx#_flow_ctl1
+ *
+ * Notes:
+ * Do not publish.
+ *
+ */
+union cvmx_ilk_txx_flow_ctl1 {
+	u64 u64;
+	struct cvmx_ilk_txx_flow_ctl1_s {
+		u64 reserved_0_63 : 64;
+	} s;
+	struct cvmx_ilk_txx_flow_ctl1_s cn68xx;
+	struct cvmx_ilk_txx_flow_ctl1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_flow_ctl1 cvmx_ilk_txx_flow_ctl1_t;
+
+/**
+ * cvmx_ilk_tx#_idx_cal
+ */
+union cvmx_ilk_txx_idx_cal {
+	u64 u64;
+	struct cvmx_ilk_txx_idx_cal_s {
+		u64 reserved_14_63 : 50;
+		u64 inc : 6;
+		u64 reserved_6_7 : 2;
+		u64 index : 6;
+	} s;
+	struct cvmx_ilk_txx_idx_cal_s cn68xx;
+	struct cvmx_ilk_txx_idx_cal_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_idx_cal cvmx_ilk_txx_idx_cal_t;
+
+/**
+ * cvmx_ilk_tx#_idx_pmap
+ */
+union cvmx_ilk_txx_idx_pmap {
+	u64 u64;
+	struct cvmx_ilk_txx_idx_pmap_s {
+		u64 reserved_23_63 : 41;
+		u64 inc : 7;
+		u64 reserved_7_15 : 9;
+		u64 index : 7;
+	} s;
+	struct cvmx_ilk_txx_idx_pmap_s cn68xx;
+	struct cvmx_ilk_txx_idx_pmap_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_idx_pmap cvmx_ilk_txx_idx_pmap_t;
+
+/**
+ * cvmx_ilk_tx#_idx_stat0
+ */
+union cvmx_ilk_txx_idx_stat0 {
+	u64 u64;
+	struct cvmx_ilk_txx_idx_stat0_s {
+		u64 reserved_32_63 : 32;
+		u64 clr : 1;
+		u64 reserved_24_30 : 7;
+		u64 inc : 8;
+		u64 reserved_8_15 : 8;
+		u64 index : 8;
+	} s;
+	struct cvmx_ilk_txx_idx_stat0_s cn68xx;
+	struct cvmx_ilk_txx_idx_stat0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_idx_stat0 cvmx_ilk_txx_idx_stat0_t;
+
+/**
+ * cvmx_ilk_tx#_idx_stat1
+ */
+union cvmx_ilk_txx_idx_stat1 {
+	u64 u64;
+	struct cvmx_ilk_txx_idx_stat1_s {
+		u64 reserved_32_63 : 32;
+		u64 clr : 1;
+		u64 reserved_24_30 : 7;
+		u64 inc : 8;
+		u64 reserved_8_15 : 8;
+		u64 index : 8;
+	} s;
+	struct cvmx_ilk_txx_idx_stat1_s cn68xx;
+	struct cvmx_ilk_txx_idx_stat1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_idx_stat1 cvmx_ilk_txx_idx_stat1_t;
+
+/**
+ * cvmx_ilk_tx#_int
+ */
+union cvmx_ilk_txx_int {
+	u64 u64;
+	struct cvmx_ilk_txx_int_s {
+		u64 reserved_8_63 : 56;
+		u64 fwc_dbe : 1;
+		u64 fwc_sbe : 1;
+		u64 txf_dbe : 1;
+		u64 txf_sbe : 1;
+		u64 stat_cnt_ovfl : 1;
+		u64 bad_pipe : 1;
+		u64 bad_seq : 1;
+		u64 txf_err : 1;
+	} s;
+	struct cvmx_ilk_txx_int_cn68xx {
+		u64 reserved_4_63 : 60;
+		u64 stat_cnt_ovfl : 1;
+		u64 bad_pipe : 1;
+		u64 bad_seq : 1;
+		u64 txf_err : 1;
+	} cn68xx;
+	struct cvmx_ilk_txx_int_cn68xx cn68xxp1;
+	struct cvmx_ilk_txx_int_s cn78xx;
+	struct cvmx_ilk_txx_int_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_int cvmx_ilk_txx_int_t;
+
+/**
+ * cvmx_ilk_tx#_int_en
+ */
+union cvmx_ilk_txx_int_en {
+	u64 u64;
+	struct cvmx_ilk_txx_int_en_s {
+		u64 reserved_4_63 : 60;
+		u64 stat_cnt_ovfl : 1;
+		u64 bad_pipe : 1;
+		u64 bad_seq : 1;
+		u64 txf_err : 1;
+	} s;
+	struct cvmx_ilk_txx_int_en_s cn68xx;
+	struct cvmx_ilk_txx_int_en_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_int_en cvmx_ilk_txx_int_en_t;
+
+/**
+ * cvmx_ilk_tx#_mem_cal0
+ *
+ * Notes:
+ * Software must always read ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.  Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ *
+ * Software must always write ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ */
+union cvmx_ilk_txx_mem_cal0 {
+	u64 u64;
+	struct cvmx_ilk_txx_mem_cal0_s {
+		u64 reserved_36_63 : 28;
+		u64 entry_ctl3 : 2;
+		u64 reserved_33_33 : 1;
+		u64 bpid3 : 6;
+		u64 entry_ctl2 : 2;
+		u64 reserved_24_24 : 1;
+		u64 bpid2 : 6;
+		u64 entry_ctl1 : 2;
+		u64 reserved_15_15 : 1;
+		u64 bpid1 : 6;
+		u64 entry_ctl0 : 2;
+		u64 reserved_6_6 : 1;
+		u64 bpid0 : 6;
+	} s;
+	struct cvmx_ilk_txx_mem_cal0_s cn68xx;
+	struct cvmx_ilk_txx_mem_cal0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_cal0 cvmx_ilk_txx_mem_cal0_t;
+
+/**
+ * cvmx_ilk_tx#_mem_cal1
+ *
+ * Notes:
+ * Software must always read ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.  Software
+ * must never read them in reverse order or read one without reading the
+ * other.
+ *
+ * Software must always write ILK_TXx_MEM_CAL0 then ILK_TXx_MEM_CAL1.
+ * Software must never write them in reverse order or write one without
+ * writing the other.
+ */
+union cvmx_ilk_txx_mem_cal1 {
+	u64 u64;
+	struct cvmx_ilk_txx_mem_cal1_s {
+		u64 reserved_36_63 : 28;
+		u64 entry_ctl7 : 2;
+		u64 reserved_33_33 : 1;
+		u64 bpid7 : 6;
+		u64 entry_ctl6 : 2;
+		u64 reserved_24_24 : 1;
+		u64 bpid6 : 6;
+		u64 entry_ctl5 : 2;
+		u64 reserved_15_15 : 1;
+		u64 bpid5 : 6;
+		u64 entry_ctl4 : 2;
+		u64 reserved_6_6 : 1;
+		u64 bpid4 : 6;
+	} s;
+	struct cvmx_ilk_txx_mem_cal1_s cn68xx;
+	struct cvmx_ilk_txx_mem_cal1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_cal1 cvmx_ilk_txx_mem_cal1_t;
+
+/**
+ * cvmx_ilk_tx#_mem_pmap
+ */
+union cvmx_ilk_txx_mem_pmap {
+	u64 u64;
+	struct cvmx_ilk_txx_mem_pmap_s {
+		u64 reserved_17_63 : 47;
+		u64 remap : 1;
+		u64 reserved_8_15 : 8;
+		u64 channel : 8;
+	} s;
+	struct cvmx_ilk_txx_mem_pmap_s cn68xx;
+	struct cvmx_ilk_txx_mem_pmap_cn68xxp1 {
+		u64 reserved_8_63 : 56;
+		u64 channel : 8;
+	} cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_pmap cvmx_ilk_txx_mem_pmap_t;
+
+/**
+ * cvmx_ilk_tx#_mem_stat0
+ */
+union cvmx_ilk_txx_mem_stat0 {
+	u64 u64;
+	struct cvmx_ilk_txx_mem_stat0_s {
+		u64 reserved_28_63 : 36;
+		u64 tx_pkt : 28;
+	} s;
+	struct cvmx_ilk_txx_mem_stat0_s cn68xx;
+	struct cvmx_ilk_txx_mem_stat0_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_stat0 cvmx_ilk_txx_mem_stat0_t;
+
+/**
+ * cvmx_ilk_tx#_mem_stat1
+ */
+union cvmx_ilk_txx_mem_stat1 {
+	u64 u64;
+	struct cvmx_ilk_txx_mem_stat1_s {
+		u64 reserved_36_63 : 28;
+		u64 tx_bytes : 36;
+	} s;
+	struct cvmx_ilk_txx_mem_stat1_s cn68xx;
+	struct cvmx_ilk_txx_mem_stat1_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_mem_stat1 cvmx_ilk_txx_mem_stat1_t;
+
+/**
+ * cvmx_ilk_tx#_pipe
+ */
+union cvmx_ilk_txx_pipe {
+	u64 u64;
+	struct cvmx_ilk_txx_pipe_s {
+		u64 reserved_24_63 : 40;
+		u64 nump : 8;
+		u64 reserved_7_15 : 9;
+		u64 base : 7;
+	} s;
+	struct cvmx_ilk_txx_pipe_s cn68xx;
+	struct cvmx_ilk_txx_pipe_s cn68xxp1;
+};
+
+typedef union cvmx_ilk_txx_pipe cvmx_ilk_txx_pipe_t;
+
+/**
+ * cvmx_ilk_tx#_pkt_cnt#
+ */
+union cvmx_ilk_txx_pkt_cntx {
+	u64 u64;
+	struct cvmx_ilk_txx_pkt_cntx_s {
+		u64 reserved_34_63 : 30;
+		u64 tx_pkt : 34;
+	} s;
+	struct cvmx_ilk_txx_pkt_cntx_s cn78xx;
+	struct cvmx_ilk_txx_pkt_cntx_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_pkt_cntx cvmx_ilk_txx_pkt_cntx_t;
+
+/**
+ * cvmx_ilk_tx#_rmatch
+ */
+union cvmx_ilk_txx_rmatch {
+	u64 u64;
+	struct cvmx_ilk_txx_rmatch_s {
+		u64 reserved_50_63 : 14;
+		u64 grnlrty : 2;
+		u64 brst_limit : 16;
+		u64 time_limit : 16;
+		u64 rate_limit : 16;
+	} s;
+	struct cvmx_ilk_txx_rmatch_s cn68xx;
+	struct cvmx_ilk_txx_rmatch_s cn68xxp1;
+	struct cvmx_ilk_txx_rmatch_s cn78xx;
+	struct cvmx_ilk_txx_rmatch_s cn78xxp1;
+};
+
+typedef union cvmx_ilk_txx_rmatch cvmx_ilk_txx_rmatch_t;
+
+#endif
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-iob-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-iob-defs.h
new file mode 100644
index 0000000..0af444d
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-iob-defs.h
@@ -0,0 +1,1328 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon iob.
+ */
+
+#ifndef __CVMX_IOB_DEFS_H__
+#define __CVMX_IOB_DEFS_H__
+
+#define CVMX_IOB_BIST_STATUS		(0x00011800F00007F8ull)
+#define CVMX_IOB_CHIP_CUR_PWR		(0x00011800F0000828ull)
+#define CVMX_IOB_CHIP_GLB_PWR_THROTTLE	(0x00011800F0000808ull)
+#define CVMX_IOB_CHIP_PWR_OUT		(0x00011800F0000818ull)
+#define CVMX_IOB_CTL_STATUS		(0x00011800F0000050ull)
+#define CVMX_IOB_DWB_PRI_CNT		(0x00011800F0000028ull)
+#define CVMX_IOB_FAU_TIMEOUT		(0x00011800F0000000ull)
+#define CVMX_IOB_I2C_PRI_CNT		(0x00011800F0000010ull)
+#define CVMX_IOB_INB_CONTROL_MATCH	(0x00011800F0000078ull)
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB	(0x00011800F0000088ull)
+#define CVMX_IOB_INB_DATA_MATCH		(0x00011800F0000070ull)
+#define CVMX_IOB_INB_DATA_MATCH_ENB	(0x00011800F0000080ull)
+#define CVMX_IOB_INT_ENB		(0x00011800F0000060ull)
+#define CVMX_IOB_INT_SUM		(0x00011800F0000058ull)
+#define CVMX_IOB_N2C_L2C_PRI_CNT	(0x00011800F0000020ull)
+#define CVMX_IOB_N2C_RSP_PRI_CNT	(0x00011800F0000008ull)
+#define CVMX_IOB_OUTB_COM_PRI_CNT	(0x00011800F0000040ull)
+#define CVMX_IOB_OUTB_CONTROL_MATCH	(0x00011800F0000098ull)
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB (0x00011800F00000A8ull)
+#define CVMX_IOB_OUTB_DATA_MATCH	(0x00011800F0000090ull)
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB	(0x00011800F00000A0ull)
+#define CVMX_IOB_OUTB_FPA_PRI_CNT	(0x00011800F0000048ull)
+#define CVMX_IOB_OUTB_REQ_PRI_CNT	(0x00011800F0000038ull)
+#define CVMX_IOB_P2C_REQ_PRI_CNT	(0x00011800F0000018ull)
+#define CVMX_IOB_PKT_ERR		(0x00011800F0000068ull)
+#define CVMX_IOB_PP_BIST_STATUS		(0x00011800F0000700ull)
+#define CVMX_IOB_TO_CMB_CREDITS		(0x00011800F00000B0ull)
+#define CVMX_IOB_TO_NCB_DID_00_CREDITS	(0x00011800F0000800ull)
+#define CVMX_IOB_TO_NCB_DID_111_CREDITS (0x00011800F0000B78ull)
+#define CVMX_IOB_TO_NCB_DID_223_CREDITS (0x00011800F0000EF8ull)
+#define CVMX_IOB_TO_NCB_DID_24_CREDITS	(0x00011800F00008C0ull)
+#define CVMX_IOB_TO_NCB_DID_32_CREDITS	(0x00011800F0000900ull)
+#define CVMX_IOB_TO_NCB_DID_40_CREDITS	(0x00011800F0000940ull)
+#define CVMX_IOB_TO_NCB_DID_55_CREDITS	(0x00011800F00009B8ull)
+#define CVMX_IOB_TO_NCB_DID_64_CREDITS	(0x00011800F0000A00ull)
+#define CVMX_IOB_TO_NCB_DID_79_CREDITS	(0x00011800F0000A78ull)
+#define CVMX_IOB_TO_NCB_DID_96_CREDITS	(0x00011800F0000B00ull)
+#define CVMX_IOB_TO_NCB_DID_98_CREDITS	(0x00011800F0000B10ull)
+
+/**
+ * cvmx_iob_bist_status
+ *
+ * The result of the BIST run on the IOB memories.
+ *
+ */
+union cvmx_iob_bist_status {
+	u64 u64;
+	struct cvmx_iob_bist_status_s {
+		u64 reserved_2_63 : 62;
+		u64 ibd : 1;
+		u64 icd : 1;
+	} s;
+	struct cvmx_iob_bist_status_cn30xx {
+		u64 reserved_18_63 : 46;
+		u64 icnrcb : 1;
+		u64 icr0 : 1;
+		u64 icr1 : 1;
+		u64 icnr1 : 1;
+		u64 icnr0 : 1;
+		u64 ibdr0 : 1;
+		u64 ibdr1 : 1;
+		u64 ibr0 : 1;
+		u64 ibr1 : 1;
+		u64 icnrt : 1;
+		u64 ibrq0 : 1;
+		u64 ibrq1 : 1;
+		u64 icrn0 : 1;
+		u64 icrn1 : 1;
+		u64 icrp0 : 1;
+		u64 icrp1 : 1;
+		u64 ibd : 1;
+		u64 icd : 1;
+	} cn30xx;
+	struct cvmx_iob_bist_status_cn30xx cn31xx;
+	struct cvmx_iob_bist_status_cn30xx cn38xx;
+	struct cvmx_iob_bist_status_cn30xx cn38xxp2;
+	struct cvmx_iob_bist_status_cn30xx cn50xx;
+	struct cvmx_iob_bist_status_cn30xx cn52xx;
+	struct cvmx_iob_bist_status_cn30xx cn52xxp1;
+	struct cvmx_iob_bist_status_cn30xx cn56xx;
+	struct cvmx_iob_bist_status_cn30xx cn56xxp1;
+	struct cvmx_iob_bist_status_cn30xx cn58xx;
+	struct cvmx_iob_bist_status_cn30xx cn58xxp1;
+	struct cvmx_iob_bist_status_cn61xx {
+		u64 reserved_23_63 : 41;
+		u64 xmdfif : 1;
+		u64 xmcfif : 1;
+		u64 iorfif : 1;
+		u64 rsdfif : 1;
+		u64 iocfif : 1;
+		u64 icnrcb : 1;
+		u64 icr0 : 1;
+		u64 icr1 : 1;
+		u64 icnr1 : 1;
+		u64 icnr0 : 1;
+		u64 ibdr0 : 1;
+		u64 ibdr1 : 1;
+		u64 ibr0 : 1;
+		u64 ibr1 : 1;
+		u64 icnrt : 1;
+		u64 ibrq0 : 1;
+		u64 ibrq1 : 1;
+		u64 icrn0 : 1;
+		u64 icrn1 : 1;
+		u64 icrp0 : 1;
+		u64 icrp1 : 1;
+		u64 ibd : 1;
+		u64 icd : 1;
+	} cn61xx;
+	struct cvmx_iob_bist_status_cn61xx cn63xx;
+	struct cvmx_iob_bist_status_cn61xx cn63xxp1;
+	struct cvmx_iob_bist_status_cn61xx cn66xx;
+	struct cvmx_iob_bist_status_cn68xx {
+		u64 reserved_18_63 : 46;
+		u64 xmdfif : 1;
+		u64 xmcfif : 1;
+		u64 iorfif : 1;
+		u64 rsdfif : 1;
+		u64 iocfif : 1;
+		u64 icnrcb : 1;
+		u64 icr0 : 1;
+		u64 icr1 : 1;
+		u64 icnr0 : 1;
+		u64 ibr0 : 1;
+		u64 ibr1 : 1;
+		u64 icnrt : 1;
+		u64 ibrq0 : 1;
+		u64 ibrq1 : 1;
+		u64 icrn0 : 1;
+		u64 icrn1 : 1;
+		u64 ibd : 1;
+		u64 icd : 1;
+	} cn68xx;
+	struct cvmx_iob_bist_status_cn68xx cn68xxp1;
+	struct cvmx_iob_bist_status_cn61xx cn70xx;
+	struct cvmx_iob_bist_status_cn61xx cn70xxp1;
+	struct cvmx_iob_bist_status_cn61xx cnf71xx;
+};
+
+typedef union cvmx_iob_bist_status cvmx_iob_bist_status_t;
+
+/**
+ * cvmx_iob_chip_cur_pwr
+ */
+union cvmx_iob_chip_cur_pwr {
+	u64 u64;
+	struct cvmx_iob_chip_cur_pwr_s {
+		u64 reserved_8_63 : 56;
+		u64 current_power_setting : 8;
+	} s;
+	struct cvmx_iob_chip_cur_pwr_s cn70xx;
+	struct cvmx_iob_chip_cur_pwr_s cn70xxp1;
+};
+
+typedef union cvmx_iob_chip_cur_pwr cvmx_iob_chip_cur_pwr_t;
+
+/**
+ * cvmx_iob_chip_glb_pwr_throttle
+ *
+ * Controls the min/max power settings.
+ *
+ */
+union cvmx_iob_chip_glb_pwr_throttle {
+	u64 u64;
+	struct cvmx_iob_chip_glb_pwr_throttle_s {
+		u64 reserved_34_63 : 30;
+		u64 pwr_bw : 2;
+		u64 pwr_max : 8;
+		u64 pwr_min : 8;
+		u64 pwr_setting : 16;
+	} s;
+	struct cvmx_iob_chip_glb_pwr_throttle_s cn70xx;
+	struct cvmx_iob_chip_glb_pwr_throttle_s cn70xxp1;
+};
+
+typedef union cvmx_iob_chip_glb_pwr_throttle cvmx_iob_chip_glb_pwr_throttle_t;
+
+/**
+ * cvmx_iob_chip_pwr_out
+ *
+ * Power numbers from the various partitions on the chip.
+ *
+ */
+union cvmx_iob_chip_pwr_out {
+	u64 u64;
+	struct cvmx_iob_chip_pwr_out_s {
+		u64 cpu_pwr : 16;
+		u64 chip_power : 16;
+		u64 coproc_power : 16;
+		u64 avg_chip_power : 16;
+	} s;
+	struct cvmx_iob_chip_pwr_out_s cn70xx;
+	struct cvmx_iob_chip_pwr_out_s cn70xxp1;
+};
+
+typedef union cvmx_iob_chip_pwr_out cvmx_iob_chip_pwr_out_t;
+
+/**
+ * cvmx_iob_ctl_status
+ *
+ * IOB Control Status = IOB Control and Status Register
+ * Provides control for IOB functions.
+ */
+union cvmx_iob_ctl_status {
+	u64 u64;
+	struct cvmx_iob_ctl_status_s {
+		u64 reserved_11_63 : 53;
+		u64 fif_dly : 1;
+		u64 xmc_per : 4;
+		u64 reserved_3_5 : 3;
+		u64 pko_enb : 1;
+		u64 dwb_enb : 1;
+		u64 fau_end : 1;
+	} s;
+	struct cvmx_iob_ctl_status_cn30xx {
+		u64 reserved_5_63 : 59;
+		u64 outb_mat : 1;
+		u64 inb_mat : 1;
+		u64 pko_enb : 1;
+		u64 dwb_enb : 1;
+		u64 fau_end : 1;
+	} cn30xx;
+	struct cvmx_iob_ctl_status_cn30xx cn31xx;
+	struct cvmx_iob_ctl_status_cn30xx cn38xx;
+	struct cvmx_iob_ctl_status_cn30xx cn38xxp2;
+	struct cvmx_iob_ctl_status_cn30xx cn50xx;
+	struct cvmx_iob_ctl_status_cn52xx {
+		u64 reserved_6_63 : 58;
+		u64 rr_mode : 1;
+		u64 outb_mat : 1;
+		u64 inb_mat : 1;
+		u64 pko_enb : 1;
+		u64 dwb_enb : 1;
+		u64 fau_end : 1;
+	} cn52xx;
+	struct cvmx_iob_ctl_status_cn30xx cn52xxp1;
+	struct cvmx_iob_ctl_status_cn30xx cn56xx;
+	struct cvmx_iob_ctl_status_cn30xx cn56xxp1;
+	struct cvmx_iob_ctl_status_cn30xx cn58xx;
+	struct cvmx_iob_ctl_status_cn30xx cn58xxp1;
+	struct cvmx_iob_ctl_status_cn61xx {
+		u64 reserved_11_63 : 53;
+		u64 fif_dly : 1;
+		u64 xmc_per : 4;
+		u64 rr_mode : 1;
+		u64 outb_mat : 1;
+		u64 inb_mat : 1;
+		u64 pko_enb : 1;
+		u64 dwb_enb : 1;
+		u64 fau_end : 1;
+	} cn61xx;
+	struct cvmx_iob_ctl_status_cn63xx {
+		u64 reserved_10_63 : 54;
+		u64 xmc_per : 4;
+		u64 rr_mode : 1;
+		u64 outb_mat : 1;
+		u64 inb_mat : 1;
+		u64 pko_enb : 1;
+		u64 dwb_enb : 1;
+		u64 fau_end : 1;
+	} cn63xx;
+	struct cvmx_iob_ctl_status_cn63xx cn63xxp1;
+	struct cvmx_iob_ctl_status_cn61xx cn66xx;
+	struct cvmx_iob_ctl_status_cn68xx {
+		u64 reserved_11_63 : 53;
+		u64 fif_dly : 1;
+		u64 xmc_per : 4;
+		u64 rsvr5 : 1;
+		u64 outb_mat : 1;
+		u64 inb_mat : 1;
+		u64 pko_enb : 1;
+		u64 dwb_enb : 1;
+		u64 fau_end : 1;
+	} cn68xx;
+	struct cvmx_iob_ctl_status_cn68xx cn68xxp1;
+	struct cvmx_iob_ctl_status_cn70xx {
+		u64 reserved_10_63 : 54;
+		u64 xmc_per : 4;
+		u64 rr_mode : 1;
+		u64 rsv4 : 1;
+		u64 rsv3 : 1;
+		u64 pko_enb : 1;
+		u64 dwb_enb : 1;
+		u64 fau_end : 1;
+	} cn70xx;
+	struct cvmx_iob_ctl_status_cn70xx cn70xxp1;
+	struct cvmx_iob_ctl_status_cn61xx cnf71xx;
+};
+
+typedef union cvmx_iob_ctl_status cvmx_iob_ctl_status_t;
+
+/**
+ * cvmx_iob_dwb_pri_cnt
+ *
+ * DWB To CMB Priority Counter = Don't Write Back to CMB Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of Don't Write Back request to
+ * the L2C.
+ */
+union cvmx_iob_dwb_pri_cnt {
+	u64 u64;
+	struct cvmx_iob_dwb_pri_cnt_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt_enb : 1;
+		u64 cnt_val : 15;
+	} s;
+	struct cvmx_iob_dwb_pri_cnt_s cn38xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_dwb_pri_cnt_s cn52xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_dwb_pri_cnt_s cn56xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_dwb_pri_cnt_s cn58xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn58xxp1;
+	struct cvmx_iob_dwb_pri_cnt_s cn61xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn63xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn63xxp1;
+	struct cvmx_iob_dwb_pri_cnt_s cn66xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn70xx;
+	struct cvmx_iob_dwb_pri_cnt_s cn70xxp1;
+	struct cvmx_iob_dwb_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_dwb_pri_cnt cvmx_iob_dwb_pri_cnt_t;
+
+/**
+ * cvmx_iob_fau_timeout
+ *
+ * FAU Timeout = Fetch and Add Unit Tag-Switch Timeout
+ * How many clock ticks the FAU unit will wait for a tag-switch before timing
+ * out, for Queue 0.
+ */
+union cvmx_iob_fau_timeout {
+	u64 u64;
+	struct cvmx_iob_fau_timeout_s {
+		u64 reserved_13_63 : 51;
+		u64 tout_enb : 1;
+		u64 tout_val : 12;
+	} s;
+	struct cvmx_iob_fau_timeout_s cn30xx;
+	struct cvmx_iob_fau_timeout_s cn31xx;
+	struct cvmx_iob_fau_timeout_s cn38xx;
+	struct cvmx_iob_fau_timeout_s cn38xxp2;
+	struct cvmx_iob_fau_timeout_s cn50xx;
+	struct cvmx_iob_fau_timeout_s cn52xx;
+	struct cvmx_iob_fau_timeout_s cn52xxp1;
+	struct cvmx_iob_fau_timeout_s cn56xx;
+	struct cvmx_iob_fau_timeout_s cn56xxp1;
+	struct cvmx_iob_fau_timeout_s cn58xx;
+	struct cvmx_iob_fau_timeout_s cn58xxp1;
+	struct cvmx_iob_fau_timeout_s cn61xx;
+	struct cvmx_iob_fau_timeout_s cn63xx;
+	struct cvmx_iob_fau_timeout_s cn63xxp1;
+	struct cvmx_iob_fau_timeout_s cn66xx;
+	struct cvmx_iob_fau_timeout_s cn68xx;
+	struct cvmx_iob_fau_timeout_s cn68xxp1;
+	struct cvmx_iob_fau_timeout_s cn70xx;
+	struct cvmx_iob_fau_timeout_s cn70xxp1;
+	struct cvmx_iob_fau_timeout_s cnf71xx;
+};
+
+typedef union cvmx_iob_fau_timeout cvmx_iob_fau_timeout_t;
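+
+/*
+ * Editorial sketch, not part of the generated definitions: a typical
+ * read-modify-write of the timeout above, using CVMX_IOB_FAU_TIMEOUT
+ * from the address list at the top of this file. csr_rd()/csr_wr() are
+ * assumed to be the usual U-Boot Octeon CSR helpers:
+ *
+ *	cvmx_iob_fau_timeout_t fau;
+ *
+ *	fau.u64 = csr_rd(CVMX_IOB_FAU_TIMEOUT);
+ *	fau.s.tout_enb = 1;
+ *	fau.s.tout_val = 0xfff;		(maximum 12-bit count)
+ *	csr_wr(CVMX_IOB_FAU_TIMEOUT, fau.u64);
+ */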
+
+/**
+ * cvmx_iob_i2c_pri_cnt
+ *
+ * IPD To CMB Store Priority Counter = IPD to CMB Store Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of IPD Store access to the
+ * CMB.
+ */
+union cvmx_iob_i2c_pri_cnt {
+	u64 u64;
+	struct cvmx_iob_i2c_pri_cnt_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt_enb : 1;
+		u64 cnt_val : 15;
+	} s;
+	struct cvmx_iob_i2c_pri_cnt_s cn38xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_i2c_pri_cnt_s cn52xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_i2c_pri_cnt_s cn56xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_i2c_pri_cnt_s cn58xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn58xxp1;
+	struct cvmx_iob_i2c_pri_cnt_s cn61xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn63xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn63xxp1;
+	struct cvmx_iob_i2c_pri_cnt_s cn66xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn70xx;
+	struct cvmx_iob_i2c_pri_cnt_s cn70xxp1;
+	struct cvmx_iob_i2c_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_i2c_pri_cnt cvmx_iob_i2c_pri_cnt_t;
+
+/**
+ * cvmx_iob_inb_control_match
+ *
+ * Match pattern for the inbound control to set the INB_MATCH_BIT.
+ *
+ */
+union cvmx_iob_inb_control_match {
+	u64 u64;
+	struct cvmx_iob_inb_control_match_s {
+		u64 reserved_29_63 : 35;
+		u64 mask : 8;
+		u64 opc : 4;
+		u64 dst : 9;
+		u64 src : 8;
+	} s;
+	struct cvmx_iob_inb_control_match_s cn30xx;
+	struct cvmx_iob_inb_control_match_s cn31xx;
+	struct cvmx_iob_inb_control_match_s cn38xx;
+	struct cvmx_iob_inb_control_match_s cn38xxp2;
+	struct cvmx_iob_inb_control_match_s cn50xx;
+	struct cvmx_iob_inb_control_match_s cn52xx;
+	struct cvmx_iob_inb_control_match_s cn52xxp1;
+	struct cvmx_iob_inb_control_match_s cn56xx;
+	struct cvmx_iob_inb_control_match_s cn56xxp1;
+	struct cvmx_iob_inb_control_match_s cn58xx;
+	struct cvmx_iob_inb_control_match_s cn58xxp1;
+	struct cvmx_iob_inb_control_match_s cn61xx;
+	struct cvmx_iob_inb_control_match_s cn63xx;
+	struct cvmx_iob_inb_control_match_s cn63xxp1;
+	struct cvmx_iob_inb_control_match_s cn66xx;
+	struct cvmx_iob_inb_control_match_s cn68xx;
+	struct cvmx_iob_inb_control_match_s cn68xxp1;
+	struct cvmx_iob_inb_control_match_s cn70xx;
+	struct cvmx_iob_inb_control_match_s cn70xxp1;
+	struct cvmx_iob_inb_control_match_s cnf71xx;
+};
+
+typedef union cvmx_iob_inb_control_match cvmx_iob_inb_control_match_t;
+
+/**
+ * cvmx_iob_inb_control_match_enb
+ *
+ * Enables the match of the corresponding bit in the IOB_INB_CONTROL_MATCH register.
+ *
+ */
+union cvmx_iob_inb_control_match_enb {
+	u64 u64;
+	struct cvmx_iob_inb_control_match_enb_s {
+		u64 reserved_29_63 : 35;
+		u64 mask : 8;
+		u64 opc : 4;
+		u64 dst : 9;
+		u64 src : 8;
+	} s;
+	struct cvmx_iob_inb_control_match_enb_s cn30xx;
+	struct cvmx_iob_inb_control_match_enb_s cn31xx;
+	struct cvmx_iob_inb_control_match_enb_s cn38xx;
+	struct cvmx_iob_inb_control_match_enb_s cn38xxp2;
+	struct cvmx_iob_inb_control_match_enb_s cn50xx;
+	struct cvmx_iob_inb_control_match_enb_s cn52xx;
+	struct cvmx_iob_inb_control_match_enb_s cn52xxp1;
+	struct cvmx_iob_inb_control_match_enb_s cn56xx;
+	struct cvmx_iob_inb_control_match_enb_s cn56xxp1;
+	struct cvmx_iob_inb_control_match_enb_s cn58xx;
+	struct cvmx_iob_inb_control_match_enb_s cn58xxp1;
+	struct cvmx_iob_inb_control_match_enb_s cn61xx;
+	struct cvmx_iob_inb_control_match_enb_s cn63xx;
+	struct cvmx_iob_inb_control_match_enb_s cn63xxp1;
+	struct cvmx_iob_inb_control_match_enb_s cn66xx;
+	struct cvmx_iob_inb_control_match_enb_s cn68xx;
+	struct cvmx_iob_inb_control_match_enb_s cn68xxp1;
+	struct cvmx_iob_inb_control_match_enb_s cn70xx;
+	struct cvmx_iob_inb_control_match_enb_s cn70xxp1;
+	struct cvmx_iob_inb_control_match_enb_s cnf71xx;
+};
+
+typedef union cvmx_iob_inb_control_match_enb cvmx_iob_inb_control_match_enb_t;
+
+/**
+ * cvmx_iob_inb_data_match
+ *
+ * Match pattern for the inbound data to set the INB_MATCH_BIT.
+ *
+ */
+union cvmx_iob_inb_data_match {
+	u64 u64;
+	struct cvmx_iob_inb_data_match_s {
+		u64 data : 64;
+	} s;
+	struct cvmx_iob_inb_data_match_s cn30xx;
+	struct cvmx_iob_inb_data_match_s cn31xx;
+	struct cvmx_iob_inb_data_match_s cn38xx;
+	struct cvmx_iob_inb_data_match_s cn38xxp2;
+	struct cvmx_iob_inb_data_match_s cn50xx;
+	struct cvmx_iob_inb_data_match_s cn52xx;
+	struct cvmx_iob_inb_data_match_s cn52xxp1;
+	struct cvmx_iob_inb_data_match_s cn56xx;
+	struct cvmx_iob_inb_data_match_s cn56xxp1;
+	struct cvmx_iob_inb_data_match_s cn58xx;
+	struct cvmx_iob_inb_data_match_s cn58xxp1;
+	struct cvmx_iob_inb_data_match_s cn61xx;
+	struct cvmx_iob_inb_data_match_s cn63xx;
+	struct cvmx_iob_inb_data_match_s cn63xxp1;
+	struct cvmx_iob_inb_data_match_s cn66xx;
+	struct cvmx_iob_inb_data_match_s cn68xx;
+	struct cvmx_iob_inb_data_match_s cn68xxp1;
+	struct cvmx_iob_inb_data_match_s cn70xx;
+	struct cvmx_iob_inb_data_match_s cn70xxp1;
+	struct cvmx_iob_inb_data_match_s cnf71xx;
+};
+
+typedef union cvmx_iob_inb_data_match cvmx_iob_inb_data_match_t;
+
+/**
+ * cvmx_iob_inb_data_match_enb
+ *
+ * Enables the match of the corresponding bit in the IOB_INB_DATA_MATCH register.
+ *
+ */
+union cvmx_iob_inb_data_match_enb {
+	u64 u64;
+	struct cvmx_iob_inb_data_match_enb_s {
+		u64 data : 64;
+	} s;
+	struct cvmx_iob_inb_data_match_enb_s cn30xx;
+	struct cvmx_iob_inb_data_match_enb_s cn31xx;
+	struct cvmx_iob_inb_data_match_enb_s cn38xx;
+	struct cvmx_iob_inb_data_match_enb_s cn38xxp2;
+	struct cvmx_iob_inb_data_match_enb_s cn50xx;
+	struct cvmx_iob_inb_data_match_enb_s cn52xx;
+	struct cvmx_iob_inb_data_match_enb_s cn52xxp1;
+	struct cvmx_iob_inb_data_match_enb_s cn56xx;
+	struct cvmx_iob_inb_data_match_enb_s cn56xxp1;
+	struct cvmx_iob_inb_data_match_enb_s cn58xx;
+	struct cvmx_iob_inb_data_match_enb_s cn58xxp1;
+	struct cvmx_iob_inb_data_match_enb_s cn61xx;
+	struct cvmx_iob_inb_data_match_enb_s cn63xx;
+	struct cvmx_iob_inb_data_match_enb_s cn63xxp1;
+	struct cvmx_iob_inb_data_match_enb_s cn66xx;
+	struct cvmx_iob_inb_data_match_enb_s cn68xx;
+	struct cvmx_iob_inb_data_match_enb_s cn68xxp1;
+	struct cvmx_iob_inb_data_match_enb_s cn70xx;
+	struct cvmx_iob_inb_data_match_enb_s cn70xxp1;
+	struct cvmx_iob_inb_data_match_enb_s cnf71xx;
+};
+
+typedef union cvmx_iob_inb_data_match_enb cvmx_iob_inb_data_match_enb_t;
+
+/**
+ * cvmx_iob_int_enb
+ *
+ * The IOB's interrupt enable register.
+ *
+ */
+union cvmx_iob_int_enb {
+	u64 u64;
+	struct cvmx_iob_int_enb_s {
+		u64 reserved_8_63 : 56;
+		u64 outb_mat : 1;
+		u64 inb_mat : 1;
+		u64 p_dat : 1;
+		u64 np_dat : 1;
+		u64 p_eop : 1;
+		u64 p_sop : 1;
+		u64 np_eop : 1;
+		u64 np_sop : 1;
+	} s;
+	struct cvmx_iob_int_enb_cn30xx {
+		u64 reserved_4_63 : 60;
+		u64 p_eop : 1;
+		u64 p_sop : 1;
+		u64 np_eop : 1;
+		u64 np_sop : 1;
+	} cn30xx;
+	struct cvmx_iob_int_enb_cn30xx cn31xx;
+	struct cvmx_iob_int_enb_cn30xx cn38xx;
+	struct cvmx_iob_int_enb_cn30xx cn38xxp2;
+	struct cvmx_iob_int_enb_cn50xx {
+		u64 reserved_6_63 : 58;
+		u64 p_dat : 1;
+		u64 np_dat : 1;
+		u64 p_eop : 1;
+		u64 p_sop : 1;
+		u64 np_eop : 1;
+		u64 np_sop : 1;
+	} cn50xx;
+	struct cvmx_iob_int_enb_cn50xx cn52xx;
+	struct cvmx_iob_int_enb_cn50xx cn52xxp1;
+	struct cvmx_iob_int_enb_cn50xx cn56xx;
+	struct cvmx_iob_int_enb_cn50xx cn56xxp1;
+	struct cvmx_iob_int_enb_cn50xx cn58xx;
+	struct cvmx_iob_int_enb_cn50xx cn58xxp1;
+	struct cvmx_iob_int_enb_cn50xx cn61xx;
+	struct cvmx_iob_int_enb_cn50xx cn63xx;
+	struct cvmx_iob_int_enb_cn50xx cn63xxp1;
+	struct cvmx_iob_int_enb_cn50xx cn66xx;
+	struct cvmx_iob_int_enb_cn68xx {
+		u64 reserved_0_63 : 64;
+	} cn68xx;
+	struct cvmx_iob_int_enb_cn68xx cn68xxp1;
+	struct cvmx_iob_int_enb_s cn70xx;
+	struct cvmx_iob_int_enb_s cn70xxp1;
+	struct cvmx_iob_int_enb_cn50xx cnf71xx;
+};
+
+typedef union cvmx_iob_int_enb cvmx_iob_int_enb_t;
+
+/**
+ * cvmx_iob_int_sum
+ *
+ * Contains the different interrupt summary bits of the IOB.
+ *
+ */
+union cvmx_iob_int_sum {
+	u64 u64;
+	struct cvmx_iob_int_sum_s {
+		u64 reserved_8_63 : 56;
+		u64 outb_mat : 1;
+		u64 inb_mat : 1;
+		u64 p_dat : 1;
+		u64 np_dat : 1;
+		u64 p_eop : 1;
+		u64 p_sop : 1;
+		u64 np_eop : 1;
+		u64 np_sop : 1;
+	} s;
+	struct cvmx_iob_int_sum_cn30xx {
+		u64 reserved_4_63 : 60;
+		u64 p_eop : 1;
+		u64 p_sop : 1;
+		u64 np_eop : 1;
+		u64 np_sop : 1;
+	} cn30xx;
+	struct cvmx_iob_int_sum_cn30xx cn31xx;
+	struct cvmx_iob_int_sum_cn30xx cn38xx;
+	struct cvmx_iob_int_sum_cn30xx cn38xxp2;
+	struct cvmx_iob_int_sum_cn50xx {
+		u64 reserved_6_63 : 58;
+		u64 p_dat : 1;
+		u64 np_dat : 1;
+		u64 p_eop : 1;
+		u64 p_sop : 1;
+		u64 np_eop : 1;
+		u64 np_sop : 1;
+	} cn50xx;
+	struct cvmx_iob_int_sum_cn50xx cn52xx;
+	struct cvmx_iob_int_sum_cn50xx cn52xxp1;
+	struct cvmx_iob_int_sum_cn50xx cn56xx;
+	struct cvmx_iob_int_sum_cn50xx cn56xxp1;
+	struct cvmx_iob_int_sum_cn50xx cn58xx;
+	struct cvmx_iob_int_sum_cn50xx cn58xxp1;
+	struct cvmx_iob_int_sum_cn50xx cn61xx;
+	struct cvmx_iob_int_sum_cn50xx cn63xx;
+	struct cvmx_iob_int_sum_cn50xx cn63xxp1;
+	struct cvmx_iob_int_sum_cn50xx cn66xx;
+	struct cvmx_iob_int_sum_cn68xx {
+		u64 reserved_0_63 : 64;
+	} cn68xx;
+	struct cvmx_iob_int_sum_cn68xx cn68xxp1;
+	struct cvmx_iob_int_sum_s cn70xx;
+	struct cvmx_iob_int_sum_s cn70xxp1;
+	struct cvmx_iob_int_sum_cn50xx cnf71xx;
+};
+
+typedef union cvmx_iob_int_sum cvmx_iob_int_sum_t;
+
+/**
+ * cvmx_iob_n2c_l2c_pri_cnt
+ *
+ * NCB To CMB L2C Priority Counter = NCB to CMB L2C Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of NCB Store/Load access to
+ * the CMB.
+ */
+union cvmx_iob_n2c_l2c_pri_cnt {
+	u64 u64;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt_enb : 1;
+		u64 cnt_val : 15;
+	} s;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn61xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xxp1;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn66xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn70xx;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cn70xxp1;
+	struct cvmx_iob_n2c_l2c_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_n2c_l2c_pri_cnt cvmx_iob_n2c_l2c_pri_cnt_t;
+
+/**
+ * cvmx_iob_n2c_rsp_pri_cnt
+ *
+ * NCB To CMB Response Priority Counter = NCB to CMB Response Priority Counter Enable and Timer
+ * Value
+ * Enables and supplies the timeout count for raising the priority of NCB Responses access to the
+ * CMB.
+ */
+union cvmx_iob_n2c_rsp_pri_cnt {
+	u64 u64;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt_enb : 1;
+		u64 cnt_val : 15;
+	} s;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn61xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xxp1;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn66xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn70xx;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cn70xxp1;
+	struct cvmx_iob_n2c_rsp_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_n2c_rsp_pri_cnt cvmx_iob_n2c_rsp_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_com_pri_cnt
+ *
+ * Commit To NCB Priority Counter = Commit to NCB Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of Commit request to the
+ * Outbound NCB.
+ */
+union cvmx_iob_outb_com_pri_cnt {
+	u64 u64;
+	struct cvmx_iob_outb_com_pri_cnt_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt_enb : 1;
+		u64 cnt_val : 15;
+	} s;
+	struct cvmx_iob_outb_com_pri_cnt_s cn38xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_outb_com_pri_cnt_s cn52xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_outb_com_pri_cnt_s cn56xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_outb_com_pri_cnt_s cn58xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1;
+	struct cvmx_iob_outb_com_pri_cnt_s cn61xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn63xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn63xxp1;
+	struct cvmx_iob_outb_com_pri_cnt_s cn66xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn68xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn68xxp1;
+	struct cvmx_iob_outb_com_pri_cnt_s cn70xx;
+	struct cvmx_iob_outb_com_pri_cnt_s cn70xxp1;
+	struct cvmx_iob_outb_com_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_com_pri_cnt cvmx_iob_outb_com_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_control_match
+ *
+ * Match pattern for the outbound control to set the OUTB_MATCH_BIT.
+ *
+ */
+union cvmx_iob_outb_control_match {
+	u64 u64;
+	struct cvmx_iob_outb_control_match_s {
+		u64 reserved_26_63 : 38;
+		u64 mask : 8;
+		u64 eot : 1;
+		u64 dst : 8;
+		u64 src : 9;
+	} s;
+	struct cvmx_iob_outb_control_match_s cn30xx;
+	struct cvmx_iob_outb_control_match_s cn31xx;
+	struct cvmx_iob_outb_control_match_s cn38xx;
+	struct cvmx_iob_outb_control_match_s cn38xxp2;
+	struct cvmx_iob_outb_control_match_s cn50xx;
+	struct cvmx_iob_outb_control_match_s cn52xx;
+	struct cvmx_iob_outb_control_match_s cn52xxp1;
+	struct cvmx_iob_outb_control_match_s cn56xx;
+	struct cvmx_iob_outb_control_match_s cn56xxp1;
+	struct cvmx_iob_outb_control_match_s cn58xx;
+	struct cvmx_iob_outb_control_match_s cn58xxp1;
+	struct cvmx_iob_outb_control_match_s cn61xx;
+	struct cvmx_iob_outb_control_match_s cn63xx;
+	struct cvmx_iob_outb_control_match_s cn63xxp1;
+	struct cvmx_iob_outb_control_match_s cn66xx;
+	struct cvmx_iob_outb_control_match_s cn68xx;
+	struct cvmx_iob_outb_control_match_s cn68xxp1;
+	struct cvmx_iob_outb_control_match_s cn70xx;
+	struct cvmx_iob_outb_control_match_s cn70xxp1;
+	struct cvmx_iob_outb_control_match_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_control_match cvmx_iob_outb_control_match_t;
+
+/**
+ * cvmx_iob_outb_control_match_enb
+ *
+ * Enables the match of the corresponding bit in the IOB_OUTB_CONTROL_MATCH register.
+ *
+ */
+union cvmx_iob_outb_control_match_enb {
+	u64 u64;
+	struct cvmx_iob_outb_control_match_enb_s {
+		u64 reserved_26_63 : 38;
+		u64 mask : 8;
+		u64 eot : 1;
+		u64 dst : 8;
+		u64 src : 9;
+	} s;
+	struct cvmx_iob_outb_control_match_enb_s cn30xx;
+	struct cvmx_iob_outb_control_match_enb_s cn31xx;
+	struct cvmx_iob_outb_control_match_enb_s cn38xx;
+	struct cvmx_iob_outb_control_match_enb_s cn38xxp2;
+	struct cvmx_iob_outb_control_match_enb_s cn50xx;
+	struct cvmx_iob_outb_control_match_enb_s cn52xx;
+	struct cvmx_iob_outb_control_match_enb_s cn52xxp1;
+	struct cvmx_iob_outb_control_match_enb_s cn56xx;
+	struct cvmx_iob_outb_control_match_enb_s cn56xxp1;
+	struct cvmx_iob_outb_control_match_enb_s cn58xx;
+	struct cvmx_iob_outb_control_match_enb_s cn58xxp1;
+	struct cvmx_iob_outb_control_match_enb_s cn61xx;
+	struct cvmx_iob_outb_control_match_enb_s cn63xx;
+	struct cvmx_iob_outb_control_match_enb_s cn63xxp1;
+	struct cvmx_iob_outb_control_match_enb_s cn66xx;
+	struct cvmx_iob_outb_control_match_enb_s cn68xx;
+	struct cvmx_iob_outb_control_match_enb_s cn68xxp1;
+	struct cvmx_iob_outb_control_match_enb_s cn70xx;
+	struct cvmx_iob_outb_control_match_enb_s cn70xxp1;
+	struct cvmx_iob_outb_control_match_enb_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_control_match_enb cvmx_iob_outb_control_match_enb_t;
+
+/**
+ * cvmx_iob_outb_data_match
+ *
+ * Match pattern for the outbound data to set the OUTB_MATCH_BIT.
+ *
+ */
+union cvmx_iob_outb_data_match {
+	u64 u64;
+	struct cvmx_iob_outb_data_match_s {
+		u64 data : 64;
+	} s;
+	struct cvmx_iob_outb_data_match_s cn30xx;
+	struct cvmx_iob_outb_data_match_s cn31xx;
+	struct cvmx_iob_outb_data_match_s cn38xx;
+	struct cvmx_iob_outb_data_match_s cn38xxp2;
+	struct cvmx_iob_outb_data_match_s cn50xx;
+	struct cvmx_iob_outb_data_match_s cn52xx;
+	struct cvmx_iob_outb_data_match_s cn52xxp1;
+	struct cvmx_iob_outb_data_match_s cn56xx;
+	struct cvmx_iob_outb_data_match_s cn56xxp1;
+	struct cvmx_iob_outb_data_match_s cn58xx;
+	struct cvmx_iob_outb_data_match_s cn58xxp1;
+	struct cvmx_iob_outb_data_match_s cn61xx;
+	struct cvmx_iob_outb_data_match_s cn63xx;
+	struct cvmx_iob_outb_data_match_s cn63xxp1;
+	struct cvmx_iob_outb_data_match_s cn66xx;
+	struct cvmx_iob_outb_data_match_s cn68xx;
+	struct cvmx_iob_outb_data_match_s cn68xxp1;
+	struct cvmx_iob_outb_data_match_s cn70xx;
+	struct cvmx_iob_outb_data_match_s cn70xxp1;
+	struct cvmx_iob_outb_data_match_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_data_match cvmx_iob_outb_data_match_t;
+
+/**
+ * cvmx_iob_outb_data_match_enb
+ *
+ * Enables the match of the corresponding bit in the IOB_OUTB_DATA_MATCH register.
+ *
+ */
+union cvmx_iob_outb_data_match_enb {
+	u64 u64;
+	struct cvmx_iob_outb_data_match_enb_s {
+		u64 data : 64;
+	} s;
+	struct cvmx_iob_outb_data_match_enb_s cn30xx;
+	struct cvmx_iob_outb_data_match_enb_s cn31xx;
+	struct cvmx_iob_outb_data_match_enb_s cn38xx;
+	struct cvmx_iob_outb_data_match_enb_s cn38xxp2;
+	struct cvmx_iob_outb_data_match_enb_s cn50xx;
+	struct cvmx_iob_outb_data_match_enb_s cn52xx;
+	struct cvmx_iob_outb_data_match_enb_s cn52xxp1;
+	struct cvmx_iob_outb_data_match_enb_s cn56xx;
+	struct cvmx_iob_outb_data_match_enb_s cn56xxp1;
+	struct cvmx_iob_outb_data_match_enb_s cn58xx;
+	struct cvmx_iob_outb_data_match_enb_s cn58xxp1;
+	struct cvmx_iob_outb_data_match_enb_s cn61xx;
+	struct cvmx_iob_outb_data_match_enb_s cn63xx;
+	struct cvmx_iob_outb_data_match_enb_s cn63xxp1;
+	struct cvmx_iob_outb_data_match_enb_s cn66xx;
+	struct cvmx_iob_outb_data_match_enb_s cn68xx;
+	struct cvmx_iob_outb_data_match_enb_s cn68xxp1;
+	struct cvmx_iob_outb_data_match_enb_s cn70xx;
+	struct cvmx_iob_outb_data_match_enb_s cn70xxp1;
+	struct cvmx_iob_outb_data_match_enb_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_data_match_enb cvmx_iob_outb_data_match_enb_t;
+
+/**
+ * cvmx_iob_outb_fpa_pri_cnt
+ *
+ * FPA To NCB Priority Counter = FPA Returns to NCB Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of FPA Return Page requests to
+ * the Outbound NCB.
+ */
+union cvmx_iob_outb_fpa_pri_cnt {
+	u64 u64;
+	struct cvmx_iob_outb_fpa_pri_cnt_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt_enb : 1;
+		u64 cnt_val : 15;
+	} s;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn38xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn52xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn56xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn61xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn63xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn63xxp1;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn66xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn68xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn68xxp1;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn70xx;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cn70xxp1;
+	struct cvmx_iob_outb_fpa_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_fpa_pri_cnt cvmx_iob_outb_fpa_pri_cnt_t;
+
+/**
+ * cvmx_iob_outb_req_pri_cnt
+ *
+ * Request To NCB Priority Counter = Request to NCB Priority Counter Enable and Timer Value
+ * Enables and supplies the timeout count for raising the priority of Request transfers to the
+ * Outbound NCB.
+ */
+union cvmx_iob_outb_req_pri_cnt {
+	u64 u64;
+	struct cvmx_iob_outb_req_pri_cnt_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt_enb : 1;
+		u64 cnt_val : 15;
+	} s;
+	struct cvmx_iob_outb_req_pri_cnt_s cn38xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_outb_req_pri_cnt_s cn52xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_outb_req_pri_cnt_s cn56xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_outb_req_pri_cnt_s cn58xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1;
+	struct cvmx_iob_outb_req_pri_cnt_s cn61xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn63xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn63xxp1;
+	struct cvmx_iob_outb_req_pri_cnt_s cn66xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn68xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn68xxp1;
+	struct cvmx_iob_outb_req_pri_cnt_s cn70xx;
+	struct cvmx_iob_outb_req_pri_cnt_s cn70xxp1;
+	struct cvmx_iob_outb_req_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_outb_req_pri_cnt cvmx_iob_outb_req_pri_cnt_t;
+
+/**
+ * cvmx_iob_p2c_req_pri_cnt
+ *
+ * PKO To CMB Response Priority Counter = PKO to CMB Response Priority Counter Enable and Timer
+ * Value
+ * Enables and supplies the timeout count for raising the priority of PKO Load access to the CMB.
+ */
+union cvmx_iob_p2c_req_pri_cnt {
+	u64 u64;
+	struct cvmx_iob_p2c_req_pri_cnt_s {
+		u64 reserved_16_63 : 48;
+		u64 cnt_enb : 1;
+		u64 cnt_val : 15;
+	} s;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn38xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn38xxp2;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn52xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn52xxp1;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn56xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn58xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn61xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn63xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn63xxp1;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn66xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn70xx;
+	struct cvmx_iob_p2c_req_pri_cnt_s cn70xxp1;
+	struct cvmx_iob_p2c_req_pri_cnt_s cnf71xx;
+};
+
+typedef union cvmx_iob_p2c_req_pri_cnt cvmx_iob_p2c_req_pri_cnt_t;
+
+/**
+ * cvmx_iob_pkt_err
+ *
+ * Provides status about the failing packet receive error.
+ *
+ */
+union cvmx_iob_pkt_err {
+	u64 u64;
+	struct cvmx_iob_pkt_err_s {
+		u64 reserved_12_63 : 52;
+		u64 vport : 6;
+		u64 port : 6;
+	} s;
+	struct cvmx_iob_pkt_err_cn30xx {
+		u64 reserved_6_63 : 58;
+		u64 port : 6;
+	} cn30xx;
+	struct cvmx_iob_pkt_err_cn30xx cn31xx;
+	struct cvmx_iob_pkt_err_cn30xx cn38xx;
+	struct cvmx_iob_pkt_err_cn30xx cn38xxp2;
+	struct cvmx_iob_pkt_err_cn30xx cn50xx;
+	struct cvmx_iob_pkt_err_cn30xx cn52xx;
+	struct cvmx_iob_pkt_err_cn30xx cn52xxp1;
+	struct cvmx_iob_pkt_err_cn30xx cn56xx;
+	struct cvmx_iob_pkt_err_cn30xx cn56xxp1;
+	struct cvmx_iob_pkt_err_cn30xx cn58xx;
+	struct cvmx_iob_pkt_err_cn30xx cn58xxp1;
+	struct cvmx_iob_pkt_err_s cn61xx;
+	struct cvmx_iob_pkt_err_s cn63xx;
+	struct cvmx_iob_pkt_err_s cn63xxp1;
+	struct cvmx_iob_pkt_err_s cn66xx;
+	struct cvmx_iob_pkt_err_s cn70xx;
+	struct cvmx_iob_pkt_err_s cn70xxp1;
+	struct cvmx_iob_pkt_err_s cnf71xx;
+};
+
+typedef union cvmx_iob_pkt_err cvmx_iob_pkt_err_t;
+
+/**
+ * cvmx_iob_pp_bist_status
+ *
+ * The result of the BIST run on the PPs.
+ *
+ */
+union cvmx_iob_pp_bist_status {
+	u64 u64;
+	struct cvmx_iob_pp_bist_status_s {
+		u64 reserved_4_63 : 60;
+		u64 pp_bstat : 4;
+	} s;
+	struct cvmx_iob_pp_bist_status_s cn70xx;
+	struct cvmx_iob_pp_bist_status_s cn70xxp1;
+};
+
+typedef union cvmx_iob_pp_bist_status cvmx_iob_pp_bist_status_t;
+
+/**
+ * cvmx_iob_to_cmb_credits
+ *
+ * Controls the number of reads and writes that may be outstanding to the L2C (via the CMB).
+ *
+ */
+union cvmx_iob_to_cmb_credits {
+	u64 u64;
+	struct cvmx_iob_to_cmb_credits_s {
+		u64 reserved_6_63 : 58;
+		u64 ncb_rd : 3;
+		u64 ncb_wr : 3;
+	} s;
+	struct cvmx_iob_to_cmb_credits_cn52xx {
+		u64 reserved_9_63 : 55;
+		u64 pko_rd : 3;
+		u64 ncb_rd : 3;
+		u64 ncb_wr : 3;
+	} cn52xx;
+	struct cvmx_iob_to_cmb_credits_cn52xx cn61xx;
+	struct cvmx_iob_to_cmb_credits_cn52xx cn63xx;
+	struct cvmx_iob_to_cmb_credits_cn52xx cn63xxp1;
+	struct cvmx_iob_to_cmb_credits_cn52xx cn66xx;
+	struct cvmx_iob_to_cmb_credits_cn68xx {
+		u64 reserved_9_63 : 55;
+		u64 dwb : 3;
+		u64 ncb_rd : 3;
+		u64 ncb_wr : 3;
+	} cn68xx;
+	struct cvmx_iob_to_cmb_credits_cn68xx cn68xxp1;
+	struct cvmx_iob_to_cmb_credits_cn52xx cn70xx;
+	struct cvmx_iob_to_cmb_credits_cn52xx cn70xxp1;
+	struct cvmx_iob_to_cmb_credits_cn52xx cnf71xx;
+};
+
+typedef union cvmx_iob_to_cmb_credits cvmx_iob_to_cmb_credits_t;
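+
+/*
+ * Usage sketch (illustrative): these CSR unions are intended for a
+ * read-modify-write pattern. Assuming the CVMX_IOB_TO_CMB_CREDITS
+ * address define from earlier in this header and the csr_rd()/csr_wr()
+ * accessors, raising the NCB write credits might look like:
+ *
+ *	cvmx_iob_to_cmb_credits_t credits;
+ *
+ *	credits.u64 = csr_rd(CVMX_IOB_TO_CMB_CREDITS);
+ *	credits.s.ncb_wr = 4;	/* example credit count */
+ *	csr_wr(CVMX_IOB_TO_CMB_CREDITS, credits.u64);
+ */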
+
+/**
+ * cvmx_iob_to_ncb_did_00_credits
+ *
+ * IOB_TO_NCB_DID_00_CREDITS = IOB NCB DID 00 Credits
+ *
+ * Number of credits for NCB DID 00.
+ */
+union cvmx_iob_to_ncb_did_00_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_00_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_00_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_00_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_00_credits cvmx_iob_to_ncb_did_00_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_111_credits
+ *
+ * IOB_TO_NCB_DID_111_CREDITS = IOB NCB DID 111 Credits
+ *
+ * Number of credits for NCB DID 111.
+ */
+union cvmx_iob_to_ncb_did_111_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_111_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_111_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_111_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_111_credits cvmx_iob_to_ncb_did_111_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_223_credits
+ *
+ * IOB_TO_NCB_DID_223_CREDITS = IOB NCB DID 223 Credits
+ *
+ * Number of credits for NCB DID 223.
+ */
+union cvmx_iob_to_ncb_did_223_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_223_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_223_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_223_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_223_credits cvmx_iob_to_ncb_did_223_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_24_credits
+ *
+ * IOB_TO_NCB_DID_24_CREDITS = IOB NCB DID 24 Credits
+ *
+ * Number of credits for NCB DID 24.
+ */
+union cvmx_iob_to_ncb_did_24_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_24_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_24_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_24_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_24_credits cvmx_iob_to_ncb_did_24_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_32_credits
+ *
+ * IOB_TO_NCB_DID_32_CREDITS = IOB NCB DID 32 Credits
+ *
+ * Number of credits for NCB DID 32.
+ */
+union cvmx_iob_to_ncb_did_32_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_32_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_32_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_32_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_32_credits cvmx_iob_to_ncb_did_32_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_40_credits
+ *
+ * IOB_TO_NCB_DID_40_CREDITS = IOB NCB DID 40 Credits
+ *
+ * Number of credits for NCB DID 40.
+ */
+union cvmx_iob_to_ncb_did_40_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_40_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_40_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_40_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_40_credits cvmx_iob_to_ncb_did_40_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_55_credits
+ *
+ * IOB_TO_NCB_DID_55_CREDITS = IOB NCB DID 55 Credits
+ *
+ * Number of credits for NCB DID 55.
+ */
+union cvmx_iob_to_ncb_did_55_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_55_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_55_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_55_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_55_credits cvmx_iob_to_ncb_did_55_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_64_credits
+ *
+ * IOB_TO_NCB_DID_64_CREDITS = IOB NCB DID 64 Credits
+ *
+ * Number of credits for NCB DID 64.
+ */
+union cvmx_iob_to_ncb_did_64_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_64_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_64_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_64_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_64_credits cvmx_iob_to_ncb_did_64_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_79_credits
+ *
+ * IOB_TO_NCB_DID_79_CREDITS = IOB NCB DID 79 Credits
+ *
+ * Number of credits for NCB DID 79.
+ */
+union cvmx_iob_to_ncb_did_79_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_79_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_79_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_79_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_79_credits cvmx_iob_to_ncb_did_79_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_96_credits
+ *
+ * IOB_TO_NCB_DID_96_CREDITS = IOB NCB DID 96 Credits
+ *
+ * Number of credits for NCB DID 96.
+ */
+union cvmx_iob_to_ncb_did_96_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_96_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_96_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_96_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_96_credits cvmx_iob_to_ncb_did_96_credits_t;
+
+/**
+ * cvmx_iob_to_ncb_did_98_credits
+ *
+ * IOB_TO_NCB_DID_98_CREDITS = IOB NCB DID 98 Credits
+ *
+ * Number of credits for NCB DID 98.
+ */
+union cvmx_iob_to_ncb_did_98_credits {
+	u64 u64;
+	struct cvmx_iob_to_ncb_did_98_credits_s {
+		u64 reserved_7_63 : 57;
+		u64 crd : 7;
+	} s;
+	struct cvmx_iob_to_ncb_did_98_credits_s cn68xx;
+	struct cvmx_iob_to_ncb_did_98_credits_s cn68xxp1;
+};
+
+typedef union cvmx_iob_to_ncb_did_98_credits cvmx_iob_to_ncb_did_98_credits_t;
+
+#endif
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-lbk-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-lbk-defs.h
new file mode 100644
index 0000000..1068a19
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-lbk-defs.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon lbk.
+ */
+
+#ifndef __CVMX_LBK_DEFS_H__
+#define __CVMX_LBK_DEFS_H__
+
+#define CVMX_LBK_BIST_RESULT	   (0x0001180012000020ull)
+#define CVMX_LBK_CHX_PKIND(offset) (0x0001180012000200ull + ((offset) & 63) * 8)
+#define CVMX_LBK_CLK_GATE_CTL	   (0x0001180012000008ull)
+#define CVMX_LBK_DAT_ERR_INFO	   (0x0001180012000050ull)
+#define CVMX_LBK_ECC_CFG	   (0x0001180012000060ull)
+#define CVMX_LBK_INT		   (0x0001180012000040ull)
+#define CVMX_LBK_SFT_RST	   (0x0001180012000000ull)
+
+/**
+ * cvmx_lbk_bist_result
+ *
+ * This register provides access to the internal BIST results. Each bit is the
+ * BIST result of an individual memory (per bit, 0 = pass and 1 = fail).
+ */
+union cvmx_lbk_bist_result {
+	u64 u64;
+	struct cvmx_lbk_bist_result_s {
+		u64 reserved_1_63 : 63;
+		u64 dat : 1;
+	} s;
+	struct cvmx_lbk_bist_result_s cn73xx;
+	struct cvmx_lbk_bist_result_s cn78xx;
+	struct cvmx_lbk_bist_result_s cn78xxp1;
+	struct cvmx_lbk_bist_result_s cnf75xx;
+};
+
+typedef union cvmx_lbk_bist_result cvmx_lbk_bist_result_t;
+
+/**
+ * cvmx_lbk_ch#_pkind
+ */
+union cvmx_lbk_chx_pkind {
+	u64 u64;
+	struct cvmx_lbk_chx_pkind_s {
+		u64 reserved_6_63 : 58;
+		u64 pkind : 6;
+	} s;
+	struct cvmx_lbk_chx_pkind_s cn73xx;
+	struct cvmx_lbk_chx_pkind_s cn78xx;
+	struct cvmx_lbk_chx_pkind_s cn78xxp1;
+	struct cvmx_lbk_chx_pkind_s cnf75xx;
+};
+
+typedef union cvmx_lbk_chx_pkind cvmx_lbk_chx_pkind_t;
+
+/**
+ * cvmx_lbk_clk_gate_ctl
+ *
+ * This register is for diagnostic use only.
+ *
+ */
+union cvmx_lbk_clk_gate_ctl {
+	u64 u64;
+	struct cvmx_lbk_clk_gate_ctl_s {
+		u64 reserved_1_63 : 63;
+		u64 dis : 1;
+	} s;
+	struct cvmx_lbk_clk_gate_ctl_s cn73xx;
+	struct cvmx_lbk_clk_gate_ctl_s cn78xx;
+	struct cvmx_lbk_clk_gate_ctl_s cn78xxp1;
+	struct cvmx_lbk_clk_gate_ctl_s cnf75xx;
+};
+
+typedef union cvmx_lbk_clk_gate_ctl cvmx_lbk_clk_gate_ctl_t;
+
+/**
+ * cvmx_lbk_dat_err_info
+ */
+union cvmx_lbk_dat_err_info {
+	u64 u64;
+	struct cvmx_lbk_dat_err_info_s {
+		u64 reserved_58_63 : 6;
+		u64 dbe_ecc_out : 9;
+		u64 dbe_synd : 9;
+		u64 dbe_addr : 8;
+		u64 reserved_26_31 : 6;
+		u64 sbe_ecc_out : 9;
+		u64 sbe_synd : 9;
+		u64 sbe_addr : 8;
+	} s;
+	struct cvmx_lbk_dat_err_info_s cn73xx;
+	struct cvmx_lbk_dat_err_info_s cn78xx;
+	struct cvmx_lbk_dat_err_info_s cn78xxp1;
+	struct cvmx_lbk_dat_err_info_s cnf75xx;
+};
+
+typedef union cvmx_lbk_dat_err_info cvmx_lbk_dat_err_info_t;
+
+/**
+ * cvmx_lbk_ecc_cfg
+ */
+union cvmx_lbk_ecc_cfg {
+	u64 u64;
+	struct cvmx_lbk_ecc_cfg_s {
+		u64 reserved_3_63 : 61;
+		u64 dat_flip : 2;
+		u64 dat_cdis : 1;
+	} s;
+	struct cvmx_lbk_ecc_cfg_s cn73xx;
+	struct cvmx_lbk_ecc_cfg_s cn78xx;
+	struct cvmx_lbk_ecc_cfg_s cn78xxp1;
+	struct cvmx_lbk_ecc_cfg_s cnf75xx;
+};
+
+typedef union cvmx_lbk_ecc_cfg cvmx_lbk_ecc_cfg_t;
+
+/**
+ * cvmx_lbk_int
+ */
+union cvmx_lbk_int {
+	u64 u64;
+	struct cvmx_lbk_int_s {
+		u64 reserved_6_63 : 58;
+		u64 chan_oflow : 1;
+		u64 chan_uflow : 1;
+		u64 dat_oflow : 1;
+		u64 dat_uflow : 1;
+		u64 dat_dbe : 1;
+		u64 dat_sbe : 1;
+	} s;
+	struct cvmx_lbk_int_s cn73xx;
+	struct cvmx_lbk_int_s cn78xx;
+	struct cvmx_lbk_int_s cn78xxp1;
+	struct cvmx_lbk_int_s cnf75xx;
+};
+
+typedef union cvmx_lbk_int cvmx_lbk_int_t;
+
+/**
+ * cvmx_lbk_sft_rst
+ */
+union cvmx_lbk_sft_rst {
+	u64 u64;
+	struct cvmx_lbk_sft_rst_s {
+		u64 reserved_1_63 : 63;
+		u64 reset : 1;
+	} s;
+	struct cvmx_lbk_sft_rst_s cn73xx;
+	struct cvmx_lbk_sft_rst_s cn78xx;
+	struct cvmx_lbk_sft_rst_s cn78xxp1;
+	struct cvmx_lbk_sft_rst_s cnf75xx;
+};
+
+typedef union cvmx_lbk_sft_rst cvmx_lbk_sft_rst_t;
+
+#endif
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-mdio.h b/arch/mips/mach-octeon/include/mach/cvmx-mdio.h
new file mode 100644
index 0000000..9bc138f
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-mdio.h
@@ -0,0 +1,516 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
+ * clause 22 and clause 45 operations.
+ */
+
+#ifndef __CVMX_MIO_H__
+#define __CVMX_MIO_H__
+
+/**
+ * PHY register 0 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL 0
+
+typedef union {
+	u16 u16;
+	struct {
+		u16 reset : 1;
+		u16 loopback : 1;
+		u16 speed_lsb : 1;
+		u16 autoneg_enable : 1;
+		u16 power_down : 1;
+		u16 isolate : 1;
+		u16 restart_autoneg : 1;
+		u16 duplex : 1;
+		u16 collision_test : 1;
+		u16 speed_msb : 1;
+		u16 unidirectional_enable : 1;
+		u16 reserved_0_4 : 5;
+	} s;
+} cvmx_mdio_phy_reg_control_t;
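+
+/*
+ * Example (illustrative; bus and PHY numbers are placeholders): a
+ * software PHY reset can be issued through this register with the
+ * cvmx_mdio_read()/cvmx_mdio_write() helpers defined later in this
+ * header:
+ *
+ *	cvmx_mdio_phy_reg_control_t ctrl;
+ *
+ *	ctrl.u16 = cvmx_mdio_read(0, 1, CVMX_MDIO_PHY_REG_CONTROL);
+ *	ctrl.s.reset = 1;
+ *	cvmx_mdio_write(0, 1, CVMX_MDIO_PHY_REG_CONTROL, ctrl.u16);
+ */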
+
+/**
+ * PHY register 1 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS 1
+typedef union {
+	u16 u16;
+	struct {
+		u16 capable_100base_t4 : 1;
+		u16 capable_100base_x_full : 1;
+		u16 capable_100base_x_half : 1;
+		u16 capable_10_full : 1;
+		u16 capable_10_half : 1;
+		u16 capable_100base_t2_full : 1;
+		u16 capable_100base_t2_half : 1;
+		u16 capable_extended_status : 1;
+		u16 capable_unidirectional : 1;
+		u16 capable_mf_preamble_suppression : 1;
+		u16 autoneg_complete : 1;
+		u16 remote_fault : 1;
+		u16 capable_autoneg : 1;
+		u16 link_status : 1;
+		u16 jabber_detect : 1;
+		u16 capable_extended_registers : 1;
+	} s;
+} cvmx_mdio_phy_reg_status_t;
+
+/**
+ * PHY register 2 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_ID1 2
+typedef union {
+	u16 u16;
+	struct {
+		u16 oui_bits_3_18;
+	} s;
+} cvmx_mdio_phy_reg_id1_t;
+
+/**
+ * PHY register 3 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_ID2 3
+typedef union {
+	u16 u16;
+	struct {
+		u16 oui_bits_19_24 : 6;
+		u16 model : 6;
+		u16 revision : 4;
+	} s;
+} cvmx_mdio_phy_reg_id2_t;
+
+/**
+ * PHY register 4 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
+typedef union {
+	u16 u16;
+	struct {
+		u16 next_page : 1;
+		u16 reserved_14 : 1;
+		u16 remote_fault : 1;
+		u16 reserved_12 : 1;
+		u16 asymmetric_pause : 1;
+		u16 pause : 1;
+		u16 advert_100base_t4 : 1;
+		u16 advert_100base_tx_full : 1;
+		u16 advert_100base_tx_half : 1;
+		u16 advert_10base_tx_full : 1;
+		u16 advert_10base_tx_half : 1;
+		u16 selector : 5;
+	} s;
+} cvmx_mdio_phy_reg_autoneg_adver_t;
+
+/**
+ * PHY register 5 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
+typedef union {
+	u16 u16;
+	struct {
+		u16 next_page : 1;
+		u16 ack : 1;
+		u16 remote_fault : 1;
+		u16 reserved_12 : 1;
+		u16 asymmetric_pause : 1;
+		u16 pause : 1;
+		u16 advert_100base_t4 : 1;
+		u16 advert_100base_tx_full : 1;
+		u16 advert_100base_tx_half : 1;
+		u16 advert_10base_tx_full : 1;
+		u16 advert_10base_tx_half : 1;
+		u16 selector : 5;
+	} s;
+} cvmx_mdio_phy_reg_link_partner_ability_t;
+
+/**
+ * PHY register 6 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
+typedef union {
+	u16 u16;
+	struct {
+		u16 reserved_5_15 : 11;
+		u16 parallel_detection_fault : 1;
+		u16 link_partner_next_page_capable : 1;
+		u16 local_next_page_capable : 1;
+		u16 page_received : 1;
+		u16 link_partner_autoneg_capable : 1;
+	} s;
+} cvmx_mdio_phy_reg_autoneg_expansion_t;
+
+/**
+ * PHY register 9 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
+typedef union {
+	u16 u16;
+	struct {
+		u16 test_mode : 3;
+		u16 manual_master_slave : 1;
+		u16 master : 1;
+		u16 port_type : 1;
+		u16 advert_1000base_t_full : 1;
+		u16 advert_1000base_t_half : 1;
+		u16 reserved_0_7 : 8;
+	} s;
+} cvmx_mdio_phy_reg_control_1000_t;
+
+/**
+ * PHY register 10 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS_1000 10
+typedef union {
+	u16 u16;
+	struct {
+		u16 master_slave_fault : 1;
+		u16 is_master : 1;
+		u16 local_receiver_ok : 1;
+		u16 remote_receiver_ok : 1;
+		u16 remote_capable_1000base_t_full : 1;
+		u16 remote_capable_1000base_t_half : 1;
+		u16 reserved_8_9 : 2;
+		u16 idle_error_count : 8;
+	} s;
+} cvmx_mdio_phy_reg_status_1000_t;
+
+/**
+ * PHY register 15 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
+typedef union {
+	u16 u16;
+	struct {
+		u16 capable_1000base_x_full : 1;
+		u16 capable_1000base_x_half : 1;
+		u16 capable_1000base_t_full : 1;
+		u16 capable_1000base_t_half : 1;
+		u16 reserved_0_11 : 12;
+	} s;
+} cvmx_mdio_phy_reg_extended_status_t;
+
+/**
+ * PHY register 13 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
+typedef union {
+	u16 u16;
+	struct {
+		u16 function : 2;
+		u16 reserved_5_13 : 9;
+		u16 devad : 5;
+	} s;
+} cvmx_mdio_phy_reg_mmd_control_t;
+
+/**
+ * PHY register 14 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
+typedef union {
+	u16 u16;
+	struct {
+		u16 address_data : 16;
+	} s;
+} cvmx_mdio_phy_reg_mmd_address_data_t;
+
+/* Operating request encodings. */
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ  1
+
+#define MDIO_CLAUSE_45_ADDRESS	0
+#define MDIO_CLAUSE_45_WRITE	1
+#define MDIO_CLAUSE_45_READ_INC 2
+#define MDIO_CLAUSE_45_READ	3
+
+/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
+#define CVMX_MMD_DEVICE_PMA_PMD	 1
+#define CVMX_MMD_DEVICE_WIS	 2
+#define CVMX_MMD_DEVICE_PCS	 3
+#define CVMX_MMD_DEVICE_PHY_XS	 4
+#define CVMX_MMD_DEVICE_DTS_XS	 5
+#define CVMX_MMD_DEVICE_TC	 6
+#define CVMX_MMD_DEVICE_CL22_EXT 29
+#define CVMX_MMD_DEVICE_VENDOR_1 30
+#define CVMX_MMD_DEVICE_VENDOR_2 31
+
+#define CVMX_MDIO_TIMEOUT 100000 /* in microseconds (100 ms) */
+
+static inline int cvmx_mdio_bus_id_to_node(int bus_id)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		return (bus_id >> 2) & CVMX_NODE_MASK;
+	else
+		return 0;
+}
+
+static inline int cvmx_mdio_bus_id_to_bus(int bus_id)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		return bus_id & 3;
+	else
+		return bus_id;
+}
+
+/* Helper function to put MDIO interface into clause 45 mode */
+static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
+{
+	cvmx_smix_clk_t smi_clk;
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+
+	/* Put bus into clause 45 mode */
+	smi_clk.u64 = csr_rd_node(node, CVMX_SMIX_CLK(bus));
+	smi_clk.s.mode = 1;
+	smi_clk.s.preamble = 1;
+	csr_wr_node(node, CVMX_SMIX_CLK(bus), smi_clk.u64);
+}
+
+/* Helper function to put MDIO interface into clause 22 mode */
+static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
+{
+	cvmx_smix_clk_t smi_clk;
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+
+	/* Put bus into clause 22 mode */
+	smi_clk.u64 = csr_rd_node(node, CVMX_SMIX_CLK(bus));
+	smi_clk.s.mode = 0;
+	csr_wr_node(node, CVMX_SMIX_CLK(bus), smi_clk.u64);
+}
+
+/**
+ * @INTERNAL
+ * Function to read SMIX_RD_DAT and check for timeouts. This
+ * code sequence is needed fairly often, so it is kept in one place.
+ *
+ * @param bus_id SMI/MDIO bus to read
+ *
+ * @return Value of SMIX_RD_DAT. The pending bit will still be set on
+ *         a timeout.
+ */
+static inline cvmx_smix_rd_dat_t __cvmx_mdio_read_rd_dat(int bus_id)
+{
+	cvmx_smix_rd_dat_t smi_rd;
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	u64 done;
+
+	done = get_timer(0);
+
+	do {
+		mdelay(1);
+		smi_rd.u64 = csr_rd_node(node, CVMX_SMIX_RD_DAT(bus));
+		if (get_timer(done) > (CVMX_MDIO_TIMEOUT / 1000))
+			break;
+	} while (smi_rd.s.pending);
+
+	return smi_rd;
+}
+
+/**
+ * Perform an MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id   MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ *                 support multiple buses.
+ * @param phy_id   The MII phy id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_cmd_t smi_cmd;
+	cvmx_smix_rd_dat_t smi_rd;
+
+	if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+		__cvmx_mdio_set_clause22_mode(bus_id);
+
+	smi_cmd.u64 = 0;
+	smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
+	smi_cmd.s.phy_adr = phy_id;
+	smi_cmd.s.reg_adr = location;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
+
+	smi_rd = __cvmx_mdio_read_rd_dat(bus_id);
+	if (smi_rd.s.val)
+		return smi_rd.s.dat;
+	else
+		return -1;
+}
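+
+/*
+ * Example (illustrative; bus and PHY numbers are placeholders): the two
+ * clause 22 ID registers defined above combine into one 32-bit PHY id:
+ *
+ *	int id1 = cvmx_mdio_read(0, 1, CVMX_MDIO_PHY_REG_ID1);
+ *	int id2 = cvmx_mdio_read(0, 1, CVMX_MDIO_PHY_REG_ID2);
+ *
+ *	if (id1 >= 0 && id2 >= 0)
+ *		printf("PHY id 0x%08x\n", (id1 << 16) | id2);
+ */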
+
+/**
+ * Perform an MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id   MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ *                 support multiple buses.
+ * @param phy_id   The MII phy id
+ * @param location Register location to write
+ * @param val      Value to write
+ *
+ * @return -1 on error
+ *         0 on success
+ */
+static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
+{
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+	cvmx_smix_cmd_t smi_cmd;
+	cvmx_smix_wr_dat_t smi_wr;
+
+	if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+		__cvmx_mdio_set_clause22_mode(bus_id);
+
+	smi_wr.u64 = 0;
+	smi_wr.s.dat = val;
+	csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), smi_wr.u64);
+
+	smi_cmd.u64 = 0;
+	smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
+	smi_cmd.s.phy_adr = phy_id;
+	smi_cmd.s.reg_adr = location;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
+				       cvmx_smix_wr_dat_t, pending, ==, 0,
+				       CVMX_MDIO_TIMEOUT))
+		return -1;
+
+	return 0;
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id   MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ *                 support multiple buses.
+ * @param phy_id   The MII phy id
+ * @param device   MDIO Manageable Device (MMD) id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
+				    int location)
+{
+	cvmx_smix_cmd_t smi_cmd;
+	cvmx_smix_rd_dat_t smi_rd;
+	cvmx_smix_wr_dat_t smi_wr;
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+
+	if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+		return -1;
+
+	__cvmx_mdio_set_clause45_mode(bus_id);
+
+	smi_wr.u64 = 0;
+	smi_wr.s.dat = location;
+	csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), smi_wr.u64);
+
+	smi_cmd.u64 = 0;
+	smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+	smi_cmd.s.phy_adr = phy_id;
+	smi_cmd.s.reg_adr = device;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
+				       cvmx_smix_wr_dat_t, pending, ==, 0,
+				       CVMX_MDIO_TIMEOUT)) {
+		debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d   TIME OUT(address)\n",
+		      bus_id, phy_id, device, location);
+		return -1;
+	}
+
+	smi_cmd.u64 = 0;
+	smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
+	smi_cmd.s.phy_adr = phy_id;
+	smi_cmd.s.reg_adr = device;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
+
+	smi_rd = __cvmx_mdio_read_rd_dat(bus_id);
+	if (smi_rd.s.pending) {
+		debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d   TIME OUT(data)\n",
+		      bus_id, phy_id, device, location);
+		return -1;
+	}
+
+	if (smi_rd.s.val)
+		return smi_rd.s.dat;
+
+	debug("cvmx_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d   INVALID READ\n",
+	      bus_id, phy_id, device, location);
+	return -1;
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id   MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ *                 support multiple buses.
+ * @param phy_id   The MII phy id
+ * @param device   MDIO Manageable Device (MMD) id
+ * @param location Register location to write
+ * @param val      Value to write
+ *
+ * @return -1 on error
+ *         0 on success
+ */
+static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
+				     int location, int val)
+{
+	cvmx_smix_cmd_t smi_cmd;
+	cvmx_smix_wr_dat_t smi_wr;
+	int node = cvmx_mdio_bus_id_to_node(bus_id);
+	int bus = cvmx_mdio_bus_id_to_bus(bus_id);
+
+	if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+		return -1;
+
+	__cvmx_mdio_set_clause45_mode(bus_id);
+
+	smi_wr.u64 = 0;
+	smi_wr.s.dat = location;
+	csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), smi_wr.u64);
+
+	smi_cmd.u64 = 0;
+	smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+	smi_cmd.s.phy_adr = phy_id;
+	smi_cmd.s.reg_adr = device;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
+				       cvmx_smix_wr_dat_t, pending, ==, 0,
+				       CVMX_MDIO_TIMEOUT))
+		return -1;
+
+	smi_wr.u64 = 0;
+	smi_wr.s.dat = val;
+	csr_wr_node(node, CVMX_SMIX_WR_DAT(bus), smi_wr.u64);
+
+	smi_cmd.u64 = 0;
+	smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
+	smi_cmd.s.phy_adr = phy_id;
+	smi_cmd.s.reg_adr = device;
+	csr_wr_node(node, CVMX_SMIX_CMD(bus), smi_cmd.u64);
+
+	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_SMIX_WR_DAT(bus),
+				       cvmx_smix_wr_dat_t, pending, ==, 0,
+				       CVMX_MDIO_TIMEOUT))
+		return -1;
+
+	return 0;
+}
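+
+/*
+ * Example (illustrative; bus and PHY numbers are placeholders): reading
+ * the clause 45 device identifier words, which live in registers 2 and
+ * 3 of the PMA/PMD MMD:
+ *
+ *	int hi = cvmx_mdio_45_read(0, 0, CVMX_MMD_DEVICE_PMA_PMD, 2);
+ *	int lo = cvmx_mdio_45_read(0, 0, CVMX_MMD_DEVICE_PMA_PMD, 3);
+ */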
+
+#endif
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-npei-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-npei-defs.h
new file mode 100644
index 0000000..2e2c248
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-npei-defs.h
@@ -0,0 +1,3550 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon npei.
+ */
+
+#ifndef __CVMX_NPEI_DEFS_H__
+#define __CVMX_NPEI_DEFS_H__
+
+#define CVMX_NPEI_BAR1_INDEXX(offset)                                          \
+	(0x0000000000000000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_BIST_STATUS	 (0x0000000000000580ull)
+#define CVMX_NPEI_BIST_STATUS2	 (0x0000000000000680ull)
+#define CVMX_NPEI_CTL_PORT0	 (0x0000000000000250ull)
+#define CVMX_NPEI_CTL_PORT1	 (0x0000000000000260ull)
+#define CVMX_NPEI_CTL_STATUS	 (0x0000000000000570ull)
+#define CVMX_NPEI_CTL_STATUS2	 (0x0000000000003C00ull)
+#define CVMX_NPEI_DATA_OUT_CNT	 (0x00000000000005F0ull)
+#define CVMX_NPEI_DBG_DATA	 (0x0000000000000510ull)
+#define CVMX_NPEI_DBG_SELECT	 (0x0000000000000500ull)
+#define CVMX_NPEI_DMA0_INT_LEVEL (0x00000000000005C0ull)
+#define CVMX_NPEI_DMA1_INT_LEVEL (0x00000000000005D0ull)
+#define CVMX_NPEI_DMAX_COUNTS(offset)                                          \
+	(0x0000000000000450ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_DBELL(offset) (0x00000000000003B0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset)                                     \
+	(0x0000000000000400ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_NADDR(offset) (0x00000000000004A0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMA_CNTS	     (0x00000000000005E0ull)
+#define CVMX_NPEI_DMA_CONTROL	     (0x00000000000003A0ull)
+#define CVMX_NPEI_DMA_PCIE_REQ_NUM   (0x00000000000005B0ull)
+#define CVMX_NPEI_DMA_STATE1	     (0x00000000000006C0ull)
+#define CVMX_NPEI_DMA_STATE1_P1	     (0x0000000000000680ull)
+#define CVMX_NPEI_DMA_STATE2	     (0x00000000000006D0ull)
+#define CVMX_NPEI_DMA_STATE2_P1	     (0x0000000000000690ull)
+#define CVMX_NPEI_DMA_STATE3_P1	     (0x00000000000006A0ull)
+#define CVMX_NPEI_DMA_STATE4_P1	     (0x00000000000006B0ull)
+#define CVMX_NPEI_DMA_STATE5_P1	     (0x00000000000006C0ull)
+#define CVMX_NPEI_INT_A_ENB	     (0x0000000000000560ull)
+#define CVMX_NPEI_INT_A_ENB2	     (0x0000000000003CE0ull)
+#define CVMX_NPEI_INT_A_SUM	     (0x0000000000000550ull)
+#define CVMX_NPEI_INT_ENB	     (0x0000000000000540ull)
+#define CVMX_NPEI_INT_ENB2	     (0x0000000000003CD0ull)
+#define CVMX_NPEI_INT_INFO	     (0x0000000000000590ull)
+#define CVMX_NPEI_INT_SUM	     (0x0000000000000530ull)
+#define CVMX_NPEI_INT_SUM2	     (0x0000000000003CC0ull)
+#define CVMX_NPEI_LAST_WIN_RDATA0    (0x0000000000000600ull)
+#define CVMX_NPEI_LAST_WIN_RDATA1    (0x0000000000000610ull)
+#define CVMX_NPEI_MEM_ACCESS_CTL     (0x00000000000004F0ull)
+#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset)                                    \
+	(0x0000000000000280ull + ((offset) & 31) * 16 - 16 * 12)
+#define CVMX_NPEI_MSI_ENB0	    (0x0000000000003C50ull)
+#define CVMX_NPEI_MSI_ENB1	    (0x0000000000003C60ull)
+#define CVMX_NPEI_MSI_ENB2	    (0x0000000000003C70ull)
+#define CVMX_NPEI_MSI_ENB3	    (0x0000000000003C80ull)
+#define CVMX_NPEI_MSI_RCV0	    (0x0000000000003C10ull)
+#define CVMX_NPEI_MSI_RCV1	    (0x0000000000003C20ull)
+#define CVMX_NPEI_MSI_RCV2	    (0x0000000000003C30ull)
+#define CVMX_NPEI_MSI_RCV3	    (0x0000000000003C40ull)
+#define CVMX_NPEI_MSI_RD_MAP	    (0x0000000000003CA0ull)
+#define CVMX_NPEI_MSI_W1C_ENB0	    (0x0000000000003CF0ull)
+#define CVMX_NPEI_MSI_W1C_ENB1	    (0x0000000000003D00ull)
+#define CVMX_NPEI_MSI_W1C_ENB2	    (0x0000000000003D10ull)
+#define CVMX_NPEI_MSI_W1C_ENB3	    (0x0000000000003D20ull)
+#define CVMX_NPEI_MSI_W1S_ENB0	    (0x0000000000003D30ull)
+#define CVMX_NPEI_MSI_W1S_ENB1	    (0x0000000000003D40ull)
+#define CVMX_NPEI_MSI_W1S_ENB2	    (0x0000000000003D50ull)
+#define CVMX_NPEI_MSI_W1S_ENB3	    (0x0000000000003D60ull)
+#define CVMX_NPEI_MSI_WR_MAP	    (0x0000000000003C90ull)
+#define CVMX_NPEI_PCIE_CREDIT_CNT   (0x0000000000003D70ull)
+#define CVMX_NPEI_PCIE_MSI_RCV	    (0x0000000000003CB0ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B1   (0x0000000000000650ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B2   (0x0000000000000660ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B3   (0x0000000000000670ull)
+#define CVMX_NPEI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BADDR(offset)                                     \
+	(0x0000000000002800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset)                               \
+	(0x0000000000002C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset)                                \
+	(0x0000000000003000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_HEADER(offset)                                    \
+	(0x0000000000003400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_IN_BP(offset)                                           \
+	(0x0000000000003800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BADDR(offset)                                     \
+	(0x0000000000001400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset)                               \
+	(0x0000000000001800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset)                                \
+	(0x0000000000001C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_CNT_INT	    (0x0000000000001110ull)
+#define CVMX_NPEI_PKT_CNT_INT_ENB   (0x0000000000001130ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ES   (0x00000000000010B0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_NS   (0x00000000000010A0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ROR  (0x0000000000001090ull)
+#define CVMX_NPEI_PKT_DPADDR	    (0x0000000000001080ull)
+#define CVMX_NPEI_PKT_INPUT_CONTROL (0x0000000000001150ull)
+#define CVMX_NPEI_PKT_INSTR_ENB	    (0x0000000000001000ull)
+#define CVMX_NPEI_PKT_INSTR_RD_SIZE (0x0000000000001190ull)
+#define CVMX_NPEI_PKT_INSTR_SIZE    (0x0000000000001020ull)
+#define CVMX_NPEI_PKT_INT_LEVELS    (0x0000000000001100ull)
+#define CVMX_NPEI_PKT_IN_BP	    (0x00000000000006B0ull)
+#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset)                                    \
+	(0x0000000000002000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_IN_INSTR_COUNTS (0x00000000000006A0ull)
+#define CVMX_NPEI_PKT_IN_PCIE_PORT    (0x00000000000011A0ull)
+#define CVMX_NPEI_PKT_IPTR	      (0x0000000000001070ull)
+#define CVMX_NPEI_PKT_OUTPUT_WMARK    (0x0000000000001160ull)
+#define CVMX_NPEI_PKT_OUT_BMODE	      (0x00000000000010D0ull)
+#define CVMX_NPEI_PKT_OUT_ENB	      (0x0000000000001010ull)
+#define CVMX_NPEI_PKT_PCIE_PORT	      (0x00000000000010E0ull)
+#define CVMX_NPEI_PKT_PORT_IN_RST     (0x0000000000000690ull)
+#define CVMX_NPEI_PKT_SLIST_ES	      (0x0000000000001050ull)
+#define CVMX_NPEI_PKT_SLIST_ID_SIZE   (0x0000000000001180ull)
+#define CVMX_NPEI_PKT_SLIST_NS	      (0x0000000000001040ull)
+#define CVMX_NPEI_PKT_SLIST_ROR	      (0x0000000000001030ull)
+#define CVMX_NPEI_PKT_TIME_INT	      (0x0000000000001120ull)
+#define CVMX_NPEI_PKT_TIME_INT_ENB    (0x0000000000001140ull)
+#define CVMX_NPEI_RSL_INT_BLOCKS      (0x0000000000000520ull)
+#define CVMX_NPEI_SCRATCH_1	      (0x0000000000000270ull)
+#define CVMX_NPEI_STATE1	      (0x0000000000000620ull)
+#define CVMX_NPEI_STATE2	      (0x0000000000000630ull)
+#define CVMX_NPEI_STATE3	      (0x0000000000000640ull)
+#define CVMX_NPEI_WINDOW_CTL	      (0x0000000000000380ull)
+#define CVMX_NPEI_WIN_RD_ADDR	      (0x0000000000000210ull)
+#define CVMX_NPEI_WIN_RD_DATA	      (0x0000000000000240ull)
+#define CVMX_NPEI_WIN_WR_ADDR	      (0x0000000000000200ull)
+#define CVMX_NPEI_WIN_WR_DATA	      (0x0000000000000220ull)
+#define CVMX_NPEI_WIN_WR_MASK	      (0x0000000000000230ull)
+
+/**
+ * cvmx_npei_bar1_index#
+ *
+ * Total Address is 16Kb; 0x0000 - 0x3fff, 0x000 - 0x7fe(Reg, every other 8B)
+ *
+ * General  5kb; 0x0000 - 0x13ff, 0x000 - 0x27e(Reg-General)
+ * PktMem  10Kb; 0x1400 - 0x3bff, 0x280 - 0x77e(Reg-General-Packet)
+ * Rsvd     1Kb; 0x3c00 - 0x3fff, 0x780 - 0x7fe(Reg-NCB Only Mode)
+ *                                   == NPEI_PKT_CNT_INT_ENB[PORT]
+ *                                   == NPEI_PKT_TIME_INT_ENB[PORT]
+ *                                   == NPEI_PKT_CNT_INT[PORT]
+ *                                   == NPEI_PKT_TIME_INT[PORT]
+ *                                   == NPEI_PKT_PCIE_PORT[PP]
+ *                                   == NPEI_PKT_SLIST_ROR[ROR]
+ *                                   == NPEI_PKT_SLIST_ROR[NSR] ?
+ *                                   == NPEI_PKT_SLIST_ES[ES]
+ *                                   == NPEI_PKTn_SLIST_BAOFF_DBELL[AOFF]
+ *                                   == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ *                                   == NPEI_PKTn_CNTS[CNT]
+ * NPEI_CTL_STATUS[OUTn_ENB]         == NPEI_PKT_OUT_ENB[ENB]
+ * NPEI_BASE_ADDRESS_OUTPUTn[BADDR]  == NPEI_PKTn_SLIST_BADDR[ADDR]
+ * NPEI_DESC_OUTPUTn[SIZE]           == NPEI_PKTn_SLIST_FIFO_RSIZE[RSIZE]
+ * NPEI_Pn_DBPAIR_ADDR[NADDR]        == NPEI_PKTn_SLIST_BADDR[ADDR] +
+ *                                      NPEI_PKTn_SLIST_BAOFF_DBELL[AOFF]
+ * NPEI_PKT_CREDITSn[PTR_CNT]        == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ * NPEI_P0_PAIR_CNTS[AVAIL]          == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL]
+ * NPEI_P0_PAIR_CNTS[FCNT]           ==
+ * NPEI_PKTS_SENTn[PKT_CNT]          == NPEI_PKTn_CNTS[CNT]
+ * NPEI_OUTPUT_CONTROL[Pn_BMODE]     == NPEI_PKT_OUT_BMODE[BMODE]
+ * NPEI_PKT_CREDITSn[PKT_CNT]        == NPEI_PKTn_CNTS[CNT]
+ * NPEI_BUFF_SIZE_OUTPUTn[BSIZE]     == NPEI_PKT_SLIST_ID_SIZE[BSIZE]
+ * NPEI_BUFF_SIZE_OUTPUTn[ISIZE]     == NPEI_PKT_SLIST_ID_SIZE[ISIZE]
+ * NPEI_OUTPUT_CONTROL[On_CSRM]      == NPEI_PKT_DPADDR[DPTR] &
+ *                                      NPEI_PKT_OUT_USE_IPTR[PORT]
+ * NPEI_OUTPUT_CONTROL[On_ES]        == NPEI_PKT_DATA_OUT_ES[ES]
+ * NPEI_OUTPUT_CONTROL[On_NS]        == NPEI_PKT_DATA_OUT_NS[NSR] ?
+ * NPEI_OUTPUT_CONTROL[On_RO]        == NPEI_PKT_DATA_OUT_ROR[ROR]
+ * NPEI_PKTS_SENT_INT_LEVn[PKT_CNT]  == NPEI_PKT_INT_LEVELS[CNT]
+ * NPEI_PKTS_SENT_TIMEn[PKT_TIME]    == NPEI_PKT_INT_LEVELS[TIME]
+ * NPEI_OUTPUT_CONTROL[IPTR_On]      == NPEI_PKT_IPTR[IPTR]
+ * NPEI_PCIE_PORT_OUTPUT[]           == NPEI_PKT_PCIE_PORT[PP]
+ *
+ *                  NPEI_BAR1_INDEXX = NPEI BAR1 IndexX Register
+ *
+ * Contains address index and control bits for access to memory ranges of
+ * BAR-1. The index is built from supplied address bits [25:22].
+ * NPEI_BAR1_INDEX0 through NPEI_BAR1_INDEX15 are used for transactions
+ * originating from PCIE-PORT0, and NPEI_BAR1_INDEX16
+ * through NPEI_BAR1_INDEX31 are used for transactions originating from
+ * PCIE-PORT1.
+ */
+union cvmx_npei_bar1_indexx {
+	u32 u32;
+	struct cvmx_npei_bar1_indexx_s {
+		u32 reserved_18_31 : 14;
+		u32 addr_idx : 14;
+		u32 ca : 1;
+		u32 end_swp : 2;
+		u32 addr_v : 1;
+	} s;
+	struct cvmx_npei_bar1_indexx_s cn52xx;
+	struct cvmx_npei_bar1_indexx_s cn52xxp1;
+	struct cvmx_npei_bar1_indexx_s cn56xx;
+	struct cvmx_npei_bar1_indexx_s cn56xxp1;
+};
+
+typedef union cvmx_npei_bar1_indexx cvmx_npei_bar1_indexx_t;
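+
+/*
+ * Usage sketch (illustrative; field values are placeholders): a BAR1
+ * index entry is normally built up in a local union and written out as
+ * one 32-bit value:
+ *
+ *	cvmx_npei_bar1_indexx_t idx;
+ *
+ *	idx.u32 = 0;
+ *	idx.s.addr_idx = target_addr >> 22;	/* 4MB-aligned target */
+ *	idx.s.end_swp = 1;	/* endian-swap mode, placeholder */
+ *	idx.s.addr_v = 1;	/* mark the entry valid */
+ */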
+
+/**
+ * cvmx_npei_bist_status
+ *
+ * NPEI_BIST_STATUS = NPEI's BIST Status Register
+ *
+ * Results from BIST runs of NPEI's memories.
+ */
+union cvmx_npei_bist_status {
+	u64 u64;
+	struct cvmx_npei_bist_status_s {
+		u64 pkt_rdf : 1;
+		u64 reserved_60_62 : 3;
+		u64 pcr_gim : 1;
+		u64 pkt_pif : 1;
+		u64 pcsr_int : 1;
+		u64 pcsr_im : 1;
+		u64 pcsr_cnt : 1;
+		u64 pcsr_id : 1;
+		u64 pcsr_sl : 1;
+		u64 reserved_50_52 : 3;
+		u64 pkt_ind : 1;
+		u64 pkt_slm : 1;
+		u64 reserved_36_47 : 12;
+		u64 d0_pst : 1;
+		u64 d1_pst : 1;
+		u64 d2_pst : 1;
+		u64 d3_pst : 1;
+		u64 reserved_31_31 : 1;
+		u64 n2p0_c : 1;
+		u64 n2p0_o : 1;
+		u64 n2p1_c : 1;
+		u64 n2p1_o : 1;
+		u64 cpl_p0 : 1;
+		u64 cpl_p1 : 1;
+		u64 p2n1_po : 1;
+		u64 p2n1_no : 1;
+		u64 p2n1_co : 1;
+		u64 p2n0_po : 1;
+		u64 p2n0_no : 1;
+		u64 p2n0_co : 1;
+		u64 p2n0_c0 : 1;
+		u64 p2n0_c1 : 1;
+		u64 p2n0_n : 1;
+		u64 p2n0_p0 : 1;
+		u64 p2n0_p1 : 1;
+		u64 p2n1_c0 : 1;
+		u64 p2n1_c1 : 1;
+		u64 p2n1_n : 1;
+		u64 p2n1_p0 : 1;
+		u64 p2n1_p1 : 1;
+		u64 csm0 : 1;
+		u64 csm1 : 1;
+		u64 dif0 : 1;
+		u64 dif1 : 1;
+		u64 dif2 : 1;
+		u64 dif3 : 1;
+		u64 reserved_2_2 : 1;
+		u64 msi : 1;
+		u64 ncb_cmd : 1;
+	} s;
+	struct cvmx_npei_bist_status_cn52xx {
+		u64 pkt_rdf : 1;
+		u64 reserved_60_62 : 3;
+		u64 pcr_gim : 1;
+		u64 pkt_pif : 1;
+		u64 pcsr_int : 1;
+		u64 pcsr_im : 1;
+		u64 pcsr_cnt : 1;
+		u64 pcsr_id : 1;
+		u64 pcsr_sl : 1;
+		u64 pkt_imem : 1;
+		u64 pkt_pfm : 1;
+		u64 pkt_pof : 1;
+		u64 reserved_48_49 : 2;
+		u64 pkt_pop0 : 1;
+		u64 pkt_pop1 : 1;
+		u64 d0_mem : 1;
+		u64 d1_mem : 1;
+		u64 d2_mem : 1;
+		u64 d3_mem : 1;
+		u64 d4_mem : 1;
+		u64 ds_mem : 1;
+		u64 reserved_36_39 : 4;
+		u64 d0_pst : 1;
+		u64 d1_pst : 1;
+		u64 d2_pst : 1;
+		u64 d3_pst : 1;
+		u64 d4_pst : 1;
+		u64 n2p0_c : 1;
+		u64 n2p0_o : 1;
+		u64 n2p1_c : 1;
+		u64 n2p1_o : 1;
+		u64 cpl_p0 : 1;
+		u64 cpl_p1 : 1;
+		u64 p2n1_po : 1;
+		u64 p2n1_no : 1;
+		u64 p2n1_co : 1;
+		u64 p2n0_po : 1;
+		u64 p2n0_no : 1;
+		u64 p2n0_co : 1;
+		u64 p2n0_c0 : 1;
+		u64 p2n0_c1 : 1;
+		u64 p2n0_n : 1;
+		u64 p2n0_p0 : 1;
+		u64 p2n0_p1 : 1;
+		u64 p2n1_c0 : 1;
+		u64 p2n1_c1 : 1;
+		u64 p2n1_n : 1;
+		u64 p2n1_p0 : 1;
+		u64 p2n1_p1 : 1;
+		u64 csm0 : 1;
+		u64 csm1 : 1;
+		u64 dif0 : 1;
+		u64 dif1 : 1;
+		u64 dif2 : 1;
+		u64 dif3 : 1;
+		u64 dif4 : 1;
+		u64 msi : 1;
+		u64 ncb_cmd : 1;
+	} cn52xx;
+	struct cvmx_npei_bist_status_cn52xxp1 {
+		u64 reserved_46_63 : 18;
+		u64 d0_mem0 : 1;
+		u64 d1_mem1 : 1;
+		u64 d2_mem2 : 1;
+		u64 d3_mem3 : 1;
+		u64 dr0_mem : 1;
+		u64 d0_mem : 1;
+		u64 d1_mem : 1;
+		u64 d2_mem : 1;
+		u64 d3_mem : 1;
+		u64 dr1_mem : 1;
+		u64 d0_pst : 1;
+		u64 d1_pst : 1;
+		u64 d2_pst : 1;
+		u64 d3_pst : 1;
+		u64 dr2_mem : 1;
+		u64 n2p0_c : 1;
+		u64 n2p0_o : 1;
+		u64 n2p1_c : 1;
+		u64 n2p1_o : 1;
+		u64 cpl_p0 : 1;
+		u64 cpl_p1 : 1;
+		u64 p2n1_po : 1;
+		u64 p2n1_no : 1;
+		u64 p2n1_co : 1;
+		u64 p2n0_po : 1;
+		u64 p2n0_no : 1;
+		u64 p2n0_co : 1;
+		u64 p2n0_c0 : 1;
+		u64 p2n0_c1 : 1;
+		u64 p2n0_n : 1;
+		u64 p2n0_p0 : 1;
+		u64 p2n0_p1 : 1;
+		u64 p2n1_c0 : 1;
+		u64 p2n1_c1 : 1;
+		u64 p2n1_n : 1;
+		u64 p2n1_p0 : 1;
+		u64 p2n1_p1 : 1;
+		u64 csm0 : 1;
+		u64 csm1 : 1;
+		u64 dif0 : 1;
+		u64 dif1 : 1;
+		u64 dif2 : 1;
+		u64 dif3 : 1;
+		u64 dr3_mem : 1;
+		u64 msi : 1;
+		u64 ncb_cmd : 1;
+	} cn52xxp1;
+	struct cvmx_npei_bist_status_cn52xx cn56xx;
+	struct cvmx_npei_bist_status_cn56xxp1 {
+		u64 reserved_58_63 : 6;
+		u64 pcsr_int : 1;
+		u64 pcsr_im : 1;
+		u64 pcsr_cnt : 1;
+		u64 pcsr_id : 1;
+		u64 pcsr_sl : 1;
+		u64 pkt_pout : 1;
+		u64 pkt_imem : 1;
+		u64 pkt_cntm : 1;
+		u64 pkt_ind : 1;
+		u64 pkt_slm : 1;
+		u64 pkt_odf : 1;
+		u64 pkt_oif : 1;
+		u64 pkt_out : 1;
+		u64 pkt_i0 : 1;
+		u64 pkt_i1 : 1;
+		u64 pkt_s0 : 1;
+		u64 pkt_s1 : 1;
+		u64 d0_mem : 1;
+		u64 d1_mem : 1;
+		u64 d2_mem : 1;
+		u64 d3_mem : 1;
+		u64 d4_mem : 1;
+		u64 d0_pst : 1;
+		u64 d1_pst : 1;
+		u64 d2_pst : 1;
+		u64 d3_pst : 1;
+		u64 d4_pst : 1;
+		u64 n2p0_c : 1;
+		u64 n2p0_o : 1;
+		u64 n2p1_c : 1;
+		u64 n2p1_o : 1;
+		u64 cpl_p0 : 1;
+		u64 cpl_p1 : 1;
+		u64 p2n1_po : 1;
+		u64 p2n1_no : 1;
+		u64 p2n1_co : 1;
+		u64 p2n0_po : 1;
+		u64 p2n0_no : 1;
+		u64 p2n0_co : 1;
+		u64 p2n0_c0 : 1;
+		u64 p2n0_c1 : 1;
+		u64 p2n0_n : 1;
+		u64 p2n0_p0 : 1;
+		u64 p2n0_p1 : 1;
+		u64 p2n1_c0 : 1;
+		u64 p2n1_c1 : 1;
+		u64 p2n1_n : 1;
+		u64 p2n1_p0 : 1;
+		u64 p2n1_p1 : 1;
+		u64 csm0 : 1;
+		u64 csm1 : 1;
+		u64 dif0 : 1;
+		u64 dif1 : 1;
+		u64 dif2 : 1;
+		u64 dif3 : 1;
+		u64 dif4 : 1;
+		u64 msi : 1;
+		u64 ncb_cmd : 1;
+	} cn56xxp1;
+};
+
+typedef union cvmx_npei_bist_status cvmx_npei_bist_status_t;
+
+/**
+ * cvmx_npei_bist_status2
+ *
+ * NPEI_BIST_STATUS2 = NPEI's BIST Status Register 2
+ *
+ * Results from BIST runs of NPEI's memories.
+ */
+union cvmx_npei_bist_status2 {
+	u64 u64;
+	struct cvmx_npei_bist_status2_s {
+		u64 reserved_14_63 : 50;
+		u64 prd_tag : 1;
+		u64 prd_st0 : 1;
+		u64 prd_st1 : 1;
+		u64 prd_err : 1;
+		u64 nrd_st : 1;
+		u64 nwe_st : 1;
+		u64 nwe_wr0 : 1;
+		u64 nwe_wr1 : 1;
+		u64 pkt_rd : 1;
+		u64 psc_p0 : 1;
+		u64 psc_p1 : 1;
+		u64 pkt_gd : 1;
+		u64 pkt_gl : 1;
+		u64 pkt_blk : 1;
+	} s;
+	struct cvmx_npei_bist_status2_s cn52xx;
+	struct cvmx_npei_bist_status2_s cn56xx;
+};
+
+typedef union cvmx_npei_bist_status2 cvmx_npei_bist_status2_t;
+
+/**
+ * cvmx_npei_ctl_port0
+ *
+ * NPEI_CTL_PORT0 = NPEI's Control Port 0
+ *
+ * Contains access control for Port0.
+ */
+union cvmx_npei_ctl_port0 {
+	u64 u64;
+	struct cvmx_npei_ctl_port0_s {
+		u64 reserved_21_63 : 43;
+		u64 waitl_com : 1;
+		u64 intd : 1;
+		u64 intc : 1;
+		u64 intb : 1;
+		u64 inta : 1;
+		u64 intd_map : 2;
+		u64 intc_map : 2;
+		u64 intb_map : 2;
+		u64 inta_map : 2;
+		u64 ctlp_ro : 1;
+		u64 reserved_6_6 : 1;
+		u64 ptlp_ro : 1;
+		u64 bar2_enb : 1;
+		u64 bar2_esx : 2;
+		u64 bar2_cax : 1;
+		u64 wait_com : 1;
+	} s;
+	struct cvmx_npei_ctl_port0_s cn52xx;
+	struct cvmx_npei_ctl_port0_s cn52xxp1;
+	struct cvmx_npei_ctl_port0_s cn56xx;
+	struct cvmx_npei_ctl_port0_s cn56xxp1;
+};
+
+typedef union cvmx_npei_ctl_port0 cvmx_npei_ctl_port0_t;
+
+/**
+ * cvmx_npei_ctl_port1
+ *
+ * NPEI_CTL_PORT1 = NPEI's Control Port1
+ *
+ * Contains access control for Port1.
+ */
+union cvmx_npei_ctl_port1 {
+	u64 u64;
+	struct cvmx_npei_ctl_port1_s {
+		u64 reserved_21_63 : 43;
+		u64 waitl_com : 1;
+		u64 intd : 1;
+		u64 intc : 1;
+		u64 intb : 1;
+		u64 inta : 1;
+		u64 intd_map : 2;
+		u64 intc_map : 2;
+		u64 intb_map : 2;
+		u64 inta_map : 2;
+		u64 ctlp_ro : 1;
+		u64 reserved_6_6 : 1;
+		u64 ptlp_ro : 1;
+		u64 bar2_enb : 1;
+		u64 bar2_esx : 2;
+		u64 bar2_cax : 1;
+		u64 wait_com : 1;
+	} s;
+	struct cvmx_npei_ctl_port1_s cn52xx;
+	struct cvmx_npei_ctl_port1_s cn52xxp1;
+	struct cvmx_npei_ctl_port1_s cn56xx;
+	struct cvmx_npei_ctl_port1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_ctl_port1 cvmx_npei_ctl_port1_t;
+
+/**
+ * cvmx_npei_ctl_status
+ *
+ * NPEI_CTL_STATUS = NPEI Control Status Register
+ *
+ * Contains control and status for NPEI. Writes to this register are not
+ * ordered with writes/reads to the PCIe memory space.
+ * To ensure that a write has completed, the user must read the register
+ * before making an access (i.e. to PCIe memory space)
+ * that requires the value of this register to be updated.
+ */
+union cvmx_npei_ctl_status {
+	u64 u64;
+	struct cvmx_npei_ctl_status_s {
+		u64 reserved_44_63 : 20;
+		u64 p1_ntags : 6;
+		u64 p0_ntags : 6;
+		u64 cfg_rtry : 16;
+		u64 ring_en : 1;
+		u64 lnk_rst : 1;
+		u64 arb : 1;
+		u64 pkt_bp : 4;
+		u64 host_mode : 1;
+		u64 chip_rev : 8;
+	} s;
+	struct cvmx_npei_ctl_status_s cn52xx;
+	struct cvmx_npei_ctl_status_cn52xxp1 {
+		u64 reserved_44_63 : 20;
+		u64 p1_ntags : 6;
+		u64 p0_ntags : 6;
+		u64 cfg_rtry : 16;
+		u64 reserved_15_15 : 1;
+		u64 lnk_rst : 1;
+		u64 arb : 1;
+		u64 reserved_9_12 : 4;
+		u64 host_mode : 1;
+		u64 chip_rev : 8;
+	} cn52xxp1;
+	struct cvmx_npei_ctl_status_s cn56xx;
+	struct cvmx_npei_ctl_status_cn56xxp1 {
+		u64 reserved_15_63 : 49;
+		u64 lnk_rst : 1;
+		u64 arb : 1;
+		u64 pkt_bp : 4;
+		u64 host_mode : 1;
+		u64 chip_rev : 8;
+	} cn56xxp1;
+};
+
+typedef union cvmx_npei_ctl_status cvmx_npei_ctl_status_t;
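+
+/*
+ * Illustrative sketch, not part of the original register set: since
+ * writes to NPEI_CTL_STATUS are not ordered with PCIe memory-space
+ * traffic, the description above requires a read-back before any access
+ * that depends on the new value.  The csr_wr()/csr_rd() accessor names
+ * are assumed to be the usual Octeon CSR helpers.
+ */
+static inline void cvmx_npei_ctl_status_write_sync(u64 csr_addr, u64 val)
+{
+	csr_wr(csr_addr, val);
+	/* Read back to force completion of the write */
+	(void)csr_rd(csr_addr);
+}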
+
+/**
+ * cvmx_npei_ctl_status2
+ *
+ * NPEI_CTL_STATUS2 = NPEI's Control Status2 Register
+ *
+ * Contains control and status for NPEI.
+ * Writes to this register are not ordered with writes/reads to the PCI
+ * Memory space.
+ * To ensure that a write has completed, the user must read the register before
+ * making an access (i.e. PCI memory space) that requires the value of this
+ * register to be updated.
+ */
+union cvmx_npei_ctl_status2 {
+	u64 u64;
+	struct cvmx_npei_ctl_status2_s {
+		u64 reserved_16_63 : 48;
+		u64 mps : 1;
+		u64 mrrs : 3;
+		u64 c1_w_flt : 1;
+		u64 c0_w_flt : 1;
+		u64 c1_b1_s : 3;
+		u64 c0_b1_s : 3;
+		u64 c1_wi_d : 1;
+		u64 c1_b0_d : 1;
+		u64 c0_wi_d : 1;
+		u64 c0_b0_d : 1;
+	} s;
+	struct cvmx_npei_ctl_status2_s cn52xx;
+	struct cvmx_npei_ctl_status2_s cn52xxp1;
+	struct cvmx_npei_ctl_status2_s cn56xx;
+	struct cvmx_npei_ctl_status2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_ctl_status2 cvmx_npei_ctl_status2_t;
+
+/**
+ * cvmx_npei_data_out_cnt
+ *
+ * NPEI_DATA_OUT_CNT = NPEI DATA OUT COUNT
+ *
+ * The EXEC data-out FIFO count and the data unload counter.
+ */
+union cvmx_npei_data_out_cnt {
+	u64 u64;
+	struct cvmx_npei_data_out_cnt_s {
+		u64 reserved_44_63 : 20;
+		u64 p1_ucnt : 16;
+		u64 p1_fcnt : 6;
+		u64 p0_ucnt : 16;
+		u64 p0_fcnt : 6;
+	} s;
+	struct cvmx_npei_data_out_cnt_s cn52xx;
+	struct cvmx_npei_data_out_cnt_s cn52xxp1;
+	struct cvmx_npei_data_out_cnt_s cn56xx;
+	struct cvmx_npei_data_out_cnt_s cn56xxp1;
+};
+
+typedef union cvmx_npei_data_out_cnt cvmx_npei_data_out_cnt_t;
+
+/**
+ * cvmx_npei_dbg_data
+ *
+ * NPEI_DBG_DATA = NPEI Debug Data Register
+ *
+ * Value returned on the debug-data lines from the RSLs
+ */
+union cvmx_npei_dbg_data {
+	u64 u64;
+	struct cvmx_npei_dbg_data_s {
+		u64 reserved_28_63 : 36;
+		u64 qlm0_rev_lanes : 1;
+		u64 reserved_25_26 : 2;
+		u64 qlm1_spd : 2;
+		u64 c_mul : 5;
+		u64 dsel_ext : 1;
+		u64 data : 17;
+	} s;
+	struct cvmx_npei_dbg_data_cn52xx {
+		u64 reserved_29_63 : 35;
+		u64 qlm0_link_width : 1;
+		u64 qlm0_rev_lanes : 1;
+		u64 qlm1_mode : 2;
+		u64 qlm1_spd : 2;
+		u64 c_mul : 5;
+		u64 dsel_ext : 1;
+		u64 data : 17;
+	} cn52xx;
+	struct cvmx_npei_dbg_data_cn52xx cn52xxp1;
+	struct cvmx_npei_dbg_data_cn56xx {
+		u64 reserved_29_63 : 35;
+		u64 qlm2_rev_lanes : 1;
+		u64 qlm0_rev_lanes : 1;
+		u64 qlm3_spd : 2;
+		u64 qlm1_spd : 2;
+		u64 c_mul : 5;
+		u64 dsel_ext : 1;
+		u64 data : 17;
+	} cn56xx;
+	struct cvmx_npei_dbg_data_cn56xx cn56xxp1;
+};
+
+typedef union cvmx_npei_dbg_data cvmx_npei_dbg_data_t;
+
+/**
+ * cvmx_npei_dbg_select
+ *
+ * NPEI_DBG_SELECT = Debug Select Register
+ *
+ * Contains the debug select value last written to the RSLs.
+ */
+union cvmx_npei_dbg_select {
+	u64 u64;
+	struct cvmx_npei_dbg_select_s {
+		u64 reserved_16_63 : 48;
+		u64 dbg_sel : 16;
+	} s;
+	struct cvmx_npei_dbg_select_s cn52xx;
+	struct cvmx_npei_dbg_select_s cn52xxp1;
+	struct cvmx_npei_dbg_select_s cn56xx;
+	struct cvmx_npei_dbg_select_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dbg_select cvmx_npei_dbg_select_t;
+
+/**
+ * cvmx_npei_dma#_counts
+ *
+ * NPEI_DMA[0..4]_COUNTS = DMA Instruction Counts
+ *
+ * Values for determining the number of instructions for DMA[0..4] in the NPEI.
+ */
+union cvmx_npei_dmax_counts {
+	u64 u64;
+	struct cvmx_npei_dmax_counts_s {
+		u64 reserved_39_63 : 25;
+		u64 fcnt : 7;
+		u64 dbell : 32;
+	} s;
+	struct cvmx_npei_dmax_counts_s cn52xx;
+	struct cvmx_npei_dmax_counts_s cn52xxp1;
+	struct cvmx_npei_dmax_counts_s cn56xx;
+	struct cvmx_npei_dmax_counts_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dmax_counts cvmx_npei_dmax_counts_t;
+
+/**
+ * cvmx_npei_dma#_dbell
+ *
+ * NPEI_DMA_DBELL[0..4] = DMA Doorbell
+ *
+ * The doorbell register for the DMA[0..4] queue.
+ */
+union cvmx_npei_dmax_dbell {
+	u32 u32;
+	struct cvmx_npei_dmax_dbell_s {
+		u32 reserved_16_31 : 16;
+		u32 dbell : 16;
+	} s;
+	struct cvmx_npei_dmax_dbell_s cn52xx;
+	struct cvmx_npei_dmax_dbell_s cn52xxp1;
+	struct cvmx_npei_dmax_dbell_s cn56xx;
+	struct cvmx_npei_dmax_dbell_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dmax_dbell cvmx_npei_dmax_dbell_t;
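+
+/*
+ * Illustrative sketch, not part of the original register set: ringing a
+ * DMA doorbell.  After adding instructions to a DMA queue, software
+ * writes the number of new instructions to the 16-bit DBELL field.  The
+ * per-queue register address is passed in by the caller, and csr_wr()
+ * is assumed to be usable for this 32-bit register.
+ */
+static inline void cvmx_npei_dma_ring_doorbell(u64 dbell_addr, unsigned int count)
+{
+	cvmx_npei_dmax_dbell_t dbell;
+
+	dbell.u32 = 0;
+	dbell.s.dbell = count & 0xffff;	/* number of new DMA instructions */
+	csr_wr(dbell_addr, dbell.u32);
+}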
+
+/**
+ * cvmx_npei_dma#_ibuff_saddr
+ *
+ * NPEI_DMA[0..4]_IBUFF_SADDR = DMA Instruction Buffer Starting Address
+ *
+ * The address to start reading Instructions from for DMA[0..4].
+ */
+union cvmx_npei_dmax_ibuff_saddr {
+	u64 u64;
+	struct cvmx_npei_dmax_ibuff_saddr_s {
+		u64 reserved_37_63 : 27;
+		u64 idle : 1;
+		u64 saddr : 29;
+		u64 reserved_0_6 : 7;
+	} s;
+	struct cvmx_npei_dmax_ibuff_saddr_s cn52xx;
+	struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 {
+		u64 reserved_36_63 : 28;
+		u64 saddr : 29;
+		u64 reserved_0_6 : 7;
+	} cn52xxp1;
+	struct cvmx_npei_dmax_ibuff_saddr_s cn56xx;
+	struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 cn56xxp1;
+};
+
+typedef union cvmx_npei_dmax_ibuff_saddr cvmx_npei_dmax_ibuff_saddr_t;
+
+/**
+ * cvmx_npei_dma#_naddr
+ *
+ * NPEI_DMA[0..4]_NADDR = DMA Next Ichunk Address
+ *
+ * Address from which NPEI will read the next Ichunk data. This is valid when
+ * the state is 0.
+ */
+union cvmx_npei_dmax_naddr {
+	u64 u64;
+	struct cvmx_npei_dmax_naddr_s {
+		u64 reserved_36_63 : 28;
+		u64 addr : 36;
+	} s;
+	struct cvmx_npei_dmax_naddr_s cn52xx;
+	struct cvmx_npei_dmax_naddr_s cn52xxp1;
+	struct cvmx_npei_dmax_naddr_s cn56xx;
+	struct cvmx_npei_dmax_naddr_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dmax_naddr cvmx_npei_dmax_naddr_t;
+
+/**
+ * cvmx_npei_dma0_int_level
+ *
+ * NPEI_DMA0_INT_LEVEL = NPEI DMA0 Interrupt Level
+ *
+ * Thresholds for DMA count and timer interrupts for DMA0.
+ */
+union cvmx_npei_dma0_int_level {
+	u64 u64;
+	struct cvmx_npei_dma0_int_level_s {
+		u64 time : 32;
+		u64 cnt : 32;
+	} s;
+	struct cvmx_npei_dma0_int_level_s cn52xx;
+	struct cvmx_npei_dma0_int_level_s cn52xxp1;
+	struct cvmx_npei_dma0_int_level_s cn56xx;
+	struct cvmx_npei_dma0_int_level_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma0_int_level cvmx_npei_dma0_int_level_t;
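+
+/*
+ * Illustrative sketch, not part of the original register set: programming
+ * the DMA0 count and timer thresholds described above.  The register
+ * address is passed in rather than assuming a particular address macro.
+ */
+static inline void cvmx_npei_dma0_set_int_level(u64 csr_addr, u32 cnt, u32 time)
+{
+	cvmx_npei_dma0_int_level_t level;
+
+	level.u64 = 0;
+	level.s.cnt = cnt;	/* DMA count threshold */
+	level.s.time = time;	/* timer threshold */
+	csr_wr(csr_addr, level.u64);
+}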
+
+/**
+ * cvmx_npei_dma1_int_level
+ *
+ * NPEI_DMA1_INT_LEVEL = NPEI DMA1 Interrupt Level
+ *
+ * Thresholds for DMA count and timer interrupts for DMA1.
+ */
+union cvmx_npei_dma1_int_level {
+	u64 u64;
+	struct cvmx_npei_dma1_int_level_s {
+		u64 time : 32;
+		u64 cnt : 32;
+	} s;
+	struct cvmx_npei_dma1_int_level_s cn52xx;
+	struct cvmx_npei_dma1_int_level_s cn52xxp1;
+	struct cvmx_npei_dma1_int_level_s cn56xx;
+	struct cvmx_npei_dma1_int_level_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma1_int_level cvmx_npei_dma1_int_level_t;
+
+/**
+ * cvmx_npei_dma_cnts
+ *
+ * NPEI_DMA_CNTS = NPEI DMA Count
+ *
+ * The DMA Count values for DMA0 and DMA1.
+ */
+union cvmx_npei_dma_cnts {
+	u64 u64;
+	struct cvmx_npei_dma_cnts_s {
+		u64 dma1 : 32;
+		u64 dma0 : 32;
+	} s;
+	struct cvmx_npei_dma_cnts_s cn52xx;
+	struct cvmx_npei_dma_cnts_s cn52xxp1;
+	struct cvmx_npei_dma_cnts_s cn56xx;
+	struct cvmx_npei_dma_cnts_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_cnts cvmx_npei_dma_cnts_t;
+
+/**
+ * cvmx_npei_dma_control
+ *
+ * NPEI_DMA_CONTROL = DMA Control Register
+ *
+ * Controls operation of the DMA IN/OUT.
+ */
+union cvmx_npei_dma_control {
+	u64 u64;
+	struct cvmx_npei_dma_control_s {
+		u64 reserved_40_63 : 24;
+		u64 p_32b_m : 1;
+		u64 dma4_enb : 1;
+		u64 dma3_enb : 1;
+		u64 dma2_enb : 1;
+		u64 dma1_enb : 1;
+		u64 dma0_enb : 1;
+		u64 b0_lend : 1;
+		u64 dwb_denb : 1;
+		u64 dwb_ichk : 9;
+		u64 fpa_que : 3;
+		u64 o_add1 : 1;
+		u64 o_ro : 1;
+		u64 o_ns : 1;
+		u64 o_es : 2;
+		u64 o_mode : 1;
+		u64 csize : 14;
+	} s;
+	struct cvmx_npei_dma_control_s cn52xx;
+	struct cvmx_npei_dma_control_cn52xxp1 {
+		u64 reserved_38_63 : 26;
+		u64 dma3_enb : 1;
+		u64 dma2_enb : 1;
+		u64 dma1_enb : 1;
+		u64 dma0_enb : 1;
+		u64 b0_lend : 1;
+		u64 dwb_denb : 1;
+		u64 dwb_ichk : 9;
+		u64 fpa_que : 3;
+		u64 o_add1 : 1;
+		u64 o_ro : 1;
+		u64 o_ns : 1;
+		u64 o_es : 2;
+		u64 o_mode : 1;
+		u64 csize : 14;
+	} cn52xxp1;
+	struct cvmx_npei_dma_control_s cn56xx;
+	struct cvmx_npei_dma_control_cn56xxp1 {
+		u64 reserved_39_63 : 25;
+		u64 dma4_enb : 1;
+		u64 dma3_enb : 1;
+		u64 dma2_enb : 1;
+		u64 dma1_enb : 1;
+		u64 dma0_enb : 1;
+		u64 b0_lend : 1;
+		u64 dwb_denb : 1;
+		u64 dwb_ichk : 9;
+		u64 fpa_que : 3;
+		u64 o_add1 : 1;
+		u64 o_ro : 1;
+		u64 o_ns : 1;
+		u64 o_es : 2;
+		u64 o_mode : 1;
+		u64 csize : 14;
+	} cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_control cvmx_npei_dma_control_t;
+
+/**
+ * cvmx_npei_dma_pcie_req_num
+ *
+ * NPEI_DMA_PCIE_REQ_NUM = NPEI DMA PCIE Outstanding Read Request Number
+ *
+ * Outstanding PCIe read request number for DMAs and Packet; the maximum
+ * number is 16.
+ */
+union cvmx_npei_dma_pcie_req_num {
+	u64 u64;
+	struct cvmx_npei_dma_pcie_req_num_s {
+		u64 dma_arb : 1;
+		u64 reserved_53_62 : 10;
+		u64 pkt_cnt : 5;
+		u64 reserved_45_47 : 3;
+		u64 dma4_cnt : 5;
+		u64 reserved_37_39 : 3;
+		u64 dma3_cnt : 5;
+		u64 reserved_29_31 : 3;
+		u64 dma2_cnt : 5;
+		u64 reserved_21_23 : 3;
+		u64 dma1_cnt : 5;
+		u64 reserved_13_15 : 3;
+		u64 dma0_cnt : 5;
+		u64 reserved_5_7 : 3;
+		u64 dma_cnt : 5;
+	} s;
+	struct cvmx_npei_dma_pcie_req_num_s cn52xx;
+	struct cvmx_npei_dma_pcie_req_num_s cn56xx;
+};
+
+typedef union cvmx_npei_dma_pcie_req_num cvmx_npei_dma_pcie_req_num_t;
+
+/**
+ * cvmx_npei_dma_state1
+ *
+ * NPEI_DMA_STATE1 = NPI's DMA State 1
+ *
+ * Results from DMA state register 1
+ */
+union cvmx_npei_dma_state1 {
+	u64 u64;
+	struct cvmx_npei_dma_state1_s {
+		u64 reserved_40_63 : 24;
+		u64 d4_dwe : 8;
+		u64 d3_dwe : 8;
+		u64 d2_dwe : 8;
+		u64 d1_dwe : 8;
+		u64 d0_dwe : 8;
+	} s;
+	struct cvmx_npei_dma_state1_s cn52xx;
+};
+
+typedef union cvmx_npei_dma_state1 cvmx_npei_dma_state1_t;
+
+/**
+ * cvmx_npei_dma_state1_p1
+ *
+ * NPEI_DMA_STATE1_P1 = NPEI DMA Request and Instruction State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state1_p1 {
+	u64 u64;
+	struct cvmx_npei_dma_state1_p1_s {
+		u64 reserved_60_63 : 4;
+		u64 d0_difst : 7;
+		u64 d1_difst : 7;
+		u64 d2_difst : 7;
+		u64 d3_difst : 7;
+		u64 d4_difst : 7;
+		u64 d0_reqst : 5;
+		u64 d1_reqst : 5;
+		u64 d2_reqst : 5;
+		u64 d3_reqst : 5;
+		u64 d4_reqst : 5;
+	} s;
+	struct cvmx_npei_dma_state1_p1_cn52xxp1 {
+		u64 reserved_60_63 : 4;
+		u64 d0_difst : 7;
+		u64 d1_difst : 7;
+		u64 d2_difst : 7;
+		u64 d3_difst : 7;
+		u64 reserved_25_31 : 7;
+		u64 d0_reqst : 5;
+		u64 d1_reqst : 5;
+		u64 d2_reqst : 5;
+		u64 d3_reqst : 5;
+		u64 reserved_0_4 : 5;
+	} cn52xxp1;
+	struct cvmx_npei_dma_state1_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state1_p1 cvmx_npei_dma_state1_p1_t;
+
+/**
+ * cvmx_npei_dma_state2
+ *
+ * NPEI_DMA_STATE2 = NPI's DMA State 2
+ *
+ * Results from DMA state register 2
+ */
+union cvmx_npei_dma_state2 {
+	u64 u64;
+	struct cvmx_npei_dma_state2_s {
+		u64 reserved_28_63 : 36;
+		u64 ndwe : 4;
+		u64 reserved_21_23 : 3;
+		u64 ndre : 5;
+		u64 reserved_10_15 : 6;
+		u64 prd : 10;
+	} s;
+	struct cvmx_npei_dma_state2_s cn52xx;
+};
+
+typedef union cvmx_npei_dma_state2 cvmx_npei_dma_state2_t;
+
+/**
+ * cvmx_npei_dma_state2_p1
+ *
+ * NPEI_DMA_STATE2_P1 = NPEI DMA Instruction Fetch State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state2_p1 {
+	u64 u64;
+	struct cvmx_npei_dma_state2_p1_s {
+		u64 reserved_45_63 : 19;
+		u64 d0_dffst : 9;
+		u64 d1_dffst : 9;
+		u64 d2_dffst : 9;
+		u64 d3_dffst : 9;
+		u64 d4_dffst : 9;
+	} s;
+	struct cvmx_npei_dma_state2_p1_cn52xxp1 {
+		u64 reserved_45_63 : 19;
+		u64 d0_dffst : 9;
+		u64 d1_dffst : 9;
+		u64 d2_dffst : 9;
+		u64 d3_dffst : 9;
+		u64 reserved_0_8 : 9;
+	} cn52xxp1;
+	struct cvmx_npei_dma_state2_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state2_p1 cvmx_npei_dma_state2_p1_t;
+
+/**
+ * cvmx_npei_dma_state3_p1
+ *
+ * NPEI_DMA_STATE3_P1 = NPEI DMA DRE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state3_p1 {
+	u64 u64;
+	struct cvmx_npei_dma_state3_p1_s {
+		u64 reserved_60_63 : 4;
+		u64 d0_drest : 15;
+		u64 d1_drest : 15;
+		u64 d2_drest : 15;
+		u64 d3_drest : 15;
+	} s;
+	struct cvmx_npei_dma_state3_p1_s cn52xxp1;
+	struct cvmx_npei_dma_state3_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state3_p1 cvmx_npei_dma_state3_p1_t;
+
+/**
+ * cvmx_npei_dma_state4_p1
+ *
+ * NPEI_DMA_STATE4_P1 = NPEI DMA DWE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state4_p1 {
+	u64 u64;
+	struct cvmx_npei_dma_state4_p1_s {
+		u64 reserved_52_63 : 12;
+		u64 d0_dwest : 13;
+		u64 d1_dwest : 13;
+		u64 d2_dwest : 13;
+		u64 d3_dwest : 13;
+	} s;
+	struct cvmx_npei_dma_state4_p1_s cn52xxp1;
+	struct cvmx_npei_dma_state4_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state4_p1 cvmx_npei_dma_state4_p1_t;
+
+/**
+ * cvmx_npei_dma_state5_p1
+ *
+ * NPEI_DMA_STATE5_P1 = NPEI DMA DWE and DRE State
+ *
+ * DMA engine Debug information.
+ */
+union cvmx_npei_dma_state5_p1 {
+	u64 u64;
+	struct cvmx_npei_dma_state5_p1_s {
+		u64 reserved_28_63 : 36;
+		u64 d4_drest : 15;
+		u64 d4_dwest : 13;
+	} s;
+	struct cvmx_npei_dma_state5_p1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_dma_state5_p1 cvmx_npei_dma_state5_p1_t;
+
+/**
+ * cvmx_npei_int_a_enb
+ *
+ * NPEI_INTERRUPT_A_ENB = NPI's Interrupt A Enable Register
+ *
+ * Used to allow the generation of interrupts (MSI/INTA) to the PCIe
+ * cores. Used to enable the various interrupting conditions of NPEI.
+ */
+union cvmx_npei_int_a_enb {
+	u64 u64;
+	struct cvmx_npei_int_a_enb_s {
+		u64 reserved_10_63 : 54;
+		u64 pout_err : 1;
+		u64 pin_bp : 1;
+		u64 p1_rdlk : 1;
+		u64 p0_rdlk : 1;
+		u64 pgl_err : 1;
+		u64 pdi_err : 1;
+		u64 pop_err : 1;
+		u64 pins_err : 1;
+		u64 dma1_cpl : 1;
+		u64 dma0_cpl : 1;
+	} s;
+	struct cvmx_npei_int_a_enb_s cn52xx;
+	struct cvmx_npei_int_a_enb_cn52xxp1 {
+		u64 reserved_2_63 : 62;
+		u64 dma1_cpl : 1;
+		u64 dma0_cpl : 1;
+	} cn52xxp1;
+	struct cvmx_npei_int_a_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_int_a_enb cvmx_npei_int_a_enb_t;
+
+/**
+ * cvmx_npei_int_a_enb2
+ *
+ * NPEI_INTERRUPT_A_ENB2 = NPEI's Interrupt A Enable2 Register
+ *
+ * Used to enable the various interrupting conditions of NPEI
+ */
+union cvmx_npei_int_a_enb2 {
+	u64 u64;
+	struct cvmx_npei_int_a_enb2_s {
+		u64 reserved_10_63 : 54;
+		u64 pout_err : 1;
+		u64 pin_bp : 1;
+		u64 p1_rdlk : 1;
+		u64 p0_rdlk : 1;
+		u64 pgl_err : 1;
+		u64 pdi_err : 1;
+		u64 pop_err : 1;
+		u64 pins_err : 1;
+		u64 dma1_cpl : 1;
+		u64 dma0_cpl : 1;
+	} s;
+	struct cvmx_npei_int_a_enb2_s cn52xx;
+	struct cvmx_npei_int_a_enb2_cn52xxp1 {
+		u64 reserved_2_63 : 62;
+		u64 dma1_cpl : 1;
+		u64 dma0_cpl : 1;
+	} cn52xxp1;
+	struct cvmx_npei_int_a_enb2_s cn56xx;
+};
+
+typedef union cvmx_npei_int_a_enb2 cvmx_npei_int_a_enb2_t;
+
+/**
+ * cvmx_npei_int_a_sum
+ *
+ * NPEI_INTERRUPT_A_SUM = NPI Interrupt A Summary Register
+ *
+ * Set when an interrupt condition occurs; write '1' to clear. When an
+ * interrupt bit in this register is set and
+ * the corresponding bit in the NPEI_INT_A_ENB register is set, then
+ * NPEI_INT_SUM[61] will be set.
+ */
+union cvmx_npei_int_a_sum {
+	u64 u64;
+	struct cvmx_npei_int_a_sum_s {
+		u64 reserved_10_63 : 54;
+		u64 pout_err : 1;
+		u64 pin_bp : 1;
+		u64 p1_rdlk : 1;
+		u64 p0_rdlk : 1;
+		u64 pgl_err : 1;
+		u64 pdi_err : 1;
+		u64 pop_err : 1;
+		u64 pins_err : 1;
+		u64 dma1_cpl : 1;
+		u64 dma0_cpl : 1;
+	} s;
+	struct cvmx_npei_int_a_sum_s cn52xx;
+	struct cvmx_npei_int_a_sum_cn52xxp1 {
+		u64 reserved_2_63 : 62;
+		u64 dma1_cpl : 1;
+		u64 dma0_cpl : 1;
+	} cn52xxp1;
+	struct cvmx_npei_int_a_sum_s cn56xx;
+};
+
+typedef union cvmx_npei_int_a_sum cvmx_npei_int_a_sum_t;
+
+/**
+ * cvmx_npei_int_enb
+ *
+ * NPEI_INTERRUPT_ENB = NPI's Interrupt Enable Register
+ *
+ * Used to allow the generation of interrupts (MSI/INTA) to the PCIe
+ * cores. Used to enable the various interrupting conditions of NPI.
+ */
+union cvmx_npei_int_enb {
+	u64 u64;
+	struct cvmx_npei_int_enb_s {
+		u64 mio_inta : 1;
+		u64 reserved_62_62 : 1;
+		u64 int_a : 1;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 crs1_dr : 1;
+		u64 c1_se : 1;
+		u64 crs1_er : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 crs0_dr : 1;
+		u64 c0_se : 1;
+		u64 crs0_er : 1;
+		u64 c0_aeri : 1;
+		u64 ptime : 1;
+		u64 pcnt : 1;
+		u64 pidbof : 1;
+		u64 psldbof : 1;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 dma4dbo : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} s;
+	struct cvmx_npei_int_enb_s cn52xx;
+	struct cvmx_npei_int_enb_cn52xxp1 {
+		u64 mio_inta : 1;
+		u64 reserved_62_62 : 1;
+		u64 int_a : 1;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 crs1_dr : 1;
+		u64 c1_se : 1;
+		u64 crs1_er : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 crs0_dr : 1;
+		u64 c0_se : 1;
+		u64 crs0_er : 1;
+		u64 c0_aeri : 1;
+		u64 ptime : 1;
+		u64 pcnt : 1;
+		u64 pidbof : 1;
+		u64 psldbof : 1;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 reserved_8_8 : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} cn52xxp1;
+	struct cvmx_npei_int_enb_s cn56xx;
+	struct cvmx_npei_int_enb_cn56xxp1 {
+		u64 mio_inta : 1;
+		u64 reserved_61_62 : 2;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 reserved_29_29 : 1;
+		u64 c1_se : 1;
+		u64 reserved_27_27 : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 reserved_22_22 : 1;
+		u64 c0_se : 1;
+		u64 reserved_20_20 : 1;
+		u64 c0_aeri : 1;
+		u64 ptime : 1;
+		u64 pcnt : 1;
+		u64 pidbof : 1;
+		u64 psldbof : 1;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 dma4dbo : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} cn56xxp1;
+};
+
+typedef union cvmx_npei_int_enb cvmx_npei_int_enb_t;
+
+/**
+ * cvmx_npei_int_enb2
+ *
+ * NPEI_INTERRUPT_ENB2 = NPI's Interrupt Enable2 Register
+ *
+ * Used to enable the various interrupting conditions of NPI
+ */
+union cvmx_npei_int_enb2 {
+	u64 u64;
+	struct cvmx_npei_int_enb2_s {
+		u64 reserved_62_63 : 2;
+		u64 int_a : 1;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 crs1_dr : 1;
+		u64 c1_se : 1;
+		u64 crs1_er : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 crs0_dr : 1;
+		u64 c0_se : 1;
+		u64 crs0_er : 1;
+		u64 c0_aeri : 1;
+		u64 ptime : 1;
+		u64 pcnt : 1;
+		u64 pidbof : 1;
+		u64 psldbof : 1;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 dma4dbo : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} s;
+	struct cvmx_npei_int_enb2_s cn52xx;
+	struct cvmx_npei_int_enb2_cn52xxp1 {
+		u64 reserved_62_63 : 2;
+		u64 int_a : 1;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 crs1_dr : 1;
+		u64 c1_se : 1;
+		u64 crs1_er : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 crs0_dr : 1;
+		u64 c0_se : 1;
+		u64 crs0_er : 1;
+		u64 c0_aeri : 1;
+		u64 ptime : 1;
+		u64 pcnt : 1;
+		u64 pidbof : 1;
+		u64 psldbof : 1;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 reserved_8_8 : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} cn52xxp1;
+	struct cvmx_npei_int_enb2_s cn56xx;
+	struct cvmx_npei_int_enb2_cn56xxp1 {
+		u64 reserved_61_63 : 3;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 reserved_29_29 : 1;
+		u64 c1_se : 1;
+		u64 reserved_27_27 : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 reserved_22_22 : 1;
+		u64 c0_se : 1;
+		u64 reserved_20_20 : 1;
+		u64 c0_aeri : 1;
+		u64 ptime : 1;
+		u64 pcnt : 1;
+		u64 pidbof : 1;
+		u64 psldbof : 1;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 dma4dbo : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} cn56xxp1;
+};
+
+typedef union cvmx_npei_int_enb2 cvmx_npei_int_enb2_t;
+
+/**
+ * cvmx_npei_int_info
+ *
+ * NPEI_INT_INFO = NPI Interrupt Information
+ *
+ * Contains information about some of the interrupt conditions that can occur
+ * in the NPEI_INTERRUPT_SUM register.
+ */
+union cvmx_npei_int_info {
+	u64 u64;
+	struct cvmx_npei_int_info_s {
+		u64 reserved_12_63 : 52;
+		u64 pidbof : 6;
+		u64 psldbof : 6;
+	} s;
+	struct cvmx_npei_int_info_s cn52xx;
+	struct cvmx_npei_int_info_s cn56xx;
+	struct cvmx_npei_int_info_s cn56xxp1;
+};
+
+typedef union cvmx_npei_int_info cvmx_npei_int_info_t;
+
+/**
+ * cvmx_npei_int_sum
+ *
+ * NPEI_INTERRUPT_SUM = NPI Interrupt Summary Register
+ *
+ * Set when an interrupt condition occurs; write '1' to clear.
+ */
+union cvmx_npei_int_sum {
+	u64 u64;
+	struct cvmx_npei_int_sum_s {
+		u64 mio_inta : 1;
+		u64 reserved_62_62 : 1;
+		u64 int_a : 1;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 crs1_dr : 1;
+		u64 c1_se : 1;
+		u64 crs1_er : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 crs0_dr : 1;
+		u64 c0_se : 1;
+		u64 crs0_er : 1;
+		u64 c0_aeri : 1;
+		u64 ptime : 1;
+		u64 pcnt : 1;
+		u64 pidbof : 1;
+		u64 psldbof : 1;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 dma4dbo : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} s;
+	struct cvmx_npei_int_sum_s cn52xx;
+	struct cvmx_npei_int_sum_cn52xxp1 {
+		u64 mio_inta : 1;
+		u64 reserved_62_62 : 1;
+		u64 int_a : 1;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 crs1_dr : 1;
+		u64 c1_se : 1;
+		u64 crs1_er : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 crs0_dr : 1;
+		u64 c0_se : 1;
+		u64 crs0_er : 1;
+		u64 c0_aeri : 1;
+		u64 reserved_15_18 : 4;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 reserved_8_8 : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} cn52xxp1;
+	struct cvmx_npei_int_sum_s cn56xx;
+	struct cvmx_npei_int_sum_cn56xxp1 {
+		u64 mio_inta : 1;
+		u64 reserved_61_62 : 2;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 reserved_29_29 : 1;
+		u64 c1_se : 1;
+		u64 reserved_27_27 : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 reserved_22_22 : 1;
+		u64 c0_se : 1;
+		u64 reserved_20_20 : 1;
+		u64 c0_aeri : 1;
+		u64 reserved_15_18 : 4;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 dma4dbo : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} cn56xxp1;
+};
+
+typedef union cvmx_npei_int_sum cvmx_npei_int_sum_t;
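+
+/*
+ * Illustrative sketch, not part of the original register set: the summary
+ * register is write-1-to-clear per the description above, so pending
+ * conditions are acknowledged by reading the register and writing the
+ * same value back.
+ */
+static inline u64 cvmx_npei_int_sum_ack(u64 csr_addr)
+{
+	u64 sum = csr_rd(csr_addr);
+
+	/* Write '1' to each set bit to clear the recorded conditions */
+	csr_wr(csr_addr, sum);
+	return sum;
+}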
+
+/**
+ * cvmx_npei_int_sum2
+ *
+ * NPEI_INTERRUPT_SUM2 = NPI Interrupt Summary2 Register
+ *
+ * This is a read-only copy of the NPEI_INTERRUPT_SUM register with bit
+ * variances.
+ */
+union cvmx_npei_int_sum2 {
+	u64 u64;
+	struct cvmx_npei_int_sum2_s {
+		u64 mio_inta : 1;
+		u64 reserved_62_62 : 1;
+		u64 int_a : 1;
+		u64 c1_ldwn : 1;
+		u64 c0_ldwn : 1;
+		u64 c1_exc : 1;
+		u64 c0_exc : 1;
+		u64 c1_up_wf : 1;
+		u64 c0_up_wf : 1;
+		u64 c1_un_wf : 1;
+		u64 c0_un_wf : 1;
+		u64 c1_un_bx : 1;
+		u64 c1_un_wi : 1;
+		u64 c1_un_b2 : 1;
+		u64 c1_un_b1 : 1;
+		u64 c1_un_b0 : 1;
+		u64 c1_up_bx : 1;
+		u64 c1_up_wi : 1;
+		u64 c1_up_b2 : 1;
+		u64 c1_up_b1 : 1;
+		u64 c1_up_b0 : 1;
+		u64 c0_un_bx : 1;
+		u64 c0_un_wi : 1;
+		u64 c0_un_b2 : 1;
+		u64 c0_un_b1 : 1;
+		u64 c0_un_b0 : 1;
+		u64 c0_up_bx : 1;
+		u64 c0_up_wi : 1;
+		u64 c0_up_b2 : 1;
+		u64 c0_up_b1 : 1;
+		u64 c0_up_b0 : 1;
+		u64 c1_hpint : 1;
+		u64 c1_pmei : 1;
+		u64 c1_wake : 1;
+		u64 crs1_dr : 1;
+		u64 c1_se : 1;
+		u64 crs1_er : 1;
+		u64 c1_aeri : 1;
+		u64 c0_hpint : 1;
+		u64 c0_pmei : 1;
+		u64 c0_wake : 1;
+		u64 crs0_dr : 1;
+		u64 c0_se : 1;
+		u64 crs0_er : 1;
+		u64 c0_aeri : 1;
+		u64 reserved_15_18 : 4;
+		u64 dtime1 : 1;
+		u64 dtime0 : 1;
+		u64 dcnt1 : 1;
+		u64 dcnt0 : 1;
+		u64 dma1fi : 1;
+		u64 dma0fi : 1;
+		u64 reserved_8_8 : 1;
+		u64 dma3dbo : 1;
+		u64 dma2dbo : 1;
+		u64 dma1dbo : 1;
+		u64 dma0dbo : 1;
+		u64 iob2big : 1;
+		u64 bar0_to : 1;
+		u64 rml_wto : 1;
+		u64 rml_rto : 1;
+	} s;
+	struct cvmx_npei_int_sum2_s cn52xx;
+	struct cvmx_npei_int_sum2_s cn52xxp1;
+	struct cvmx_npei_int_sum2_s cn56xx;
+};
+
+typedef union cvmx_npei_int_sum2 cvmx_npei_int_sum2_t;
+
+/**
+ * cvmx_npei_last_win_rdata0
+ *
+ * NPEI_LAST_WIN_RDATA0 = NPEI Last Window Read Data Port0
+ *
+ * The data from the last initiated window read.
+ */
+union cvmx_npei_last_win_rdata0 {
+	u64 u64;
+	struct cvmx_npei_last_win_rdata0_s {
+		u64 data : 64;
+	} s;
+	struct cvmx_npei_last_win_rdata0_s cn52xx;
+	struct cvmx_npei_last_win_rdata0_s cn52xxp1;
+	struct cvmx_npei_last_win_rdata0_s cn56xx;
+	struct cvmx_npei_last_win_rdata0_s cn56xxp1;
+};
+
+typedef union cvmx_npei_last_win_rdata0 cvmx_npei_last_win_rdata0_t;
+
+/**
+ * cvmx_npei_last_win_rdata1
+ *
+ * NPEI_LAST_WIN_RDATA1 = NPEI Last Window Read Data Port1
+ *
+ * The data from the last initiated window read.
+ */
+union cvmx_npei_last_win_rdata1 {
+	u64 u64;
+	struct cvmx_npei_last_win_rdata1_s {
+		u64 data : 64;
+	} s;
+	struct cvmx_npei_last_win_rdata1_s cn52xx;
+	struct cvmx_npei_last_win_rdata1_s cn52xxp1;
+	struct cvmx_npei_last_win_rdata1_s cn56xx;
+	struct cvmx_npei_last_win_rdata1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_last_win_rdata1 cvmx_npei_last_win_rdata1_t;
+
+/**
+ * cvmx_npei_mem_access_ctl
+ *
+ * NPEI_MEM_ACCESS_CTL = NPEI's Memory Access Control
+ *
+ * Contains control for access to the PCIe address space.
+ */
+union cvmx_npei_mem_access_ctl {
+	u64 u64;
+	struct cvmx_npei_mem_access_ctl_s {
+		u64 reserved_14_63 : 50;
+		u64 max_word : 4;
+		u64 timer : 10;
+	} s;
+	struct cvmx_npei_mem_access_ctl_s cn52xx;
+	struct cvmx_npei_mem_access_ctl_s cn52xxp1;
+	struct cvmx_npei_mem_access_ctl_s cn56xx;
+	struct cvmx_npei_mem_access_ctl_s cn56xxp1;
+};
+
+typedef union cvmx_npei_mem_access_ctl cvmx_npei_mem_access_ctl_t;
+
+/**
+ * cvmx_npei_mem_access_subid#
+ *
+ * NPEI_MEM_ACCESS_SUBIDX = NPEI Memory Access SubidX Register
+ *
+ * Contains address index and control bits for access to memory from Core PPs.
+ */
+union cvmx_npei_mem_access_subidx {
+	u64 u64;
+	struct cvmx_npei_mem_access_subidx_s {
+		u64 reserved_42_63 : 22;
+		u64 zero : 1;
+		u64 port : 2;
+		u64 nmerge : 1;
+		u64 esr : 2;
+		u64 esw : 2;
+		u64 nsr : 1;
+		u64 nsw : 1;
+		u64 ror : 1;
+		u64 row : 1;
+		u64 ba : 30;
+	} s;
+	struct cvmx_npei_mem_access_subidx_s cn52xx;
+	struct cvmx_npei_mem_access_subidx_s cn52xxp1;
+	struct cvmx_npei_mem_access_subidx_s cn56xx;
+	struct cvmx_npei_mem_access_subidx_s cn56xxp1;
+};
+
+typedef union cvmx_npei_mem_access_subidx cvmx_npei_mem_access_subidx_t;
+
+/**
+ * cvmx_npei_msi_enb0
+ *
+ * NPEI_MSI_ENB0 = NPEI MSI Enable0
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV0.
+ */
+union cvmx_npei_msi_enb0 {
+	u64 u64;
+	struct cvmx_npei_msi_enb0_s {
+		u64 enb : 64;
+	} s;
+	struct cvmx_npei_msi_enb0_s cn52xx;
+	struct cvmx_npei_msi_enb0_s cn52xxp1;
+	struct cvmx_npei_msi_enb0_s cn56xx;
+	struct cvmx_npei_msi_enb0_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_enb0 cvmx_npei_msi_enb0_t;
+
+/**
+ * cvmx_npei_msi_enb1
+ *
+ * NPEI_MSI_ENB1 = NPEI MSI Enable1
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV1.
+ */
+union cvmx_npei_msi_enb1 {
+	u64 u64;
+	struct cvmx_npei_msi_enb1_s {
+		u64 enb : 64;
+	} s;
+	struct cvmx_npei_msi_enb1_s cn52xx;
+	struct cvmx_npei_msi_enb1_s cn52xxp1;
+	struct cvmx_npei_msi_enb1_s cn56xx;
+	struct cvmx_npei_msi_enb1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_enb1 cvmx_npei_msi_enb1_t;
+
+/**
+ * cvmx_npei_msi_enb2
+ *
+ * NPEI_MSI_ENB2 = NPEI MSI Enable2
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV2.
+ */
+union cvmx_npei_msi_enb2 {
+	u64 u64;
+	struct cvmx_npei_msi_enb2_s {
+		u64 enb : 64;
+	} s;
+	struct cvmx_npei_msi_enb2_s cn52xx;
+	struct cvmx_npei_msi_enb2_s cn52xxp1;
+	struct cvmx_npei_msi_enb2_s cn56xx;
+	struct cvmx_npei_msi_enb2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_enb2 cvmx_npei_msi_enb2_t;
+
+/**
+ * cvmx_npei_msi_enb3
+ *
+ * NPEI_MSI_ENB3 = NPEI MSI Enable3
+ *
+ * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV3.
+ */
+union cvmx_npei_msi_enb3 {
+	u64 u64;
+	struct cvmx_npei_msi_enb3_s {
+		u64 enb : 64;
+	} s;
+	struct cvmx_npei_msi_enb3_s cn52xx;
+	struct cvmx_npei_msi_enb3_s cn52xxp1;
+	struct cvmx_npei_msi_enb3_s cn56xx;
+	struct cvmx_npei_msi_enb3_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_enb3 cvmx_npei_msi_enb3_t;
+
+/**
+ * cvmx_npei_msi_rcv0
+ *
+ * NPEI_MSI_RCV0 = NPEI MSI Receive0
+ *
+ * Contains bits [63:0] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv0 {
+	u64 u64;
+	struct cvmx_npei_msi_rcv0_s {
+		u64 intr : 64;
+	} s;
+	struct cvmx_npei_msi_rcv0_s cn52xx;
+	struct cvmx_npei_msi_rcv0_s cn52xxp1;
+	struct cvmx_npei_msi_rcv0_s cn56xx;
+	struct cvmx_npei_msi_rcv0_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rcv0 cvmx_npei_msi_rcv0_t;
+
+/**
+ * cvmx_npei_msi_rcv1
+ *
+ * NPEI_MSI_RCV1 = NPEI MSI Receive1
+ *
+ * Contains bits [127:64] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv1 {
+	u64 u64;
+	struct cvmx_npei_msi_rcv1_s {
+		u64 intr : 64;
+	} s;
+	struct cvmx_npei_msi_rcv1_s cn52xx;
+	struct cvmx_npei_msi_rcv1_s cn52xxp1;
+	struct cvmx_npei_msi_rcv1_s cn56xx;
+	struct cvmx_npei_msi_rcv1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rcv1 cvmx_npei_msi_rcv1_t;
+
+/**
+ * cvmx_npei_msi_rcv2
+ *
+ * NPEI_MSI_RCV2 = NPEI MSI Receive2
+ *
+ * Contains bits [191:128] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv2 {
+	u64 u64;
+	struct cvmx_npei_msi_rcv2_s {
+		u64 intr : 64;
+	} s;
+	struct cvmx_npei_msi_rcv2_s cn52xx;
+	struct cvmx_npei_msi_rcv2_s cn52xxp1;
+	struct cvmx_npei_msi_rcv2_s cn56xx;
+	struct cvmx_npei_msi_rcv2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rcv2 cvmx_npei_msi_rcv2_t;
+
+/**
+ * cvmx_npei_msi_rcv3
+ *
+ * NPEI_MSI_RCV3 = NPEI MSI Receive3
+ *
+ * Contains bits [255:192] of the 256 bits of MSI interrupts.
+ */
+union cvmx_npei_msi_rcv3 {
+	u64 u64;
+	struct cvmx_npei_msi_rcv3_s {
+		u64 intr : 64;
+	} s;
+	struct cvmx_npei_msi_rcv3_s cn52xx;
+	struct cvmx_npei_msi_rcv3_s cn52xxp1;
+	struct cvmx_npei_msi_rcv3_s cn56xx;
+	struct cvmx_npei_msi_rcv3_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rcv3 cvmx_npei_msi_rcv3_t;
+
+/**
+ * cvmx_npei_msi_rd_map
+ *
+ * NPEI_MSI_RD_MAP = NPEI MSI Read MAP
+ *
+ * Used to read the mapping function of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV
+ * registers.
+ */
+union cvmx_npei_msi_rd_map {
+	u64 u64;
+	struct cvmx_npei_msi_rd_map_s {
+		u64 reserved_16_63 : 48;
+		u64 rd_int : 8;
+		u64 msi_int : 8;
+	} s;
+	struct cvmx_npei_msi_rd_map_s cn52xx;
+	struct cvmx_npei_msi_rd_map_s cn52xxp1;
+	struct cvmx_npei_msi_rd_map_s cn56xx;
+	struct cvmx_npei_msi_rd_map_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_rd_map cvmx_npei_msi_rd_map_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb0
+ *
+ * NPEI_MSI_W1C_ENB0 = NPEI MSI Write 1 To Clear Enable0
+ *
+ * Used to clear bits in NPEI_MSI_ENB0. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb0 {
+	u64 u64;
+	struct cvmx_npei_msi_w1c_enb0_s {
+		u64 clr : 64;
+	} s;
+	struct cvmx_npei_msi_w1c_enb0_s cn52xx;
+	struct cvmx_npei_msi_w1c_enb0_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1c_enb0 cvmx_npei_msi_w1c_enb0_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb1
+ *
+ * NPEI_MSI_W1C_ENB1 = NPEI MSI Write 1 To Clear Enable1
+ *
+ * Used to clear bits in NPEI_MSI_ENB1. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb1 {
+	u64 u64;
+	struct cvmx_npei_msi_w1c_enb1_s {
+		u64 clr : 64;
+	} s;
+	struct cvmx_npei_msi_w1c_enb1_s cn52xx;
+	struct cvmx_npei_msi_w1c_enb1_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1c_enb1 cvmx_npei_msi_w1c_enb1_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb2
+ *
+ * NPEI_MSI_W1C_ENB2 = NPEI MSI Write 1 To Clear Enable2
+ *
+ * Used to clear bits in NPEI_MSI_ENB2. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb2 {
+	u64 u64;
+	struct cvmx_npei_msi_w1c_enb2_s {
+		u64 clr : 64;
+	} s;
+	struct cvmx_npei_msi_w1c_enb2_s cn52xx;
+	struct cvmx_npei_msi_w1c_enb2_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1c_enb2 cvmx_npei_msi_w1c_enb2_t;
+
+/**
+ * cvmx_npei_msi_w1c_enb3
+ *
+ * NPEI_MSI_W1C_ENB3 = NPEI MSI Write 1 To Clear Enable3
+ *
+ * Used to clear bits in NPEI_MSI_ENB3. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1c_enb3 {
+	u64 u64;
+	struct cvmx_npei_msi_w1c_enb3_s {
+		u64 clr : 64;
+	} s;
+	struct cvmx_npei_msi_w1c_enb3_s cn52xx;
+	struct cvmx_npei_msi_w1c_enb3_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1c_enb3 cvmx_npei_msi_w1c_enb3_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb0
+ *
+ * NPEI_MSI_W1S_ENB0 = NPEI MSI Write 1 To Set Enable0
+ *
+ * Used to set bits in NPEI_MSI_ENB0. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb0 {
+	u64 u64;
+	struct cvmx_npei_msi_w1s_enb0_s {
+		u64 set : 64;
+	} s;
+	struct cvmx_npei_msi_w1s_enb0_s cn52xx;
+	struct cvmx_npei_msi_w1s_enb0_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1s_enb0 cvmx_npei_msi_w1s_enb0_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb1
+ *
+ * NPEI_MSI_W1S_ENB1 = NPEI MSI Write 1 To Set Enable1
+ *
+ * Used to set bits in NPEI_MSI_ENB1. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb1 {
+	u64 u64;
+	struct cvmx_npei_msi_w1s_enb1_s {
+		u64 set : 64;
+	} s;
+	struct cvmx_npei_msi_w1s_enb1_s cn52xx;
+	struct cvmx_npei_msi_w1s_enb1_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1s_enb1 cvmx_npei_msi_w1s_enb1_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb2
+ *
+ * NPEI_MSI_W1S_ENB2 = NPEI MSI Write 1 To Set Enable2
+ *
+ * Used to set bits in NPEI_MSI_ENB2. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb2 {
+	u64 u64;
+	struct cvmx_npei_msi_w1s_enb2_s {
+		u64 set : 64;
+	} s;
+	struct cvmx_npei_msi_w1s_enb2_s cn52xx;
+	struct cvmx_npei_msi_w1s_enb2_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1s_enb2 cvmx_npei_msi_w1s_enb2_t;
+
+/**
+ * cvmx_npei_msi_w1s_enb3
+ *
+ * NPEI_MSI_W1S_ENB3 = NPEI MSI Write 1 To Set Enable3
+ *
+ * Used to set bits in NPEI_MSI_ENB3. This is a PASS2 register.
+ */
+union cvmx_npei_msi_w1s_enb3 {
+	u64 u64;
+	struct cvmx_npei_msi_w1s_enb3_s {
+		u64 set : 64;
+	} s;
+	struct cvmx_npei_msi_w1s_enb3_s cn52xx;
+	struct cvmx_npei_msi_w1s_enb3_s cn56xx;
+};
+
+typedef union cvmx_npei_msi_w1s_enb3 cvmx_npei_msi_w1s_enb3_t;
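+
+/*
+ * Illustrative sketch, not part of the original register set: the W1C/W1S
+ * register pairs above let software clear or set a single NPEI_MSI_ENB
+ * bit without a read-modify-write of the enable register.  The caller
+ * passes the W1C or W1S address of the 64-bit group holding the MSI of
+ * interest.
+ */
+static inline void cvmx_npei_msi_enb_clear(u64 w1c_addr, unsigned int bit)
+{
+	csr_wr(w1c_addr, 1ull << bit);	/* clears only this enable bit */
+}
+
+static inline void cvmx_npei_msi_enb_set(u64 w1s_addr, unsigned int bit)
+{
+	csr_wr(w1s_addr, 1ull << bit);	/* sets only this enable bit */
+}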
+
+/**
+ * cvmx_npei_msi_wr_map
+ *
+ * NPEI_MSI_WR_MAP = NPEI MSI Write MAP
+ *
+ * Used to write the mapping function of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV
+ * registers.
+ */
+union cvmx_npei_msi_wr_map {
+	u64 u64;
+	struct cvmx_npei_msi_wr_map_s {
+		u64 reserved_16_63 : 48;
+		u64 ciu_int : 8;
+		u64 msi_int : 8;
+	} s;
+	struct cvmx_npei_msi_wr_map_s cn52xx;
+	struct cvmx_npei_msi_wr_map_s cn52xxp1;
+	struct cvmx_npei_msi_wr_map_s cn56xx;
+	struct cvmx_npei_msi_wr_map_s cn56xxp1;
+};
+
+typedef union cvmx_npei_msi_wr_map cvmx_npei_msi_wr_map_t;
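+
+/*
+ * Illustrative sketch, not part of the original register set: writing one
+ * entry of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV mapping.  MSI_INT
+ * selects the incoming MSI number and CIU_INT the interrupt it maps to;
+ * the exact CIU_INT encoding comes from the hardware manual and is not
+ * restated here.
+ */
+static inline void cvmx_npei_msi_map_write(u64 wr_map_addr, u8 msi_int, u8 ciu_int)
+{
+	cvmx_npei_msi_wr_map_t map;
+
+	map.u64 = 0;
+	map.s.msi_int = msi_int;	/* MSI number being mapped */
+	map.s.ciu_int = ciu_int;	/* target interrupt number */
+	csr_wr(wr_map_addr, map.u64);
+}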
+
+/**
+ * cvmx_npei_pcie_credit_cnt
+ *
+ * NPEI_PCIE_CREDIT_CNT = NPEI PCIE Credit Count
+ *
+ * Contains the number of credits for the pcie port FIFOs used by the NPEI.
+ * This value needs to be set BEFORE PCIe traffic
+ * flow from NPEI to PCIE Ports starts. A write to this register will cause
+ * the credit counts in the NPEI for the two
+ * PCIE ports to be reset to the value in this register.
+ */
+union cvmx_npei_pcie_credit_cnt {
+	u64 u64;
+	struct cvmx_npei_pcie_credit_cnt_s {
+		u64 reserved_48_63 : 16;
+		u64 p1_ccnt : 8;
+		u64 p1_ncnt : 8;
+		u64 p1_pcnt : 8;
+		u64 p0_ccnt : 8;
+		u64 p0_ncnt : 8;
+		u64 p0_pcnt : 8;
+	} s;
+	struct cvmx_npei_pcie_credit_cnt_s cn52xx;
+	struct cvmx_npei_pcie_credit_cnt_s cn56xx;
+};
+
+typedef union cvmx_npei_pcie_credit_cnt cvmx_npei_pcie_credit_cnt_t;
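+
+/*
+ * Illustrative sketch, not part of the original register set: per the
+ * description above, the credit counts must be programmed before any
+ * NPEI-to-PCIe traffic starts, and a write resets the internal counters
+ * for both ports.  The field names suggest the usual PCIe posted/
+ * non-posted/completion split, which is assumed here; the actual values
+ * are board and manual specific.
+ */
+static inline void cvmx_npei_set_pcie_credits(u64 csr_addr,
+					      u8 p0_p, u8 p0_n, u8 p0_c,
+					      u8 p1_p, u8 p1_n, u8 p1_c)
+{
+	cvmx_npei_pcie_credit_cnt_t cnt;
+
+	cnt.u64 = 0;
+	cnt.s.p0_pcnt = p0_p;	/* port 0 posted credits */
+	cnt.s.p0_ncnt = p0_n;	/* port 0 non-posted credits */
+	cnt.s.p0_ccnt = p0_c;	/* port 0 completion credits */
+	cnt.s.p1_pcnt = p1_p;	/* port 1 posted credits */
+	cnt.s.p1_ncnt = p1_n;	/* port 1 non-posted credits */
+	cnt.s.p1_ccnt = p1_c;	/* port 1 completion credits */
+	csr_wr(csr_addr, cnt.u64);
+}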
+
+/**
+ * cvmx_npei_pcie_msi_rcv
+ *
+ * NPEI_PCIE_MSI_RCV = NPEI PCIe MSI Receive
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv {
+	u64 u64;
+	struct cvmx_npei_pcie_msi_rcv_s {
+		u64 reserved_8_63 : 56;
+		u64 intr : 8;
+	} s;
+	struct cvmx_npei_pcie_msi_rcv_s cn52xx;
+	struct cvmx_npei_pcie_msi_rcv_s cn52xxp1;
+	struct cvmx_npei_pcie_msi_rcv_s cn56xx;
+	struct cvmx_npei_pcie_msi_rcv_s cn56xxp1;
+};
+
+typedef union cvmx_npei_pcie_msi_rcv cvmx_npei_pcie_msi_rcv_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b1
+ *
+ * NPEI_PCIE_MSI_RCV_B1 = NPEI PCIe MSI Receive Byte 1
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b1 {
+	u64 u64;
+	struct cvmx_npei_pcie_msi_rcv_b1_s {
+		u64 reserved_16_63 : 48;
+		u64 intr : 8;
+		u64 reserved_0_7 : 8;
+	} s;
+	struct cvmx_npei_pcie_msi_rcv_b1_s cn52xx;
+	struct cvmx_npei_pcie_msi_rcv_b1_s cn52xxp1;
+	struct cvmx_npei_pcie_msi_rcv_b1_s cn56xx;
+	struct cvmx_npei_pcie_msi_rcv_b1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_pcie_msi_rcv_b1 cvmx_npei_pcie_msi_rcv_b1_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b2
+ *
+ * NPEI_PCIE_MSI_RCV_B2 = NPEI PCIe MSI Receive Byte 2
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b2 {
+	u64 u64;
+	struct cvmx_npei_pcie_msi_rcv_b2_s {
+		u64 reserved_24_63 : 40;
+		u64 intr : 8;
+		u64 reserved_0_15 : 16;
+	} s;
+	struct cvmx_npei_pcie_msi_rcv_b2_s cn52xx;
+	struct cvmx_npei_pcie_msi_rcv_b2_s cn52xxp1;
+	struct cvmx_npei_pcie_msi_rcv_b2_s cn56xx;
+	struct cvmx_npei_pcie_msi_rcv_b2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_pcie_msi_rcv_b2 cvmx_npei_pcie_msi_rcv_b2_t;
+
+/**
+ * cvmx_npei_pcie_msi_rcv_b3
+ *
+ * NPEI_PCIE_MSI_RCV_B3 = NPEI PCIe MSI Receive Byte 3
+ *
+ * Register where MSI writes are directed from the PCIe.
+ */
+union cvmx_npei_pcie_msi_rcv_b3 {
+	u64 u64;
+	struct cvmx_npei_pcie_msi_rcv_b3_s {
+		u64 reserved_32_63 : 32;
+		u64 intr : 8;
+		u64 reserved_0_23 : 24;
+	} s;
+	struct cvmx_npei_pcie_msi_rcv_b3_s cn52xx;
+	struct cvmx_npei_pcie_msi_rcv_b3_s cn52xxp1;
+	struct cvmx_npei_pcie_msi_rcv_b3_s cn56xx;
+	struct cvmx_npei_pcie_msi_rcv_b3_s cn56xxp1;
+};
+
+typedef union cvmx_npei_pcie_msi_rcv_b3 cvmx_npei_pcie_msi_rcv_b3_t;
+
+/**
+ * cvmx_npei_pkt#_cnts
+ *
+ * NPEI_PKT[0..31]_CNTS = NPEI Packet ring# Counts
+ *
+ * The counters for output rings.
+ */
+union cvmx_npei_pktx_cnts {
+	u64 u64;
+	struct cvmx_npei_pktx_cnts_s {
+		u64 reserved_54_63 : 10;
+		u64 timer : 22;
+		u64 cnt : 32;
+	} s;
+	struct cvmx_npei_pktx_cnts_s cn52xx;
+	struct cvmx_npei_pktx_cnts_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_cnts cvmx_npei_pktx_cnts_t;
+
+/**
+ * cvmx_npei_pkt#_in_bp
+ *
+ * NPEI_PKT[0..31]_IN_BP = NPEI Packet ring# Input Backpressure
+ *
+ * The counters and thresholds for input packets to apply backpressure to
+ * processing of the packets.
+ */
+union cvmx_npei_pktx_in_bp {
+	u64 u64;
+	struct cvmx_npei_pktx_in_bp_s {
+		u64 wmark : 32;
+		u64 cnt : 32;
+	} s;
+	struct cvmx_npei_pktx_in_bp_s cn52xx;
+	struct cvmx_npei_pktx_in_bp_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_in_bp cvmx_npei_pktx_in_bp_t;
+
+/**
+ * cvmx_npei_pkt#_instr_baddr
+ *
+ * NPEI_PKT[0..31]_INSTR_BADDR = NPEI Packet ring# Instruction Base Address
+ *
+ * Start of Instruction for input packets.
+ */
+union cvmx_npei_pktx_instr_baddr {
+	u64 u64;
+	struct cvmx_npei_pktx_instr_baddr_s {
+		u64 addr : 61;
+		u64 reserved_0_2 : 3;
+	} s;
+	struct cvmx_npei_pktx_instr_baddr_s cn52xx;
+	struct cvmx_npei_pktx_instr_baddr_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_instr_baddr cvmx_npei_pktx_instr_baddr_t;
+
+/**
+ * cvmx_npei_pkt#_instr_baoff_dbell
+ *
+ * NPEI_PKT[0..31]_INSTR_BAOFF_DBELL = NPEI Packet ring# Instruction Base
+ * Address Offset and Doorbell
+ *
+ * The doorbell and base address offset for next read.
+ */
+union cvmx_npei_pktx_instr_baoff_dbell {
+	u64 u64;
+	struct cvmx_npei_pktx_instr_baoff_dbell_s {
+		u64 aoff : 32;
+		u64 dbell : 32;
+	} s;
+	struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx;
+	struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_instr_baoff_dbell
+	cvmx_npei_pktx_instr_baoff_dbell_t;
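+
+/*
+ * Illustrative sketch, not part of the original register set: after
+ * queueing new instructions on a packet-input ring, software writes the
+ * number of added instructions to the DBELL field of that ring's
+ * register.  This sketch assumes writing the AOFF half as zero is
+ * harmless, which should be checked against the hardware manual.
+ */
+static inline void cvmx_npei_pkt_instr_doorbell(u64 dbell_addr, u32 count)
+{
+	cvmx_npei_pktx_instr_baoff_dbell_t db;
+
+	db.u64 = 0;
+	db.s.dbell = count;	/* number of new instructions queued */
+	csr_wr(dbell_addr, db.u64);
+}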
+
+/**
+ * cvmx_npei_pkt#_instr_fifo_rsize
+ *
+ * NPEI_PKT[0..31]_INSTR_FIFO_RSIZE = NPEI Packet ring# Instruction FIFO and
+ * Ring Size.
+ *
+ * FIFO field and ring size for instructions.
+ */
+union cvmx_npei_pktx_instr_fifo_rsize {
+	u64 u64;
+	struct cvmx_npei_pktx_instr_fifo_rsize_s {
+		u64 max : 9;
+		u64 rrp : 9;
+		u64 wrp : 9;
+		u64 fcnt : 5;
+		u64 rsize : 32;
+	} s;
+	struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx;
+	struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_instr_fifo_rsize cvmx_npei_pktx_instr_fifo_rsize_t;
+
+/**
+ * cvmx_npei_pkt#_instr_header
+ *
+ * NPEI_PKT[0..31]_INSTR_HEADER = NPEI Packet ring# Instruction Header.
+ *
+ * Values used to build the input packet header.
+ */
+union cvmx_npei_pktx_instr_header {
+	u64 u64;
+	struct cvmx_npei_pktx_instr_header_s {
+		u64 reserved_44_63 : 20;
+		u64 pbp : 1;
+		u64 reserved_38_42 : 5;
+		u64 rparmode : 2;
+		u64 reserved_35_35 : 1;
+		u64 rskp_len : 7;
+		u64 reserved_22_27 : 6;
+		u64 use_ihdr : 1;
+		u64 reserved_16_20 : 5;
+		u64 par_mode : 2;
+		u64 reserved_13_13 : 1;
+		u64 skp_len : 7;
+		u64 reserved_0_5 : 6;
+	} s;
+	struct cvmx_npei_pktx_instr_header_s cn52xx;
+	struct cvmx_npei_pktx_instr_header_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_instr_header cvmx_npei_pktx_instr_header_t;
+
+/**
+ * cvmx_npei_pkt#_slist_baddr
+ *
+ * NPEI_PKT[0..31]_SLIST_BADDR = NPEI Packet ring# Scatter List Base Address
+ *
+ * Start of Scatter List for output packet pointers - MUST be 16-byte aligned.
+ */
+union cvmx_npei_pktx_slist_baddr {
+	u64 u64;
+	struct cvmx_npei_pktx_slist_baddr_s {
+		u64 addr : 60;
+		u64 reserved_0_3 : 4;
+	} s;
+	struct cvmx_npei_pktx_slist_baddr_s cn52xx;
+	struct cvmx_npei_pktx_slist_baddr_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_slist_baddr cvmx_npei_pktx_slist_baddr_t;
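+
+/*
+ * Illustrative sketch, not part of the original register set: the scatter
+ * list base address MUST be 16-byte aligned and the low four bits of the
+ * register are reserved, so a helper can validate the alignment before
+ * programming a ring.
+ */
+static inline int cvmx_npei_pkt_set_slist_baddr(u64 csr_addr, u64 baddr)
+{
+	if (baddr & 0xf)
+		return -1;	/* not 16-byte aligned */
+	csr_wr(csr_addr, baddr);	/* ADDR occupies bits 63:4 */
+	return 0;
+}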
+
+/**
+ * cvmx_npei_pkt#_slist_baoff_dbell
+ *
+ * NPEI_PKT[0..31]_SLIST_BAOFF_DBELL = NPEI Packet ring# Scatter List Base
+ * Address Offset and Doorbell
+ *
+ * The doorbell and base address offset for next read.
+ */
+union cvmx_npei_pktx_slist_baoff_dbell {
+	u64 u64;
+	struct cvmx_npei_pktx_slist_baoff_dbell_s {
+		u64 aoff : 32;
+		u64 dbell : 32;
+	} s;
+	struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx;
+	struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_slist_baoff_dbell
+	cvmx_npei_pktx_slist_baoff_dbell_t;
+
+/**
+ * cvmx_npei_pkt#_slist_fifo_rsize
+ *
+ * NPEI_PKT[0..31]_SLIST_FIFO_RSIZE = NPEI Packet ring# Scatter List FIFO and
+ * Ring Size.
+ *
+ * The number of scatter pointer pairs in the scatter list.
+ */
+union cvmx_npei_pktx_slist_fifo_rsize {
+	u64 u64;
+	struct cvmx_npei_pktx_slist_fifo_rsize_s {
+		u64 reserved_32_63 : 32;
+		u64 rsize : 32;
+	} s;
+	struct cvmx_npei_pktx_slist_fifo_rsize_s cn52xx;
+	struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx;
+};
+
+typedef union cvmx_npei_pktx_slist_fifo_rsize cvmx_npei_pktx_slist_fifo_rsize_t;
+
+/**
+ * cvmx_npei_pkt_cnt_int
+ *
+ * NPEI_PKT_CNT_INT = NPI Packet Counter Interrupt
+ *
+ * The packet rings that are interrupting because of Packet Counters.
+ */
+union cvmx_npei_pkt_cnt_int {
+	u64 u64;
+	struct cvmx_npei_pkt_cnt_int_s {
+		u64 reserved_32_63 : 32;
+		u64 port : 32;
+	} s;
+	struct cvmx_npei_pkt_cnt_int_s cn52xx;
+	struct cvmx_npei_pkt_cnt_int_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_cnt_int cvmx_npei_pkt_cnt_int_t;
+
+/**
+ * cvmx_npei_pkt_cnt_int_enb
+ *
+ * NPEI_PKT_CNT_INT_ENB = NPI Packet Counter Interrupt Enable
+ *
+ * Enable for the packet rings that are interrupting because of Packet Counters.
+ */
+union cvmx_npei_pkt_cnt_int_enb {
+	u64 u64;
+	struct cvmx_npei_pkt_cnt_int_enb_s {
+		u64 reserved_32_63 : 32;
+		u64 port : 32;
+	} s;
+	struct cvmx_npei_pkt_cnt_int_enb_s cn52xx;
+	struct cvmx_npei_pkt_cnt_int_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_cnt_int_enb cvmx_npei_pkt_cnt_int_enb_t;
+
+/**
+ * cvmx_npei_pkt_data_out_es
+ *
+ * NPEI_PKT_DATA_OUT_ES = NPEI's Packet Data Out Endian Swap
+ *
+ * The Endian Swap for writing Data Out.
+ */
+union cvmx_npei_pkt_data_out_es {
+	u64 u64;
+	struct cvmx_npei_pkt_data_out_es_s {
+		u64 es : 64;
+	} s;
+	struct cvmx_npei_pkt_data_out_es_s cn52xx;
+	struct cvmx_npei_pkt_data_out_es_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_data_out_es cvmx_npei_pkt_data_out_es_t;
+
+/**
+ * cvmx_npei_pkt_data_out_ns
+ *
+ * NPEI_PKT_DATA_OUT_NS = NPEI's Packet Data Out No Snoop
+ *
+ * The NS field for the TLP when writing packet data.
+ */
+union cvmx_npei_pkt_data_out_ns {
+	u64 u64;
+	struct cvmx_npei_pkt_data_out_ns_s {
+		u64 reserved_32_63 : 32;
+		u64 nsr : 32;
+	} s;
+	struct cvmx_npei_pkt_data_out_ns_s cn52xx;
+	struct cvmx_npei_pkt_data_out_ns_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_data_out_ns cvmx_npei_pkt_data_out_ns_t;
+
+/**
+ * cvmx_npei_pkt_data_out_ror
+ *
+ * NPEI_PKT_DATA_OUT_ROR = NPEI's Packet Data Out Relaxed Ordering
+ *
+ * The ROR field for the TLP when writing Packet Data.
+ */
+union cvmx_npei_pkt_data_out_ror {
+	u64 u64;
+	struct cvmx_npei_pkt_data_out_ror_s {
+		u64 reserved_32_63 : 32;
+		u64 ror : 32;
+	} s;
+	struct cvmx_npei_pkt_data_out_ror_s cn52xx;
+	struct cvmx_npei_pkt_data_out_ror_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_data_out_ror cvmx_npei_pkt_data_out_ror_t;
+
+/**
+ * cvmx_npei_pkt_dpaddr
+ *
+ * NPEI_PKT_DPADDR = NPEI's Packet Data Pointer Addr
+ *
+ * Used to determine the address and attributes for packet data writes.
+ */
+union cvmx_npei_pkt_dpaddr {
+	u64 u64;
+	struct cvmx_npei_pkt_dpaddr_s {
+		u64 reserved_32_63 : 32;
+		u64 dptr : 32;
+	} s;
+	struct cvmx_npei_pkt_dpaddr_s cn52xx;
+	struct cvmx_npei_pkt_dpaddr_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_dpaddr cvmx_npei_pkt_dpaddr_t;
+
+/**
+ * cvmx_npei_pkt_in_bp
+ *
+ * NPEI_PKT_IN_BP = NPEI Packet Input Backpressure
+ *
+ * Which input rings have backpressure applied.
+ */
+union cvmx_npei_pkt_in_bp {
+	u64 u64;
+	struct cvmx_npei_pkt_in_bp_s {
+		u64 reserved_32_63 : 32;
+		u64 bp : 32;
+	} s;
+	struct cvmx_npei_pkt_in_bp_s cn52xx;
+	struct cvmx_npei_pkt_in_bp_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_in_bp cvmx_npei_pkt_in_bp_t;
+
+/**
+ * cvmx_npei_pkt_in_done#_cnts
+ *
+ * NPEI_PKT_IN_DONE[0..31]_CNTS = NPEI Instruction Done ring# Counts
+ *
+ * Counters for instructions completed on Input rings.
+ */
+union cvmx_npei_pkt_in_donex_cnts {
+	u64 u64;
+	struct cvmx_npei_pkt_in_donex_cnts_s {
+		u64 reserved_32_63 : 32;
+		u64 cnt : 32;
+	} s;
+	struct cvmx_npei_pkt_in_donex_cnts_s cn52xx;
+	struct cvmx_npei_pkt_in_donex_cnts_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_in_donex_cnts cvmx_npei_pkt_in_donex_cnts_t;
+
+/**
+ * cvmx_npei_pkt_in_instr_counts
+ *
+ * NPEI_PKT_IN_INSTR_COUNTS = NPEI Packet Input Instruction Counts
+ *
+ * Keeps track of the number of instructions read into the FIFO and Packets
+ * sent to IPD.
+ */
+union cvmx_npei_pkt_in_instr_counts {
+	u64 u64;
+	struct cvmx_npei_pkt_in_instr_counts_s {
+		u64 wr_cnt : 32;
+		u64 rd_cnt : 32;
+	} s;
+	struct cvmx_npei_pkt_in_instr_counts_s cn52xx;
+	struct cvmx_npei_pkt_in_instr_counts_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_in_instr_counts cvmx_npei_pkt_in_instr_counts_t;
+
+/**
+ * cvmx_npei_pkt_in_pcie_port
+ *
+ * NPEI_PKT_IN_PCIE_PORT = NPEI's Packet In To PCIe Port Assignment
+ *
+ * Assigns Packet Input rings to PCIe ports.
+ */
+union cvmx_npei_pkt_in_pcie_port {
+	u64 u64;
+	struct cvmx_npei_pkt_in_pcie_port_s {
+		u64 pp : 64;
+	} s;
+	struct cvmx_npei_pkt_in_pcie_port_s cn52xx;
+	struct cvmx_npei_pkt_in_pcie_port_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_in_pcie_port cvmx_npei_pkt_in_pcie_port_t;
+
+/**
+ * cvmx_npei_pkt_input_control
+ *
+ * NPEI_PKT_INPUT_CONTROL = NPEI's Packet Input Control
+ *
+ * Control for reads for gather list and instructions.
+ */
+union cvmx_npei_pkt_input_control {
+	u64 u64;
+	struct cvmx_npei_pkt_input_control_s {
+		u64 reserved_23_63 : 41;
+		u64 pkt_rr : 1;
+		u64 pbp_dhi : 13;
+		u64 d_nsr : 1;
+		u64 d_esr : 2;
+		u64 d_ror : 1;
+		u64 use_csr : 1;
+		u64 nsr : 1;
+		u64 esr : 2;
+		u64 ror : 1;
+	} s;
+	struct cvmx_npei_pkt_input_control_s cn52xx;
+	struct cvmx_npei_pkt_input_control_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_input_control cvmx_npei_pkt_input_control_t;
+
+/**
+ * cvmx_npei_pkt_instr_enb
+ *
+ * NPEI_PKT_INSTR_ENB = NPEI's Packet Instruction Enable
+ *
+ * Enables the instruction fetch for a Packet-ring.
+ */
+union cvmx_npei_pkt_instr_enb {
+	u64 u64;
+	struct cvmx_npei_pkt_instr_enb_s {
+		u64 reserved_32_63 : 32;
+		u64 enb : 32;
+	} s;
+	struct cvmx_npei_pkt_instr_enb_s cn52xx;
+	struct cvmx_npei_pkt_instr_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_instr_enb cvmx_npei_pkt_instr_enb_t;
+
+/**
+ * cvmx_npei_pkt_instr_rd_size
+ *
+ * NPEI_PKT_INSTR_RD_SIZE = NPEI Instruction Read Size
+ *
+ * The number of instructions allowed to be read at one time.
+ */
+union cvmx_npei_pkt_instr_rd_size {
+	u64 u64;
+	struct cvmx_npei_pkt_instr_rd_size_s {
+		u64 rdsize : 64;
+	} s;
+	struct cvmx_npei_pkt_instr_rd_size_s cn52xx;
+	struct cvmx_npei_pkt_instr_rd_size_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_instr_rd_size cvmx_npei_pkt_instr_rd_size_t;
+
+/**
+ * cvmx_npei_pkt_instr_size
+ *
+ * NPEI_PKT_INSTR_SIZE = NPEI's Packet Instruction Size
+ *
+ * Determines if instructions are 64 or 32 bytes in size for a Packet-ring.
+ */
+union cvmx_npei_pkt_instr_size {
+	u64 u64;
+	struct cvmx_npei_pkt_instr_size_s {
+		u64 reserved_32_63 : 32;
+		u64 is_64b : 32;
+	} s;
+	struct cvmx_npei_pkt_instr_size_s cn52xx;
+	struct cvmx_npei_pkt_instr_size_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_instr_size cvmx_npei_pkt_instr_size_t;
+
+/**
+ * cvmx_npei_pkt_int_levels
+ *
+ * 0x90F0 reserved NPEI_PKT_PCIE_PORT2
+ *
+ * NPEI_PKT_INT_LEVELS = NPEI's Packet Interrupt Levels
+ *
+ * Output packet interrupt levels.
+ */
+union cvmx_npei_pkt_int_levels {
+	u64 u64;
+	struct cvmx_npei_pkt_int_levels_s {
+		u64 reserved_54_63 : 10;
+		u64 time : 22;
+		u64 cnt : 32;
+	} s;
+	struct cvmx_npei_pkt_int_levels_s cn52xx;
+	struct cvmx_npei_pkt_int_levels_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_int_levels cvmx_npei_pkt_int_levels_t;
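+
+/*
+ * Illustrative sketch only: programming the output-packet interrupt
+ * thresholds via this union. The CVMX_PEXP_NPEI_PKT_INT_LEVELS address macro
+ * and the generic cvmx_write_csr() accessor are assumed to be available from
+ * the surrounding Octeon headers.
+ *
+ *	cvmx_npei_pkt_int_levels_t levels;
+ *
+ *	levels.u64 = 0;
+ *	levels.s.cnt = 100;	// interrupt after 100 packets, or...
+ *	levels.s.time = 10000;	// ...after this many timer ticks
+ *	cvmx_write_csr(CVMX_PEXP_NPEI_PKT_INT_LEVELS, levels.u64);
+ */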
+
+/**
+ * cvmx_npei_pkt_iptr
+ *
+ * NPEI_PKT_IPTR = NPEI's Packet Info Pointer
+ *
+ * Controls using the Info-Pointer to store length and data.
+ */
+union cvmx_npei_pkt_iptr {
+	u64 u64;
+	struct cvmx_npei_pkt_iptr_s {
+		u64 reserved_32_63 : 32;
+		u64 iptr : 32;
+	} s;
+	struct cvmx_npei_pkt_iptr_s cn52xx;
+	struct cvmx_npei_pkt_iptr_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_iptr cvmx_npei_pkt_iptr_t;
+
+/**
+ * cvmx_npei_pkt_out_bmode
+ *
+ * NPEI_PKT_OUT_BMODE = NPEI's Packet Out Byte Mode
+ *
+ * Controls the updating of the NPEI_PKT#_CNT register.
+ */
+union cvmx_npei_pkt_out_bmode {
+	u64 u64;
+	struct cvmx_npei_pkt_out_bmode_s {
+		u64 reserved_32_63 : 32;
+		u64 bmode : 32;
+	} s;
+	struct cvmx_npei_pkt_out_bmode_s cn52xx;
+	struct cvmx_npei_pkt_out_bmode_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_out_bmode cvmx_npei_pkt_out_bmode_t;
+
+/**
+ * cvmx_npei_pkt_out_enb
+ *
+ * NPEI_PKT_OUT_ENB = NPEI's Packet Output Enable
+ *
+ * Enables the output packet engines.
+ */
+union cvmx_npei_pkt_out_enb {
+	u64 u64;
+	struct cvmx_npei_pkt_out_enb_s {
+		u64 reserved_32_63 : 32;
+		u64 enb : 32;
+	} s;
+	struct cvmx_npei_pkt_out_enb_s cn52xx;
+	struct cvmx_npei_pkt_out_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_out_enb cvmx_npei_pkt_out_enb_t;
+
+/**
+ * cvmx_npei_pkt_output_wmark
+ *
+ * NPEI_PKT_OUTPUT_WMARK = NPEI's Packet Output Water Mark
+ *
+ * When the NPEI_PKT#_SLIST_BAOFF_DBELL[DBELL] value is less than this value,
+ * backpressure will be applied to the rings.
+ */
+union cvmx_npei_pkt_output_wmark {
+	u64 u64;
+	struct cvmx_npei_pkt_output_wmark_s {
+		u64 reserved_32_63 : 32;
+		u64 wmark : 32;
+	} s;
+	struct cvmx_npei_pkt_output_wmark_s cn52xx;
+	struct cvmx_npei_pkt_output_wmark_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_output_wmark cvmx_npei_pkt_output_wmark_t;
+
+/**
+ * cvmx_npei_pkt_pcie_port
+ *
+ * NPEI_PKT_PCIE_PORT = NPEI's Packet To PCIe Port Assignment
+ *
+ * Assigns Packet Ports to PCIe ports.
+ */
+union cvmx_npei_pkt_pcie_port {
+	u64 u64;
+	struct cvmx_npei_pkt_pcie_port_s {
+		u64 pp : 64;
+	} s;
+	struct cvmx_npei_pkt_pcie_port_s cn52xx;
+	struct cvmx_npei_pkt_pcie_port_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_pcie_port cvmx_npei_pkt_pcie_port_t;
+
+/**
+ * cvmx_npei_pkt_port_in_rst
+ *
+ * NPEI_PKT_PORT_IN_RST = NPEI Packet Port In Reset
+ *
+ * Vector of ring-port bits indicating which ones are in reset.
+ */
+union cvmx_npei_pkt_port_in_rst {
+	u64 u64;
+	struct cvmx_npei_pkt_port_in_rst_s {
+		u64 in_rst : 32;
+		u64 out_rst : 32;
+	} s;
+	struct cvmx_npei_pkt_port_in_rst_s cn52xx;
+	struct cvmx_npei_pkt_port_in_rst_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_port_in_rst cvmx_npei_pkt_port_in_rst_t;
+
+/**
+ * cvmx_npei_pkt_slist_es
+ *
+ * NPEI_PKT_SLIST_ES = NPEI's Packet Scatter List Endian Swap
+ *
+ * The Endian Swap for Scatter List Read.
+ */
+union cvmx_npei_pkt_slist_es {
+	u64 u64;
+	struct cvmx_npei_pkt_slist_es_s {
+		u64 es : 64;
+	} s;
+	struct cvmx_npei_pkt_slist_es_s cn52xx;
+	struct cvmx_npei_pkt_slist_es_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_slist_es cvmx_npei_pkt_slist_es_t;
+
+/**
+ * cvmx_npei_pkt_slist_id_size
+ *
+ * NPEI_PKT_SLIST_ID_SIZE = NPEI Packet Scatter List Info and Data Size
+ *
+ * The Size of the information and data fields pointed to by Scatter List
+ * pointers.
+ */
+union cvmx_npei_pkt_slist_id_size {
+	u64 u64;
+	struct cvmx_npei_pkt_slist_id_size_s {
+		u64 reserved_23_63 : 41;
+		u64 isize : 7;
+		u64 bsize : 16;
+	} s;
+	struct cvmx_npei_pkt_slist_id_size_s cn52xx;
+	struct cvmx_npei_pkt_slist_id_size_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_slist_id_size cvmx_npei_pkt_slist_id_size_t;
+
+/**
+ * cvmx_npei_pkt_slist_ns
+ *
+ * NPEI_PKT_SLIST_NS = NPEI's Packet Scatter List No Snoop
+ *
+ * The NS field for the TLP when fetching the Scatter List.
+ */
+union cvmx_npei_pkt_slist_ns {
+	u64 u64;
+	struct cvmx_npei_pkt_slist_ns_s {
+		u64 reserved_32_63 : 32;
+		u64 nsr : 32;
+	} s;
+	struct cvmx_npei_pkt_slist_ns_s cn52xx;
+	struct cvmx_npei_pkt_slist_ns_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_slist_ns cvmx_npei_pkt_slist_ns_t;
+
+/**
+ * cvmx_npei_pkt_slist_ror
+ *
+ * NPEI_PKT_SLIST_ROR = NPEI's Packet Scatter List Relaxed Ordering
+ *
+ * The ROR field for the TLP when fetching the Scatter List.
+ */
+union cvmx_npei_pkt_slist_ror {
+	u64 u64;
+	struct cvmx_npei_pkt_slist_ror_s {
+		u64 reserved_32_63 : 32;
+		u64 ror : 32;
+	} s;
+	struct cvmx_npei_pkt_slist_ror_s cn52xx;
+	struct cvmx_npei_pkt_slist_ror_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_slist_ror cvmx_npei_pkt_slist_ror_t;
+
+/**
+ * cvmx_npei_pkt_time_int
+ *
+ * NPEI_PKT_TIME_INT = NPEI Packet Timer Interrupt
+ *
+ * The packet rings that are interrupting because of packet timers.
+ */
+union cvmx_npei_pkt_time_int {
+	u64 u64;
+	struct cvmx_npei_pkt_time_int_s {
+		u64 reserved_32_63 : 32;
+		u64 port : 32;
+	} s;
+	struct cvmx_npei_pkt_time_int_s cn52xx;
+	struct cvmx_npei_pkt_time_int_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_time_int cvmx_npei_pkt_time_int_t;
+
+/**
+ * cvmx_npei_pkt_time_int_enb
+ *
+ * NPEI_PKT_TIME_INT_ENB = NPEI Packet Timer Interrupt Enable
+ *
+ * The packet rings that are interrupting because of packet timers.
+ */
+union cvmx_npei_pkt_time_int_enb {
+	u64 u64;
+	struct cvmx_npei_pkt_time_int_enb_s {
+		u64 reserved_32_63 : 32;
+		u64 port : 32;
+	} s;
+	struct cvmx_npei_pkt_time_int_enb_s cn52xx;
+	struct cvmx_npei_pkt_time_int_enb_s cn56xx;
+};
+
+typedef union cvmx_npei_pkt_time_int_enb cvmx_npei_pkt_time_int_enb_t;
+
+/**
+ * cvmx_npei_rsl_int_blocks
+ *
+ * NPEI_RSL_INT_BLOCKS = NPEI RSL Interrupt Blocks Register
+ *
+ * Reading this register will return a vector with a bit set to '1' for each
+ * corresponding RSL block that presently has an interrupt pending. The field
+ * description below supplies the name of the register that software should
+ * read to find out why that interrupt bit is set.
+ */
+union cvmx_npei_rsl_int_blocks {
+	u64 u64;
+	struct cvmx_npei_rsl_int_blocks_s {
+		u64 reserved_31_63 : 33;
+		u64 iob : 1;
+		u64 lmc1 : 1;
+		u64 agl : 1;
+		u64 reserved_24_27 : 4;
+		u64 asxpcs1 : 1;
+		u64 asxpcs0 : 1;
+		u64 reserved_21_21 : 1;
+		u64 pip : 1;
+		u64 spx1 : 1;
+		u64 spx0 : 1;
+		u64 lmc0 : 1;
+		u64 l2c : 1;
+		u64 usb1 : 1;
+		u64 rad : 1;
+		u64 usb : 1;
+		u64 pow : 1;
+		u64 tim : 1;
+		u64 pko : 1;
+		u64 ipd : 1;
+		u64 reserved_8_8 : 1;
+		u64 zip : 1;
+		u64 dfa : 1;
+		u64 fpa : 1;
+		u64 key : 1;
+		u64 npei : 1;
+		u64 gmx1 : 1;
+		u64 gmx0 : 1;
+		u64 mio : 1;
+	} s;
+	struct cvmx_npei_rsl_int_blocks_s cn52xx;
+	struct cvmx_npei_rsl_int_blocks_s cn52xxp1;
+	struct cvmx_npei_rsl_int_blocks_s cn56xx;
+	struct cvmx_npei_rsl_int_blocks_s cn56xxp1;
+};
+
+typedef union cvmx_npei_rsl_int_blocks cvmx_npei_rsl_int_blocks_t;
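+
+/*
+ * Illustrative sketch only: walking the RSL interrupt-block vector with this
+ * union. The CVMX_PEXP_NPEI_RSL_INT_BLOCKS address macro and the generic
+ * cvmx_read_csr() accessor are assumed to be available from the surrounding
+ * Octeon headers.
+ *
+ *	cvmx_npei_rsl_int_blocks_t blocks;
+ *
+ *	blocks.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_RSL_INT_BLOCKS);
+ *	if (blocks.s.npei)
+ *		; // NPEI itself has an interrupt pending; read its int summary
+ *	if (blocks.s.pko)
+ *		; // PKO block interrupt pending
+ */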
+
+/**
+ * cvmx_npei_scratch_1
+ *
+ * NPEI_SCRATCH_1 = NPEI's Scratch 1
+ *
+ * A general purpose 64 bit register for SW use.
+ */
+union cvmx_npei_scratch_1 {
+	u64 u64;
+	struct cvmx_npei_scratch_1_s {
+		u64 data : 64;
+	} s;
+	struct cvmx_npei_scratch_1_s cn52xx;
+	struct cvmx_npei_scratch_1_s cn52xxp1;
+	struct cvmx_npei_scratch_1_s cn56xx;
+	struct cvmx_npei_scratch_1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_scratch_1 cvmx_npei_scratch_1_t;
+
+/**
+ * cvmx_npei_state1
+ *
+ * NPEI_STATE1 = NPEI State 1
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state1 {
+	u64 u64;
+	struct cvmx_npei_state1_s {
+		u64 cpl1 : 12;
+		u64 cpl0 : 12;
+		u64 arb : 1;
+		u64 csr : 39;
+	} s;
+	struct cvmx_npei_state1_s cn52xx;
+	struct cvmx_npei_state1_s cn52xxp1;
+	struct cvmx_npei_state1_s cn56xx;
+	struct cvmx_npei_state1_s cn56xxp1;
+};
+
+typedef union cvmx_npei_state1 cvmx_npei_state1_t;
+
+/**
+ * cvmx_npei_state2
+ *
+ * NPEI_STATE2 = NPEI State 2
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state2 {
+	u64 u64;
+	struct cvmx_npei_state2_s {
+		u64 reserved_48_63 : 16;
+		u64 npei : 1;
+		u64 rac : 1;
+		u64 csm1 : 15;
+		u64 csm0 : 15;
+		u64 nnp0 : 8;
+		u64 nnd : 8;
+	} s;
+	struct cvmx_npei_state2_s cn52xx;
+	struct cvmx_npei_state2_s cn52xxp1;
+	struct cvmx_npei_state2_s cn56xx;
+	struct cvmx_npei_state2_s cn56xxp1;
+};
+
+typedef union cvmx_npei_state2 cvmx_npei_state2_t;
+
+/**
+ * cvmx_npei_state3
+ *
+ * NPEI_STATE3 = NPEI State 3
+ *
+ * State machines in NPEI. For debug.
+ */
+union cvmx_npei_state3 {
+	u64 u64;
+	struct cvmx_npei_state3_s {
+		u64 reserved_56_63 : 8;
+		u64 psm1 : 15;
+		u64 psm0 : 15;
+		u64 nsm1 : 13;
+		u64 nsm0 : 13;
+	} s;
+	struct cvmx_npei_state3_s cn52xx;
+	struct cvmx_npei_state3_s cn52xxp1;
+	struct cvmx_npei_state3_s cn56xx;
+	struct cvmx_npei_state3_s cn56xxp1;
+};
+
+typedef union cvmx_npei_state3 cvmx_npei_state3_t;
+
+/**
+ * cvmx_npei_win_rd_addr
+ *
+ * NPEI_WIN_RD_ADDR = NPEI Window Read Address Register
+ *
+ * The address to be read when the NPEI_WIN_RD_DATA register is read.
+ */
+union cvmx_npei_win_rd_addr {
+	u64 u64;
+	struct cvmx_npei_win_rd_addr_s {
+		u64 reserved_51_63 : 13;
+		u64 ld_cmd : 2;
+		u64 iobit : 1;
+		u64 rd_addr : 48;
+	} s;
+	struct cvmx_npei_win_rd_addr_s cn52xx;
+	struct cvmx_npei_win_rd_addr_s cn52xxp1;
+	struct cvmx_npei_win_rd_addr_s cn56xx;
+	struct cvmx_npei_win_rd_addr_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_rd_addr cvmx_npei_win_rd_addr_t;
+
+/**
+ * cvmx_npei_win_rd_data
+ *
+ * NPEI_WIN_RD_DATA = NPEI Window Read Data Register
+ *
+ * Reading this register causes a window read operation to take place.
+ * The address read is the one contained in the NPEI_WIN_RD_ADDR register.
+ */
+union cvmx_npei_win_rd_data {
+	u64 u64;
+	struct cvmx_npei_win_rd_data_s {
+		u64 rd_data : 64;
+	} s;
+	struct cvmx_npei_win_rd_data_s cn52xx;
+	struct cvmx_npei_win_rd_data_s cn52xxp1;
+	struct cvmx_npei_win_rd_data_s cn56xx;
+	struct cvmx_npei_win_rd_data_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_rd_data cvmx_npei_win_rd_data_t;
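+
+/*
+ * Illustrative sketch only: a window read, assuming the CVMX_PEXP_NPEI_WIN_*
+ * address macros and the cvmx_read_csr()/cvmx_write_csr() accessors from the
+ * surrounding Octeon headers. The target address is staged in
+ * NPEI_WIN_RD_ADDR; the read itself is triggered by reading this register.
+ *
+ *	cvmx_npei_win_rd_addr_t rd_addr;
+ *	u64 data;
+ *
+ *	rd_addr.u64 = 0;
+ *	rd_addr.s.rd_addr = addr;	// 48-bit target address
+ *	cvmx_write_csr(CVMX_PEXP_NPEI_WIN_RD_ADDR, rd_addr.u64);
+ *	data = cvmx_read_csr(CVMX_PEXP_NPEI_WIN_RD_DATA);
+ */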
+
+/**
+ * cvmx_npei_win_wr_addr
+ *
+ * NPEI_WIN_WR_ADDR = NPEI Window Write Address Register
+ *
+ * Contains the address to be written to when a write operation is started by
+ * writing the NPEI_WIN_WR_DATA register (see below).
+ *
+ * Notes:
+ * Even though address bit [2] can be set, it should always be kept at '0'.
+ *
+ */
+union cvmx_npei_win_wr_addr {
+	u64 u64;
+	struct cvmx_npei_win_wr_addr_s {
+		u64 reserved_49_63 : 15;
+		u64 iobit : 1;
+		u64 wr_addr : 46;
+		u64 reserved_0_1 : 2;
+	} s;
+	struct cvmx_npei_win_wr_addr_s cn52xx;
+	struct cvmx_npei_win_wr_addr_s cn52xxp1;
+	struct cvmx_npei_win_wr_addr_s cn56xx;
+	struct cvmx_npei_win_wr_addr_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_wr_addr cvmx_npei_win_wr_addr_t;
+
+/**
+ * cvmx_npei_win_wr_data
+ *
+ * NPEI_WIN_WR_DATA = NPEI Window Write Data Register
+ *
+ * Contains the data to write to the address located in the NPEI_WIN_WR_ADDR
+ * register.
+ * Writing the least-significant byte of this register will cause a write
+ * operation to take place.
+ */
+union cvmx_npei_win_wr_data {
+	u64 u64;
+	struct cvmx_npei_win_wr_data_s {
+		u64 wr_data : 64;
+	} s;
+	struct cvmx_npei_win_wr_data_s cn52xx;
+	struct cvmx_npei_win_wr_data_s cn52xxp1;
+	struct cvmx_npei_win_wr_data_s cn56xx;
+	struct cvmx_npei_win_wr_data_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_wr_data cvmx_npei_win_wr_data_t;
+
+/**
+ * cvmx_npei_win_wr_mask
+ *
+ * NPEI_WIN_WR_MASK = NPEI Window Write Mask Register
+ *
+ * Contains the mask for the data in the NPEI_WIN_WR_DATA register.
+ */
+union cvmx_npei_win_wr_mask {
+	u64 u64;
+	struct cvmx_npei_win_wr_mask_s {
+		u64 reserved_8_63 : 56;
+		u64 wr_mask : 8;
+	} s;
+	struct cvmx_npei_win_wr_mask_s cn52xx;
+	struct cvmx_npei_win_wr_mask_s cn52xxp1;
+	struct cvmx_npei_win_wr_mask_s cn56xx;
+	struct cvmx_npei_win_wr_mask_s cn56xxp1;
+};
+
+typedef union cvmx_npei_win_wr_mask cvmx_npei_win_wr_mask_t;
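+
+/*
+ * Illustrative sketch only: the complete window-write sequence built from the
+ * three registers above, again assuming the CVMX_PEXP_NPEI_WIN_* address
+ * macros and cvmx_write_csr() from the surrounding Octeon headers. The write
+ * is triggered by the store to NPEI_WIN_WR_DATA (its least-significant byte),
+ * so the address and byte mask must be staged first.
+ *
+ *	cvmx_npei_win_wr_addr_t wr_addr;
+ *
+ *	wr_addr.u64 = 0;
+ *	wr_addr.s.wr_addr = addr >> 2;	// bits [1:0] are reserved
+ *	cvmx_write_csr(CVMX_PEXP_NPEI_WIN_WR_ADDR, wr_addr.u64);
+ *	cvmx_write_csr(CVMX_PEXP_NPEI_WIN_WR_MASK, 0xff); // enable all 8 bytes
+ *	cvmx_write_csr(CVMX_PEXP_NPEI_WIN_WR_DATA, data); // triggers the write
+ */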
+
+/**
+ * cvmx_npei_window_ctl
+ *
+ * NPEI_WINDOW_CTL = NPEI's Window Control
+ *
+ * The name of this register is misleading. The timeout value is used for BAR0
+ * accesses from PCIE0 and PCIE1.
+ * Any access to the registers on the RML will time out after 0xFFFF clock
+ * cycles. At the time of timeout the next RML access will start, an interrupt
+ * will be set, and in the case of reads no data will be returned.
+ *
+ * The value of this register should be set to a minimum of 0x200000 to ensure
+ * that a timeout to an RML register occurs on the RML 0xFFFF timer before the
+ * timeout for a BAR0 access from the PCIE#.
+ */
+union cvmx_npei_window_ctl {
+	u64 u64;
+	struct cvmx_npei_window_ctl_s {
+		u64 reserved_32_63 : 32;
+		u64 time : 32;
+	} s;
+	struct cvmx_npei_window_ctl_s cn52xx;
+	struct cvmx_npei_window_ctl_s cn52xxp1;
+	struct cvmx_npei_window_ctl_s cn56xx;
+	struct cvmx_npei_window_ctl_s cn56xxp1;
+};
+
+typedef union cvmx_npei_window_ctl cvmx_npei_window_ctl_t;
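+
+/*
+ * Illustrative sketch only, following the note above: program a
+ * window-control timeout of at least 0x200000 so the RML 0xFFFF timer fires
+ * before the BAR0 access times out (CVMX_PEXP_NPEI_WINDOW_CTL and
+ * cvmx_write_csr() are assumed from the surrounding Octeon headers).
+ *
+ *	cvmx_npei_window_ctl_t ctl;
+ *
+ *	ctl.u64 = 0;
+ *	ctl.s.time = 0x200000;
+ *	cvmx_write_csr(CVMX_PEXP_NPEI_WINDOW_CTL, ctl.u64);
+ */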
+
+#endif
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pcsxx-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-pcsxx-defs.h
new file mode 100644
index 0000000..e16a4c4
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pcsxx-defs.h
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon pcsxx.
+ */
+
+#ifndef __CVMX_PCSXX_DEFS_H__
+#define __CVMX_PCSXX_DEFS_H__
+
+static inline u64 CVMX_PCSXX_10GBX_STATUS_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000828ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000828ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000828ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000828ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_BIST_STATUS_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000870ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000870ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000870ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000870ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_BIT_LOCK_STATUS_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000850ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000850ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000850ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000850ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_CONTROL1_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000800ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000800ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000800ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000800ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_CONTROL2_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000818ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000818ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000818ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000818ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_INT_EN_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000860ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000860ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000860ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000860ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_INT_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000858ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000858ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000858ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000858ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_LOG_ANL_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000868ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000868ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000868ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000868ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_MISC_CTL_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000848ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000848ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000848ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000848ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_RX_SYNC_STATES_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000838ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000838ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000838ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000838ull + (offset) * 0x8000000ull;
+}
+
+#define CVMX_PCSXX_SERDES_CRDT_CNT_REG(offset) (0x00011800B0000880ull)
+static inline u64 CVMX_PCSXX_SPD_ABIL_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000810ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000810ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000810ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000810ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_STATUS1_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000808ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000808ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000808ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000808ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_STATUS2_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000820ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000820ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000820ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000820ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_TX_RX_POLARITY_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000840ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000840ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000840ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000840ull + (offset) * 0x8000000ull;
+}
+
+static inline u64 CVMX_PCSXX_TX_RX_STATES_REG(unsigned long offset)
+{
+	switch (cvmx_get_octeon_family()) {
+	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000830ull + (offset) * 0x8000000ull;
+	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000830ull + (offset) * 0x8000000ull;
+	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+		return 0x00011800B0000830ull + (offset) * 0x1000000ull;
+	}
+	return 0x00011800B0000830ull + (offset) * 0x8000000ull;
+}
+
+/**
+ * cvmx_pcsx#_10gbx_status_reg
+ *
+ * PCSX_10GBX_STATUS_REG = 10gbx_status_reg
+ *
+ */
+union cvmx_pcsxx_10gbx_status_reg {
+	u64 u64;
+	struct cvmx_pcsxx_10gbx_status_reg_s {
+		u64 reserved_13_63 : 51;
+		u64 alignd : 1;
+		u64 pattst : 1;
+		u64 reserved_4_10 : 7;
+		u64 l3sync : 1;
+		u64 l2sync : 1;
+		u64 l1sync : 1;
+		u64 l0sync : 1;
+	} s;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn61xx;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn63xx;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn63xxp1;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn66xx;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn68xx;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn68xxp1;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn70xx;
+	struct cvmx_pcsxx_10gbx_status_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_10gbx_status_reg cvmx_pcsxx_10gbx_status_reg_t;
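+
+/*
+ * Illustrative sketch only: the per-family address helpers above pair with
+ * these unions for CSR access. For example, checking XAUI lane sync and
+ * alignment on interface 0 (cvmx_read_csr() is assumed from the surrounding
+ * Octeon headers):
+ *
+ *	cvmx_pcsxx_10gbx_status_reg_t status;
+ *
+ *	status.u64 = cvmx_read_csr(CVMX_PCSXX_10GBX_STATUS_REG(0));
+ *	if (status.s.alignd && status.s.l0sync && status.s.l1sync &&
+ *	    status.s.l2sync && status.s.l3sync)
+ *		; // all four lanes synced and aligned
+ */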
+
+/**
+ * cvmx_pcsx#_bist_status_reg
+ *
+ * PCSX BIST Status Register
+ *
+ */
+union cvmx_pcsxx_bist_status_reg {
+	u64 u64;
+	struct cvmx_pcsxx_bist_status_reg_s {
+		u64 reserved_1_63 : 63;
+		u64 bist_status : 1;
+	} s;
+	struct cvmx_pcsxx_bist_status_reg_s cn52xx;
+	struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
+	struct cvmx_pcsxx_bist_status_reg_s cn56xx;
+	struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
+	struct cvmx_pcsxx_bist_status_reg_s cn61xx;
+	struct cvmx_pcsxx_bist_status_reg_s cn63xx;
+	struct cvmx_pcsxx_bist_status_reg_s cn63xxp1;
+	struct cvmx_pcsxx_bist_status_reg_s cn66xx;
+	struct cvmx_pcsxx_bist_status_reg_s cn68xx;
+	struct cvmx_pcsxx_bist_status_reg_s cn68xxp1;
+	struct cvmx_pcsxx_bist_status_reg_s cn70xx;
+	struct cvmx_pcsxx_bist_status_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_bist_status_reg cvmx_pcsxx_bist_status_reg_t;
+
+/**
+ * cvmx_pcsx#_bit_lock_status_reg
+ *
+ * PCSX Bit Lock Status Register
+ *
+ */
+union cvmx_pcsxx_bit_lock_status_reg {
+	u64 u64;
+	struct cvmx_pcsxx_bit_lock_status_reg_s {
+		u64 reserved_4_63 : 60;
+		u64 bitlck3 : 1;
+		u64 bitlck2 : 1;
+		u64 bitlck1 : 1;
+		u64 bitlck0 : 1;
+	} s;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn61xx;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn63xx;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn63xxp1;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn66xx;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn68xx;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn68xxp1;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn70xx;
+	struct cvmx_pcsxx_bit_lock_status_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_bit_lock_status_reg cvmx_pcsxx_bit_lock_status_reg_t;
+
+/**
+ * cvmx_pcsx#_control1_reg
+ *
+ * NOTE: The logic analyzer is enabled with LA_EN for the specified PCS lane
+ * only. PKT_SZ is effective only when LA_EN=1.
+ * For normal operation (sgmii or 1000Base-X), this bit must be 0.
+ * See pcsx.csr for xaui logic analyzer mode.
+ * For a full description see the document at .../rtl/pcs/readme_logic_analyzer.txt
+ *
+ * PCSX regs follow IEEE Std 802.3-2005, Section 45.2.3
+ *
+ * PCSX_CONTROL1_REG = Control Register1
+ */
+union cvmx_pcsxx_control1_reg {
+	u64 u64;
+	struct cvmx_pcsxx_control1_reg_s {
+		u64 reserved_16_63 : 48;
+		u64 reset : 1;
+		u64 loopbck1 : 1;
+		u64 spdsel1 : 1;
+		u64 reserved_12_12 : 1;
+		u64 lo_pwr : 1;
+		u64 reserved_7_10 : 4;
+		u64 spdsel0 : 1;
+		u64 spd : 4;
+		u64 reserved_0_1 : 2;
+	} s;
+	struct cvmx_pcsxx_control1_reg_s cn52xx;
+	struct cvmx_pcsxx_control1_reg_s cn52xxp1;
+	struct cvmx_pcsxx_control1_reg_s cn56xx;
+	struct cvmx_pcsxx_control1_reg_s cn56xxp1;
+	struct cvmx_pcsxx_control1_reg_s cn61xx;
+	struct cvmx_pcsxx_control1_reg_s cn63xx;
+	struct cvmx_pcsxx_control1_reg_s cn63xxp1;
+	struct cvmx_pcsxx_control1_reg_s cn66xx;
+	struct cvmx_pcsxx_control1_reg_s cn68xx;
+	struct cvmx_pcsxx_control1_reg_s cn68xxp1;
+	struct cvmx_pcsxx_control1_reg_s cn70xx;
+	struct cvmx_pcsxx_control1_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_control1_reg cvmx_pcsxx_control1_reg_t;
+
+/**
+ * cvmx_pcsx#_control2_reg
+ *
+ * PCSX_CONTROL2_REG = Control Register2
+ *
+ */
+union cvmx_pcsxx_control2_reg {
+	u64 u64;
+	struct cvmx_pcsxx_control2_reg_s {
+		u64 reserved_2_63 : 62;
+		u64 type : 2;
+	} s;
+	struct cvmx_pcsxx_control2_reg_s cn52xx;
+	struct cvmx_pcsxx_control2_reg_s cn52xxp1;
+	struct cvmx_pcsxx_control2_reg_s cn56xx;
+	struct cvmx_pcsxx_control2_reg_s cn56xxp1;
+	struct cvmx_pcsxx_control2_reg_s cn61xx;
+	struct cvmx_pcsxx_control2_reg_s cn63xx;
+	struct cvmx_pcsxx_control2_reg_s cn63xxp1;
+	struct cvmx_pcsxx_control2_reg_s cn66xx;
+	struct cvmx_pcsxx_control2_reg_s cn68xx;
+	struct cvmx_pcsxx_control2_reg_s cn68xxp1;
+	struct cvmx_pcsxx_control2_reg_s cn70xx;
+	struct cvmx_pcsxx_control2_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_control2_reg cvmx_pcsxx_control2_reg_t;
+
+/**
+ * cvmx_pcsx#_int_en_reg
+ *
+ * PCSX Interrupt Enable Register
+ *
+ */
+union cvmx_pcsxx_int_en_reg {
+	u64 u64;
+	struct cvmx_pcsxx_int_en_reg_s {
+		u64 reserved_7_63 : 57;
+		u64 dbg_sync_en : 1;
+		u64 algnlos_en : 1;
+		u64 synlos_en : 1;
+		u64 bitlckls_en : 1;
+		u64 rxsynbad_en : 1;
+		u64 rxbad_en : 1;
+		u64 txflt_en : 1;
+	} s;
+	struct cvmx_pcsxx_int_en_reg_cn52xx {
+		u64 reserved_6_63 : 58;
+		u64 algnlos_en : 1;
+		u64 synlos_en : 1;
+		u64 bitlckls_en : 1;
+		u64 rxsynbad_en : 1;
+		u64 rxbad_en : 1;
+		u64 txflt_en : 1;
+	} cn52xx;
+	struct cvmx_pcsxx_int_en_reg_cn52xx cn52xxp1;
+	struct cvmx_pcsxx_int_en_reg_cn52xx cn56xx;
+	struct cvmx_pcsxx_int_en_reg_cn52xx cn56xxp1;
+	struct cvmx_pcsxx_int_en_reg_s cn61xx;
+	struct cvmx_pcsxx_int_en_reg_s cn63xx;
+	struct cvmx_pcsxx_int_en_reg_s cn63xxp1;
+	struct cvmx_pcsxx_int_en_reg_s cn66xx;
+	struct cvmx_pcsxx_int_en_reg_s cn68xx;
+	struct cvmx_pcsxx_int_en_reg_s cn68xxp1;
+	struct cvmx_pcsxx_int_en_reg_s cn70xx;
+	struct cvmx_pcsxx_int_en_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_int_en_reg cvmx_pcsxx_int_en_reg_t;
+
+/**
+ * cvmx_pcsx#_int_reg
+ *
+ * PCSX Interrupt Register
+ * Note: DBG_SYNC is an edge-triggered interrupt. When set, it indicates that
+ * the PCS synchronization state machine (Figure 48-7 state diagram in IEEE
+ * Std 802.3-2005) changed state from SYNC_ACQUIRED_1 to SYNC_ACQUIRED_2,
+ * indicating that an invalid code group was received on one of the 4 receive
+ * lanes. This interrupt should always be disabled and used only to help debug
+ * link problems.
+ */
+union cvmx_pcsxx_int_reg {
+	u64 u64;
+	struct cvmx_pcsxx_int_reg_s {
+		u64 reserved_7_63 : 57;
+		u64 dbg_sync : 1;
+		u64 algnlos : 1;
+		u64 synlos : 1;
+		u64 bitlckls : 1;
+		u64 rxsynbad : 1;
+		u64 rxbad : 1;
+		u64 txflt : 1;
+	} s;
+	struct cvmx_pcsxx_int_reg_cn52xx {
+		u64 reserved_6_63 : 58;
+		u64 algnlos : 1;
+		u64 synlos : 1;
+		u64 bitlckls : 1;
+		u64 rxsynbad : 1;
+		u64 rxbad : 1;
+		u64 txflt : 1;
+	} cn52xx;
+	struct cvmx_pcsxx_int_reg_cn52xx cn52xxp1;
+	struct cvmx_pcsxx_int_reg_cn52xx cn56xx;
+	struct cvmx_pcsxx_int_reg_cn52xx cn56xxp1;
+	struct cvmx_pcsxx_int_reg_s cn61xx;
+	struct cvmx_pcsxx_int_reg_s cn63xx;
+	struct cvmx_pcsxx_int_reg_s cn63xxp1;
+	struct cvmx_pcsxx_int_reg_s cn66xx;
+	struct cvmx_pcsxx_int_reg_s cn68xx;
+	struct cvmx_pcsxx_int_reg_s cn68xxp1;
+	struct cvmx_pcsxx_int_reg_s cn70xx;
+	struct cvmx_pcsxx_int_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_int_reg cvmx_pcsxx_int_reg_t;
+
+/**
+ * cvmx_pcsx#_log_anl_reg
+ *
+ * PCSX Logic Analyzer Register
+ * NOTE: The logic analyzer is enabled with LA_EN for xaui only. PKT_SZ is
+ * effective only when LA_EN=1.
+ * For normal operation (xaui), this bit must be 0. The dropped lane is used
+ * to send rxc[3:0].
+ * See pcs.csr for sgmii/1000Base-X logic analyzer mode.
+ * For a full description see the document at .../rtl/pcs/readme_logic_analyzer.txt
+ */
+union cvmx_pcsxx_log_anl_reg {
+	u64 u64;
+	struct cvmx_pcsxx_log_anl_reg_s {
+		u64 reserved_7_63 : 57;
+		u64 enc_mode : 1;
+		u64 drop_ln : 2;
+		u64 lafifovfl : 1;
+		u64 la_en : 1;
+		u64 pkt_sz : 2;
+	} s;
+	struct cvmx_pcsxx_log_anl_reg_s cn52xx;
+	struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
+	struct cvmx_pcsxx_log_anl_reg_s cn56xx;
+	struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
+	struct cvmx_pcsxx_log_anl_reg_s cn61xx;
+	struct cvmx_pcsxx_log_anl_reg_s cn63xx;
+	struct cvmx_pcsxx_log_anl_reg_s cn63xxp1;
+	struct cvmx_pcsxx_log_anl_reg_s cn66xx;
+	struct cvmx_pcsxx_log_anl_reg_s cn68xx;
+	struct cvmx_pcsxx_log_anl_reg_s cn68xxp1;
+	struct cvmx_pcsxx_log_anl_reg_s cn70xx;
+	struct cvmx_pcsxx_log_anl_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_log_anl_reg cvmx_pcsxx_log_anl_reg_t;
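+
+/*
+ * Illustrative sketch only, per the note above: enabling the xaui logic
+ * analyzer on interface 0 (cvmx_read_csr()/cvmx_write_csr() are assumed from
+ * the surrounding Octeon headers). LA_EN must stay 0 for normal operation.
+ *
+ *	cvmx_pcsxx_log_anl_reg_t anl;
+ *
+ *	anl.u64 = cvmx_read_csr(CVMX_PCSXX_LOG_ANL_REG(0));
+ *	anl.s.la_en = 1;
+ *	anl.s.pkt_sz = 2;	// effective only while la_en = 1
+ *	cvmx_write_csr(CVMX_PCSXX_LOG_ANL_REG(0), anl.u64);
+ */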
+
+/**
+ * cvmx_pcsx#_misc_ctl_reg
+ *
+ * PCSX Misc Control Register
+ * LN_SWAP for XAUI is used to simplify the interconnection layout between devices.
+ */
+union cvmx_pcsxx_misc_ctl_reg {
+	u64 u64;
+	struct cvmx_pcsxx_misc_ctl_reg_s {
+		u64 reserved_4_63 : 60;
+		u64 tx_swap : 1;
+		u64 rx_swap : 1;
+		u64 xaui : 1;
+		u64 gmxeno : 1;
+	} s;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn61xx;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn63xx;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn63xxp1;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn66xx;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn68xx;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn68xxp1;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn70xx;
+	struct cvmx_pcsxx_misc_ctl_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_misc_ctl_reg cvmx_pcsxx_misc_ctl_reg_t;
+
+/**
+ * cvmx_pcsx#_rx_sync_states_reg
+ *
+ * PCSX_RX_SYNC_STATES_REG = Receive Sync States Register
+ *
+ */
+union cvmx_pcsxx_rx_sync_states_reg {
+	u64 u64;
+	struct cvmx_pcsxx_rx_sync_states_reg_s {
+		u64 reserved_16_63 : 48;
+		u64 sync3st : 4;
+		u64 sync2st : 4;
+		u64 sync1st : 4;
+		u64 sync0st : 4;
+	} s;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn61xx;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn63xx;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn63xxp1;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn66xx;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn68xx;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn68xxp1;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn70xx;
+	struct cvmx_pcsxx_rx_sync_states_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_rx_sync_states_reg cvmx_pcsxx_rx_sync_states_reg_t;
+
+/**
+ * cvmx_pcsx#_serdes_crdt_cnt_reg
+ *
+ * PCSX SERDES Credit Count
+ *
+ */
+union cvmx_pcsxx_serdes_crdt_cnt_reg {
+	u64 u64;
+	struct cvmx_pcsxx_serdes_crdt_cnt_reg_s {
+		u64 reserved_5_63 : 59;
+		u64 cnt : 5;
+	} s;
+	struct cvmx_pcsxx_serdes_crdt_cnt_reg_s cn70xx;
+	struct cvmx_pcsxx_serdes_crdt_cnt_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_serdes_crdt_cnt_reg cvmx_pcsxx_serdes_crdt_cnt_reg_t;
+
+/**
+ * cvmx_pcsx#_spd_abil_reg
+ *
+ * PCSX_SPD_ABIL_REG = Speed ability register
+ *
+ */
+union cvmx_pcsxx_spd_abil_reg {
+	u64 u64;
+	struct cvmx_pcsxx_spd_abil_reg_s {
+		u64 reserved_2_63 : 62;
+		u64 tenpasst : 1;
+		u64 tengb : 1;
+	} s;
+	struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
+	struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
+	struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
+	struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
+	struct cvmx_pcsxx_spd_abil_reg_s cn61xx;
+	struct cvmx_pcsxx_spd_abil_reg_s cn63xx;
+	struct cvmx_pcsxx_spd_abil_reg_s cn63xxp1;
+	struct cvmx_pcsxx_spd_abil_reg_s cn66xx;
+	struct cvmx_pcsxx_spd_abil_reg_s cn68xx;
+	struct cvmx_pcsxx_spd_abil_reg_s cn68xxp1;
+	struct cvmx_pcsxx_spd_abil_reg_s cn70xx;
+	struct cvmx_pcsxx_spd_abil_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_spd_abil_reg cvmx_pcsxx_spd_abil_reg_t;
+
+/**
+ * cvmx_pcsx#_status1_reg
+ *
+ * PCSX_STATUS1_REG = Status Register1
+ *
+ */
+union cvmx_pcsxx_status1_reg {
+	u64 u64;
+	struct cvmx_pcsxx_status1_reg_s {
+		u64 reserved_8_63 : 56;
+		u64 flt : 1;
+		u64 reserved_3_6 : 4;
+		u64 rcv_lnk : 1;
+		u64 lpable : 1;
+		u64 reserved_0_0 : 1;
+	} s;
+	struct cvmx_pcsxx_status1_reg_s cn52xx;
+	struct cvmx_pcsxx_status1_reg_s cn52xxp1;
+	struct cvmx_pcsxx_status1_reg_s cn56xx;
+	struct cvmx_pcsxx_status1_reg_s cn56xxp1;
+	struct cvmx_pcsxx_status1_reg_s cn61xx;
+	struct cvmx_pcsxx_status1_reg_s cn63xx;
+	struct cvmx_pcsxx_status1_reg_s cn63xxp1;
+	struct cvmx_pcsxx_status1_reg_s cn66xx;
+	struct cvmx_pcsxx_status1_reg_s cn68xx;
+	struct cvmx_pcsxx_status1_reg_s cn68xxp1;
+	struct cvmx_pcsxx_status1_reg_s cn70xx;
+	struct cvmx_pcsxx_status1_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_status1_reg cvmx_pcsxx_status1_reg_t;
+
+/**
+ * cvmx_pcsx#_status2_reg
+ *
+ * PCSX_STATUS2_REG = Status Register2
+ *
+ */
+union cvmx_pcsxx_status2_reg {
+	u64 u64;
+	struct cvmx_pcsxx_status2_reg_s {
+		u64 reserved_16_63 : 48;
+		u64 dev : 2;
+		u64 reserved_12_13 : 2;
+		u64 xmtflt : 1;
+		u64 rcvflt : 1;
+		u64 reserved_3_9 : 7;
+		u64 tengb_w : 1;
+		u64 tengb_x : 1;
+		u64 tengb_r : 1;
+	} s;
+	struct cvmx_pcsxx_status2_reg_s cn52xx;
+	struct cvmx_pcsxx_status2_reg_s cn52xxp1;
+	struct cvmx_pcsxx_status2_reg_s cn56xx;
+	struct cvmx_pcsxx_status2_reg_s cn56xxp1;
+	struct cvmx_pcsxx_status2_reg_s cn61xx;
+	struct cvmx_pcsxx_status2_reg_s cn63xx;
+	struct cvmx_pcsxx_status2_reg_s cn63xxp1;
+	struct cvmx_pcsxx_status2_reg_s cn66xx;
+	struct cvmx_pcsxx_status2_reg_s cn68xx;
+	struct cvmx_pcsxx_status2_reg_s cn68xxp1;
+	struct cvmx_pcsxx_status2_reg_s cn70xx;
+	struct cvmx_pcsxx_status2_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_status2_reg cvmx_pcsxx_status2_reg_t;
+
+/**
+ * cvmx_pcsx#_tx_rx_polarity_reg
+ *
+ * RX lane polarity vector [3:0] = XOR_RXPLRT<9:6> ^ [4[RXPLRT<1>]];
+ * TX lane polarity vector [3:0] = XOR_TXPLRT<5:2> ^ [4[TXPLRT<0>]];
+ * In short, keep <1:0> at 2'b00 and use the <5:2> and <9:6> fields to define
+ * per-lane polarities.
+ */
+union cvmx_pcsxx_tx_rx_polarity_reg {
+	u64 u64;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s {
+		u64 reserved_10_63 : 54;
+		u64 xor_rxplrt : 4;
+		u64 xor_txplrt : 4;
+		u64 rxplrt : 1;
+		u64 txplrt : 1;
+	} s;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
+		u64 reserved_2_63 : 62;
+		u64 rxplrt : 1;
+		u64 txplrt : 1;
+	} cn52xxp1;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn61xx;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xx;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xxp1;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn66xx;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xx;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xxp1;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn70xx;
+	struct cvmx_pcsxx_tx_rx_polarity_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_tx_rx_polarity_reg cvmx_pcsxx_tx_rx_polarity_reg_t;
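+
+/*
+ * Worked example of the polarity formulas above: with RXPLRT = 1 and
+ * XOR_RXPLRT = 4'b0101, the effective RX lane polarity vector is
+ * 4'b0101 ^ 4'b1111 = 4'b1010, i.e. lanes 1 and 3 are inverted.
+ */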
+
+/**
+ * cvmx_pcsx#_tx_rx_states_reg
+ *
+ * PCSX_TX_RX_STATES_REG = Transmit Receive States Register
+ *
+ */
+union cvmx_pcsxx_tx_rx_states_reg {
+	u64 u64;
+	struct cvmx_pcsxx_tx_rx_states_reg_s {
+		u64 reserved_14_63 : 50;
+		u64 term_err : 1;
+		u64 syn3bad : 1;
+		u64 syn2bad : 1;
+		u64 syn1bad : 1;
+		u64 syn0bad : 1;
+		u64 rxbad : 1;
+		u64 algn_st : 3;
+		u64 rx_st : 2;
+		u64 tx_st : 3;
+	} s;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
+	struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
+		u64 reserved_13_63 : 51;
+		u64 syn3bad : 1;
+		u64 syn2bad : 1;
+		u64 syn1bad : 1;
+		u64 syn0bad : 1;
+		u64 rxbad : 1;
+		u64 algn_st : 3;
+		u64 rx_st : 2;
+		u64 tx_st : 3;
+	} cn52xxp1;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
+	struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn61xx;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn63xx;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn63xxp1;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn66xx;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn68xx;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn68xxp1;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn70xx;
+	struct cvmx_pcsxx_tx_rx_states_reg_s cn70xxp1;
+};
+
+typedef union cvmx_pcsxx_tx_rx_states_reg cvmx_pcsxx_tx_rx_states_reg_t;
+
+#endif
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pki-cluster.h b/arch/mips/mach-octeon/include/mach/cvmx-pki-cluster.h
new file mode 100644
index 0000000..4d5a9d4
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pki-cluster.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+/* L4_PORT_CHECK_DISABLE_LF tag */
+/* This file is autogenerated from ipemainc.elf */
+const int cvmx_pki_cluster_code_length = 997;
+const u64 cvmx_pki_cluster_code_default[] = {
+	0x000000000a000000ull, 0x0000413a68024070ull, 0x0000813800200020ull,
+	0x900081b800200020ull, 0x0004da00ffff0001ull, 0x000455ab68010b0eull,
+	0x00045fba46010000ull, 0x9046898120002000ull, 0x0004418068010028ull,
+	0x90665300680100f0ull, 0x0004413f68004070ull, 0x00065380680100f0ull,
+	0x00045a346803a0f0ull, 0x000401b448000001ull, 0x00045cb968030870ull,
+	0x0007debd00100010ull, 0x0000813b80008000ull, 0x000441bb68004070ull,
+	0xd001c00000000000ull, 0xd021c00000000000ull, 0x00045f80680100f0ull,
+	0x0004c639ff000200ull, 0x0004403f72010000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x000041ba68034078ull, 0x0000512268030870ull,
+	0x000041bc68034070ull, 0x00005d3a68030870ull, 0x00045cb942080000ull,
+	0x0004552a4e09312dull, 0x00045cb968082868ull, 0x0004410246090000ull,
+	0x0000813800800080ull, 0x000401a486000005ull, 0x000615ab74000123ull,
+	0x0007122448000004ull, 0x0000813901000000ull, 0x000481b800010001ull,
+	0x000685b800020002ull, 0xa006823800010001ull, 0x0006c639ff000400ull,
+	0x00085f3e68010a00ull, 0xa0885f3e68010f01ull, 0x00085f3e68010405ull,
+	0x00085f3e68010906ull, 0xa0485f3e68010e07ull, 0xa061c00000000000ull,
+	0xa4085f3e68010b28ull, 0xa421c00000000000ull, 0x00095f3e68010940ull,
+	0xa066403e72010000ull, 0x000941be68034039ull, 0x00085f3e68010305ull,
+	0xa4685f3e68010028ull, 0x00095f3e68030030ull, 0x00095f3e68010416ull,
+	0x0001c00000000000ull, 0x00065cb942080000ull, 0xa046552a4e09312dull,
+	0xa446c639ff000500ull, 0x0006debd00010001ull, 0x0006403e72010001ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x00065cb942080000ull, 0x0006552a4e09312dull,
+	0x00065cb968082868ull, 0x0006410246090000ull, 0x9060813901000000ull,
+	0x0004c639ff000800ull, 0x0004400072010000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x00045cb942080000ull,
+	0x9084552a4e09312dull, 0x90a4c639ff000900ull, 0x00045f80680100f0ull,
+	0x0004403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x00045cb942080000ull, 0x9004552a4e09312dull,
+	0x0004c639ff000a00ull, 0x0004400072010000ull, 0x00048181ff00ff00ull,
+	0x0007820101000100ull, 0x0006898100ff00ffull, 0x00048301ffff0180ull,
+	0x0008d5ab10001000ull, 0x0004d4a900010001ull, 0x0001c00000000000ull,
+	0x00045cb942080000ull, 0x9024552a4e09312dull, 0x0004c639ff000b00ull,
+	0x90445f80680100f0ull, 0x000459b368020070ull, 0x000401024000000cull,
+	0x0006823fffffffffull, 0x00088281ffffffffull, 0x000ad5ab20002000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0004403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x000c8b3fffffc200ull, 0x000c8b01ffff0001ull,
+	0x000ddebd00020002ull, 0x00045cb942080000ull, 0x0004552a4e09312dull,
+	0x00045cb968082868ull, 0x0004410246090000ull, 0x0000813901000000ull,
+	0x000481b800080008ull, 0x9846c639ff001200ull, 0x9861c00000000000ull,
+	0x00064180680100f0ull, 0x0006400372010000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x000683891f000200ull,
+	0x000ed52a00800080ull, 0x000e5e3c68020070ull, 0x00065cb942080000ull,
+	0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
+	0x0000813d00020002ull, 0x0004893901000000ull, 0x9004893800040004ull,
+	0x9024c639ff001300ull, 0x00044180680100f0ull, 0x9044400372010001ull,
+	0x0001c00000000000ull, 0x00045f3e68010044ull, 0x0004debd00040004ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x000483891f000200ull, 0x000ed52a00800080ull, 0x000e5e3c68020070ull,
+	0x00045cb942080000ull, 0x0004552a4e09312dull, 0x00045cb968082868ull,
+	0x0004410246090000ull, 0x000581b902000000ull, 0x9826c639ff001800ull,
+	0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
+	0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
+	0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
+	0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+	0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001900ull,
+	0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030001ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
+	0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
+	0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
+	0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+	0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001a00ull,
+	0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
+	0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
+	0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
+	0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+	0x0006410246090000ull, 0x000081b902000000ull, 0x9826c639ff001b00ull,
+	0x9801c00000000000ull, 0x00064180680100f0ull, 0x0006400172030001ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x000682091f000200ull, 0x000883aa00800080ull, 0x000ed52a00400040ull,
+	0x000e5e3c68020870ull, 0x000fd52a00800080ull, 0x000f5e3c68020070ull,
+	0x000983891f000000ull, 0x000f54a968090148ull, 0x000f59b368020870ull,
+	0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+	0x0006410246090000ull, 0x9000813902000000ull, 0x000481b800400040ull,
+	0x00068981ffff8847ull, 0x00068581ffff8848ull, 0x0006debd00080008ull,
+	0x0006c639ff001e00ull, 0x0006010240000002ull, 0x9801c00000000000ull,
+	0x9821c00000000000ull, 0x00065f80680100f0ull, 0x0006403f72010000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x00065cb942080000ull, 0x0006552a4e09312dull, 0x00065cb968082868ull,
+	0x0006010240000004ull, 0x0006823902000000ull, 0x00065f3e68010629ull,
+	0xac28828101000100ull, 0x000b010240000004ull, 0xa42b820101000100ull,
+	0x0009010240000004ull, 0xac29828101000100ull, 0x000b010240000004ull,
+	0xa42b820101000100ull, 0x0009010240000004ull, 0xac29828101000100ull,
+	0x000b010240000004ull, 0x0006823904000000ull, 0x0008d4a907c00200ull,
+	0x0008593268020070ull, 0x0008dcb902000200ull, 0x9000813902000000ull,
+	0x0001c00000000000ull, 0x00040181840005ffull, 0x0006010240000008ull,
+	0x9801c00000000000ull, 0x0006debd00200020ull, 0x00048181ffff0806ull,
+	0x0006d4a907c00180ull, 0x00048201ffff8035ull, 0x00068581ffff8035ull,
+	0x0008d4a907c001c0ull, 0x0006dcb97c007c00ull, 0x00048201ffff0800ull,
+	0x00088601ffff86ddull, 0x00068581ffff0800ull, 0x00068581ffff86ddull,
+	0x0008d4a907c00200ull, 0x0009dcb97c007c00ull, 0x0007823d00200020ull,
+	0x000685bd00200020ull, 0x0008d4a907c00140ull, 0x0004010240000002ull,
+	0x0006593268020070ull, 0x000042a486020000ull, 0x000a15ab74000124ull,
+	0x9000813904000000ull, 0x0001c00000000000ull, 0x00048181f0004000ull,
+	0x9886593268020070ull, 0x0006d4a907c00200ull, 0x00068201ff000000ull,
+	0xa40815ab74000345ull, 0x0009debd01000100ull, 0xa429418068010038ull,
+	0x00095a3468010870ull, 0x0009028386000005ull, 0x000a068186000014ull,
+	0xacca15ab74000343ull, 0xacebc639ff002200ull, 0x000b5f80680100f0ull,
+	0xac8b403f72010000ull, 0x000b8203000f0005ull, 0x000b5a3468010070ull,
+	0x0009d4a907c00240ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x000b5cb942080000ull, 0xad0b552a4e09312dull,
+	0xad2bc639ff002700ull, 0x000b5f80680100f0ull, 0xac6b403f72010001ull,
+	0x0001c00000000000ull, 0x000b82013fff0000ull, 0x0009d52a00010001ull,
+	0x0009d4a9f8006800ull, 0x0009593268020870ull, 0x0006418068030230ull,
+	0x000b5cb942080000ull, 0x000b552a4e09312dull, 0x0006410240030000ull,
+	0x9c01c00000000000ull, 0x0001c00000000000ull, 0x00078201f0006000ull,
+	0x0008593268020070ull, 0x0008d4a907c00280ull, 0xa069d4a907c00000ull,
+	0x00085a3468010874ull, 0x0008818100ff0000ull, 0x000615ab74000345ull,
+	0x00075a3468010078ull, 0x9c8741b9680040f0ull, 0x9ca7c603ff001f00ull,
+	0x00075f80680100f0ull, 0x0007403f72010001ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0007418342080000ull,
+	0x9cc7552a4e09312dull, 0x9ce7c603ff002000ull, 0x00075f80680100f0ull,
+	0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0007418342080000ull, 0x9d07552a4e09312dull,
+	0x9d27c603ff002100ull, 0x00075f80680100f0ull, 0x0007403f72010001ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0007418342080000ull, 0x0007552a4e09312dull, 0x9d475c80680300f0ull,
+	0x9d67c639ff002200ull, 0x00075f80680100f0ull, 0x0007403f72010000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x00075cb942080000ull, 0x0007552a4e09312dull, 0x9d8741b9680040f0ull,
+	0x9da7c603ff002400ull, 0x00075f80680100f0ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0007418342080000ull, 0x9dc7552a4e09312dull,
+	0x9de7c603ff002500ull, 0x00075f80680100f0ull, 0x0007403f72010001ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0007418342080000ull, 0x0007552a4e09312dull, 0x0007010240000020ull,
+	0x9c01c00000000000ull, 0x9c27c603ff002600ull, 0x00075f80680100f0ull,
+	0x0007403f72010000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0007418342080000ull, 0x0007552a4e09312dull,
+	0x9c475c80680300f0ull, 0x9c67c639ff002700ull, 0x00075f80680100f0ull,
+	0x0007403f72010001ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x00075cb942080000ull, 0x0007552a4e09312dull,
+	0x0007010240000008ull, 0xa80782b400ff0000ull, 0x000ad4a907c002c0ull,
+	0x000a5a3468010078ull, 0x000a410244010000ull, 0xa80782b400ff003cull,
+	0x000ad4a907c002c0ull, 0x000a5a3468010078ull, 0x000a410244010000ull,
+	0xa80782b400ff002bull, 0x000ad4a907c002c0ull, 0x000a5a3468010078ull,
+	0x000a410244010000ull, 0xa80782b400ff002cull, 0x000ad4a9ffc06ac0ull,
+	0x000a593268020870ull, 0x000ad52a00010001ull, 0x000a5a3468010078ull,
+	0x000a010240000008ull, 0x0007debd01000100ull, 0x000481bd01000100ull,
+	0x0006c639ff002300ull, 0x000641aa68034000ull, 0x000641a968034846ull,
+	0x0006403472030001ull, 0x0004822907000200ull, 0x000915ab74000341ull,
+	0x000082aa00010001ull, 0x000a86ab00ff0045ull, 0x000adcb978007800ull,
+	0x0000822907000200ull, 0x00088a3908000000ull, 0x00065cb942080000ull,
+	0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
+	0x000042a486020000ull, 0x000a15ab74000343ull, 0x000081b940004000ull,
+	0x000685a907c00000ull, 0x000782b807000100ull, 0x000a41b268004070ull,
+	0x000a410040030000ull, 0x000a41ba68004078ull, 0x000a410240030000ull,
+	0xa801c00000000000ull, 0xa821c00000000000ull, 0x000a4180680100f0ull,
+	0x000ac639ff003900ull, 0x000a400372010001ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x000a83891f000000ull,
+	0x000f542868090a48ull, 0x000f583068020070ull, 0x000a5cb942080000ull,
+	0x000a552a4e09312dull, 0x000a5cb968082868ull, 0x000a410246090000ull,
+	0x982881b400ff0011ull, 0x9881c00000000000ull, 0x00064180680100f0ull,
+	0x00068283ffff12b5ull, 0x000a8a8108000800ull, 0x000ad4a9f8009800ull,
+	0x00068303ffff17c1ull, 0x000c8b01c0000000ull, 0xb0ac5bb768010a58ull,
+	0x000cd4a9f800b800ull, 0x000c8281ffff6558ull, 0x000adbb701000100ull,
+	0x000c8281ffff86ddull, 0x000a8681ffff0800ull, 0x000adbb702000200ull,
+	0x000682a9c8009800ull, 0x000adebd02000200ull, 0x000a593268020870ull,
+	0x000a010240000008ull, 0x9c21c00000000000ull, 0x0007813400ff002full,
+	0x90048201ffff6558ull, 0x00098381ffff0800ull, 0x00088281b0002000ull,
+	0x000a593268020870ull, 0x000ad4a9f800a800ull, 0x000adebd02000200ull,
+	0x000e593268020870ull, 0x000ed4a9f800a000ull, 0x000e010240000004ull,
+	0x000e828180008000ull, 0x000a010240000004ull, 0x000e828120002000ull,
+	0x000a010240000004ull, 0x000e828110001000ull, 0x000a010240000004ull,
+	0x000082bd02000200ull, 0xa80ac639ff002800ull, 0xa861c00000000000ull,
+	0x000a418368010526ull, 0xa84a418368010878ull, 0x000a5bb768030078ull,
+	0x000a400172030000ull, 0x000a5b00680100f0ull, 0x000041b468034878ull,
+	0x00005fbf68030878ull, 0x00068229c8009800ull, 0x0008010248000008ull,
+	0xa001c00000000000ull, 0x000843a486020000ull, 0x00088101ffff0000ull,
+	0x000415ab74000464ull, 0x000e15ab74000461ull, 0x0008010240000008ull,
+	0x000c41b76800425aull, 0x000c410240030000ull, 0x000a010240000008ull,
+	0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
+	0x000a410246090000ull, 0x0000422486020000ull, 0x000815ab74000461ull,
+	0x000081b940004000ull, 0x000685a9f8000000ull, 0x000782b807000200ull,
+	0x000a41b268004078ull, 0x000a410040030000ull, 0x000a41ba68004078ull,
+	0x000a410240030000ull, 0xa801c00000000000ull, 0xa821c00000000000ull,
+	0x000a4180680100f0ull, 0x000ac639ff003900ull, 0x000a400372010001ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x000a83891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
+	0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
+	0x000a410246090000ull, 0x000081a9f800b800ull, 0x000689b701000100ull,
+	0x000685a9f8009800ull, 0x000685a9f800a800ull, 0x00078229f800b800ull,
+	0x000601024000000cull, 0x9801c00000000000ull, 0x00088a3702000200ull,
+	0x00088629f800a000ull, 0x00068101ffff8100ull, 0x0004010240000004ull,
+	0x9801c00000000000ull, 0x0009dcb910001000ull, 0x00068101ffff86ddull,
+	0x00048501ffff0800ull, 0x0005dcb978003800ull, 0x0006010240000002ull,
+	0x000081a9f8000000ull, 0x9007813910000000ull, 0x0001c00000000000ull,
+	0x00048181f0004000ull, 0x988658b168020070ull, 0x0006d428001f0008ull,
+	0x00068201ff000000ull, 0xa40815ab74000545ull, 0x0009debd04000400ull,
+	0xa429418068010038ull, 0x00095a3468010870ull, 0x0009028386000005ull,
+	0xac8a068186000014ull, 0x000a15ab74000543ull, 0x000b5a3468010070ull,
+	0xac6b8303000f0005ull, 0x000dd428001f0009ull, 0x000b83013fff0000ull,
+	0x000dd42803e001a0ull, 0x000d58b168020870ull, 0x000ddcb960006000ull,
+	0x0006418068030230ull, 0x0006410240030000ull, 0x9c01c00000000000ull,
+	0x0001c00000000000ull, 0x00078201f0006000ull, 0x000858b168020070ull,
+	0xa068d428001f000aull, 0x00085a3468010874ull, 0x0008818100ff0000ull,
+	0x000615ab74000545ull, 0x00075a3468010078ull, 0x0007010240000028ull,
+	0xa80782b400ff0000ull, 0x000ad428001f000bull, 0x000a5a3468010078ull,
+	0x000a410244010000ull, 0xa80782b400ff003cull, 0x000ad428001f000bull,
+	0x000a5a3468010078ull, 0x000a410244010000ull, 0xa80782b400ff002bull,
+	0x000ad428001f000bull, 0x000a5a3468010078ull, 0x000a410244010000ull,
+	0xa80782b400ff002cull, 0x000ad42803ff01abull, 0x000adcb960006000ull,
+	0x000a58b168020870ull, 0x000a5a3468010078ull, 0x000a010240000008ull,
+	0x0007debd04000400ull, 0x000481bd04000400ull, 0x0006c639ff002b00ull,
+	0x0006832803e001a0ull, 0x000cc18300010001ull, 0x000dc18300010000ull,
+	0x000641a868034840ull, 0x0006403472030001ull, 0x00048228001c0008ull,
+	0x000915ab74000541ull, 0x000082ab00ff0045ull, 0x000adcb960006000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x00065cb942080000ull,
+	0x0006552a4e09312dull, 0x00065cb968082868ull, 0x0006410246090000ull,
+	0x000042a486020000ull, 0x000a15ab74000543ull, 0x000081b940004000ull,
+	0x000685a8001f0000ull, 0x000782b807000300ull, 0x000a41b168004070ull,
+	0x000a410040030000ull, 0x000a41ba68004078ull, 0x000a410240030000ull,
+	0xa801c00000000000ull, 0xa821c00000000000ull, 0x000a4180680100f0ull,
+	0x000ac639ff003900ull, 0x000a400372010001ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x000a83891f000000ull,
+	0x000f542868090a48ull, 0x000f583068020070ull, 0x000a5cb942080000ull,
+	0x000a552a4e09312dull, 0x000a5cb968082868ull, 0x000a410246090000ull,
+	0x00008329ff000200ull, 0x000c8728001c0008ull, 0x000c813920000000ull,
+	0x000481b400ff006cull, 0x0006d42803e001c0ull, 0x000658b168020870ull,
+	0xa047823400ff0033ull, 0x0008d42803e00180ull, 0xa0685f80680100f0ull,
+	0xa007823400ff0032ull, 0x0008d42803e00180ull, 0xa0285f80680100f0ull,
+	0x0007822803e00180ull, 0x0008c639ff002e00ull, 0x0008403f72010000ull,
+	0x000858b168020870ull, 0x00085abf680040f0ull, 0x00085d80680100f0ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x00085cb942080000ull, 0x0008552a4e09312dull, 0x00085cb968082868ull,
+	0x0008410246090000ull, 0x986981b400ff002full, 0x0006d42803e00280ull,
+	0x00065a80680100f0ull, 0x000658b168020870ull, 0x000481b400ff0084ull,
+	0x0006d42803e00240ull, 0x0004823400ff0011ull, 0x0008d42803e00220ull,
+	0x98c481b400ff0006ull, 0x0006d42803e00200ull, 0x00065ebd68010b31ull,
+	0x000641806801003cull, 0x0006028386000005ull, 0x000a15ab74000661ull,
+	0x0006418068030230ull, 0x0008c180ffff0008ull, 0x0008863400ff0006ull,
+	0x0008418240030000ull, 0x000842a486030000ull, 0x000a15ab74000661ull,
+	0x9008863400ff0084ull, 0x0004c639ff002f00ull, 0x0004400072010001ull,
+	0x000858b168020870ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x00085cb942080000ull, 0x9028552a4e09312dull,
+	0x0004c639ff003000ull, 0x0004403472010000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x000858b168020870ull, 0x0001c00000000000ull,
+	0x000081b940004000ull, 0x000685a803e00000ull, 0x00045cb942080000ull,
+	0x0004552a4e09312dull, 0x00045cb968082868ull, 0x0004410246090000ull,
+	0x000483891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
+	0x000042a486020000ull, 0x000a15ab74000661ull, 0x000782b807000400ull,
+	0x000a41b168004078ull, 0x000a410040030000ull, 0x000a41ba68004078ull,
+	0x000a410240030000ull, 0xa801c00000000000ull, 0xa821c00000000000ull,
+	0x000a4180680100f0ull, 0x000ac639ff003900ull, 0x000a400372010001ull,
+	0x0001c00000000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull, 0x000041bf68034878ull, 0x00005a3468030878ull,
+	0x000a83891f000000ull, 0x000f542868090a48ull, 0x000f583068020070ull,
+	0x000a5cb942080000ull, 0x000a552a4e09312dull, 0x000a5cb968082868ull,
+	0x000a410246090000ull, 0x00005fb968004250ull, 0x0000003f70000000ull,
+	0x000041b968034070ull, 0x0000512268030070ull, 0x0000813800200020ull,
+	0x0004413a68024070ull, 0x9001c00000000000ull, 0x000081b800200020ull,
+	0x9026898180008000ull, 0x0004890110001000ull, 0x000456ad680100a0ull,
+	0x0006898180008000ull, 0x000652a56801001dull, 0x000456ad68090b5bull,
+	0x00055680680900f0ull, 0x0005debd00400040ull, 0x00005600680800f0ull,
+	0x0000833d00200020ull, 0x000c872907c00000ull, 0x000dd62c20000000ull,
+	0x0000822902800280ull, 0x000841b268034070ull, 0x000982a8000a000aull,
+	0x000a41b168034070ull, 0x000b822907c00000ull, 0x0000003f70000800ull,
+	0x000941b268034070ull, 0x0000418048030000ull, 0x0000018340000008ull,
+	0x0009018348000004ull, 0x000050a168030c20ull, 0x000082aa00800080ull,
+	0x000850a168080c2bull, 0x0000820800010001ull, 0x000850a168000c20ull,
+	0x000752a56808001eull, 0x000a822a00400040ull, 0x00088a0900010001ull,
+	0x000841bc68034078ull, 0x000941bc68034070ull, 0x000a583068030870ull,
+	0x0000813d00400000ull, 0x0005c180ffff0000ull, 0x00058288001e0000ull,
+	0x000b8208001e0008ull, 0x00085d2168004030ull, 0x00098308001e0010ull,
+	0x00088608001e0010ull, 0x000c5d2168004070ull, 0x0008418068080025ull,
+	0x000841ba6803a0f0ull, 0x000856ad40030000ull, 0x0008c180ffff0000ull,
+	0x0005820807000500ull, 0x00088a3d00010001ull, 0x000841be68004050ull,
+	0x0005828807000300ull, 0x000a8abd00040004ull, 0x000a41be68004040ull,
+	0x0005820807000100ull, 0x00088a2a00800080ull, 0x0008413068004078ull,
+	0xa021c00000000000ull, 0x0005828807000200ull, 0x000841806801002dull,
+	0x000a8abd00080008ull, 0x000a41be68004026ull, 0x0005820807000400ull,
+	0x00088a2907000200ull, 0x000841b46800405aull, 0x000556ad40030000ull,
+	0x000081bd00100010ull, 0x0006c180ffff0000ull, 0x0006822a00800080ull,
+	0x00088a0900100010ull, 0x0008413c68024070ull, 0xa021c00000000000ull,
+	0x0006832907000200ull, 0x0008c181f0008000ull, 0x000841834c00ffffull,
+	0x0006822a00400040ull, 0x00088a0900200020ull, 0x0008413c68024078ull,
+	0xa021c00000000000ull, 0x000c8b0900400040ull, 0x0008dc01f0008000ull,
+	0x000841b84c03ffffull, 0x000c8b2a00010000ull, 0x000c41b44c0300ffull,
+	0x000682a9f800a800ull, 0x000a86a9f8009800ull, 0x000a8a8904000400ull,
+	0x000a41b64c03ffffull, 0x000a41b74c0300ffull, 0x0000828901000100ull,
+	0x000a822803e00180ull, 0x0008413168024078ull, 0x0008833400ff0033ull,
+	0x000c010240000004ull, 0xa001c00000000000ull, 0xa021c00000000000ull,
+	0x000841814c03ffffull, 0x000841814c03ffffull, 0x000a822803e00280ull,
+	0x000841b54c03ffffull, 0x000682287c005800ull, 0x00088a0902000200ull,
+	0x0008413068024070ull, 0xa001c00000000000ull, 0x0006830900020002ull,
+	0x00088281e0002000ull, 0xa84a868108000800ull, 0xa861c00000000000ull,
+	0x000a41814c03ffffull, 0x000a41814c03ffffull, 0x00065380680300f0ull,
+	0x000c5321680040b0ull, 0x000dd3260fff0fffull, 0x0006810900800080ull,
+	0x0000003f70000400ull, 0x000082a907000200ull, 0x000a413268024070ull,
+	0xa50a822902800280ull, 0x0004893d08000800ull, 0x00098301ffffffffull,
+	0xa4c98381f000e000ull, 0x00095f00680100f0ull, 0xa5295f3e64010000ull,
+	0x0001c00000000000ull, 0xa4ec8b01ffffffffull, 0x00095d00680100f0ull,
+	0xa1895d3a64010000ull, 0x000cd5ab80008000ull, 0x00088a01ff00ff00ull,
+	0x0008d5ab40004000ull, 0x000ed5ab40004000ull, 0x0004893d40000000ull,
+	0x00005700680800f0ull, 0x00005780680900f0ull, 0x00008229f800a000ull,
+	0x0008c180ffff0018ull, 0x000857af680320f0ull, 0x0007d72ef1ff0000ull,
+	0x0007d7aff0000000ull, 0x0004d72e00fc0000ull, 0x0000812c00020002ull,
+	0x0004892907c00200ull, 0x000441a7680040f0ull, 0x000441be4c03ffffull,
+	0x000441ba4c03ffffull, 0x000481a803c00200ull, 0x0006413168024078ull,
+	0x9801c00000000000ull, 0x9821c00000000000ull, 0x00065f80680100f0ull,
+	0x00065fbf64010000ull, 0x000641bf4c03ffffull, 0x000452a568030250ull,
+	0x0000000008000000ull, 0x0001c00000000000ull, 0x0001c00000000000ull,
+	0x0001c00000000000ull
+};
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pko.h b/arch/mips/mach-octeon/include/mach/cvmx-pko.h
new file mode 100644
index 0000000..26e7a9a
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pko.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Backward compatibility for packet transmission using legacy PKO command.
+ */
+
+#ifndef __CVMX_PKO_H__
+#define __CVMX_PKO_H__
+
+extern cvmx_pko_return_value_t
+cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command,
+		      cvmx_buf_ptr_t packet, uint64_t addr, bool tag_sw);
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
+ * once before this, and the same parameters must be passed to both
+ * cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish().
+ *
+ * WARNING: This function may have to look up the proper PKO port in
+ * the IPD port to PKO port map, and is thus slower than calling
+ * cvmx_pko_send_packet_finish_pkoid() directly if the PKO port
+ * identifier is known.
+ *
+ * @param ipd_port   The IPD port corresponding to the PKO port the packet is for
+ * @param queue  Queue to use
+ * @param pko_command
+ *               PKO HW command word
+ * @param packet Packet to send
+ * @param use_locking
+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
+ *               or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or an error code on output failure
+ */
+static inline cvmx_pko_return_value_t
+cvmx_pko_send_packet_finish(u64 ipd_port, uint64_t queue,
+			    cvmx_pko_command_word0_t pko_command,
+			    cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
+{
+	cvmx_cmd_queue_result_t result;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_legacy_xmit(queue, pko_command, packet, 0,
+					     use_locking ==
+						     CVMX_PKO_LOCK_ATOMIC_TAG);
+	}
+
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+		cvmx_pow_tag_sw_wait();
+
+	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
+				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+				       pko_command.u64, packet.u64);
+	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+		cvmx_pko_doorbell(ipd_port, queue, 2);
+		return CVMX_PKO_SUCCESS;
+	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
+		   (result == CVMX_CMD_QUEUE_FULL)) {
+		return CVMX_PKO_NO_MEMORY;
+	} else {
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+}
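+
+/*
+ * A minimal usage sketch (not part of this header's API): the legacy
+ * transmit path pairs cvmx_pko_send_packet_prepare() with the finish call
+ * above. 'port', 'queue', 'packet' and 'len' are placeholders, and the
+ * command-word fields shown are assumed from the legacy SDK layout:
+ *
+ *	cvmx_pko_command_word0_t cmd;
+ *
+ *	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
+ *	cmd.u64 = 0;
+ *	cmd.s.total_bytes = len;
+ *	cmd.s.segs = 1;
+ *	if (cvmx_pko_send_packet_finish(port, queue, cmd, packet,
+ *					CVMX_PKO_LOCK_CMD_QUEUE) !=
+ *	    CVMX_PKO_SUCCESS)
+ *		debug("packet transmit failed\n");
+ */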
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
+ * once before this, and the same parameters must be passed to both
+ * cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish().
+ *
+ * WARNING: This function may have to look up the proper PKO port in
+ * the IPD port to PKO port map, and is thus slower than calling
+ * cvmx_pko_send_packet_finish3_pkoid() directly if the PKO port
+ * identifier is known.
+ *
+ * @param ipd_port   The IPD port corresponding to the PKO port the packet is for
+ * @param queue  Queue to use
+ * @param pko_command
+ *               PKO HW command word
+ * @param packet Packet to send
+ * @param addr   Physical address of a work queue entry, or a physical
+ *               address to zero on completion.
+ * @param use_locking
+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
+ *               or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or an error code on output failure
+ */
+static inline cvmx_pko_return_value_t
+cvmx_pko_send_packet_finish3(u64 ipd_port, uint64_t queue,
+			     cvmx_pko_command_word0_t pko_command,
+			     cvmx_buf_ptr_t packet, uint64_t addr,
+			     cvmx_pko_lock_t use_locking)
+{
+	cvmx_cmd_queue_result_t result;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_legacy_xmit(queue, pko_command, packet, addr,
+					     use_locking ==
+						     CVMX_PKO_LOCK_ATOMIC_TAG);
+	}
+
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+		cvmx_pow_tag_sw_wait();
+
+	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
+				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+				       pko_command.u64, packet.u64, addr);
+	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+		cvmx_pko_doorbell(ipd_port, queue, 3);
+		return CVMX_PKO_SUCCESS;
+	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
+		   (result == CVMX_CMD_QUEUE_FULL)) {
+		return CVMX_PKO_NO_MEMORY;
+	} else {
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+}
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
+ * once before this, and the same parameters must be passed to both
+ * cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish_pkoid().
+ *
+ * @param pko_port   Port to send it on
+ * @param queue  Queue to use
+ * @param pko_command
+ *               PKO HW command word
+ * @param packet Packet to send
+ * @param use_locking
+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
+ *               or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or an error code on output failure
+ */
+static inline cvmx_pko_return_value_t
+cvmx_pko_send_packet_finish_pkoid(int pko_port, uint64_t queue,
+				  cvmx_pko_command_word0_t pko_command,
+				  cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
+{
+	cvmx_cmd_queue_result_t result;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_legacy_xmit(queue, pko_command, packet, 0,
+					     use_locking ==
+						     CVMX_PKO_LOCK_ATOMIC_TAG);
+	}
+
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+		cvmx_pow_tag_sw_wait();
+	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
+				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+				       pko_command.u64, packet.u64);
+	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+		cvmx_pko_doorbell_pkoid(pko_port, queue, 2);
+		return CVMX_PKO_SUCCESS;
+	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
+		   (result == CVMX_CMD_QUEUE_FULL)) {
+		return CVMX_PKO_NO_MEMORY;
+	} else {
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+}
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly
+ * once before this, and the same parameters must be passed to both
+ * cvmx_pko_send_packet_prepare() and cvmx_pko_send_packet_finish_pkoid().
+ *
+ * @param pko_port   The PKO port the packet is for
+ * @param queue  Queue to use
+ * @param pko_command
+ *               PKO HW command word
+ * @param packet Packet to send
+ * @param addr   Physical address of a work queue entry, or a physical
+ *               address to zero on completion.
+ * @param use_locking
+ *               CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG,
+ *               or CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * @return returns CVMX_PKO_SUCCESS on success, or an error code on output failure
+ */
+static inline cvmx_pko_return_value_t
+cvmx_pko_send_packet_finish3_pkoid(u64 pko_port, uint64_t queue,
+				   cvmx_pko_command_word0_t pko_command,
+				   cvmx_buf_ptr_t packet, uint64_t addr,
+				   cvmx_pko_lock_t use_locking)
+{
+	cvmx_cmd_queue_result_t result;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		return cvmx_pko3_legacy_xmit(queue, pko_command, packet, addr,
+					     use_locking ==
+						     CVMX_PKO_LOCK_ATOMIC_TAG);
+	}
+
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+		cvmx_pow_tag_sw_wait();
+	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
+				       (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+				       pko_command.u64, packet.u64, addr);
+	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+		cvmx_pko_doorbell_pkoid(pko_port, queue, 3);
+		return CVMX_PKO_SUCCESS;
+	} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) ||
+		   (result == CVMX_CMD_QUEUE_FULL)) {
+		return CVMX_PKO_NO_MEMORY;
+	} else {
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+}
+
+#endif /* __CVMX_PKO_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pko3-resources.h b/arch/mips/mach-octeon/include/mach/cvmx-pko3-resources.h
new file mode 100644
index 0000000..cc9f375
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pko3-resources.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#ifndef __CVMX_PKO3_RESOURCES_H__
+#define __CVMX_PKO3_RESOURCES_H__
+
+/*
+ * Allocate or reserve contiguous list of PKO queues.
+ *
+ * @param node is the node number for PKO queues.
+ * @param level is the PKO queue level.
+ * @param owner is the owner of PKO queue resources.
+ * @param base_queue is the PKO queue base number (specify -1 to allocate).
+ * @param num_queues is the number of PKO queues that have to be reserved or allocated.
+ * @return returns queue_base if successful or -1 on failure.
+ */
+int cvmx_pko_alloc_queues(int node, int level, int owner, int base_queue,
+			  int num_queues);
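+
+/*
+ * For example (illustrative sketch only), reserving eight L3 queues on
+ * node 0 for a caller-chosen 'owner' tag, letting the allocator pick the
+ * base queue:
+ *
+ *	int base = cvmx_pko_alloc_queues(0, CVMX_PKO_L3_QUEUES, owner, -1, 8);
+ *
+ *	if (base < 0)
+ *		debug("PKO L3 queue allocation failed\n");
+ */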
+
+/**
+ * Free an allocated/reserved PKO queues for a certain level and owner
+ *
+ * @param node on which to allocate/reserve PKO queues
+ * @param level of PKO queue
+ * @param owner of reserved/allocated resources
+ * @return 0 on success, -1 on failure
+ */
+int cvmx_pko_free_queues(int node, int level, int owner);
+
+int __cvmx_pko3_dq_param_setup(unsigned node);
+
+int cvmx_pko3_num_level_queues(enum cvmx_pko3_level_e level);
+
+#endif /* __CVMX_PKO3_RESOURCES_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-pko3.h b/arch/mips/mach-octeon/include/mach/cvmx-pko3.h
new file mode 100644
index 0000000..86f89be
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-pko3.h
@@ -0,0 +1,1052 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ */
+
+#ifndef __CVMX_PKO3_H__
+#define __CVMX_PKO3_H__
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* Use full LMTDMA when PARAMETER_CHECKING is enabled */
+#undef CVMX_ENABLE_PARAMETER_CHECKING
+#define CVMX_ENABLE_PARAMETER_CHECKING 0
+
+/*
+ * CVMSEG, scratch line for LMTDMA/LMTST operations:
+ * 1. It should differ from other CVMSEG uses, e.g. IOBDMA,
+ * 2. It must agree with the setting of CvmCtl[LMTLINE] control register.
+ * Contains 16 words, words 1-15 are cleared when word 0 is written to.
+ */
+#define CVMX_PKO_LMTLINE 2ull
+
+/* PKO3 queue level identifier */
+enum cvmx_pko3_level_e {
+	CVMX_PKO_LEVEL_INVAL = 0,
+	CVMX_PKO_PORT_QUEUES = 0xd1,
+	CVMX_PKO_L2_QUEUES = 0xc2,
+	CVMX_PKO_L3_QUEUES = 0xb3,
+	CVMX_PKO_L4_QUEUES = 0xa4,
+	CVMX_PKO_L5_QUEUES = 0x95,
+	CVMX_PKO_DESCR_QUEUES = 0x86,
+};
+
+enum cvmx_pko_dqop {
+	CVMX_PKO_DQ_SEND = 0ULL,
+	CVMX_PKO_DQ_OPEN = 1ULL,
+	CVMX_PKO_DQ_CLOSE = 2ULL,
+	CVMX_PKO_DQ_QUERY = 3ULL
+};
+
+/**
+ * Returns the PKO DQ..L2 Shaper Time-Wheel clock rate for specified node.
+ */
+static inline u64 cvmx_pko3_dq_tw_clock_rate_node(int node)
+{
+	return gd->bus_clk / 768;
+}
+
+/**
+ * Returns the PKO Port Shaper Time-Wheel clock rate for specified node.
+ */
+static inline u64 cvmx_pko3_pq_tw_clock_rate_node(int node)
+{
+	int div;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		div = 96;
+	else
+		div = 48;
+	return gd->bus_clk / div;
+}
+
+/**
+ * @INTERNAL
+ * Return the number of MACs in the PKO (excluding the NULL MAC)
+ * in a model-dependent manner.
+ */
+static inline unsigned int __cvmx_pko3_num_macs(void)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		return 10;
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		return 14;
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		return 28;
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Return the deepest scheduler queue level, depending on SoC model
+ */
+static inline int __cvmx_pko3_sq_lvl_max(void)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		return CVMX_PKO_L3_QUEUES;
+	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		return CVMX_PKO_L3_QUEUES;
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+		return CVMX_PKO_L5_QUEUES;
+	return -1;
+}
+
+/**
+ * @INTERNAL
+ * Return the next (lower) queue level for a given level
+ */
+static inline enum cvmx_pko3_level_e
+__cvmx_pko3_sq_lvl_next(enum cvmx_pko3_level_e level)
+{
+	switch (level) {
+	default:
+		return CVMX_PKO_LEVEL_INVAL;
+	case CVMX_PKO_PORT_QUEUES:
+		return CVMX_PKO_L2_QUEUES;
+	case CVMX_PKO_L2_QUEUES:
+		return CVMX_PKO_L3_QUEUES;
+	case CVMX_PKO_L3_QUEUES:
+		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
+			return CVMX_PKO_DESCR_QUEUES;
+		return CVMX_PKO_L4_QUEUES;
+	case CVMX_PKO_L4_QUEUES:
+		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
+			return CVMX_PKO_LEVEL_INVAL;
+		return CVMX_PKO_L5_QUEUES;
+	case CVMX_PKO_L5_QUEUES:
+		if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
+		    OCTEON_IS_MODEL(OCTEON_CNF75XX))
+			return CVMX_PKO_LEVEL_INVAL;
+		return CVMX_PKO_DESCR_QUEUES;
+	}
+}
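+
+/*
+ * Illustrative sketch: walking the scheduler hierarchy from the port
+ * queues down to the descriptor queues; setup_level() is a hypothetical
+ * per-level configuration hook:
+ *
+ *	enum cvmx_pko3_level_e lvl;
+ *
+ *	for (lvl = CVMX_PKO_PORT_QUEUES; lvl != CVMX_PKO_LEVEL_INVAL;
+ *	     lvl = __cvmx_pko3_sq_lvl_next(lvl))
+ *		setup_level(lvl);
+ */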
+
+/**
+ * @INTERNAL
+ * Return an SQ identifier string, for debug messages.
+ */
+static inline char *__cvmx_pko3_sq_str(char *buf, enum cvmx_pko3_level_e level,
+				       unsigned int q)
+{
+	char *p;
+
+	switch (level) {
+	default:
+		strcpy(buf, "ERR-SQ/");
+		break;
+	case CVMX_PKO_PORT_QUEUES:
+		strcpy(buf, "PQ_L1/");
+		break;
+	case CVMX_PKO_L2_QUEUES:
+		strcpy(buf, "SQ_L2/");
+		break;
+	case CVMX_PKO_L3_QUEUES:
+		strcpy(buf, "SQ_L3/");
+		break;
+	case CVMX_PKO_L4_QUEUES:
+		strcpy(buf, "SQ_L4/");
+		break;
+	case CVMX_PKO_L5_QUEUES:
+		strcpy(buf, "SQ_L5/");
+		break;
+	case CVMX_PKO_DESCR_QUEUES:
+		strcpy(buf, "DQ/");
+		break;
+	}
+
+	for (p = buf; *p; p++)
+		;
+	*p++ = '0' + q / 1000;
+	q -= (q / 1000) * 1000;
+	*p++ = '0' + q / 100;
+	q -= (q / 100) * 100;
+	*p++ = '0' + q / 10;
+	q -= (q / 10) * 10;
+	*p++ = '0' + q;
+	*p++ = ':';
+	*p++ = '\0';
+	return buf;
+}
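+
+/*
+ * Example (illustrative): formatting a queue identifier for a debug
+ * message; 16 bytes comfortably covers the longest prefix plus digits:
+ *
+ *	char qstr[16];
+ *
+ *	debug("opening %s\n",
+ *	      __cvmx_pko3_sq_str(qstr, CVMX_PKO_DESCR_QUEUES, dq));
+ */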
+
+union cvmx_pko_query_rtn {
+	u64 u64;
+	struct {
+		u64 dqstatus : 4;
+		u64 rsvd_50_59 : 10;
+		u64 dqop : 2;
+		u64 depth : 48;
+	} s;
+};
+
+typedef union cvmx_pko_query_rtn cvmx_pko_query_rtn_t;
+
+/* PKO_QUERY_RTN_S[DQSTATUS] - cvmx_pko_query_rtn_t->s.dqstatus */
+enum pko_query_dqstatus {
+	PKO_DQSTATUS_PASS = 0,	       /* No error */
+	PKO_DQSTATUS_BADSTATE = 0x8,   /* queue was not ready to enqueue */
+	PKO_DQSTATUS_NOFPABUF = 0x9,   /* FPA out of buffers */
+	PKO_DQSTATUS_NOPKOBUF = 0xA,   /* PKO out of buffers */
+	PKO_DQSTATUS_FAILRTNPTR = 0xB, /* can't return buffer ptr to FPA */
+	PKO_DQSTATUS_ALREADY = 0xC,    /* already created */
+	PKO_DQSTATUS_NOTCREATED = 0xD, /* not created */
+	PKO_DQSTATUS_NOTEMPTY = 0xE,   /* queue not empty */
+	PKO_DQSTATUS_SENDPKTDROP = 0xF /* packet dropped, illegal construct */
+};
+
+typedef enum pko_query_dqstatus pko_query_dqstatus_t;
+
+/* Sub-command three bit codes (SUBDC3) */
+#define CVMX_PKO_SENDSUBDC_LINK	  0x0
+#define CVMX_PKO_SENDSUBDC_GATHER 0x1
+#define CVMX_PKO_SENDSUBDC_JUMP	  0x2
+/* Sub-command four bit codes (SUBDC4) */
+#define CVMX_PKO_SENDSUBDC_TSO	0x8
+#define CVMX_PKO_SENDSUBDC_FREE 0x9
+#define CVMX_PKO_SENDSUBDC_WORK 0xA
+#define CVMX_PKO_SENDSUBDC_AURA 0xB
+#define CVMX_PKO_SENDSUBDC_MEM	0xC
+#define CVMX_PKO_SENDSUBDC_EXT	0xD
+#define CVMX_PKO_SENDSUBDC_CRC	0xE
+#define CVMX_PKO_SENDSUBDC_IMM	0xF
+
+/**
+ * pko buf ptr
+ * This is good for LINK_S, GATHER_S and PKI_BUFLINK_S structure use.
+ * It can also be used for JUMP_S, with the F-bit represented by the "i"
+ * field and the size limited to 8 bits.
+ */
+
+union cvmx_pko_buf_ptr {
+	u64 u64;
+	struct {
+		u64 size : 16;
+		u64 subdc3 : 3;
+		u64 i : 1;
+		u64 rsvd_42_43 : 2;
+		u64 addr : 42;
+	} s;
+};
+
+typedef union cvmx_pko_buf_ptr cvmx_pko_buf_ptr_t;
+
+/**
+ * pko_auraalg_e
+ */
+enum pko_auraalg_e {
+	AURAALG_NOP = 0x0,    /* aura_cnt = No change */
+	AURAALG_SUB = 0x3,    /* aura_cnt -= pko_send_aura_t.offset */
+	AURAALG_SUBLEN = 0x7, /* aura_cnt -= pko_send_aura_t.offset +
+			       *		pko_send_hdr_t.total_bytes
+			       */
+	AURAALG_SUBMBUF = 0xB /* aura_cnt -= pko_send_aura_t.offset +
+			       *		mbufs_freed
+			       */
+};
+
+/**
+ * PKO_CKL4ALG_E
+ */
+enum pko_clk4alg_e {
+	CKL4ALG_NONE = 0x0, /* No checksum. */
+	CKL4ALG_UDP = 0x1,  /* UDP L4 checksum. */
+	CKL4ALG_TCP = 0x2,  /* TCP L4 checksum. */
+	CKL4ALG_SCTP = 0x3, /* SCTP L4 checksum. */
+};
+
+/**
+ * pko_send_aura
+ */
+union cvmx_pko_send_aura {
+	u64 u64;
+	struct {
+		u64 rsvd_60_63 : 4;
+		u64 aura : 12; /* NODE+LAURA */
+		u64 subdc4 : 4;
+		u64 alg : 4; /* pko_auraalg_e */
+		u64 rsvd_08_39 : 32;
+		u64 offset : 8;
+	} s;
+};
+
+typedef union cvmx_pko_send_aura cvmx_pko_send_aura_t;
+
+/**
+ * pko_send_tso
+ */
+union cvmx_pko_send_tso {
+	u64 u64;
+	struct {
+		u64 l2len : 8;
+		u64 rsvd_48_55 : 8;
+		u64 subdc4 : 4; /* 0x8 */
+		u64 rsvd_32_43 : 12;
+		u64 sb : 8;
+		u64 mss : 16;
+		u64 eom : 1;
+		u64 fn : 7;
+	} s;
+};
+
+typedef union cvmx_pko_send_tso cvmx_pko_send_tso_t;
+
+/**
+ * pko_send_free
+ */
+union cvmx_pko_send_free {
+	u64 u64;
+	struct {
+		u64 rsvd_48_63 : 16;
+		u64 subdc4 : 4; /* 0x9 */
+		u64 rsvd : 2;
+		u64 addr : 42;
+	} s;
+};
+
+typedef union cvmx_pko_send_free cvmx_pko_send_free_t;
+
+/* PKO_SEND_HDR_S - PKO header subcommand */
+union cvmx_pko_send_hdr {
+	u64 u64;
+	struct {
+		u64 rsvd_60_63 : 4;
+		u64 aura : 12;
+		u64 ckl4 : 2; /* PKO_CKL4ALG_E */
+		u64 ckl3 : 1;
+		u64 ds : 1;
+		u64 le : 1;
+		u64 n2 : 1;
+		u64 ii : 1;
+		u64 df : 1;
+		u64 rsvd_39 : 1;
+		u64 format : 7;
+		u64 l4ptr : 8;
+		u64 l3ptr : 8;
+		u64 total : 16;
+	} s;
+};
+
+typedef union cvmx_pko_send_hdr cvmx_pko_send_hdr_t;
+
+/* PKO_SEND_EXT_S - extended header subcommand */
+union cvmx_pko_send_ext {
+	u64 u64;
+	struct {
+		u64 rsvd_48_63 : 16;
+		u64 subdc4 : 4; /* _SENDSUBDC_EXT */
+		u64 col : 2;	/* _COLORALG_E */
+		u64 ra : 2;	/* _REDALG_E */
+		u64 tstmp : 1;
+		u64 rsvd_24_38 : 15;
+		u64 markptr : 8;
+		u64 rsvd_9_15 : 7;
+		u64 shapechg : 9;
+	} s;
+};
+
+typedef union cvmx_pko_send_ext cvmx_pko_send_ext_t;
+
+/* PKO_MEMDSZ_E */
+enum cvmx_pko_memdsz_e {
+	MEMDSZ_B64 = 0,
+	MEMDSZ_B32 = 1,
+	MEMDSZ_B16 = 2, /* Not in HRM, assumed unsupported */
+	MEMDSZ_B8 = 3
+};
+
+/* PKO_MEMALG_E */
+enum cvmx_pko_memalg_e {
+	MEMALG_SET = 0,	      /* Set mem = PKO_SEND_MEM_S[OFFSET] */
+	MEMALG_SETTSTMP = 1,  /* Set the memory location to the timestamp
+			       *  PKO_SEND_MEM_S[DSZ] must be B64 and a
+			       *  PKO_SEND_EXT_S subdescriptor must be in
+			       *  the descriptor with PKO_SEND_EXT_S[TSTMP]=1
+			       */
+	MEMALG_SETRSLT = 2,   /* [DSZ] = B64; mem = PKO_MEM_RESULT_S.  */
+	MEMALG_ADD = 8,	      /* mem = mem + PKO_SEND_MEM_S[OFFSET] */
+	MEMALG_SUB = 9,	      /* mem = mem - PKO_SEND_MEM_S[OFFSET] */
+	MEMALG_ADDLEN = 0xA,  /* mem += [OFFSET] + PKO_SEND_HDR_S[TOTAL] */
+	MEMALG_SUBLEN = 0xB,  /* mem -= [OFFSET] + PKO_SEND_HDR_S[TOTAL] */
+	MEMALG_ADDMBUF = 0xC, /* mem += [OFFSET] + mbufs_freed */
+	MEMALG_SUBMBUF = 0xD  /* mem -= [OFFSET] + mbufs_freed */
+};
+
+union cvmx_pko_send_mem {
+	u64 u64;
+	struct {
+		u64 rsvd_63 : 1;
+		u64 wmem : 1;
+		u64 dsz : 2;
+		u64 alg : 4;
+		u64 offset : 8;
+		u64 subdc4 : 4;
+		u64 rsvd_42_43 : 2;
+		u64 addr : 42;
+	} s;
+};
+
+typedef union cvmx_pko_send_mem cvmx_pko_send_mem_t;
+
+union cvmx_pko_send_work {
+	u64 u64;
+	struct {
+		u64 rsvd_62_63 : 2;
+		u64 grp : 10;
+		u64 tt : 2;
+		u64 rsvd_48_49 : 2;
+		u64 subdc4 : 4;
+		u64 rsvd_42_43 : 2;
+		u64 addr : 42;
+	} s;
+};
+
+typedef union cvmx_pko_send_work cvmx_pko_send_work_t;
+
+/*** PKO_SEND_DMA_S - format of IOBDMA/LMTDMA data word ***/
+union cvmx_pko_lmtdma_data {
+	u64 u64;
+	struct {
+		u64 scraddr : 8;
+		u64 rtnlen : 8;
+		u64 did : 8; /* 0x51 */
+		u64 node : 4;
+		u64 rsvd_34_35 : 2;
+		u64 dqop : 2; /* PKO_DQOP_E */
+		u64 rsvd_26_31 : 6;
+		u64 dq : 10;
+		u64 rsvd_0_15 : 16;
+	} s;
+};
+
+typedef union cvmx_pko_lmtdma_data cvmx_pko_lmtdma_data_t;
+
+typedef struct cvmx_pko3_dq_params_s {
+	s32 depth;
+	s32 limit;
+	u64 pad[15];
+} cvmx_pko3_dq_params_t;
+
+/* DQ depth cached value */
+extern cvmx_pko3_dq_params_t *__cvmx_pko3_dq_params[CVMX_MAX_NODES];
+
+int cvmx_pko3_internal_buffer_count(unsigned int node);
+
+/**
+ * @INTERNAL
+ * PKO3 DQ parameter location
+ * @param node      OCI node number
+ * @param dq        descriptor queue number
+ */
+static inline cvmx_pko3_dq_params_t *cvmx_pko3_dq_parameters(unsigned int node,
+							     unsigned int dq)
+{
+	cvmx_pko3_dq_params_t *pparam = NULL;
+	static cvmx_pko3_dq_params_t dummy;
+
+	dummy.depth = 0;
+	dummy.limit = (1 << 16);
+
+	if (cvmx_likely(node < CVMX_MAX_NODES))
+		pparam = __cvmx_pko3_dq_params[node];
+
+	if (cvmx_likely(pparam))
+		pparam += dq;
+	else
+		pparam = &dummy;
+
+	return pparam;
+}
+
+static inline void cvmx_pko3_dq_set_limit(unsigned int node, unsigned int dq,
+					  unsigned int limit)
+{
+	cvmx_pko3_dq_params_t *pparam;
+
+	pparam = cvmx_pko3_dq_parameters(node, dq);
+	pparam->limit = limit;
+}
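+
+/*
+ * For instance (sketch only), capping the cached depth of a DQ at 512
+ * commands so the transmit fast path re-reads the hardware watermark
+ * once that many commands are outstanding:
+ *
+ *	cvmx_pko3_dq_set_limit(node, dq, 512);
+ */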
+
+/**
+ * PKO descriptor queue operation error string
+ *
+ * @param dqstatus is the enumeration returned from hardware,
+ *	  PKO_QUERY_RTN_S[DQSTATUS].
+ *
+ * @return static constant string error description
+ */
+const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus);
+
+/*
+ * This function gets the PKO MAC number for an interface/port.
+ *
+ * @param xiface is the global interface number.
+ * @param index is the port number.
+ * @return returns the MAC number if successful or -1 on failure.
+ */
+static inline int __cvmx_pko3_get_mac_num(int xiface, int index)
+{
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+	cvmx_helper_interface_mode_t mode;
+	int interface_index;
+	int ilk_mac_base = -1, bgx_mac_base = -1, bgx_ports = 4;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
+		bgx_mac_base = 2;
+
+	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
+		bgx_mac_base = 2;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
+		ilk_mac_base = 2;
+		bgx_mac_base = 4;
+	}
+
+	mode = cvmx_helper_interface_get_mode(xiface);
+	switch (mode) {
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		return 0;
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+		return 1;
+	case CVMX_HELPER_INTERFACE_MODE_ILK:
+		if (ilk_mac_base < 0)
+			return -1;
+		interface_index = (xi.interface - CVMX_ILK_GBL_BASE());
+		if (interface_index < 0)
+			return -1;
+		return (ilk_mac_base + interface_index);
+	case CVMX_HELPER_INTERFACE_MODE_SRIO:
+		return (4 + 2 * xi.interface + index);
+	default:
+		if (xi.interface >= CVMX_ILK_GBL_BASE() && ilk_mac_base >= 0)
+			return -1;
+		/* All other modes belong to BGX */
+		return (bgx_mac_base + bgx_ports * xi.interface + index);
+	}
+}
+
+/**
+ * @INTERNAL
+ *
+ * Get scratch offset for LMTDMA/LMTST data buffer
+ *
+ */
+static inline unsigned int cvmx_pko3_lmtdma_scr_base(void)
+{
+	return CVMX_PKO_LMTLINE * CVMX_CACHE_LINE_SIZE;
+}
+
+/**
+ * @INTERNAL
+ *
+ * Get address for LMTDMA/LMTST data buffer
+ *
+ */
+static inline u64 *cvmx_pko3_cvmseg_addr(void)
+{
+	const unsigned int scr = cvmx_pko3_lmtdma_scr_base();
+
+	return (u64 *)(CVMX_SCRATCH_BASE + scr);
+}
+
+/**
+ * Save scratchpad area
+ * @param buf storage buffer for saving previous scratchpad contents.
+ *
+ * This function should be used whenever the cache line designated for
+ * LMTST/LMTDMA and Wide-Atomic operations is used from a context that
+ * might preempt another context using the same cache line, for example
+ * a hard interrupt context in the Linux kernel preempting a user-space
+ * application on the same processor core that also uses the scratchpad.
+ * 'cvmx_lmtline_save()' should be called upon entry into the
+ * potentially interrupting context, and 'cvmx_lmtline_restore()' should
+ * be called prior to exiting that context.
+ */
+static inline void cvmx_lmtline_save(u64 buf[16])
+{
+	unsigned int i, scr_off = cvmx_pko3_lmtdma_scr_base();
+	unsigned int sz = CVMX_CACHE_LINE_SIZE / sizeof(u64);
+
+	/* wait for pending LMTDMA (if any) to finish */
+	CVMX_SYNCIOBDMA;
+
+	/* Copy LMTLINE to user-provided buffer */
+	for (i = 0; i < sz; i++)
+		buf[i] = cvmx_scratch_read64(scr_off + i * sizeof(u64));
+}
+
+/**
+ * Restore scratchpad area
+ * @param buf storage buffer containing the previous content of scratchpad.
+ */
+static inline void cvmx_lmtline_restore(const u64 buf[16])
+{
+	unsigned int i, scr_off = cvmx_pko3_lmtdma_scr_base();
+	unsigned int sz = CVMX_CACHE_LINE_SIZE / sizeof(u64);
+
+	/* wait for pending LMTDMA (if any) to finish */
+	CVMX_SYNCIOBDMA;
+
+	/* restore scratchpad area from buf[] */
+	for (i = 0; i < sz; i++)
+		cvmx_scratch_write64(scr_off + i * sizeof(u64), buf[i]);
+}
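+
+/*
+ * A hedged usage sketch for a potentially interrupting context;
+ * 'irq_tx_handler' and 'do_tx' are hypothetical names standing in for
+ * any LMTDMA/LMTST-issuing work:
+ *
+ *	static void irq_tx_handler(void)
+ *	{
+ *		u64 scratch[16];
+ *
+ *		cvmx_lmtline_save(scratch);
+ *		do_tx();
+ *		cvmx_lmtline_restore(scratch);
+ *	}
+ */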
+
+/*
+ * @INTERNAL
+ * Deliver PKO SEND commands via CVMSEG LM and LMTDMA/LMTST.
+ * The command should be already stored in the CVMSEG address.
+ *
+ * @param node is the destination node
+ * @param dq is the destination descriptor queue.
+ * @param numwords is the number of outgoing words
+ * @param tag_wait Wait to finish tag switch just before issuing LMTDMA
+ * @return the PKO3 native query result structure.
+ *
+ * <numwords> must be between 1 and 15 for CVMX_PKO_DQ_SEND command
+ *
+ * NOTE: Internal use only.
+ */
+static inline cvmx_pko_query_rtn_t
+__cvmx_pko3_lmtdma(u8 node, uint16_t dq, unsigned int numwords, bool tag_wait)
+{
+	const enum cvmx_pko_dqop dqop = CVMX_PKO_DQ_SEND;
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_lmtdma_data_t pko_send_dma_data;
+	u64 dma_addr;
+	unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
+	unsigned int scr_off;
+	cvmx_pko3_dq_params_t *pparam;
+
+	if (cvmx_unlikely(numwords < 1 || numwords > 15)) {
+		debug("%s: ERROR: invalid numwords %u\n", __func__, numwords);
+		pko_status.u64 = ~0ull;
+		return pko_status;
+	}
+
+	pparam = cvmx_pko3_dq_parameters(node, dq);
+
+	pko_status.u64 = 0;
+	pko_send_dma_data.u64 = 0;
+
+	/* LMTDMA address offset is (nWords-1) */
+	dma_addr = CVMX_LMTDMA_ORDERED_IO_ADDR;
+	dma_addr += (numwords - 1) << 3;
+
+	scr_off = scr_base + numwords * sizeof(u64);
+
+	/* Write all-ones into the return area */
+	cvmx_scratch_write64(scr_off, ~0ull);
+
+	/* Barrier: make sure all prior writes complete before the following */
+	CVMX_SYNCWS;
+
+	/* If cached depth exceeds limit, check the real depth */
+	if (cvmx_unlikely(pparam->depth > pparam->limit)) {
+		cvmx_pko_dqx_wm_cnt_t wm_cnt;
+
+		wm_cnt.u64 = csr_rd_node(node, CVMX_PKO_DQX_WM_CNT(dq));
+		pko_status.s.depth = wm_cnt.s.count;
+		pparam->depth = pko_status.s.depth;
+
+		if (pparam->depth > pparam->limit) {
+			pko_status.s.dqop = dqop;
+			pko_status.s.dqstatus = PKO_DQSTATUS_NOFPABUF;
+			return pko_status;
+		}
+	} else {
+		cvmx_atomic_add32_nosync(&pparam->depth, 1);
+	}
+
+	if (CVMX_ENABLE_PARAMETER_CHECKING) {
+		/* Request one return word */
+		pko_send_dma_data.s.rtnlen = 1;
+	} else {
+		/* Do not expect a return word */
+		pko_send_dma_data.s.rtnlen = 0;
+	}
+
+	/* build store data for DMA */
+	pko_send_dma_data.s.scraddr = scr_off >> 3;
+	pko_send_dma_data.s.did = 0x51;
+	pko_send_dma_data.s.node = node;
+	pko_send_dma_data.s.dqop = dqop;
+	pko_send_dma_data.s.dq = dq;
+
+	/* Wait to finish tag switch just before issuing LMTDMA */
+	if (tag_wait)
+		cvmx_pow_tag_sw_wait();
+
+	/* issue PKO DMA */
+	cvmx_write64_uint64(dma_addr, pko_send_dma_data.u64);
+
+	if (cvmx_unlikely(pko_send_dma_data.s.rtnlen)) {
+		/* Wait for LMTDMA completion */
+		CVMX_SYNCIOBDMA;
+
+		/* Retrieve real result */
+		pko_status.u64 = cvmx_scratch_read64(scr_off);
+		pparam->depth = pko_status.s.depth;
+	} else {
+		/* Fake positive result */
+		pko_status.s.dqop = dqop;
+		pko_status.s.dqstatus = PKO_DQSTATUS_PASS;
+	}
+
+	return pko_status;
+}
+
+/*
+ * @INTERNAL
+ * Sends PKO descriptor commands via CVMSEG LM and LMTDMA.
+ * @param node is the destination node
+ * @param dq is the destination descriptor queue.
+ * @param cmds[] is an array of 64-bit PKO3 headers/subheaders
+ * @param numwords is the number of outgoing words
+ * @param dqop is the operation code
+ * @return the PKO3 native query result structure.
+ *
+ * <numwords> must be between 1 and 15 for CVMX_PKO_DQ_SEND command
+ * otherwise it must be 0.
+ *
+ * NOTE: Internal use only.
+ */
+static inline cvmx_pko_query_rtn_t __cvmx_pko3_do_dma(u8 node, uint16_t dq,
+						      u64 cmds[],
+						      unsigned int numwords,
+						      enum cvmx_pko_dqop dqop)
+{
+	const unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_lmtdma_data_t pko_send_dma_data;
+	u64 dma_addr;
+	unsigned int i, scr_off;
+	cvmx_pko3_dq_params_t *pparam;
+
+	pparam = cvmx_pko3_dq_parameters(node, dq);
+	CVMX_PREFETCH0(pparam);
+	/* Push WB */
+	CVMX_SYNCWS;
+
+	pko_status.u64 = 0;
+	pko_send_dma_data.u64 = 0;
+
+	if (cvmx_unlikely(numwords > 15)) {
+		debug("%s: ERROR: invalid numwords %u\n", __func__, numwords);
+		pko_status.u64 = ~0ull;
+		return pko_status;
+	}
+
+	/* Store the command words into CVMSEG LM */
+	for (i = 0, scr_off = scr_base; i < numwords; i++) {
+		cvmx_scratch_write64(scr_off, cmds[i]);
+		scr_off += sizeof(cmds[0]);
+	}
+
+	/* With no data to send this is an IOBDMA, otherwise an LMTDMA operation */
+	if (numwords == 0) {
+		dma_addr = CVMX_IOBDMA_ORDERED_IO_ADDR;
+	} else {
+		/* LMTDMA address offset is (nWords-1) */
+		dma_addr = CVMX_LMTDMA_ORDERED_IO_ADDR;
+		dma_addr += (numwords - 1) << 3;
+	}
+
+	if (cvmx_likely(dqop == CVMX_PKO_DQ_SEND)) {
+		if (cvmx_unlikely(pparam->depth > pparam->limit)) {
+			cvmx_pko_dqx_wm_cnt_t wm_cnt;
+
+			wm_cnt.u64 = csr_rd_node(node, CVMX_PKO_DQX_WM_CNT(dq));
+			pko_status.s.depth = wm_cnt.s.count;
+			pparam->depth = pko_status.s.depth;
+		}
+
+		if (cvmx_unlikely(pparam->depth > pparam->limit)) {
+			pko_status.s.dqop = dqop;
+			pko_status.s.dqstatus = PKO_DQSTATUS_NOFPABUF;
+			return pko_status;
+		}
+
+		cvmx_atomic_add32_nosync(&pparam->depth, 1);
+	}
+
+	if (cvmx_unlikely(dqop != CVMX_PKO_DQ_SEND) ||
+	    CVMX_ENABLE_PARAMETER_CHECKING) {
+		/* Request one return word */
+		pko_send_dma_data.s.rtnlen = 1;
+		/* Write all-ones into the return area */
+		cvmx_scratch_write64(scr_off, ~0ull);
+	} else {
+		/* Do not expect a return word */
+		pko_send_dma_data.s.rtnlen = 0;
+	}
+
+	/* build store data for DMA */
+	pko_send_dma_data.s.scraddr = scr_off >> 3;
+	pko_send_dma_data.s.did = 0x51;
+	pko_send_dma_data.s.node = node;
+	pko_send_dma_data.s.dqop = dqop;
+	pko_send_dma_data.s.dq = dq;
+
+	/* Barrier: make sure all prior writes complete before the following */
+	CVMX_SYNCWS;
+
+	/* Wait to finish tag switch just before issuing LMTDMA */
+	cvmx_pow_tag_sw_wait();
+
+	/* issue PKO DMA */
+	cvmx_write64_uint64(dma_addr, pko_send_dma_data.u64);
+
+	if (pko_send_dma_data.s.rtnlen) {
+		/* Wait for LMTDMA completion */
+		CVMX_SYNCIOBDMA;
+
+		/* Retrieve real result */
+		pko_status.u64 = cvmx_scratch_read64(scr_off);
+		pparam->depth = pko_status.s.depth;
+	} else {
+		/* Fake positive result */
+		pko_status.s.dqop = dqop;
+		pko_status.s.dqstatus = PKO_DQSTATUS_PASS;
+	}
+
+	return pko_status;
+}
+
+/*
+ * Transmit packets through PKO, simplified API
+ *
+ * @INTERNAL
+ *
+ * @param dq is a global destination queue number
+ * @param pki_ptr specifies the packet's first linked pointer, as returned
+ * from 'cvmx_wqe_get_pki_pkt_ptr()'.
+ * @param len is the total number of bytes in the packet.
+ * @param gaura is the aura to free packet buffers to after transmit.
+ * @param pcounter is the address of a 64-bit counter to atomically
+ * decrement when packet transmission is complete, or NULL.
+ * @param ptag is a Flow Tag pointer for packet ordering, or NULL.
+ *
+ * @return returns 0 if successful and -1 on failure.
+ *
+ *
+ * NOTE: This is a provisional API, and is subject to change.
+ */
+static inline int cvmx_pko3_xmit_link_buf(int dq, cvmx_buf_ptr_pki_t pki_ptr,
+					  unsigned int len, int gaura,
+					  u64 *pcounter, u32 *ptag)
+{
+	cvmx_pko_query_rtn_t pko_status;
+	cvmx_pko_send_hdr_t hdr_s;
+	cvmx_pko_buf_ptr_t gtr_s;
+	unsigned int node, nwords;
+	unsigned int scr_base = cvmx_pko3_lmtdma_scr_base();
+
+	/* Separate global DQ# into node and local DQ */
+	node = dq >> 10;
+	dq &= (1 << 10) - 1;
+
+	/* Fill in header */
+	hdr_s.u64 = 0;
+	hdr_s.s.total = len;
+	hdr_s.s.df = (gaura < 0);
+	hdr_s.s.ii = 1;
+	hdr_s.s.aura = (gaura >= 0) ? gaura : 0;
+
+	/* Fill in gather */
+	gtr_s.u64 = 0;
+	gtr_s.s.subdc3 = CVMX_PKO_SENDSUBDC_LINK;
+	gtr_s.s.addr = pki_ptr.addr;
+	gtr_s.s.size = pki_ptr.size;
+
+	/* Setup command word pointers */
+	cvmx_scratch_write64(scr_base + sizeof(u64) * 0, hdr_s.u64);
+	cvmx_scratch_write64(scr_base + sizeof(u64) * 1, gtr_s.u64);
+	nwords = 2;
+
+	/* Conditionally setup an atomic decrement counter */
+	if (pcounter) {
+		cvmx_pko_send_mem_t mem_s;
+
+		mem_s.u64 = 0;
+		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
+		mem_s.s.dsz = MEMDSZ_B64;
+		mem_s.s.alg = MEMALG_SUB;
+		mem_s.s.offset = 1;
+		mem_s.s.wmem = 0;
+		mem_s.s.addr = cvmx_ptr_to_phys(CASTPTR(void, pcounter));
+		cvmx_scratch_write64(scr_base + sizeof(u64) * nwords++,
+				     mem_s.u64);
+	}
+
+	/* To preserve packet order, go atomic with DQ-specific tag */
+	if (ptag)
+		cvmx_pow_tag_sw(*ptag ^ dq, CVMX_POW_TAG_TYPE_ATOMIC);
+
+	/* Do LMTDMA */
+	pko_status = __cvmx_pko3_lmtdma(node, dq, nwords, ptag != NULL);
+
+	if (cvmx_likely(pko_status.s.dqstatus == PKO_DQSTATUS_PASS))
+		return 0;
+	else
+		return -1;
+}
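+
+/*
+ * Illustrative call site (assuming the usual WQE helpers; 'wqe', 'dq' and
+ * 'gaura' are placeholders): transmitting a received packet out of DQ
+ * 'dq', returning its buffers to aura 'gaura' on completion, with no
+ * completion counter or ordering tag:
+ *
+ *	cvmx_buf_ptr_pki_t ptr = cvmx_wqe_get_pki_pkt_ptr(wqe);
+ *
+ *	if (cvmx_pko3_xmit_link_buf(dq, ptr, cvmx_wqe_get_len(wqe),
+ *				    gaura, NULL, NULL) != 0)
+ *		debug("PKO transmit failed\n");
+ */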
+
+/**
+ * @INTERNAL
+ *
+ * Retrieve PKO internal AURA from register.
+ */
+static inline unsigned int __cvmx_pko3_aura_get(unsigned int node)
+{
+	static s16 aura = -1;
+	cvmx_pko_dpfi_fpa_aura_t pko_aura;
+
+	if (aura >= 0)
+		return aura;
+
+	pko_aura.u64 = csr_rd_node(node, CVMX_PKO_DPFI_FPA_AURA);
+
+	aura = (pko_aura.s.node << 10) | pko_aura.s.laura;
+	return aura;
+}
+
+/** Open configured descriptor queues before queueing packets into them.
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be opened.
+ * @return returns 0 on success or -1 on failure.
+ */
+int cvmx_pko_dq_open(int node, int dq);
+
+/** Close a descriptor queue
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be closed.
+ * @return returns 0 on success or -1 on failure.
+ *
+ * This should be called before changing the DQ parent link, topology,
+ * or when shutting down the PKO.
+ */
+int cvmx_pko3_dq_close(int node, int dq);
+
+/** Query a descriptor queue
+ *
+ * @param node is to specify the node to which this configuration is applied.
+ * @param dq is the descriptor queue number to be queried.
+ * @return returns the descriptor queue depth on success or -1 on failure.
+ *
+ * This should be called before changing the DQ parent link, topology,
+ * or when shutting down the PKO.
+ */
+int cvmx_pko3_dq_query(int node, int dq);
+
+/** Drain a descriptor queue
+ *
+ * Before closing a DQ, this call will drain all pending traffic
+ * on the DQ to the NULL MAC, which will circumvent any traffic
+ * shaping and flow control to quickly reclaim all packet buffers.
+ */
+void cvmx_pko3_dq_drain(int node, int dq);
+
+/*
+ * PKO global initialization for 78XX.
+ *
+ * @param node is the node on which PKO block is initialized.
+ * @param aura is the 12-bit AURA (including node) for PKO internal use.
+ * @return none.
+ */
+int cvmx_pko3_hw_init_global(int node, uint16_t aura);
+
+/**
+ * Shutdown the entire PKO
+ */
+int cvmx_pko3_hw_disable(int node);
+
+/* Define legacy type here to break circular dependency */
+typedef struct cvmx_pko_port_status cvmx_pko_port_status_t;
+
+/**
+ * @INTERNAL
+ * Backward compatibility for collecting statistics from PKO3
+ *
+ */
+void cvmx_pko3_get_legacy_port_stats(u16 ipd_port, unsigned int clear,
+				     cvmx_pko_port_status_t *status);
+
+/** Set MAC options
+ *
+ * The options supported are the parameters below:
+ *
+ * @param xiface The physical interface number
+ * @param index The physical sub-interface port
+ * @param fcs_enable Enable FCS generation
+ * @param pad_enable Enable padding to minimum packet size
+ * @param fcs_sop_off Number of bytes at start of packet to exclude from FCS
+ *
+ * The typical use for `fcs_sop_off` is when the interface is configured
+ * to use a header such as HiGig to precede every Ethernet packet.
+ * Such a header usually does not partake in the CRC32 computation
+ * stream, and its size must be set with this parameter.
+ *
+ * @return Returns 0 on success, -1 if interface/port is invalid.
+ */
+int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
+				bool pad_enable, unsigned int fcs_sop_off);
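+
+/*
+ * For example (sketch), enabling FCS generation and minimum-size padding
+ * with no start-of-packet FCS exclusion:
+ *
+ *	if (cvmx_pko3_interface_options(xiface, index, true, true, 0))
+ *		debug("invalid interface/port\n");
+ */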
+
+/** Set Descriptor Queue options
+ *
+ * The `min_pad` parameter must be in agreement with the interface-level
+ * padding option for all descriptor queues assigned to that particular
+ * interface/port.
+ */
+void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad);
+
+int cvmx_pko3_port_fifo_size(unsigned int xiface, unsigned int index);
+int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level);
+int cvmx_pko3_port_xoff(unsigned int xiface, unsigned int index);
+int cvmx_pko3_port_xon(unsigned int xiface, unsigned int index);
+
+/* Packet descriptor - PKO3 command buffer + internal state */
+typedef struct cvmx_pko3_pdesc_s {
+	u64 *jump_buf;		/**< jump buffer vaddr */
+	s16 last_aura;		/**< AURA of the latest LINK_S/GATHER_S */
+	unsigned num_words : 5, /**< valid words in word array 2..16 */
+		headroom : 10,	/**< free bytes at start of 1st buf */
+		hdr_offsets : 1, pki_word4_present : 1;
+	/* PKO3 command buffer: */
+	cvmx_pko_send_hdr_t *hdr_s;
+	u64 word[16]; /**< header and subcommands buffer */
+	/* Bookkeeping fields: */
+	u64 send_work_s; /**< SEND_WORK_S must be the very last subdc */
+	s16 jb_aura;	 /**< AURA where the jump buffer belongs */
+	u16 mem_s_ix;	 /**< index of first MEM_S subcommand */
+	u8 ckl4_alg;	 /**< L3/L4 alg to use if recalc is needed */
+	/* Fields saved from WQE for later inspection */
+	cvmx_pki_wqe_word4_t pki_word4;
+	cvmx_pki_wqe_word2_t pki_word2;
+} cvmx_pko3_pdesc_t;
+
+void cvmx_pko3_pdesc_init(cvmx_pko3_pdesc_t *pdesc);
+int cvmx_pko3_pdesc_from_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
+			     bool free_bufs);
+int cvmx_pko3_pdesc_transmit(cvmx_pko3_pdesc_t *pdesc, uint16_t dq,
+			     u32 *flow_tag);
+int cvmx_pko3_pdesc_notify_decrement(cvmx_pko3_pdesc_t *pdesc,
+				     volatile u64 *p_counter);
+int cvmx_pko3_pdesc_notify_wqe(cvmx_pko3_pdesc_t *pdesc, cvmx_wqe_78xx_t *wqe,
+			       u8 node, uint8_t group, uint8_t tt, u32 tag);
+int cvmx_pko3_pdesc_buf_append(cvmx_pko3_pdesc_t *pdesc, void *p_data,
+			       unsigned int data_bytes, unsigned int gaura);
+int cvmx_pko3_pdesc_append_free(cvmx_pko3_pdesc_t *pdesc, u64 addr,
+				unsigned int gaura);
+int cvmx_pko3_pdesc_hdr_push(cvmx_pko3_pdesc_t *pdesc, const void *p_data,
+			     u8 data_bytes, uint8_t layer);
+int cvmx_pko3_pdesc_hdr_pop(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
+			    unsigned int num_bytes);
+int cvmx_pko3_pdesc_hdr_peek(cvmx_pko3_pdesc_t *pdesc, void *hdr_buf,
+			     unsigned int num_bytes, unsigned int offset);
+void cvmx_pko3_pdesc_set_free(cvmx_pko3_pdesc_t *pdesc, bool free_bufs);
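+
+/*
+ * A hedged end-to-end sketch of the packet-descriptor API (error
+ * handling omitted; 'wqe', 'dq' and 'tag' are placeholders):
+ *
+ *	cvmx_pko3_pdesc_t pdesc;
+ *
+ *	cvmx_pko3_pdesc_init(&pdesc);
+ *	cvmx_pko3_pdesc_from_wqe(&pdesc, wqe, true);
+ *	cvmx_pko3_pdesc_transmit(&pdesc, dq, &tag);
+ */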
+
+#endif /* __CVMX_PKO3_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-range.h b/arch/mips/mach-octeon/include/mach/cvmx-range.h
new file mode 100644
index 0000000..f0c1307
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-range.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#ifndef __CVMX_RANGE_H__
+#define __CVMX_RANGE_H__
+
+int cvmx_range_init(u64 range_addr, int size);
+int cvmx_range_alloc(u64 range_addr, uint64_t owner, uint64_t cnt, int align);
+int cvmx_range_alloc_ordered(u64 range_addr, uint64_t owner, u64 cnt, int align,
+			     int reverse);
+int cvmx_range_alloc_non_contiguos(u64 range_addr, uint64_t owner, u64 cnt,
+				   int elements[]);
+int cvmx_range_reserve(u64 range_addr, uint64_t owner, u64 base, uint64_t cnt);
+int cvmx_range_free_with_base(u64 range_addr, int base, int cnt);
+int cvmx_range_free_with_owner(u64 range_addr, uint64_t owner);
+u64 cvmx_range_get_owner(u64 range_addr, uint64_t base);
+void cvmx_range_show(uint64_t range_addr);
+int cvmx_range_memory_size(int nelements);
+int cvmx_range_free_mutiple(u64 range_addr, int bases[], int count);
+
+#endif // __CVMX_RANGE_H__
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-regs.h b/arch/mips/mach-octeon/include/mach/cvmx-regs.h
index dbb7723..f97c1e9 100644
--- a/arch/mips/mach-octeon/include/mach/cvmx-regs.h
+++ b/arch/mips/mach-octeon/include/mach/cvmx-regs.h
@@ -6,6 +6,7 @@
 #ifndef __CVMX_REGS_H__
 #define __CVMX_REGS_H__
 
+#include <log.h>
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/io.h>
@@ -32,6 +33,7 @@
 
 /* Regs */
 #define CVMX_CIU3_NMI		0x0001010000000160ULL
+#define CVMX_CIU3_ISCX_W1C(x)	(0x0001010090000000ull + ((x) & 1048575) * 8)
 
 #define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
 #define MIO_BOOT_LOC_CFG_BASE	GENMASK_ULL(27, 3)
@@ -55,11 +57,19 @@
 #define CVMX_RNM_CTL_STATUS	0x0001180040000000ULL
 #define RNM_CTL_STATUS_EER_VAL	BIT_ULL(9)
 
+/* IOBDMA/LMTDMA IO addresses */
+#define CVMX_LMTDMA_ORDERED_IO_ADDR 0xffffffffffffa400ull
 #define CVMX_IOBDMA_ORDERED_IO_ADDR 0xffffffffffffa200ull
 
 /* turn the variable name into a string */
 #define CVMX_TMP_STR(x)		CVMX_TMP_STR2(x)
 #define CVMX_TMP_STR2(x)	#x
+#define VASTR(...)		#__VA_ARGS__
+
+#define CVMX_PKO_LMTLINE	2ull
+#define CVMX_SCRATCH_BASE	(-32768l)	/* 0xffffffffffff8000 */
+
+#define COP0_CVMMEMCTL		$11,7	/* Cavium memory control */
 
 #define CVMX_RDHWR(result, regstr)					\
 	asm volatile("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
@@ -67,6 +77,13 @@
 	asm("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
 #define CVMX_POP(result, input)						\
 	asm("pop %[rd],%[rs]" : [rd] "=d"(result) : [rs] "d"(input))
+#define CVMX_MF_COP0(val, cop0)						\
+	asm("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d" (val))
+#define CVMX_MT_COP0(val, cop0)						\
+	asm("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d" (val))
+
+#define CVMX_MF_CVM_MEM_CTL(val)	CVMX_MF_COP0(val, COP0_CVMMEMCTL)
+#define CVMX_MT_CVM_MEM_CTL(val)	CVMX_MT_COP0(val, COP0_CVMMEMCTL)
 
 #define CVMX_SYNC   asm volatile("sync\n" : : : "memory")
 #define CVMX_SYNCW  asm volatile("syncw\nsyncw\n" : : : "memory")
@@ -81,6 +98,18 @@
 
 #define CVMX_MF_CHORD(dest)	CVMX_RDHWR(dest, 30)
 
+#define CVMX_PREFETCH0(address)	CVMX_PREFETCH(address, 0)
+#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
+
+/** a normal prefetch */
+#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
+
+/** normal prefetches that use the pref instruction */
+#define CVMX_PREFETCH_PREFX(X, address, offset)				\
+	asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
+#define CVMX_PREFETCH_PREF0(address, offset)	\
+	CVMX_PREFETCH_PREFX(0, address, offset)
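+
+/*
+ * Example (illustrative): prefetch the first two cache lines of a packet
+ * buffer before parsing its headers:
+ *
+ *	CVMX_PREFETCH0(pkt);
+ *	CVMX_PREFETCH128(pkt);
+ */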
+
 /*
  * The macros cvmx_likely and cvmx_unlikely use the
  * __builtin_expect GCC operation to control branch
@@ -406,6 +435,30 @@
 }
 
 /**
+ * Given a CSR address return the node number of that address
+ *
+ * @param addr	Address to extract node number from
+ *
+ * @return node number
+ */
+static inline u8 cvmx_csr_addr_to_node(u64 addr)
+{
+	return (addr >> CVMX_NODE_IO_SHIFT) & CVMX_NODE_MASK;
+}
+
+/**
+ * Strip the node address bits from a CSR address
+ *
+ * @param addr	CSR address to strip the node bits from
+ *
+ * @return CSR address with the node bits set to zero
+ */
+static inline u64 cvmx_csr_addr_strip_node(u64 addr)
+{
+	return addr & ~((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT);
+}
+
+/**
  * Returns the number of bits set in the provided value.
  * Simple wrapper for POP instruction.
  *
@@ -428,14 +481,45 @@
 #define cvmx_printf  printf
 #define cvmx_vprintf vprintf
 
-#if defined(DEBUG)
-void cvmx_warn(const char *format, ...) __printf(1, 2);
-#else
-void cvmx_warn(const char *format, ...);
-#endif
+/* Use common debug macros */
+#define cvmx_warn	debug
+#define cvmx_warn_if	debug_cond
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ * @param ptr    address in memory to add incr to
+ * @param incr   amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int32_t cvmx_atomic_fetch_and_add32(int32_t *ptr, int32_t incr)
+{
+	int32_t val;
 
-#define cvmx_warn_if(expression, format, ...)				\
-	if (expression)							\
-		cvmx_warn(format, ##__VA_ARGS__)
+	val = *ptr;
+	*ptr += incr;
+	return val;
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations.  This should only be used when there are no memory operation
+ * ordering constraints.  (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr    address in memory to add incr to
+ * @param incr   amount to increment memory location by (signed)
+ */
+static inline void cvmx_atomic_add32_nosync(int32_t *ptr, int32_t incr)
+{
+	*ptr += incr;
+}
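+
+/*
+ * Note: U-Boot runs single-threaded on a single core, so a plain
+ * read-modify-write is assumed sufficient here; illustrative use:
+ *
+ *	int32_t depth = 0;
+ *
+ *	cvmx_atomic_add32_nosync(&depth, 1);
+ */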
 
 #endif /* __CVMX_REGS_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/cvmx-xcv-defs.h b/arch/mips/mach-octeon/include/mach/cvmx-xcv-defs.h
new file mode 100644
index 0000000..4fd4d16
--- /dev/null
+++ b/arch/mips/mach-octeon/include/mach/cvmx-xcv-defs.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon xcv.
+ */
+
+#ifndef __CVMX_XCV_DEFS_H__
+#define __CVMX_XCV_DEFS_H__
+
+#define CVMX_XCV_BATCH_CRD_RET (0x00011800DB000100ull)
+#define CVMX_XCV_COMP_CTL      (0x00011800DB000020ull)
+#define CVMX_XCV_CTL	       (0x00011800DB000030ull)
+#define CVMX_XCV_DLL_CTL       (0x00011800DB000010ull)
+#define CVMX_XCV_ECO	       (0x00011800DB000200ull)
+#define CVMX_XCV_INBND_STATUS  (0x00011800DB000080ull)
+#define CVMX_XCV_INT	       (0x00011800DB000040ull)
+#define CVMX_XCV_RESET	       (0x00011800DB000000ull)
+
+/**
+ * cvmx_xcv_batch_crd_ret
+ */
+union cvmx_xcv_batch_crd_ret {
+	u64 u64;
+	struct cvmx_xcv_batch_crd_ret_s {
+		u64 reserved_1_63 : 63;
+		u64 crd_ret : 1;
+	} s;
+	struct cvmx_xcv_batch_crd_ret_s cn73xx;
+};
+
+typedef union cvmx_xcv_batch_crd_ret cvmx_xcv_batch_crd_ret_t;
+
+/**
+ * cvmx_xcv_comp_ctl
+ *
+ * This register controls programmable compensation.
+ *
+ */
+union cvmx_xcv_comp_ctl {
+	u64 u64;
+	struct cvmx_xcv_comp_ctl_s {
+		u64 drv_byp : 1;
+		u64 reserved_61_62 : 2;
+		u64 cmp_pctl : 5;
+		u64 reserved_53_55 : 3;
+		u64 cmp_nctl : 5;
+		u64 reserved_45_47 : 3;
+		u64 drv_pctl : 5;
+		u64 reserved_37_39 : 3;
+		u64 drv_nctl : 5;
+		u64 reserved_31_31 : 1;
+		u64 pctl_lock : 1;
+		u64 pctl_sat : 1;
+		u64 reserved_28_28 : 1;
+		u64 nctl_lock : 1;
+		u64 reserved_1_26 : 26;
+		u64 nctl_sat : 1;
+	} s;
+	struct cvmx_xcv_comp_ctl_s cn73xx;
+};
+
+typedef union cvmx_xcv_comp_ctl cvmx_xcv_comp_ctl_t;
+
+/**
+ * cvmx_xcv_ctl
+ *
+ * This register contains the status control bits.
+ *
+ */
+union cvmx_xcv_ctl {
+	u64 u64;
+	struct cvmx_xcv_ctl_s {
+		u64 reserved_4_63 : 60;
+		u64 lpbk_ext : 1;
+		u64 lpbk_int : 1;
+		u64 speed : 2;
+	} s;
+	struct cvmx_xcv_ctl_s cn73xx;
+};
+
+typedef union cvmx_xcv_ctl cvmx_xcv_ctl_t;
+
+/**
+ * cvmx_xcv_dll_ctl
+ *
+ * The RGMII timing specification requires that devices transmit clock and
+ * data synchronously. The specification requires external sources (namely
+ * the PC board trace routes) to introduce the appropriate 1.5 to 2.0 ns of
+ * delay.
+ *
+ * To eliminate the need for the PC board delays, the RGMII interface has optional
+ * on-board DLLs for both transmit and receive. For correct operation, at most one
+ * of the transmitter, board, or receiver involved in an RGMII link should
+ * introduce delay. By default/reset, the RGMII receivers delay the received clock,
+ * and the RGMII transmitters do not delay the transmitted clock. Whether this
+ * default works as-is with a given link partner depends on the behavior of the
+ * link partner and the PC board.
+ *
+ * These are the possible modes of RGMII receive operation:
+ *
+ * * XCV_DLL_CTL[CLKRX_BYP] = 0 (reset value) - The RGMII
+ * receive interface introduces clock delay using its internal DLL.
+ * This mode is appropriate if neither the remote
+ * transmitter nor the PC board delays the clock.
+ *
+ * * XCV_DLL_CTL[CLKRX_BYP] = 1, [CLKRX_SET] = 0x0 - The
+ * RGMII receive interface introduces no clock delay. This mode
+ * is appropriate if either the remote transmitter or the PC board
+ * delays the clock.
+ *
+ * These are the possible modes of RGMII transmit operation:
+ *
+ * * XCV_DLL_CTL[CLKTX_BYP] = 1, [CLKTX_SET] = 0x0 (reset value) -
+ * The RGMII transmit interface introduces no clock
+ * delay. This mode is appropriate if either the remote receiver
+ * or the PC board delays the clock.
+ *
+ * * XCV_DLL_CTL[CLKTX_BYP] = 0 - The RGMII transmit
+ * interface introduces clock delay using its internal DLL.
+ * This mode is appropriate if neither the remote receiver
+ * nor the PC board delays the clock.
+ */
+union cvmx_xcv_dll_ctl {
+	u64 u64;
+	struct cvmx_xcv_dll_ctl_s {
+		u64 reserved_32_63 : 32;
+		u64 lock : 1;
+		u64 clk_set : 7;
+		u64 clkrx_byp : 1;
+		u64 clkrx_set : 7;
+		u64 clktx_byp : 1;
+		u64 clktx_set : 7;
+		u64 reserved_2_7 : 6;
+		u64 refclk_sel : 2;
+	} s;
+	struct cvmx_xcv_dll_ctl_s cn73xx;
+};
+
+typedef union cvmx_xcv_dll_ctl cvmx_xcv_dll_ctl_t;
+
+/**
+ * cvmx_xcv_eco
+ */
+union cvmx_xcv_eco {
+	u64 u64;
+	struct cvmx_xcv_eco_s {
+		u64 reserved_16_63 : 48;
+		u64 eco_rw : 16;
+	} s;
+	struct cvmx_xcv_eco_s cn73xx;
+};
+
+typedef union cvmx_xcv_eco cvmx_xcv_eco_t;
+
+/**
+ * cvmx_xcv_inbnd_status
+ *
+ * This register contains RGMII in-band status.
+ *
+ */
+union cvmx_xcv_inbnd_status {
+	u64 u64;
+	struct cvmx_xcv_inbnd_status_s {
+		u64 reserved_4_63 : 60;
+		u64 duplex : 1;
+		u64 speed : 2;
+		u64 link : 1;
+	} s;
+	struct cvmx_xcv_inbnd_status_s cn73xx;
+};
+
+typedef union cvmx_xcv_inbnd_status cvmx_xcv_inbnd_status_t;
+
+/**
+ * cvmx_xcv_int
+ *
+ * This register controls interrupts.
+ *
+ */
+union cvmx_xcv_int {
+	u64 u64;
+	struct cvmx_xcv_int_s {
+		u64 reserved_7_63 : 57;
+		u64 tx_ovrflw : 1;
+		u64 tx_undflw : 1;
+		u64 incomp_byte : 1;
+		u64 duplex : 1;
+		u64 reserved_2_2 : 1;
+		u64 speed : 1;
+		u64 link : 1;
+	} s;
+	struct cvmx_xcv_int_s cn73xx;
+};
+
+typedef union cvmx_xcv_int cvmx_xcv_int_t;
+
+/**
+ * cvmx_xcv_reset
+ *
+ * This register controls reset.
+ *
+ */
+union cvmx_xcv_reset {
+	u64 u64;
+	struct cvmx_xcv_reset_s {
+		u64 enable : 1;
+		u64 reserved_16_62 : 47;
+		u64 clkrst : 1;
+		u64 reserved_12_14 : 3;
+		u64 dllrst : 1;
+		u64 reserved_8_10 : 3;
+		u64 comp : 1;
+		u64 reserved_4_6 : 3;
+		u64 tx_pkt_rst_n : 1;
+		u64 tx_dat_rst_n : 1;
+		u64 rx_pkt_rst_n : 1;
+		u64 rx_dat_rst_n : 1;
+	} s;
+	struct cvmx_xcv_reset_s cn73xx;
+};
+
+typedef union cvmx_xcv_reset cvmx_xcv_reset_t;
+
+#endif /* __CVMX_XCV_DEFS_H__ */
diff --git a/arch/mips/mach-octeon/include/mach/octeon_eth.h b/arch/mips/mach-octeon/include/mach/octeon_eth.h
index 096fcfb..83e6207 100644
--- a/arch/mips/mach-octeon/include/mach/octeon_eth.h
+++ b/arch/mips/mach-octeon/include/mach/octeon_eth.h
@@ -1,17 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
  */
 
 #ifndef __OCTEON_ETH_H__
 #define __OCTEON_ETH_H__
 
-#include <phy.h>
-#include <miiphy.h>
-
 #include <mach/cvmx-helper.h>
 #include <mach/cvmx-helper-board.h>
-#include <mach/octeon_fdt.h>
 
 struct eth_device;
 
@@ -27,33 +23,25 @@
 	struct phy_device *phydev; /** PHY device */
 	struct eth_device *ethdev; /** Eth device this priv is part of */
 	int mii_addr;
-	int phy_fdt_offset;		    /** Offset of PHY info in device tree */
-	int fdt_offset;			    /** Offset of Eth interface in DT */
-	int phy_offset;			    /** Offset of PHY dev in device tree */
+	int phy_fdt_offset; /** Offset of PHY info in device tree */
+	int fdt_offset;	    /** Offset of Eth interface in DT */
+	int phy_offset;	    /** Offset of PHY device in device tree */
 	enum cvmx_phy_type phy_device_type; /** Type of PHY */
 	/* current link status, use to reconfigure on status changes */
 	u64 packets_sent;
 	u64 packets_received;
-	u32 link_speed : 2;
-	u32 link_duplex : 1;
-	u32 link_status : 1;
-	u32 loopback : 1;
-	u32 enabled : 1;
-	u32 is_c45 : 1;		    /** Set if we need to use clause 45 */
-	u32 vitesse_sfp_config : 1; /** Need Vitesse SFP config */
-	u32 ti_gpio_config : 1;	    /** Need TI GPIO config */
-	u32 bgx_mac_set : 1;	    /** Has the BGX MAC been set already */
-	u64 last_bgx_mac;	    /** Last BGX MAC address set */
-	u64 gmx_base;		    /** Base address to access GMX CSRs */
-	bool mod_abs;		    /** True if module is absent */
-
-	/**
-	 * User defined function to check if a SFP+ module is absent or not.
-	 *
-	 * @param	dev	Ethernet device
-	 * @param	data	User supplied data
-	 */
-	int (*check_mod_abs)(struct eth_device *dev, void *data);
+	uint32_t link_speed : 2;
+	uint32_t link_duplex : 1;
+	uint32_t link_status : 1;
+	uint32_t loopback : 1;
+	uint32_t enabled : 1;
+	uint32_t is_c45 : 1;		 /** Set if we need to use clause 45 */
+	uint32_t vitesse_sfp_config : 1; /** Need Vitesse SFP config */
+	uint32_t ti_gpio_config : 1;	 /** Need TI GPIO configuration */
+	uint32_t bgx_mac_set : 1;	 /** Has the BGX MAC been set already */
+	u64 last_bgx_mac;		 /** Last BGX MAC address set */
+	u64 gmx_base;			 /** Base address to access GMX CSRs */
+	bool mod_abs;			 /** True if module is absent */
 
 	/** User supplied data for check_mod_abs */
 	void *mod_abs_data;
@@ -71,12 +59,20 @@
 	 * @return	0 for success, otherwise error
 	 */
 	int (*mod_abs_changed)(struct eth_device *dev, bool mod_abs);
+
 	/** SDK phy information data structure */
 	cvmx_phy_info_t phy_info;
+
+	struct udevice *mdio_dev;
+	struct mii_dev *bus;
+	struct phy_device *phy_dev;
+
 #ifdef CONFIG_OCTEON_SFP
 	/** Information about connected SFP/SFP+/SFP28/QSFP+/QSFP28 module */
 	struct octeon_sfp_info sfp;
 #endif
+
+	cvmx_wqe_t *work;
 };
 
 /**
@@ -136,6 +132,6 @@
  *
  * NOTE: If the module state is changed then the module callback is called.
  */
-void octeon_phy_port_check(struct eth_device *dev);
+void octeon_phy_port_check(struct udevice *dev);
 
 #endif /* __OCTEON_ETH_H__ */
diff --git a/arch/sandbox/dts/test.dts b/arch/sandbox/dts/test.dts
index a8a86bc..8f93775 100644
--- a/arch/sandbox/dts/test.dts
+++ b/arch/sandbox/dts/test.dts
@@ -997,7 +997,7 @@
 		#address-cells = <3>;
 		#size-cells = <2>;
 		ranges = <0x02000000 0 0x30000000 0x30000000 0 0x2000 // MEM0
-			  0x02000000 0 0x31000000 0x31000000 0 0x2000 // MEM1
+			  0x02000000 0 0x31000000 0x3e000000 0 0x2000 // MEM1
 			  0x01000000 0 0x40000000 0x40000000 0 0x2000>;
 		sandbox,dev-info = <0x08 0x00 0x1234 0x5678
 				    0x0c 0x00 0x1234 0x5678
diff --git a/arch/x86/cpu/baytrail/cpu.c b/arch/x86/cpu/baytrail/cpu.c
index 68bf40b..4fb6a48 100644
--- a/arch/x86/cpu/baytrail/cpu.c
+++ b/arch/x86/cpu/baytrail/cpu.c
@@ -56,7 +56,7 @@
 	for (i = 0; i < 2; i++) {
 		ret = dm_pci_bus_find_bdf(PCI_BDF(0, 0x1e, 3 + i), &dev);
 		if (!ret) {
-			base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+			base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 					      PCI_REGION_MEM);
 			hsuart_clock_set(base);
 		}
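
This is the first of many call sites in this series converted to the extended
dm_pci_map_bar() signature. A sketch of the recurring call-site pattern,
assuming (as the converted call sites suggest) that the two new integer
arguments are an offset and length within the BAR, with 0/0 mapping the whole
region, and that PCI_REGION_TYPE is the mask applied when matching the region
flags:

	/* Map all of BAR0 as MMIO; a nonzero offset/length would map a sub-range */
	void *base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				    PCI_REGION_TYPE, PCI_REGION_MEM);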
diff --git a/board/CZ.NIC/turris_mox/turris_mox.c b/board/CZ.NIC/turris_mox/turris_mox.c
index 9702d1f..a4738b3 100644
--- a/board/CZ.NIC/turris_mox/turris_mox.c
+++ b/board/CZ.NIC/turris_mox/turris_mox.c
@@ -13,6 +13,7 @@
 #include <button.h>
 #include <clk.h>
 #include <dm.h>
+#include <dm/of_extra.h>
 #include <env.h>
 #include <fdt_support.h>
 #include <init.h>
@@ -216,35 +217,35 @@
 #define SW_SMI_CMD_R(d, r)	(0x9800 | (((d) & 0x1f) << 5) | ((r) & 0x1f))
 #define SW_SMI_CMD_W(d, r)	(0x9400 | (((d) & 0x1f) << 5) | ((r) & 0x1f))
 
-static int sw_multi_read(struct mii_dev *bus, int sw, int dev, int reg)
+static int sw_multi_read(struct udevice *bus, int sw, int dev, int reg)
 {
-	bus->write(bus, sw, 0, 0, SW_SMI_CMD_R(dev, reg));
+	dm_mdio_write(bus, sw, MDIO_DEVAD_NONE, 0, SW_SMI_CMD_R(dev, reg));
 	mdelay(5);
-	return bus->read(bus, sw, 0, 1);
+	return dm_mdio_read(bus, sw, MDIO_DEVAD_NONE, 1);
 }
 
-static void sw_multi_write(struct mii_dev *bus, int sw, int dev, int reg,
+static void sw_multi_write(struct udevice *bus, int sw, int dev, int reg,
 			   u16 val)
 {
-	bus->write(bus, sw, 0, 1, val);
-	bus->write(bus, sw, 0, 0, SW_SMI_CMD_W(dev, reg));
+	dm_mdio_write(bus, sw, MDIO_DEVAD_NONE, 1, val);
+	dm_mdio_write(bus, sw, MDIO_DEVAD_NONE, 0, SW_SMI_CMD_W(dev, reg));
 	mdelay(5);
 }
 
-static int sw_scratch_read(struct mii_dev *bus, int sw, int reg)
+static int sw_scratch_read(struct udevice *bus, int sw, int reg)
 {
 	sw_multi_write(bus, sw, 0x1c, 0x1a, (reg & 0x7f) << 8);
 	return sw_multi_read(bus, sw, 0x1c, 0x1a) & 0xff;
 }
 
-static void sw_led_write(struct mii_dev *bus, int sw, int port, int reg,
+static void sw_led_write(struct udevice *bus, int sw, int port, int reg,
 			 u16 val)
 {
 	sw_multi_write(bus, sw, port, 0x16, 0x8000 | ((reg & 7) << 12)
 					    | (val & 0x7ff));
 }
 
-static void sw_blink_leds(struct mii_dev *bus, int peridot, int topaz)
+static void sw_blink_leds(struct udevice *bus, int peridot, int topaz)
 {
 	int i, p;
 	struct {
@@ -275,7 +276,7 @@
 	}
 }
 
-static void check_switch_address(struct mii_dev *bus, int addr)
+static void check_switch_address(struct udevice *bus, int addr)
 {
 	if (sw_scratch_read(bus, addr, 0x70) >> 3 != addr)
 		printf("Check of switch MDIO address failed for 0x%02x\n",
@@ -374,36 +375,22 @@
 static void mox_phy_leds_start_blinking(void)
 {
 	struct phy_device *phydev;
-	struct mii_dev *bus;
-	const char *node_name;
-	int node;
-
-	node = fdt_path_offset(gd->fdt_blob, "ethernet0");
-	if (node < 0) {
-		printf("Cannot get eth0!\n");
-		return;
-	}
+	ofnode phy_node;
 
-	node_name = fdt_get_name(gd->fdt_blob, node, NULL);
-	if (!node_name) {
-		printf("Cannot get eth0 node name!\n");
-		return;
-	}
-
-	bus = miiphy_get_dev_by_name(node_name);
-	if (!bus) {
-		printf("Cannot get MDIO bus device!\n");
-		return;
-	}
+	phy_node = ofnode_get_phy_node(ofnode_path("ethernet0"));
+	if (!ofnode_valid(phy_node))
+		goto err;
 
-	phydev = phy_find_by_mask(bus, BIT(1));
-	if (!phydev) {
-		printf("Cannot get ethernet PHY!\n");
-		return;
-	}
+	phydev = dm_phy_find_by_ofnode(phy_node);
+	if (!phydev)
+		goto err;
 
 	mox_phy_modify(phydev, 3, 0x12, 0x700, 0x400);
 	mox_phy_modify(phydev, 3, 0x10, 0xff, 0xbb);
+
+	return;
+err:
+	printf("Cannot get ethernet PHY!\n");
 }
 
 static bool read_reset_button(void)
@@ -611,6 +598,26 @@
 	return 0;
 }
 
+static struct udevice *mox_mdio_bus(void)
+{
+	struct udevice *bus;
+	ofnode node;
+
+	node = ofnode_by_compatible(ofnode_null(), "marvell,orion-mdio");
+	if (!ofnode_valid(node))
+		goto err;
+
+	dm_mdio_probe_devices();
+
+	if (uclass_get_device_by_ofnode(UCLASS_MDIO, node, &bus))
+		goto err;
+
+	return bus;
+err:
+	printf("Cannot get MDIO bus device!\n");
+	return NULL;
+}
+
 int last_stage_init(void)
 {
 	struct gpio_desc reset_gpio = {};
@@ -636,16 +643,9 @@
 	 * 0x70 of Peridot (and potentially Topaz) modules
 	 */
 	if (peridot || topaz) {
-		struct mii_dev *bus;
-		const char *node_name;
-		int node;
+		struct udevice *bus = mox_mdio_bus();
 
-		node = fdt_path_offset(gd->fdt_blob, "ethernet0");
-		node_name = (node >= 0) ? fdt_get_name(gd->fdt_blob, node, NULL) : NULL;
-		bus = node_name ? miiphy_get_dev_by_name(node_name) : NULL;
-		if (!bus) {
-			printf("Cannot get MDIO bus device!\n");
-		} else {
+		if (bus) {
 			int i;
 
 			for (i = 0; i < peridot; ++i)
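
For reference, the indirect (multi-chip mode) command word built by
SW_SMI_CMD_R() packs the target device and register into the low ten bits. A
worked value, matching the scratch access in sw_scratch_read() above
(device 0x1c, register 0x1a):

	/* 0x9800 | (0x1c << 5) | 0x1a == 0x9800 | 0x380 | 0x1a == 0x9b9a */
	u16 cmd = SW_SMI_CMD_R(0x1c, 0x1a);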
diff --git a/board/Marvell/mvebu_armada-37xx/board.c b/board/Marvell/mvebu_armada-37xx/board.c
index 98e1b36..3e5e0a0 100644
--- a/board/Marvell/mvebu_armada-37xx/board.c
+++ b/board/Marvell/mvebu_armada-37xx/board.c
@@ -11,6 +11,7 @@
 #include <i2c.h>
 #include <init.h>
 #include <mmc.h>
+#include <miiphy.h>
 #include <phy.h>
 #include <asm/global_data.h>
 #include <asm/io.h>
@@ -254,14 +255,15 @@
 	return 0;
 }
 
+#ifdef CONFIG_LAST_STAGE_INIT
 /* Helper function for accessing switch devices in multi-chip connection mode */
-static int mii_multi_chip_mode_write(struct mii_dev *bus, int dev_smi_addr,
+static int mii_multi_chip_mode_write(struct udevice *bus, int dev_smi_addr,
 				     int smi_addr, int reg, u16 value)
 {
 	u16 smi_cmd = 0;
 
-	if (bus->write(bus, dev_smi_addr, 0,
-		       MVEBU_SW_SMI_DATA_REG, value) != 0) {
+	if (dm_mdio_write(bus, dev_smi_addr, MDIO_DEVAD_NONE,
+			  MVEBU_SW_SMI_DATA_REG, value) != 0) {
 		printf("Error writing to the PHY addr=%02x reg=%02x\n",
 		       smi_addr, reg);
 		return -EFAULT;
@@ -272,8 +274,8 @@
 		  (1 << SW_SMI_CMD_SMI_OP_OFF) |
 		  (smi_addr << SW_SMI_CMD_DEV_ADDR_OFF) |
 		  (reg << SW_SMI_CMD_REG_ADDR_OFF);
-	if (bus->write(bus, dev_smi_addr, 0,
-		       MVEBU_SW_SMI_CMD_REG, smi_cmd) != 0) {
+	if (dm_mdio_write(bus, dev_smi_addr, MDIO_DEVAD_NONE,
+			  MVEBU_SW_SMI_CMD_REG, smi_cmd) != 0) {
 		printf("Error writing to the PHY addr=%02x reg=%02x\n",
 		       smi_addr, reg);
 		return -EFAULT;
@@ -283,11 +285,22 @@
 }
 
 /* Bring-up board-specific network stuff */
-int board_network_enable(struct mii_dev *bus)
+int last_stage_init(void)
 {
+	struct udevice *bus;
+	ofnode node;
+
 	if (!of_machine_is_compatible("globalscale,espressobin"))
 		return 0;
 
+	node = ofnode_by_compatible(ofnode_null(), "marvell,orion-mdio");
+	if (!ofnode_valid(node) ||
+	    uclass_get_device_by_ofnode(UCLASS_MDIO, node, &bus) ||
+	    device_probe(bus)) {
+		printf("Cannot find MDIO bus\n");
+		return 0;
+	}
+
 	/*
 	 * FIXME: remove this code once the Topaz driver becomes available
 	 * A3720 Community Board Only
@@ -327,6 +340,7 @@
 
 	return 0;
 }
+#endif
 
 #ifdef CONFIG_OF_BOARD_SETUP
 int ft_board_setup(void *blob, struct bd_info *bd)
diff --git a/board/Marvell/octeon_nic23/board.c b/board/Marvell/octeon_nic23/board.c
index 9f5eb2e..3e2c544 100644
--- a/board/Marvell/octeon_nic23/board.c
+++ b/board/Marvell/octeon_nic23/board.c
@@ -1,10 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- * Copyright (C) 2021 Stefan Roese <sr@denx.de>
+ * Copyright (C) 2021-2022 Stefan Roese <sr@denx.de>
  */
 
 #include <dm.h>
 #include <ram.h>
+#include <asm/gpio.h>
 
 #include <mach/octeon_ddr.h>
 #include <mach/cvmx-qlm.h>
@@ -84,6 +85,52 @@
 	return rc;
 }
 
+int board_early_init_f(void)
+{
+	struct gpio_desc gpio = {};
+	ofnode node;
+
+	/* Initial GPIO configuration */
+
+	/* GPIO 7: Vitesse reset */
+	node = ofnode_by_compatible(ofnode_null(), "vitesse,vsc7224");
+	if (ofnode_valid(node)) {
+		gpio_request_by_name_nodev(node, "los", 0, &gpio, GPIOD_IS_IN);
+		dm_gpio_free(gpio.dev, &gpio);
+		gpio_request_by_name_nodev(node, "reset", 0, &gpio,
+					   GPIOD_IS_OUT);
+		if (dm_gpio_is_valid(&gpio)) {
+			/* Vitesse reset */
+			debug("%s: Setting GPIO 7 to 1\n", __func__);
+			dm_gpio_set_value(&gpio, 1);
+		}
+		dm_gpio_free(gpio.dev, &gpio);
+	}
+
+	/* SFP+ transmitters */
+	ofnode_for_each_compatible_node(node, "ethernet,sfp-slot") {
+		gpio_request_by_name_nodev(node, "tx_disable", 0,
+					   &gpio, GPIOD_IS_OUT);
+		if (dm_gpio_is_valid(&gpio)) {
+			debug("%s: Setting GPIO %d to 1\n", __func__,
+			      gpio.offset);
+			dm_gpio_set_value(&gpio, 1);
+		}
+		dm_gpio_free(gpio.dev, &gpio);
+		gpio_request_by_name_nodev(node, "mod_abs", 0, &gpio,
+					   GPIOD_IS_IN);
+		dm_gpio_free(gpio.dev, &gpio);
+		gpio_request_by_name_nodev(node, "tx_error", 0, &gpio,
+					   GPIOD_IS_IN);
+		dm_gpio_free(gpio.dev, &gpio);
+		gpio_request_by_name_nodev(node, "rx_los", 0, &gpio,
+					   GPIOD_IS_IN);
+		dm_gpio_free(gpio.dev, &gpio);
+	}
+
+	return 0;
+}
+
 void board_configure_qlms(void)
 {
 	octeon_configure_qlm(4, 3000, CVMX_QLM_MODE_SATA_2X1, 0, 0, 0, 0);
@@ -100,7 +147,45 @@
 
 int board_late_init(void)
 {
+	struct gpio_desc gpio = {};
+	ofnode node;
+
+	/* Turn on SFP+ transmitters */
+	ofnode_for_each_compatible_node(node, "ethernet,sfp-slot") {
+		gpio_request_by_name_nodev(node, "tx_disable", 0,
+					   &gpio, GPIOD_IS_OUT);
+		if (dm_gpio_is_valid(&gpio)) {
+			debug("%s: Setting GPIO %d to 0\n", __func__,
+			      gpio.offset);
+			dm_gpio_set_value(&gpio, 0);
+		}
+		dm_gpio_free(gpio.dev, &gpio);
+	}
+
 	board_configure_qlms();
 
 	return 0;
 }
+
+int last_stage_init(void)
+{
+	struct gpio_desc gpio = {};
+	ofnode node;
+
+	node = ofnode_by_compatible(ofnode_null(), "vitesse,vsc7224");
+	if (!ofnode_valid(node)) {
+		printf("Vitesse SPF DT node not found!");
+		return 0;
+	}
+
+	gpio_request_by_name_nodev(node, "reset", 0, &gpio, GPIOD_IS_OUT);
+	if (dm_gpio_is_valid(&gpio)) {
+		/* Take Vitesse retimer out of reset */
+		debug("%s: Setting GPIO 7 to 0\n", __func__);
+		dm_gpio_set_value(&gpio, 0);
+		mdelay(50);
+	}
+	dm_gpio_free(gpio.dev, &gpio);
+
+	return 0;
+}
diff --git a/configs/clearfog_defconfig b/configs/clearfog_defconfig
index 1e9c389..a7b6508 100644
--- a/configs/clearfog_defconfig
+++ b/configs/clearfog_defconfig
@@ -62,6 +62,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_SCSI=y
diff --git a/configs/controlcenterdc_defconfig b/configs/controlcenterdc_defconfig
index d4b966b..df38b2c 100644
--- a/configs/controlcenterdc_defconfig
+++ b/configs/controlcenterdc_defconfig
@@ -77,6 +77,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_DM_PCI_COMPAT=y
 CONFIG_PCI_MVEBU=y
diff --git a/configs/db-88f6820-amc_defconfig b/configs/db-88f6820-amc_defconfig
index 46c822f..9b77b4a 100644
--- a/configs/db-88f6820-amc_defconfig
+++ b/configs/db-88f6820-amc_defconfig
@@ -67,6 +67,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_DEBUG_UART_SHIFT=2
diff --git a/configs/db-88f6820-gp_defconfig b/configs/db-88f6820-gp_defconfig
index 2dcbc2f..f56d1fb 100644
--- a/configs/db-88f6820-gp_defconfig
+++ b/configs/db-88f6820-gp_defconfig
@@ -62,6 +62,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_SCSI=y
diff --git a/configs/db-mv784mp-gp_defconfig b/configs/db-mv784mp-gp_defconfig
index f19cc54..5683f11 100644
--- a/configs/db-mv784mp-gp_defconfig
+++ b/configs/db-mv784mp-gp_defconfig
@@ -65,6 +65,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_DEBUG_UART_SHIFT=2
diff --git a/configs/ds414_defconfig b/configs/ds414_defconfig
index a3279c1..a83fe07 100644
--- a/configs/ds414_defconfig
+++ b/configs/ds414_defconfig
@@ -65,6 +65,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_DEBUG_UART_SHIFT=2
diff --git a/configs/helios4_defconfig b/configs/helios4_defconfig
index 7d812e8..c2130ba 100644
--- a/configs/helios4_defconfig
+++ b/configs/helios4_defconfig
@@ -63,6 +63,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_PCI_MVEBU=y
 CONFIG_SCSI=y
diff --git a/configs/maxbcm_defconfig b/configs/maxbcm_defconfig
index 8dd6adf..40f79d4 100644
--- a/configs/maxbcm_defconfig
+++ b/configs/maxbcm_defconfig
@@ -47,6 +47,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_DEBUG_UART_SHIFT=2
 CONFIG_SYS_NS16550=y
 CONFIG_KIRKWOOD_SPI=y
diff --git a/configs/mvebu_espressobin-88f3720_defconfig b/configs/mvebu_espressobin-88f3720_defconfig
index ff05630..8d7d57f 100644
--- a/configs/mvebu_espressobin-88f3720_defconfig
+++ b/configs/mvebu_espressobin-88f3720_defconfig
@@ -25,6 +25,7 @@
 CONFIG_ARCH_EARLY_INIT_R=y
 CONFIG_BOARD_EARLY_INIT_F=y
 CONFIG_BOARD_LATE_INIT=y
+CONFIG_LAST_STAGE_INIT=y
 # CONFIG_CMD_FLASH is not set
 CONFIG_CMD_FUSE=y
 CONFIG_CMD_GPIO=y
@@ -73,9 +74,11 @@
 CONFIG_SPI_FLASH_WINBOND=y
 CONFIG_SPI_FLASH_MTD=y
 CONFIG_PHY_MARVELL=y
+CONFIG_PHY_FIXED=y
 CONFIG_PHY_GIGE=y
 CONFIG_E1000=y
 CONFIG_MVNETA=y
+CONFIG_MVMDIO=y
 CONFIG_NVME_PCI=y
 CONFIG_PCI=y
 CONFIG_PCI_AARDVARK=y
diff --git a/configs/octeon_ebb7304_defconfig b/configs/octeon_ebb7304_defconfig
index e907144..f70f0d2 100644
--- a/configs/octeon_ebb7304_defconfig
+++ b/configs/octeon_ebb7304_defconfig
@@ -26,6 +26,7 @@
 CONFIG_CMD_PCI=y
 CONFIG_CMD_USB=y
 CONFIG_CMD_DHCP=y
+CONFIG_CMD_MII=y
 CONFIG_CMD_PING=y
 CONFIG_CMD_RTC=y
 CONFIG_CMD_TIME=y
@@ -36,6 +37,7 @@
 CONFIG_EFI_PARTITION=y
 CONFIG_PARTITION_TYPE_GUID=y
 CONFIG_ENV_IS_IN_FLASH=y
+CONFIG_TFTP_TSIZE=y
 CONFIG_CLK=y
 # CONFIG_INPUT is not set
 CONFIG_MISC=y
@@ -53,7 +55,12 @@
 CONFIG_SPI_FLASH_ATMEL=y
 CONFIG_SPI_FLASH_SPANSION=y
 CONFIG_SPI_FLASH_STMICRO=y
+CONFIG_PHYLIB=y
+CONFIG_PHY_MARVELL=y
+CONFIG_DM_MDIO=y
+CONFIG_DM_ETH_PHY=y
 CONFIG_E1000=y
+CONFIG_NET_OCTEON=y
 CONFIG_PCI=y
 CONFIG_PCIE_OCTEON=y
 CONFIG_DM_REGULATOR=y
diff --git a/configs/octeon_nic23_defconfig b/configs/octeon_nic23_defconfig
index 1a1718a..3ab5838 100644
--- a/configs/octeon_nic23_defconfig
+++ b/configs/octeon_nic23_defconfig
@@ -14,13 +14,16 @@
 CONFIG_TARGET_OCTEON_NIC23=y
 # CONFIG_MIPS_CACHE_SETUP is not set
 # CONFIG_MIPS_CACHE_DISABLE is not set
+CONFIG_MIPS_RELOCATION_TABLE_SIZE=0xc000
 CONFIG_DEBUG_UART=y
 CONFIG_AHCI=y
 CONFIG_OF_BOARD_FIXUP=y
 CONFIG_SYS_CONSOLE_ENV_OVERWRITE=y
 # CONFIG_SYS_DEVICE_NULLDEV is not set
 CONFIG_ARCH_MISC_INIT=y
+CONFIG_BOARD_EARLY_INIT_F=y
 CONFIG_BOARD_LATE_INIT=y
+CONFIG_LAST_STAGE_INIT=y
 CONFIG_HUSH_PARSER=y
 # CONFIG_CMD_FLASH is not set
 CONFIG_CMD_GPIO=y
@@ -37,6 +40,7 @@
 CONFIG_CMD_FS_GENERIC=y
 CONFIG_EFI_PARTITION=y
 CONFIG_ENV_IS_IN_SPI_FLASH=y
+CONFIG_TFTP_TSIZE=y
 CONFIG_SATA=y
 CONFIG_AHCI_MVEBU=y
 CONFIG_CLK=y
@@ -50,7 +54,11 @@
 CONFIG_SPI_FLASH_ATMEL=y
 CONFIG_SPI_FLASH_SPANSION=y
 CONFIG_SPI_FLASH_STMICRO=y
-# CONFIG_NETDEVICES is not set
+CONFIG_PHYLIB=y
+CONFIG_PHYLIB_10G=y
+CONFIG_DM_MDIO=y
+CONFIG_DM_ETH_PHY=y
+CONFIG_NET_OCTEON=y
 CONFIG_PCI=y
 CONFIG_DM_REGULATOR=y
 CONFIG_DM_REGULATOR_FIXED=y
diff --git a/configs/theadorable_debug_defconfig b/configs/theadorable_debug_defconfig
index 86129e7..9a03a0a 100644
--- a/configs/theadorable_debug_defconfig
+++ b/configs/theadorable_debug_defconfig
@@ -70,6 +70,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_DM_PCI_COMPAT=y
 CONFIG_PCI_MVEBU=y
diff --git a/configs/turris_mox_defconfig b/configs/turris_mox_defconfig
index 9a76a11..bcd3699 100644
--- a/configs/turris_mox_defconfig
+++ b/configs/turris_mox_defconfig
@@ -82,6 +82,7 @@
 CONFIG_PHY_MARVELL=y
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
+CONFIG_MVMDIO=y
 CONFIG_NVME_PCI=y
 CONFIG_PCI=y
 CONFIG_PCI_AARDVARK=y
diff --git a/configs/turris_omnia_defconfig b/configs/turris_omnia_defconfig
index 5591c97..217e260 100644
--- a/configs/turris_omnia_defconfig
+++ b/configs/turris_omnia_defconfig
@@ -82,9 +82,11 @@
 CONFIG_SPI_FLASH_SPANSION=y
 CONFIG_SPI_FLASH_MTD=y
 CONFIG_PHY_MARVELL=y
+CONFIG_PHY_FIXED=y
 CONFIG_PHY_GIGE=y
 CONFIG_MVNETA=y
 CONFIG_MII=y
+CONFIG_MVMDIO=y
 CONFIG_NVME_PCI=y
 CONFIG_PCI=y
 CONFIG_PCI_MVEBU=y
diff --git a/configs/uDPU_defconfig b/configs/uDPU_defconfig
index c07bad5..f2852ad 100644
--- a/configs/uDPU_defconfig
+++ b/configs/uDPU_defconfig
@@ -73,6 +73,7 @@
 CONFIG_PHY_GIGE=y
 CONFIG_E1000=y
 CONFIG_MVNETA=y
+CONFIG_MVMDIO=y
 CONFIG_PCI=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PHY=y
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2062197..de6131f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -416,8 +416,8 @@
 	uc_priv->udma_mask = 0x7f;	/* FIXME: assume UDMA6 is supported */
 
 #if !defined(CONFIG_DM_SCSI)
-	uc_priv->mmio_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_5,
-					      PCI_REGION_MEM);
+	uc_priv->mmio_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_5, 0, 0,
+					    PCI_REGION_TYPE, PCI_REGION_MEM);
 
 	/* Take from kernel:
 	 * JMicron-specific fixup:
@@ -1148,8 +1148,8 @@
 	ulong base;
 	u16 vendor, device;
 
-	base = (ulong)dm_pci_map_bar(ahci_dev, PCI_BASE_ADDRESS_5,
-				     PCI_REGION_MEM);
+	base = (ulong)dm_pci_map_bar(ahci_dev, PCI_BASE_ADDRESS_5, 0, 0,
+				     PCI_REGION_TYPE, PCI_REGION_MEM);
 
 	/*
 	 * Note:
@@ -1164,6 +1164,7 @@
 	if (vendor == PCI_VENDOR_ID_CAVIUM &&
 	    device == PCI_DEVICE_ID_CAVIUM_SATA)
 		base = (uintptr_t)dm_pci_map_bar(ahci_dev, PCI_BASE_ADDRESS_0,
+						 0, 0, PCI_REGION_TYPE,
 						 PCI_REGION_MEM);
 	return ahci_probe_scsi(ahci_dev, base);
 }
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index b213eba..7065154 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -699,9 +699,11 @@
 
 	/* Read out all BARs */
 	sata_info.iobase[0] = (ulong)dm_pci_map_bar(dev,
-			PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+			PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
+			PCI_REGION_MEM);
 	sata_info.iobase[1] = (ulong)dm_pci_map_bar(dev,
-			PCI_BASE_ADDRESS_2, PCI_REGION_MEM);
+			PCI_BASE_ADDRESS_2, 0, 0, PCI_REGION_TYPE,
+			PCI_REGION_MEM);
 
 	/* mask out the unused bits */
 	sata_info.iobase[0] &= 0xffffff80;
diff --git a/drivers/bios_emulator/atibios.c b/drivers/bios_emulator/atibios.c
index 9547470..cdc5ba6 100644
--- a/drivers/bios_emulator/atibios.c
+++ b/drivers/bios_emulator/atibios.c
@@ -368,8 +368,8 @@
 		return NULL;
 	}
 
-	BIOSImage = dm_pci_bus_to_virt(pcidev, BIOSImageBus,
-				       PCI_REGION_MEM, 0, MAP_NOCACHE);
+	BIOSImage = dm_pci_bus_to_virt(pcidev, BIOSImageBus, 0, PCI_REGION_TYPE,
+				       PCI_REGION_MEM, MAP_NOCACHE);
 
 	/*Change the PCI BAR registers to map it onto the bus.*/
 	dm_pci_write_config32(pcidev, BIOSImageBAR, 0);
diff --git a/drivers/gpio/octeon_gpio.c b/drivers/gpio/octeon_gpio.c
index 42eae79..2b2465b 100644
--- a/drivers/gpio/octeon_gpio.c
+++ b/drivers/gpio/octeon_gpio.c
@@ -183,7 +183,7 @@
 	priv->data = (const struct octeon_gpio_data *)dev_get_driver_data(dev);
 
 	if (priv->data->probe == PROBE_PCI) {
-		priv->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+		priv->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 					    PCI_REGION_MEM);
 		uc_priv->gpio_count = readq(priv->base +
 					    priv->data->reg_offs + GPIO_CONST) &
diff --git a/drivers/i2c/designware_i2c_pci.c b/drivers/i2c/designware_i2c_pci.c
index 9e38773..1572c2c 100644
--- a/drivers/i2c/designware_i2c_pci.c
+++ b/drivers/i2c/designware_i2c_pci.c
@@ -59,7 +59,8 @@
 		priv->regs = (struct i2c_regs *)dm_pci_read_bar32(dev, 0);
 	} else {
 		priv->regs = (struct i2c_regs *)
-			dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+			dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
+				       PCI_REGION_TYPE, PCI_REGION_MEM);
 	}
 	if (!priv->regs)
 		return -EINVAL;
diff --git a/drivers/i2c/intel_i2c.c b/drivers/i2c/intel_i2c.c
index 52f7a52..dc26fa8 100644
--- a/drivers/i2c/intel_i2c.c
+++ b/drivers/i2c/intel_i2c.c
@@ -251,7 +251,7 @@
 	ulong base;
 
 	/* Save base address from PCI BAR */
-	priv->base = (ulong)dm_pci_map_bar(dev, PCI_BASE_ADDRESS_4,
+	priv->base = (ulong)dm_pci_map_bar(dev, PCI_BASE_ADDRESS_4, 0, 0, PCI_REGION_TYPE,
 					   PCI_REGION_IO);
 	base = priv->base;
 
diff --git a/drivers/i2c/octeon_i2c.c b/drivers/i2c/octeon_i2c.c
index 50199ff..e54ef18 100644
--- a/drivers/i2c/octeon_i2c.c
+++ b/drivers/i2c/octeon_i2c.c
@@ -792,7 +792,7 @@
 
 		debug("TWSI PCI device: %x\n", bdf);
 
-		twsi->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+		twsi->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 					    PCI_REGION_MEM);
 	} else {
 		twsi->base = dev_remap_addr(dev);
diff --git a/drivers/mmc/octeontx_hsmmc.c b/drivers/mmc/octeontx_hsmmc.c
index f0519f0..6e9acf7 100644
--- a/drivers/mmc/octeontx_hsmmc.c
+++ b/drivers/mmc/octeontx_hsmmc.c
@@ -3822,7 +3822,7 @@
 
 	/* Octeon TX & TX2 use PCI based probing */
 	if (device_is_compatible(dev, "cavium,thunder-8890-mmc")) {
-		host->base_addr = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+		host->base_addr = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 						 PCI_REGION_MEM);
 		if (!host->base_addr) {
 			pr_err("%s: Error: MMC base address not found\n",
diff --git a/drivers/mmc/pci_mmc.c b/drivers/mmc/pci_mmc.c
index b9ab064..cba2ea8 100644
--- a/drivers/mmc/pci_mmc.c
+++ b/drivers/mmc/pci_mmc.c
@@ -50,7 +50,7 @@
 	desc = mmc_get_blk_desc(&plat->mmc);
 	desc->removable = !(plat->cfg.host_caps & MMC_CAP_NONREMOVABLE);
 
-	host->ioaddr = (void *)dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+	host->ioaddr = (void *)dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 					      PCI_REGION_MEM);
 	host->name = dev->name;
 	host->cd_gpio = priv->cd_gpio;
diff --git a/drivers/mtd/nand/raw/octeontx_bch.c b/drivers/mtd/nand/raw/octeontx_bch.c
index 24ffa51..c1d721c 100644
--- a/drivers/mtd/nand/raw/octeontx_bch.c
+++ b/drivers/mtd/nand/raw/octeontx_bch.c
@@ -176,7 +176,8 @@
 	if (!bch)
 		return -ENOMEM;
 
-	bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+	bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
+				       PCI_REGION_TYPE, PCI_REGION_MEM);
 	bch->dev = dev;
 
 	debug("%s: base address: %p\n", __func__, bch->reg_base);
@@ -361,7 +362,8 @@
 	vf->dev = dev;
 
 	/* Map PF's configuration registers */
-	vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+	vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
+				      PCI_REGION_TYPE, PCI_REGION_MEM);
 	debug("%s: reg base: %p\n", __func__, vf->reg_base);
 
 	err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
diff --git a/drivers/mtd/nand/raw/octeontx_nand.c b/drivers/mtd/nand/raw/octeontx_nand.c
index ff363a5..b338b20 100644
--- a/drivers/mtd/nand/raw/octeontx_nand.c
+++ b/drivers/mtd/nand/raw/octeontx_nand.c
@@ -2098,7 +2098,7 @@
 	tn->dev = dev;
 	INIT_LIST_HEAD(&tn->chips);
 
-	tn->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+	tn->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE, PCI_REGION_MEM);
 	if (!tn->base) {
 		ret = -EINVAL;
 		goto release;
diff --git a/drivers/mtd/spi/sf_dataflash.c b/drivers/mtd/spi/sf_dataflash.c
index b59edd1..8586781 100644
--- a/drivers/mtd/spi/sf_dataflash.c
+++ b/drivers/mtd/spi/sf_dataflash.c
@@ -70,6 +70,9 @@
 #define OP_WRITE_SECURITY_REVC	0x9A
 #define OP_WRITE_SECURITY	0x9B	/* revision D */
 
+#define DATAFLASH_SHIFT_EXTID	24
+#define DATAFLASH_SHIFT_ID	40
+
 struct dataflash {
 	uint8_t			command[16];
 	unsigned short		page_offset;	/* offset in flash address */
@@ -455,7 +458,7 @@
 	 * JEDEC id has a high byte of zero plus three data bytes:
 	 * the manufacturer id, then a two byte device id.
 	 */
-	uint32_t	jedec_id;
+	uint64_t	jedec_id;
 
 	/* The size listed here is what works with OP_ERASE_PAGE. */
 	unsigned	nr_pages;
@@ -463,6 +466,7 @@
 	uint16_t	pageoffset;
 
 	uint16_t	flags;
+#define SUP_EXTID	0x0004		/* supports extended ID data */
 #define SUP_POW2PS	0x0002		/* supports 2^N byte pages */
 #define IS_POW2PS	0x0001		/* uses 2^N byte pages */
 };
@@ -506,50 +510,31 @@
 
 	{ "AT45DB642x",  0x1f2800, 8192, 1056, 11, SUP_POW2PS},
 	{ "at45db642d",  0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
+
+	{ "AT45DB641E",  0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+	{ "at45db641e",  0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
 };
 
-static struct data_flash_info *jedec_probe(struct spi_slave *spi)
+static struct data_flash_info *jedec_lookup(struct spi_slave *spi,
+					    u64 jedec, bool use_extid)
+
 {
-	int			tmp;
-	uint8_t			id[5];
-	uint32_t		jedec;
-	struct data_flash_info	*info;
-	u8 opcode		= CMD_READ_ID;
+	struct data_flash_info *info;
 	int status;
 
-	/*
-	 * JEDEC also defines an optional "extended device information"
-	 * string for after vendor-specific data, after the three bytes
-	 * we use here.  Supporting some chips might require using it.
-	 *
-	 * If the vendor ID isn't Atmel's (0x1f), assume this call failed.
-	 * That's not an error; only rev C and newer chips handle it, and
-	 * only Atmel sells these chips.
-	 */
-	tmp = spi_write_then_read(spi, &opcode, 1, NULL, id, sizeof(id));
-	if (tmp < 0) {
-		printf("dataflash: error %d reading JEDEC ID\n", tmp);
-		return ERR_PTR(tmp);
-	}
-	if (id[0] != 0x1f)
-		return NULL;
-
-	jedec = id[0];
-	jedec = jedec << 8;
-	jedec |= id[1];
-	jedec = jedec << 8;
-	jedec |= id[2];
+	for (info = dataflash_data;
+	     info < dataflash_data + ARRAY_SIZE(dataflash_data);
+	     info++) {
+		if (use_extid && !(info->flags & SUP_EXTID))
+			continue;
 
-	for (tmp = 0, info = dataflash_data;
-			tmp < ARRAY_SIZE(dataflash_data);
-			tmp++, info++) {
 		if (info->jedec_id == jedec) {
 			if (info->flags & SUP_POW2PS) {
 				status = dataflash_status(spi);
 				if (status < 0) {
 					debug("dataflash: status error %d\n",
 					      status);
-					return NULL;
+					return ERR_PTR(status);
 				}
 				if (status & 0x1) {
 					if (info->flags & IS_POW2PS)
@@ -564,12 +549,58 @@
 		}
 	}
 
+	return ERR_PTR(-ENODEV);
+}
+
+static struct data_flash_info *jedec_probe(struct spi_slave *spi)
+{
+	int			tmp;
+	uint64_t		jedec;
+	uint8_t			id[sizeof(jedec)] = {0};
+	const unsigned int	id_size = 5;
+	struct data_flash_info	*info;
+	u8 opcode		= CMD_READ_ID;
+
+	/*
+	 * JEDEC also defines an optional "extended device information"
+	 * string for after vendor-specific data, after the three bytes
+	 * we use here.  Supporting some chips might require using it.
+	 *
+	 * If the vendor ID isn't Atmel's (0x1f), assume this call failed.
+	 * That's not an error; only rev C and newer chips handle it, and
+	 * only Atmel sells these chips.
+	 */
+	tmp = spi_write_then_read(spi, &opcode, 1, NULL, id, id_size);
+	if (tmp < 0) {
+		printf("dataflash: error %d reading JEDEC ID\n", tmp);
+		return ERR_PTR(tmp);
+	}
+
+	if (id[0] != 0x1f)
+		return NULL;
+
+	jedec = be64_to_cpup((__be64 *)id);
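+	/*
+	 * As a worked example (values from the AT45DB641E entries above):
+	 * that chip returns ID bytes 1f 28 00 01 00, so the zero-padded
+	 * 8-byte buffer reads back as jedec = 0x1f28000100000000. Shifting
+	 * right by DATAFLASH_SHIFT_EXTID (24) yields the extended ID
+	 * 0x1f28000100, while DATAFLASH_SHIFT_ID (40) yields the classic
+	 * three-byte ID 0x1f2800.
+	 */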
+
+	/*
+	 * First, try to match device using extended device
+	 * information
+	 */
+	info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_EXTID, true);
+	if (!IS_ERR(info))
+		return info;
+	/*
+	 * If that fails, make another pass using regular ID
+	 * information
+	 */
+	info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_ID, false);
+	if (!IS_ERR(info))
+		return info;
 	/*
 	 * Treat other chips as errors ... we won't know the right page
 	 * size (it might be binary) even when we can tell which density
 	 * class is involved (legacy chip id scheme).
 	 */
-	printf("dataflash: JEDEC id %06x not handled\n", jedec);
+	printf("dataflash: JEDEC id 0x%016llx not handled\n", jedec);
 	return ERR_PTR(-ENODEV);
 }
 
@@ -618,6 +649,8 @@
 				(info->flags & SUP_POW2PS) ? 'd' : 'c');
 		if (status < 0)
 			goto err_status;
+		else
+			return status;
 	}
 
        /*
diff --git a/drivers/mtd/spi/spi-nor-ids.c b/drivers/mtd/spi/spi-nor-ids.c
index 763bab0..7050ddc 100644
--- a/drivers/mtd/spi/spi-nor-ids.c
+++ b/drivers/mtd/spi/spi-nor-ids.c
@@ -166,6 +166,8 @@
 	{ INFO("mx25u6435f",  0xc22537, 0, 64 * 1024, 128, SECT_4K) },
 	{ INFO("mx25l12805d", 0xc22018, 0, 64 * 1024, 256, SECT_4K) },
 	{ INFO("mx25u12835f", 0xc22538, 0, 64 * 1024, 256, SECT_4K) },
+	{ INFO("mx25u51245g", 0xc2253a, 0, 64 * 1024, 1024, SECT_4K |
+	       SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
 	{ INFO("mx25l12855e", 0xc22618, 0, 64 * 1024, 256, 0) },
 	{ INFO("mx25l25635e", 0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ INFO("mx25u25635f", 0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
@@ -346,6 +348,11 @@
 			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
 	},
 	{
+		INFO("w25q128jw", 0xef8018, 0, 64 * 1024, 256,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{
 		INFO("w25q256fw", 0xef6019, 0, 64 * 1024, 512,
 			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
 			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 347fe8a..7fe0e00 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -445,6 +445,7 @@
 	bool "Marvell Armada XP/385/3700 network interface support"
 	depends on ARMADA_XP || ARMADA_38X || ARMADA_3700
 	select PHYLIB
+	select DM_MDIO
 	help
 	  This driver supports the network interface units in the
 	  Marvell ARMADA XP, ARMADA 38X and ARMADA 3700 SoCs
@@ -495,6 +496,13 @@
 	  The MediaTek MT7628 ethernet interface is used on MT7628 and
 	  MT7688 based boards.
 
+config NET_OCTEON
+	bool "MIPS Octeon ethernet support"
+	depends on ARCH_OCTEON
+	help
+	  Select Y to enable network device support for MIPS Octeon
+	  SoCs. If unsure, say N.
+
 config NET_OCTEONTX
 	bool "OcteonTX Ethernet support"
 	depends on ARCH_OCTEONTX
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 16733d2..69fb3bb 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -64,6 +64,7 @@
 obj-$(CONFIG_MVNETA) += mvneta.o
 obj-$(CONFIG_MVPP2) += mvpp2.o
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
+obj-$(CONFIG_NET_OCTEON) += octeon/
 obj-$(CONFIG_NET_OCTEONTX) += octeontx/
 obj-$(CONFIG_NET_OCTEONTX2) += octeontx2/
 obj-$(CONFIG_OCTEONTX2_CGX_INTF) += octeontx2/cgx_intf.o
diff --git a/drivers/net/bnxt/bnxt.c b/drivers/net/bnxt/bnxt.c
index 9844e96..1c9a996 100644
--- a/drivers/net/bnxt/bnxt.c
+++ b/drivers/net/bnxt/bnxt.c
@@ -28,9 +28,12 @@
 	dm_pci_read_config16(bp->pdev, PCI_SUBSYSTEM_ID, &bp->subsystem_device);
 	dm_pci_read_config16(bp->pdev, PCI_COMMAND, &bp->cmd_reg);
 	dm_pci_read_config8(bp->pdev, PCI_INTERRUPT_LINE, &bp->irq);
-	bp->bar0 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
-	bp->bar1 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_2, PCI_REGION_MEM);
-	bp->bar2 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_4, PCI_REGION_MEM);
+	bp->bar0 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_0, 0, 0,
+				  PCI_REGION_TYPE, PCI_REGION_MEM);
+	bp->bar1 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_2, 0, 0,
+				  PCI_REGION_TYPE, PCI_REGION_MEM);
+	bp->bar2 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_4, 0, 0,
+				  PCI_REGION_TYPE, PCI_REGION_MEM);
 	cmd_reg = bp->cmd_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
 	cmd_reg |= PCI_COMMAND_INTX_DISABLE; /* disable intr */
 	dm_pci_write_config16(bp->pdev, PCI_COMMAND, cmd_reg);
diff --git a/drivers/net/e1000.c b/drivers/net/e1000.c
index 4e34248..5fe016e 100644
--- a/drivers/net/e1000.c
+++ b/drivers/net/e1000.c
@@ -5549,8 +5549,8 @@
 	hw->eeprom_semaphore_present = true;
 #endif
 #ifdef CONFIG_DM_ETH
-	hw->hw_addr = dm_pci_map_bar(devno,	PCI_BASE_ADDRESS_0,
-						PCI_REGION_MEM);
+	hw->hw_addr = dm_pci_map_bar(devno,	PCI_BASE_ADDRESS_0, 0, 0,
+						PCI_REGION_TYPE, PCI_REGION_MEM);
 #else
 	hw->hw_addr = pci_map_bar(devno,	PCI_BASE_ADDRESS_0,
 						PCI_REGION_MEM);
diff --git a/drivers/net/fsl_enetc.c b/drivers/net/fsl_enetc.c
index 1724f94..9b97a03 100644
--- a/drivers/net/fsl_enetc.c
+++ b/drivers/net/fsl_enetc.c
@@ -339,7 +339,7 @@
 	}
 
 	/* initialize register */
-	priv->regs_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0);
+	priv->regs_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE, 0);
 	if (!priv->regs_base) {
 		enetc_dbg(dev, "failed to map BAR0\n");
 		return -EINVAL;
diff --git a/drivers/net/fsl_enetc_mdio.c b/drivers/net/fsl_enetc_mdio.c
index 3eb6ac9..50ad76d 100644
--- a/drivers/net/fsl_enetc_mdio.c
+++ b/drivers/net/fsl_enetc_mdio.c
@@ -125,7 +125,7 @@
 {
 	struct enetc_mdio_priv *priv = dev_get_priv(dev);
 
-	priv->regs_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0);
+	priv->regs_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE, 0);
 	if (!priv->regs_base) {
 		enetc_dbg(dev, "failed to map BAR0\n");
 		return -EINVAL;
diff --git a/drivers/net/mscc_eswitch/felix_switch.c b/drivers/net/mscc_eswitch/felix_switch.c
index 60b2e8f..709c9e3 100644
--- a/drivers/net/mscc_eswitch/felix_switch.c
+++ b/drivers/net/mscc_eswitch/felix_switch.c
@@ -292,13 +292,13 @@
 		return -ENODEV;
 	}
 
-	priv->imdio_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0);
+	priv->imdio_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE, 0);
 	if (!priv->imdio_base) {
 		dev_err(dev, "failed to map BAR0\n");
 		return -EINVAL;
 	}
 
-	priv->regs_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_4, 0);
+	priv->regs_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_4, 0, 0, PCI_REGION_TYPE, 0);
 	if (!priv->regs_base) {
 		dev_err(dev, "failed to map BAR4\n");
 		return -EINVAL;
diff --git a/drivers/net/mvneta.c b/drivers/net/mvneta.c
index 15dc714..d2c42c4 100644
--- a/drivers/net/mvneta.c
+++ b/drivers/net/mvneta.c
@@ -40,11 +40,7 @@
 
 DECLARE_GLOBAL_DATA_PTR;
 
-#if !defined(CONFIG_PHYLIB)
-# error Marvell mvneta requires PHYLIB
-#endif
-
-#define CONFIG_NR_CPUS		1
+#define MVNETA_NR_CPUS		1
 #define ETH_HLEN		14	/* Total octets in header */
 
 /* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
@@ -192,7 +188,6 @@
 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
-#define      MVNETA_GMAC_FORCE_LINK_UP           (BIT(0) | BIT(1))
 #define      MVNETA_GMAC_IB_BYPASS_AN_EN         BIT(3)
 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
@@ -282,13 +277,11 @@
 	unsigned int speed;
 
 	int init;
-	int phyaddr;
 	struct phy_device *phydev;
 #if CONFIG_IS_ENABLED(DM_GPIO)
 	struct gpio_desc phy_reset_gpio;
 	struct gpio_desc sfp_tx_disable_gpio;
 #endif
-	struct mii_dev *bus;
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -414,15 +407,6 @@
  */
 #define BD_SPACE	(1 << 20)
 
-/*
- * Dummy implementation that can be overwritten by a board
- * specific function
- */
-__weak int board_network_enable(struct mii_dev *bus)
-{
-	return 0;
-}
-
 /* Utility/helper methods */
 
 /* Write helper method */
@@ -576,13 +560,6 @@
 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
 }
 
-static int mvneta_port_is_fixed_link(struct mvneta_port *pp)
-{
-	/* phy_addr is set to invalid value for fixed link */
-	return pp->phyaddr > PHY_MAX_ADDR;
-}
-
-
 /* Start the Ethernet port RX and TX activity */
 static void mvneta_port_up(struct mvneta_port *pp)
 {
@@ -791,7 +768,7 @@
 	/* Set CPU queue access map - all CPUs have access to all RX
 	 * queues and to all TX queues
 	 */
-	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+	for (cpu = 0; cpu < MVNETA_NR_CPUS; cpu++)
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
 			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
@@ -834,7 +811,10 @@
 	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
 
 	/* Enable PHY polling in hardware if not in fixed-link mode */
-	if (!mvneta_port_is_fixed_link(pp)) {
+	if (!CONFIG_IS_ENABLED(PHY_FIXED) ||
+	    pp->phydev->phy_id != PHY_FIXED_ID) {
+		mvreg_write(pp, MVNETA_PHY_ADDR, pp->phydev->addr);
+
 		val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
 		val |= MVNETA_PHY_POLLING_ENABLE;
 		mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
@@ -1171,38 +1151,46 @@
 {
 	struct mvneta_port *pp = dev_get_priv(dev);
 	struct phy_device *phydev = pp->phydev;
-	int status_change = 0;
+	bool status_change = false;
 
-	if (mvneta_port_is_fixed_link(pp)) {
-		debug("Using fixed link, skip link adjust\n");
-		return;
-	}
+	if (phydev->link &&
+	    (pp->speed != phydev->speed || pp->duplex != phydev->duplex)) {
+		u32 val;
 
-	if (phydev->link) {
-		if ((pp->speed != phydev->speed) ||
-		    (pp->duplex != phydev->duplex)) {
-			u32 val;
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
+			 MVNETA_GMAC_CONFIG_GMII_SPEED |
+			 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+			 MVNETA_GMAC_AN_SPEED_EN |
+			 MVNETA_GMAC_AN_DUPLEX_EN);
 
-			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
-				 MVNETA_GMAC_CONFIG_GMII_SPEED |
-				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
-				 MVNETA_GMAC_AN_SPEED_EN |
-				 MVNETA_GMAC_AN_DUPLEX_EN);
+		/* FIXME: For fixed-link case, these were the initial settings
+		 * used before the code was converted to use PHY_FIXED. Some of
+		 * these may look nonsensical (for example BYPASS_AN only makes
+		 * sense for 1000base-x and 2500base-x modes, AFAIK), and in fact
+		 * this may be changed in the future (when support for inband AN
+		 * is added). Also, why is ADVERT_FC enabled if we don't enable
+		 * inband AN at all?
+		 */
+		if (CONFIG_IS_ENABLED(PHY_FIXED) &&
+		    pp->phydev->phy_id == PHY_FIXED_ID)
+			val = MVNETA_GMAC_IB_BYPASS_AN_EN |
+			      MVNETA_GMAC_SET_FC_EN |
+			      MVNETA_GMAC_ADVERT_FC_EN |
+			      MVNETA_GMAC_SAMPLE_TX_CFG_EN;
 
-			if (phydev->duplex)
-				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+		if (phydev->duplex)
+			val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
 
-			if (phydev->speed == SPEED_1000)
-				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-			else
-				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+		if (phydev->speed == SPEED_1000)
+			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+		else if (pp->speed == SPEED_100)
+			val |= MVNETA_GMAC_CONFIG_MII_SPEED;
 
-			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
 
-			pp->duplex = phydev->duplex;
-			pp->speed  = phydev->speed;
-		}
+		pp->duplex = phydev->duplex;
+		pp->speed = phydev->speed;
 	}
 
 	if (phydev->link != pp->link) {
@@ -1212,7 +1200,7 @@
 		}
 
 		pp->link = phydev->link;
-		status_change = 1;
+		status_change = true;
 	}
 
 	if (status_change) {
@@ -1428,118 +1416,6 @@
 
 /* U-Boot only functions follow here */
 
-/* SMI / MDIO functions */
-
-static int smi_wait_ready(struct mvneta_port *pp)
-{
-	u32 timeout = MVNETA_SMI_TIMEOUT;
-	u32 smi_reg;
-
-	/* wait till the SMI is not busy */
-	do {
-		/* read smi register */
-		smi_reg = mvreg_read(pp, MVNETA_SMI);
-		if (timeout-- == 0) {
-			printf("Error: SMI busy timeout\n");
-			return -EFAULT;
-		}
-	} while (smi_reg & MVNETA_SMI_BUSY);
-
-	return 0;
-}
-
-/*
- * mvneta_mdio_read - miiphy_read callback function.
- *
- * Returns 16bit phy register value, or 0xffff on error
- */
-static int mvneta_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
-{
-	struct mvneta_port *pp = bus->priv;
-	u32 smi_reg;
-	u32 timeout;
-
-	/* check parameters */
-	if (addr > MVNETA_PHY_ADDR_MASK) {
-		printf("Error: Invalid PHY address %d\n", addr);
-		return -EFAULT;
-	}
-
-	if (reg > MVNETA_PHY_REG_MASK) {
-		printf("Err: Invalid register offset %d\n", reg);
-		return -EFAULT;
-	}
-
-	/* wait till the SMI is not busy */
-	if (smi_wait_ready(pp) < 0)
-		return -EFAULT;
-
-	/* fill the phy address and regiser offset and read opcode */
-	smi_reg = (addr << MVNETA_SMI_DEV_ADDR_OFFS)
-		| (reg << MVNETA_SMI_REG_ADDR_OFFS)
-		| MVNETA_SMI_OPCODE_READ;
-
-	/* write the smi register */
-	mvreg_write(pp, MVNETA_SMI, smi_reg);
-
-	/* wait till read value is ready */
-	timeout = MVNETA_SMI_TIMEOUT;
-
-	do {
-		/* read smi register */
-		smi_reg = mvreg_read(pp, MVNETA_SMI);
-		if (timeout-- == 0) {
-			printf("Err: SMI read ready timeout\n");
-			return -EFAULT;
-		}
-	} while (!(smi_reg & MVNETA_SMI_READ_VALID));
-
-	/* Wait for the data to update in the SMI register */
-	for (timeout = 0; timeout < MVNETA_SMI_TIMEOUT; timeout++)
-		;
-
-	return mvreg_read(pp, MVNETA_SMI) & MVNETA_SMI_DATA_MASK;
-}
-
-/*
- * mvneta_mdio_write - miiphy_write callback function.
- *
- * Returns 0 if write succeed, -EINVAL on bad parameters
- * -ETIME on timeout
- */
-static int mvneta_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
-			     u16 value)
-{
-	struct mvneta_port *pp = bus->priv;
-	u32 smi_reg;
-
-	/* check parameters */
-	if (addr > MVNETA_PHY_ADDR_MASK) {
-		printf("Error: Invalid PHY address %d\n", addr);
-		return -EFAULT;
-	}
-
-	if (reg > MVNETA_PHY_REG_MASK) {
-		printf("Err: Invalid register offset %d\n", reg);
-		return -EFAULT;
-	}
-
-	/* wait till the SMI is not busy */
-	if (smi_wait_ready(pp) < 0)
-		return -EFAULT;
-
-	/* fill the phy addr and reg offset and write opcode and data */
-	smi_reg = value << MVNETA_SMI_DATA_OFFS;
-	smi_reg |= (addr << MVNETA_SMI_DEV_ADDR_OFFS)
-		| (reg << MVNETA_SMI_REG_ADDR_OFFS);
-	smi_reg &= ~MVNETA_SMI_OPCODE_READ;
-
-	/* write the smi register */
-	mvreg_write(pp, MVNETA_SMI, smi_reg);
-
-	return 0;
-}
-
 static int mvneta_start(struct udevice *dev)
 {
 	struct mvneta_port *pp = dev_get_priv(dev);
@@ -1548,57 +1424,28 @@
 	mvneta_port_power_up(pp, pp->phy_interface);
 
 	if (!pp->init || pp->link == 0) {
-		if (mvneta_port_is_fixed_link(pp)) {
-			u32 val;
-
-			pp->init = 1;
-			pp->link = 1;
-			mvneta_init(dev);
-
-			val = MVNETA_GMAC_FORCE_LINK_UP |
-			      MVNETA_GMAC_IB_BYPASS_AN_EN |
-			      MVNETA_GMAC_SET_FC_EN |
-			      MVNETA_GMAC_ADVERT_FC_EN |
-			      MVNETA_GMAC_SAMPLE_TX_CFG_EN;
-
-			if (pp->duplex)
-				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
-			if (pp->speed == SPEED_1000)
-				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-			else if (pp->speed == SPEED_100)
-				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
-
-			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-		} else {
-			/* Set phy address of the port */
-			mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr);
-
-			phydev = phy_connect(pp->bus, pp->phyaddr, dev,
-					     pp->phy_interface);
-			if (!phydev) {
-				printf("phy_connect failed\n");
-				return -ENODEV;
-			}
-
-			pp->phydev = phydev;
-			phy_config(phydev);
-			phy_startup(phydev);
-			if (!phydev->link) {
-				printf("%s: No link.\n", phydev->dev->name);
-				return -1;
-			}
+		phydev = dm_eth_phy_connect(dev);
+		if (!phydev) {
+			printf("dm_eth_phy_connect failed\n");
+			return -ENODEV;
+		}
 
-			/* Full init on first call */
-			mvneta_init(dev);
-			pp->init = 1;
-			return 0;
+		pp->phydev = phydev;
+		phy_config(phydev);
+		phy_startup(phydev);
+		if (!phydev->link) {
+			printf("%s: No link.\n", phydev->dev->name);
+			return -1;
 		}
-	}
 
-	/* Upon all following calls, this is enough */
-	mvneta_port_up(pp);
-	mvneta_port_enable(pp);
+		/* Full init on first call */
+		mvneta_init(dev);
+		pp->init = 1;
+	} else {
+		/* Upon all following calls, this is enough */
+		mvneta_port_up(pp);
+		mvneta_port_enable(pp);
+	}
 
 	return 0;
 }
@@ -1692,18 +1539,11 @@
 
 static int mvneta_probe(struct udevice *dev)
 {
-	struct eth_pdata *pdata = dev_get_plat(dev);
 	struct mvneta_port *pp = dev_get_priv(dev);
 #if CONFIG_IS_ENABLED(DM_GPIO)
 	struct ofnode_phandle_args sfp_args;
 #endif
-	void *blob = (void *)gd->fdt_blob;
-	int node = dev_of_offset(dev);
-	struct mii_dev *bus;
-	unsigned long addr;
 	void *bd_space;
-	int ret;
-	int fl_node;
 
 	/*
 	 * Allocate buffer area for descs and rx_buffers. This is only
@@ -1729,7 +1569,10 @@
 		buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
 	}
 
-	pp->base = (void __iomem *)pdata->iobase;
+	pp->base = dev_read_addr_ptr(dev);
+	pp->phy_interface = dev_read_phy_mode(dev);
+	if (pp->phy_interface == PHY_INTERFACE_MODE_NA)
+		return -EINVAL;
 
 	/* Configure MBUS address windows */
 	if (device_is_compatible(dev, "marvell,armada-3700-neta"))
@@ -1737,42 +1580,9 @@
 	else
 		mvneta_conf_mbus_windows(pp);
 
-	/* PHY interface is already decoded in mvneta_of_to_plat() */
-	pp->phy_interface = pdata->phy_interface;
-
-	/* fetch 'fixed-link' property from 'neta' node */
-	fl_node = fdt_subnode_offset(blob, node, "fixed-link");
-	if (fl_node != -FDT_ERR_NOTFOUND) {
-		/* set phy_addr to invalid value for fixed link */
-		pp->phyaddr = PHY_MAX_ADDR + 1;
-		pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
-		pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
-	} else {
-		/* Now read phyaddr from DT */
-		addr = fdtdec_get_int(blob, node, "phy", 0);
-		addr = fdt_node_offset_by_phandle(blob, addr);
-		pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
-	}
-
-	bus = mdio_alloc();
-	if (!bus) {
-		printf("Failed to allocate MDIO bus\n");
-		return -ENOMEM;
-	}
-
-	bus->read = mvneta_mdio_read;
-	bus->write = mvneta_mdio_write;
-	snprintf(bus->name, sizeof(bus->name), dev->name);
-	bus->priv = (void *)pp;
-	pp->bus = bus;
-
-	ret = mdio_register(bus);
-	if (ret)
-		return ret;
-
 #if CONFIG_IS_ENABLED(DM_GPIO)
-	ret = dev_read_phandle_with_args(dev, "sfp", NULL, 0, 0, &sfp_args);
-	if (!ret && ofnode_is_enabled(sfp_args.node))
+	if (!dev_read_phandle_with_args(dev, "sfp", NULL, 0, 0, &sfp_args) &&
+	    ofnode_is_enabled(sfp_args.node))
 		gpio_request_by_name_nodev(sfp_args.node, "tx-disable-gpio", 0,
 					   &pp->sfp_tx_disable_gpio, GPIOD_IS_OUT);
 
@@ -1789,7 +1599,7 @@
 		dm_gpio_set_value(&pp->sfp_tx_disable_gpio, 0);
 #endif
 
-	return board_network_enable(bus);
+	return 0;
 }
 
 static void mvneta_stop(struct udevice *dev)
@@ -1808,20 +1618,6 @@
 	.write_hwaddr	= mvneta_write_hwaddr,
 };
 
-static int mvneta_of_to_plat(struct udevice *dev)
-{
-	struct eth_pdata *pdata = dev_get_plat(dev);
-
-	pdata->iobase = dev_read_addr(dev);
-
-	/* Get phy-mode / phy_interface from DT */
-	pdata->phy_interface = dev_read_phy_mode(dev);
-	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
-		return -EINVAL;
-
-	return 0;
-}
-
 static const struct udevice_id mvneta_ids[] = {
 	{ .compatible = "marvell,armada-370-neta" },
 	{ .compatible = "marvell,armada-xp-neta" },
@@ -1833,7 +1629,6 @@
 	.name	= "mvneta",
 	.id	= UCLASS_ETH,
 	.of_match = mvneta_ids,
-	.of_to_plat = mvneta_of_to_plat,
 	.probe	= mvneta_probe,
 	.ops	= &mvneta_ops,
 	.priv_auto	= sizeof(struct mvneta_port),
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
new file mode 100644
index 0000000..c573411
--- /dev/null
+++ b/drivers/net/octeon/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier:    GPL-2.0+
+#
+# Copyright (C) 2018-2022 Marvell International Ltd.
+#
+
+obj-$(CONFIG_NET_OCTEON) += octeon_eth.o octeon_mdio.o
diff --git a/drivers/net/octeon/octeon_eth.c b/drivers/net/octeon/octeon_eth.c
new file mode 100644
index 0000000..fbb1afc
--- /dev/null
+++ b/drivers/net/octeon/octeon_eth.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <env.h>
+#include <net.h>
+#include <netdev.h>
+#include <malloc.h>
+#include <miiphy.h>
+#include <misc.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/cvmx-bootmem.h>
+#include <mach/octeon-model.h>
+#include <mach/cvmx-fuse.h>
+#include <mach/octeon-feature.h>
+#include <mach/octeon_fdt.h>
+#include <mach/cvmx-qlm.h>
+#include <mach/octeon_eth.h>
+#include <mach/octeon_qlm.h>
+#include <mach/cvmx-pcie.h>
+#include <mach/cvmx-coremask.h>
+
+#include <mach/cvmx-agl-defs.h>
+#include <mach/cvmx-asxx-defs.h>
+#include <mach/cvmx-bgxx-defs.h>
+#include <mach/cvmx-dbg-defs.h>
+#include <mach/cvmx-gmxx-defs.h>
+#include <mach/cvmx-gserx-defs.h>
+#include <mach/cvmx-ipd-defs.h>
+#include <mach/cvmx-l2c-defs.h>
+#include <mach/cvmx-npi-defs.h>
+#include <mach/cvmx-pcsx-defs.h>
+#include <mach/cvmx-pexp-defs.h>
+#include <mach/cvmx-pki-defs.h>
+#include <mach/cvmx-pko-defs.h>
+#include <mach/cvmx-smix-defs.h>
+#include <mach/cvmx-sriox-defs.h>
+#include <mach/cvmx-xcv-defs.h>
+#include <mach/cvmx-pcsxx-defs.h>
+
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-helper-fdt.h>
+#include <mach/cvmx-helper-bgx.h>
+#include <mach/cvmx-helper-cfg.h>
+
+#include <mach/cvmx-hwpko.h>
+#include <mach/cvmx-pko.h>
+#include <mach/cvmx-pki.h>
+#include <mach/cvmx-config.h>
+#include <mach/cvmx-mdio.h>
+
+/** Maximum receive packet size (hardware default is 1536) */
+#define CONFIG_OCTEON_NETWORK_MRU 1536
+
+#define OCTEON_BOOTLOADER_NAMED_BLOCK_TMP_PREFIX "__tmp"
+
+/**
+ * Enables RX packet debugging if octeon_debug_rx_packets is set in the
+ * environment.
+ */
+#define DEBUG_RX_PACKET
+
+/**
+ * Enables TX packet debugging if octeon_debug_tx_packets is set in the
+ * environment.
+ */
+#define DEBUG_TX_PACKET
+
+/* Global flag indicating common hw has been set up */
+static int octeon_global_hw_inited;
+
+#if defined(DEBUG_RX_PACKET) || defined(DEBUG_TX_PACKET)
+static int packet_rx_debug;
+static int packet_tx_debug;
+#endif
+
+/* Make sure that we have enough buffers to keep prefetching blocks happy.
+ * Absolute minimum is probably about 200.
+ */
+#define NUM_PACKET_BUFFERS 1000
+
+#define PKO_SHUTDOWN_TIMEOUT_VAL 100
+
+/* Define the offsets from the base CSR */
+#define GMX_PRT_CFG 0x10
+
+#define GMX_RX_FRM_MAX 0x30
+#define GMX_RX_JABBER  0x38
+
+#define GMX_RX_ADR_CTL	  0x100
+#define GMX_RX_ADR_CAM_EN 0x108
+#define GMX_RX_ADR_CAM0	  0x180
+#define GMX_RX_ADR_CAM1	  0x188
+#define GMX_RX_ADR_CAM2	  0x190
+#define GMX_RX_ADR_CAM3	  0x198
+#define GMX_RX_ADR_CAM4	  0x1a0
+#define GMX_RX_ADR_CAM5	  0x1a8
+#define GMX_TX_OVR_BP	  0x4c8
+
+/**
+ * Set the hardware MAC address for a device
+ *
+ * @param dev	Ethernet device to set the MAC address for
+ *
+ * @return Zero on success
+ */
+static int cvm_oct_set_mac_address(struct udevice *dev)
+{
+	struct octeon_eth_info *priv = dev_get_priv(dev);
+	struct eth_pdata *pdata = dev_get_plat(dev);
+	cvmx_gmxx_prtx_cfg_t gmx_cfg;
+	cvmx_helper_interface_mode_t mode;
+	cvmx_gmxx_rxx_adr_ctl_t control;
+	u8 *ptr = (uint8_t *)pdata->enetaddr;
+	int interface = priv->interface;
+	int index = priv->index;
+	u64 mac = 0;
+	u64 gmx_reg;
+	int xipd_port;
+	int i;
+
+	for (i = 0; i < 6; i++)
+		mac = (mac << 8) | (u64)(ptr[i]);
+
+	debug("%s(%s (%pM))\n", __func__, dev->name, ptr);
+	mode = cvmx_helper_interface_get_mode(interface);
+
+	/* It's rather expensive to change the MAC address for BGX so we only
+	 * do this if it has changed or not been set previously.
+	 */
+	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+		xipd_port = cvmx_helper_get_ipd_port(interface, index);
+		if (priv->last_bgx_mac != mac || !priv->bgx_mac_set) {
+			cvmx_helper_bgx_set_mac(xipd_port, 1, 2, mac);
+			priv->last_bgx_mac = mac;
+			priv->bgx_mac_set = 1;
+		}
+		return 0;
+	}
+
+	if (mode == CVMX_HELPER_INTERFACE_MODE_AGL) {
+		gmx_reg = CVMX_AGL_GMX_RXX_INT_REG(0);
+	} else {
+		gmx_reg = CVMX_GMXX_RXX_INT_REG(index, interface);
+		csr_wr(CVMX_GMXX_SMACX(index, interface), mac);
+	}
+
+	/* Disable interface */
+	gmx_cfg.u64 = csr_rd(gmx_reg + GMX_PRT_CFG);
+	csr_wr(gmx_reg + GMX_PRT_CFG, gmx_cfg.u64 & ~1ull);
+	debug("%s: gmx reg: 0x%llx\n", __func__, gmx_reg);
+
+	csr_wr(gmx_reg + GMX_RX_ADR_CAM0, ptr[0]);
+	csr_wr(gmx_reg + GMX_RX_ADR_CAM1, ptr[1]);
+	csr_wr(gmx_reg + GMX_RX_ADR_CAM2, ptr[2]);
+	csr_wr(gmx_reg + GMX_RX_ADR_CAM3, ptr[3]);
+	csr_wr(gmx_reg + GMX_RX_ADR_CAM4, ptr[4]);
+	csr_wr(gmx_reg + GMX_RX_ADR_CAM5, ptr[5]);
+
+	control.u64 = 0;
+	control.s.bcst = 1;	/* Allow broadcast MAC addresses */
+	control.s.mcst = 1;	/* Force reject multicast packets */
+	control.s.cam_mode = 1; /* Filter packets based on the CAM */
+
+	csr_wr(gmx_reg + GMX_RX_ADR_CTL, control.u64);
+
+	csr_wr(gmx_reg + GMX_RX_ADR_CAM_EN, 1);
+
+	/* Return interface to previous enable state */
+	csr_wr(gmx_reg + GMX_PRT_CFG, gmx_cfg.u64);
+
+	return 0;
+}
+
+static void cvm_oct_fill_hw_memory(u64 pool, u64 size, u64 elements)
+{
+	static int alloc_count;
+	char tmp_name[64];
+	int ret;
+
+	debug("%s: pool: 0x%llx, size: 0xx%llx, count: 0x%llx\n",
+	      __func__, pool, size, elements);
+	sprintf(tmp_name, "%s_fpa_alloc_%d",
+		OCTEON_BOOTLOADER_NAMED_BLOCK_TMP_PREFIX, alloc_count++);
+	ret = cvmx_fpa_setup_pool(pool, tmp_name, NULL, size, elements);
+}
+
+/**
+ * Configure common hardware for all interfaces
+ */
+static void cvm_oct_configure_common_hw(void)
+{
+	int mru = env_get_ulong("octeon_mru", 0, CONFIG_OCTEON_NETWORK_MRU);
+	int packet_pool_size = CVMX_FPA_PACKET_POOL_SIZE;
+
+	if (mru > packet_pool_size)
+		packet_pool_size = (mru + CVMX_CACHE_LINE_SIZE - 1) &
+				   ~(CVMX_CACHE_LINE_SIZE - 1);
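+	/*
+	 * The expression above rounds the pool buffer size up to a whole
+	 * number of cache lines; assuming the usual 128-byte
+	 * CVMX_CACHE_LINE_SIZE, an octeon_mru of 1600 gives
+	 * (1600 + 127) & ~127 = 1664.
+	 */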
+
+	/* Setup the FPA */
+	cvmx_fpa_enable();
+
+	cvm_oct_fill_hw_memory(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
+			       NUM_PACKET_BUFFERS);
+#if CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL
+	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
+		cvm_oct_fill_hw_memory(CVMX_FPA_OUTPUT_BUFFER_POOL,
+				       CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+	}
+#endif
+	cvm_oct_fill_hw_memory(CVMX_FPA_PACKET_POOL, packet_pool_size,
+			       NUM_PACKET_BUFFERS);
+
+	cvmx_helper_initialize_packet_io_global();
+	cvmx_helper_initialize_packet_io_local();
+
+	/* The hardware defaults the MRU to 1536 bytes. Setting
+	 * CONFIG_OCTEON_NETWORK_MRU allows this to be overridden.
+	 */
+	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
+		struct cvmx_pki_global_config gbl_cfg;
+		int i;
+
+		cvmx_pki_read_global_config(0, &gbl_cfg);
+		for (i = 0; i < CVMX_PKI_NUM_FRAME_CHECK; i++)
+			gbl_cfg.frm_len[i].maxlen = mru;
+		cvmx_pki_write_global_config(0, &gbl_cfg);
+	}
+
+	/* Set POW get work timeout to maximum value */
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE) ||
+	    octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
+		csr_wr(CVMX_SSO_NW_TIM, 0x3ff);
+	else
+		csr_wr(CVMX_POW_NW_TIM, 0x3ff);
+}
+
+/**
+ * Enables Ethernet devices to allow packets to be transmitted and received.
+ * For example, this is activated when the DHCP command is issued.
+ *
+ * @param	dev	Ethernet device to initialize
+ *
+ * @return	1 for success
+ */
+int octeon_eth_init(struct udevice *dev)
+{
+	struct octeon_eth_info *priv = dev_get_priv(dev);
+
+	debug("%s(), dev_ptr: %p, dev: %s, port: %d\n", __func__, dev,
+	      dev->name, priv->port);
+
+	if (priv->initted_flag) {
+		debug("%s already initialized\n", dev->name);
+		return 1;
+	}
+
+	if (!octeon_global_hw_inited) {
+		debug("Initializing common hardware\n");
+		cvm_oct_configure_common_hw();
+	}
+
+	/* Ignore backpressure on RGMII ports */
+	if (!octeon_has_feature(OCTEON_FEATURE_BGX))
+		csr_wr(priv->gmx_base + GMX_TX_OVR_BP, 0xf << 8 | 0xf);
+
+	debug("%s: Setting MAC address\n", __func__);
+	cvm_oct_set_mac_address(dev);
+
+	if (!octeon_global_hw_inited) {
+		debug("Enabling packet input\n");
+		cvmx_helper_ipd_and_packet_input_enable();
+		octeon_global_hw_inited = 1;
+
+		/* Connect, configure and start the PHY, if the device is
+		 * connected to one. If not, then it's most likely an SFP
+		 * port, which does not have such a PHY setup here.
+		 */
+		if (priv->mdio_dev) {
+			priv->phydev = dm_eth_phy_connect(dev);
+			phy_config(priv->phydev);
+			phy_startup(priv->phydev);
+		}
+	}
+	priv->enabled = 0;
+	priv->initted_flag = 1;
+
+	debug("%s exiting successfully\n", __func__);
+	return 1;
+}
+
+/**
+ * Initializes the specified interface and port
+ *
+ * @param	interface	interface to initialize
+ * @param	index		port index on interface
+ * @param	port		ipd port number
+ * @param	if_mode		interface mode
+ *
+ * @return	0 for success, -1 if out of memory, 1 if port is invalid
+ */
+static int octeon_eth_initialize(struct udevice *dev, int interface,
+				 int index, int port,
+				 cvmx_helper_interface_mode_t if_mode)
+{
+	struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+	int eth;
+
+	eth = cvmx_helper_get_port_fdt_node_offset(interface, index);
+	if (eth <= 0) {
+		debug("ERROR: No fdt node for interface %d, index %d\n",
+		      interface, index);
+		return 1;
+	}
+
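+	/* The 10G-and-faster interface modes use MDIO clause 45 addressing
+	 * for their PHYs; all other modes use clause 22.
+	 */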
+	oct_eth_info->is_c45 = (if_mode == CVMX_HELPER_INTERFACE_MODE_XAUI) ||
+			       (if_mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) ||
+			       (if_mode == CVMX_HELPER_INTERFACE_MODE_XFI) ||
+			       (if_mode == CVMX_HELPER_INTERFACE_MODE_XLAUI) ||
+			       (if_mode == CVMX_HELPER_INTERFACE_MODE_10G_KR) ||
+			       (if_mode == CVMX_HELPER_INTERFACE_MODE_40G_KR4);
+	oct_eth_info->port = port;
+	oct_eth_info->index = index;
+	oct_eth_info->interface = interface;
+	oct_eth_info->initted_flag = 0;
+	/* This is guaranteed to force the link state to be printed out */
+	oct_eth_info->link_state = 0xffffffffffffffffULL;
+	debug("Setting up port: %d, int: %d, index: %d, device: octeth%d\n",
+	      oct_eth_info->port, oct_eth_info->interface, oct_eth_info->index,
+	      dev_seq(dev));
+	if (if_mode == CVMX_HELPER_INTERFACE_MODE_AGL) {
+		oct_eth_info->gmx_base = CVMX_AGL_GMX_RXX_INT_REG(0);
+	} else {
+		if (!octeon_has_feature(OCTEON_FEATURE_BGX))
+			oct_eth_info->gmx_base =
+				CVMX_GMXX_RXX_INT_REG(index, interface);
+	}
+
+	return 0;
+}
+
+/**
+ * @INTERNAL
+ * Converts a BGX address to the node, interface and port number
+ *
+ * @param bgx_addr	Address of CSR register
+ *
+ * @return node and interface number; node is -1 for an invalid address.
+ */
+static struct cvmx_xiface __cvmx_bgx_reg_addr_to_xiface(u64 bgx_addr)
+{
+	struct cvmx_xiface xi = { -1, -1 };
+
+	xi.node = cvmx_csr_addr_to_node(bgx_addr);
+	bgx_addr = cvmx_csr_addr_strip_node(bgx_addr);
+	if ((bgx_addr & 0xFFFFFFFFF0000000) != 0x00011800E0000000) {
+		debug("%s: Invalid BGX address 0x%llx\n", __func__,
+		      (unsigned long long)bgx_addr);
+		xi.node = -1;
+		return xi;
+	}
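+	/* The BGX instance number is encoded in bits 27:24 of the CSR
+	 * address, e.g. 0x11800e1000000 decodes to interface 1.
+	 */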
+	xi.interface = (bgx_addr >> 24) & 0x0F;
+
+	return xi;
+}
+
+static int octeon_nic_probe(struct udevice *dev)
+{
+	struct octeon_eth_info *info = dev_get_priv(dev);
+	struct ofnode_phandle_args phandle;
+	struct cvmx_xiface xi;
+	ofnode node, mdio_node;
+	int ipd_port;
+	int intf;
+	int ret;
+
+	/* The empty stub is to keep cvmx_user_app_init() happy. */
+	cvmx_npi_max_pknds = 1;
+	__cvmx_helper_init_port_valid();
+
+	xi = __cvmx_bgx_reg_addr_to_xiface(dev_read_addr(dev));
+	intf = xi.interface;
+	debug("%s: Found BGX node %d, interface %d\n", __func__, xi.node, intf);
+
+	ipd_port = cvmx_helper_get_ipd_port(intf, xi.node);
+	ret = octeon_eth_initialize(dev, intf, xi.node, ipd_port,
+				    cvmx_helper_interface_get_mode(intf));
+
+	/* Move to subnode, as this includes the "phy-handle" */
+	node = dev_read_first_subnode(dev);
+
+	/* If an SFP module slot is connected, no MDIO bus is probed */
+	ret = ofnode_parse_phandle_with_args(node, "sfp-slot", NULL, 0, 0,
+					     &phandle);
+	if (!ret) {
+		dev_dbg(dev, "sfp-slot found, not probing for MDIO\n");
+		return 0;
+	}
+
+	/* Continue with MDIO probing */
+	ret = ofnode_parse_phandle_with_args(node, "phy-handle", NULL, 0, 0,
+					     &phandle);
+	if (ret) {
+		dev_err(dev, "phy-handle not found in subnode\n");
+		return -ENODEV;
+	}
+
+	/* Get MDIO node */
+	mdio_node = ofnode_get_parent(phandle.node);
+	ret = uclass_get_device_by_ofnode(UCLASS_MDIO, mdio_node,
+					  &info->mdio_dev);
+	if (ret) {
+		dev_err(dev, "mdio_dev not found\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * Sets the hardware MAC address of the Ethernet device
+ *
+ * @param dev - Ethernet device
+ *
+ * @return 0 for success
+ */
+int octeon_eth_write_hwaddr(struct udevice *dev)
+{
+	struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+	struct eth_pdata *pdata = dev_get_plat(dev);
+
+	/* Skip if the interface isn't yet enabled */
+	if (!oct_eth_info->enabled) {
+		debug("%s: Interface not enabled, not setting MAC address\n",
+		      __func__);
+		return 0;
+	}
+	debug("%s: Setting %s address to %02x:%02x:%02x:%02x:%02x:%02x\n",
+	      __func__, dev->name, pdata->enetaddr[0], pdata->enetaddr[1],
+	      pdata->enetaddr[2], pdata->enetaddr[3], pdata->enetaddr[4],
+	      pdata->enetaddr[5]);
+	return cvm_oct_set_mac_address(dev);
+}
+
+/**
+ * Enables and disables the XCV RGMII interface
+ *
+ * @param	interface	Interface number
+ * @param	index		Port index (should be 0 for RGMII)
+ * @param	enable		True to enable it, false to disable it
+ */
+static void octeon_bgx_xcv_rgmii_enable(int interface, int index, bool enable)
+{
+	union cvmx_xcv_reset xcv_reset;
+
+	debug("%s(%d, %d, %sable)\n", __func__, interface, index,
+	      enable ? "en" : "dis");
+	xcv_reset.u64 = csr_rd(CVMX_XCV_RESET);
+	xcv_reset.s.rx_pkt_rst_n = enable ? 1 : 0;
+	csr_wr(CVMX_XCV_RESET, xcv_reset.u64);
+}
+
+/**
+ * Enables an SGMII interface
+ *
+ * @param dev - Ethernet device to initialize
+ */
+void octeon_eth_sgmii_enable(struct udevice *dev)
+{
+	struct octeon_eth_info *oct_eth_info;
+	cvmx_gmxx_prtx_cfg_t gmx_cfg;
+	int index, interface;
+	cvmx_helper_interface_mode_t if_mode;
+
+	oct_eth_info = dev_get_priv(dev);
+	interface = oct_eth_info->interface;
+	index = oct_eth_info->index;
+
+	debug("%s(%s) (%d.%d)\n", __func__, dev->name, interface, index);
+	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+		cvmx_bgxx_cmrx_config_t cmr_config;
+
+		cmr_config.u64 =
+			csr_rd(CVMX_BGXX_CMRX_CONFIG(index, interface));
+		cmr_config.s.enable = 1;
+		cmr_config.s.data_pkt_tx_en = 1;
+		cmr_config.s.data_pkt_rx_en = 1;
+		csr_wr(CVMX_BGXX_CMRX_CONFIG(index, interface), cmr_config.u64);
+		mdelay(100);
+		if (cvmx_helper_bgx_is_rgmii(interface, index))
+			octeon_bgx_xcv_rgmii_enable(interface, index, true);
+	} else {
+		if_mode = cvmx_helper_interface_get_mode(interface);
+		/* Normal operating mode. */
+
+		if (if_mode == CVMX_HELPER_INTERFACE_MODE_SGMII ||
+		    if_mode == CVMX_HELPER_INTERFACE_MODE_QSGMII) {
+			cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
+
+			debug("  if mode: (Q)SGMII\n");
+			pcsx_miscx_ctl_reg.u64 = csr_rd(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+			pcsx_miscx_ctl_reg.s.gmxeno = 0;
+			csr_wr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+			       pcsx_miscx_ctl_reg.u64);
+		} else if (if_mode != CVMX_HELPER_INTERFACE_MODE_AGL) {
+			cvmx_pcsxx_misc_ctl_reg_t pcsxx_misc_ctl_reg;
+
+			debug("  if mode: AGM\n");
+			pcsxx_misc_ctl_reg.u64 =
+				csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
+			pcsxx_misc_ctl_reg.s.gmxeno = 0;
+			csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface),
+			       pcsxx_misc_ctl_reg.u64);
+		}
+
+		gmx_cfg.u64 = csr_rd(oct_eth_info->gmx_base + GMX_PRT_CFG);
+		gmx_cfg.s.en = 1;
+		csr_wr(oct_eth_info->gmx_base + GMX_PRT_CFG, gmx_cfg.u64);
+		gmx_cfg.u64 = csr_rd(oct_eth_info->gmx_base + GMX_PRT_CFG);
+	}
+}
+
+/**
+ * Enables an Ethernet interface
+ *
+ * @param dev - Ethernet device to enable
+ */
+void octeon_eth_enable(struct udevice *dev)
+{
+	struct octeon_eth_info *oct_eth_info;
+	u64 tmp;
+	int interface;
+	cvmx_helper_interface_mode_t if_mode;
+
+	oct_eth_info = dev_get_priv(dev);
+	interface = oct_eth_info->interface;
+	if_mode = cvmx_helper_interface_get_mode(interface);
+
+	switch (if_mode) {
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		debug("  rgmii/gmii mode\n");
+		tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(interface));
+		tmp |= (1ull << (oct_eth_info->port & 0x3));
+		csr_wr(CVMX_ASXX_RX_PRT_EN(interface), tmp);
+		tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(interface));
+		tmp |= (1ull << (oct_eth_info->port & 0x3));
+		csr_wr(CVMX_ASXX_TX_PRT_EN(interface), tmp);
+		octeon_eth_write_hwaddr(dev);
+		break;
+
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XFI:
+	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+	case CVMX_HELPER_INTERFACE_MODE_MIXED:
+	case CVMX_HELPER_INTERFACE_MODE_AGL:
+		debug("  SGMII/XAUI/etc.\n");
+		octeon_eth_sgmii_enable(dev);
+		octeon_eth_write_hwaddr(dev);
+		break;
+
+	default:
+		break;
+	}
+}
+
+void octeon_phy_port_check(struct udevice *dev)
+{
+	struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+	struct phy_device *phydev = oct_eth_info->phydev;
+
+	if (oct_eth_info->phy_port_check)
+		oct_eth_info->phy_port_check(phydev);
+}
+
+/**
+ * Configure the RGMII port for the negotiated speed
+ *
+ * @param dev    U-Boot device for the RGMII port
+ */
+static void cvm_oct_configure_rgmii_speed(struct udevice *dev)
+{
+	struct octeon_eth_info *priv = dev_get_priv(dev);
+	int port = priv->port;
+	cvmx_helper_link_info_t link_state = cvmx_helper_link_get(port);
+
+	/* If the port is down, some PHYs need a module/port check first */
+	if (!link_state.s.link_up)
+		octeon_phy_port_check(dev);
+
+	if (link_state.u64 != priv->link_state) {
+		cvmx_helper_interface_mode_t mode;
+
+		octeon_phy_port_check(dev);
+
+		debug("%s(%s): Link state changed\n", __func__, dev->name);
+		printf("%s: ", dev->name);
+		if (!link_state.s.link_up) {
+			puts("Down ");
+		} else {
+			printf("Up %d Mbps ", link_state.s.speed);
+			if (link_state.s.full_duplex)
+				puts("Full duplex ");
+			else
+				puts("Half duplex ");
+		}
+		mode = cvmx_helper_interface_get_mode(priv->interface);
+		printf("(port %2d) (%s)\n", port,
+		       cvmx_helper_interface_mode_to_string(mode));
+		debug("%s: Setting link state\n", __func__);
+		cvmx_helper_link_set(priv->port, link_state);
+		priv->link_state = link_state.u64;
+	}
+}
+
+#if defined(DEBUG_TX_PACKET) || defined(DEBUG_RX_PACKET)
+static void print_mac(const char *label, const uint8_t *mac_addr)
+{
+	printf("%s: %02x:%02x:%02x:%02x:%02x:%02x", label, mac_addr[0],
+	       mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
+}
+
+static void print_ip(const void *packet)
+{
+	u8 *p = (uint8_t *)packet;
+	u16 length;
+	u8 hdr_len;
+
+	puts("IP Header:\n");
+	if ((p[0] & 0xF0) != 0x40) {
+		printf("Invalid IP version %d\n", *p >> 4);
+		return;
+	}
+	hdr_len = *p & 0x0F;
+	if (hdr_len < 5) {
+		printf("Invalid IP header length %d\n", hdr_len);
+		return;
+	}
+	printf("  Version: 4, Header length: %d\n", hdr_len);
+	length = (p[2] << 8) | p[3];
+	printf("  TOS: 0x%02x, length: %d\n", p[1], length);
+	printf("  ID: %d, %s%s%s fragment offset: %d\n", (p[4] << 8) | p[5],
+	       p[6] & 0x80 ? "congested, " : "", p[6] & 0x40 ? "DF, " : "",
+	       p[6] & 0x20 ? "MF, " : "", ((p[6] & 0x1F) << 8) | p[7]);
+	printf("  TTL: %d, Protocol: %d, Header Checksum: 0x%x\n", p[8], p[9],
+	       (p[10] << 8) | p[11]);
+	printf("  Source IP: %d.%d.%d.%d\n  Destination IP: %d.%d.%d.%d\n",
+	       p[12], p[13], p[14], p[15], p[16], p[17], p[18], p[19]);
+	if (p[9] == 17 || p[9] == 6)
+		printf("  Source port: %u, Destination port: %u\n",
+		       (p[20] << 8) | p[21], (p[22] << 8) | p[23]);
+	puts("\n");
+}
+
+/**
+ * Prints out a packet for debugging purposes
+ *
+ * @param[in]	packet - pointer to packet data
+ * @param	length - length of packet in bytes
+ */
+static void print_packet(const void *packet, int length)
+{
+	int i, j;
+	const unsigned char *up = packet;
+	u16 type = (up[12] << 8 | up[13]);
+	int start = 14;
+
+	print_mac("DMAC", &up[0]);
+	puts("    ");
+	print_mac("SMAC", &up[6]);
+	printf("    TYPE: %04x\n", type);
+
+	if (type == 0x0800)
+		print_ip(&up[start]);
+
+	for (i = start; (i + 16) < length; i += 16) {
+		printf("%04x ", i);
+		for (j = 0; j < 16; ++j)
+			printf("%02x ", up[i + j]);
+
+		printf("    ");
+		for (j = 0; j < 16; ++j)
+			printf("%c",
+			       ((up[i + j] >= ' ') && (up[i + j] <= '~')) ?
+				       up[i + j] :
+					     '.');
+		printf("\n");
+	}
+	printf("%04x ", i);
+	for (j = 0; i + j < length; ++j)
+		printf("%02x ", up[i + j]);
+
+	for (; j < 16; ++j)
+		printf("   ");
+
+	printf("    ");
+	for (j = 0; i + j < length; ++j)
+		printf("%c", ((up[i + j] >= ' ') && (up[i + j] <= '~')) ?
+				     up[i + j] :
+					   '.');
+
+	printf("\n");
+}
+#endif
+
+/**
+ * String representation of error codes.
+ */
+static const char * const rx_error_codes[] = {
+	"OK",
+	"partial",
+	"jabber",
+	"overrun",
+	"oversize",
+	"alignment",
+	"fragment",
+	"fcs",
+	"undersize",
+	"extend",
+	"length mismatch",
+	"rgmii rx",
+	"skip error",
+	"nibble error (studder)",
+	"(undefined)",
+	"(undefined)",
+	"SPI 4.2 FCS",
+	"skip",
+	"L2 malformed",
+};
+
+/**
+ * Called to receive a packet
+ *
+ * @param dev - device to receive on
+ * @param flags - flags (unused)
+ * @param packetp - returns a pointer to the received packet data
+ *
+ * @return - length of packet, or 0 if no packet was received
+ *
+ * This function polls for a received packet and hands it back via
+ * packetp; the U-Boot network stack then processes it.
+ */
+static int nic_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+	cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+	struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+	cvmx_buf_ptr_t buf_ptr;
+	void *packet_data;
+	int length;
+	int error_code;
+
+	if (!oct_eth_info->enabled) {
+		oct_eth_info->enabled = 1;
+		debug("%s: Enabling interface %s\n", __func__, dev->name);
+		octeon_eth_enable(dev);
+	}
+
+	if (!work) {
+		/*
+		 * Sometimes the link is not up yet. Return here in this
+		 * case, this function will be called again later.
+		 */
+		return 0;
+	}
+
+	error_code = cvmx_wqe_get_rcv_err(work);
+	if (error_code) {
+		/* Work has error, so drop */
+		cvmx_helper_free_packet_data(work);
+		cvmx_wqe_free(work);
+		if (error_code < ARRAY_SIZE(rx_error_codes) &&
+		    !octeon_has_feature(OCTEON_FEATURE_BGX))
+			printf("Receive error (code %d: %s), dropping\n",
+			       error_code, rx_error_codes[error_code]);
+		else
+			printf("Receive error (code %d (unknown), dropping\n",
+			       error_code);
+		return 0;
+	}
+	if (cvmx_wqe_get_bufs(work) != 1) {
+		/* can only support single-buffer packets */
+		printf("Abnormal packet received in %u bufs, dropping\n",
+		       cvmx_wqe_get_bufs(work));
+		length = cvmx_wqe_get_len(work);
+		buf_ptr = cvmx_wqe_get_packet_ptr(work);
+		packet_data = cvmx_phys_to_ptr(buf_ptr.s.addr);
+#if defined(DEBUG_TX_PACKET) || defined(DEBUG_RX_PACKET)
+		print_packet(packet_data, length);
+#endif
+		cvmx_helper_free_packet_data(work);
+		cvmx_wqe_free(work);
+		return 0;
+	}
+
+	buf_ptr = cvmx_wqe_get_packet_ptr(work);
+	packet_data = cvmx_phys_to_ptr(buf_ptr.s.addr);
+	length = cvmx_wqe_get_len(work);
+
+	oct_eth_info->packets_received++;
+	debug("############# got work: %p, len: %d, packet_ptr: %p\n", work,
+	      length, packet_data);
+#if defined(DEBUG_RX_PACKET)
+	if (packet_rx_debug) {
+		printf("\nRX packet: interface: %d, index: %d\n",
+		       oct_eth_info->interface, oct_eth_info->index);
+		print_packet(packet_data, length);
+	}
+#endif
+	*packetp = (uchar *)packet_data;
+
+	/* Save work for free_pkt() */
+	oct_eth_info->work = work;
+
+	/* The WQE and packet data are freed later in nic_free_pkt() */
+	return length;
+}
+
+static int nic_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
+{
+	struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+	cvmx_wqe_t *work = oct_eth_info->work;
+
+	if (!work)
+		return 0;
+
+	cvmx_helper_free_packet_data(work);
+	cvmx_wqe_free(work);
+	oct_eth_info->work = NULL;
+
+	return 0;
+}
+
+/**
+ * Packet transmit
+ *
+ * @param dev    Ethernet device
+ * @param packet Packet data to send
+ * @param len    Packet length in bytes
+ * @return Always returns zero
+ */
+static int cvm_oct_xmit(struct udevice *dev, void *packet, int len)
+{
+	struct octeon_eth_info *priv = dev_get_priv(dev);
+	int queue = cvmx_pko_get_base_queue(priv->port);
+	cvmx_pko_command_word0_t pko_command;
+	cvmx_buf_ptr_t hw_buffer;
+	int rv;
+
+	debug("%s: addr: %p, len: %d\n", __func__, packet, len);
+
+	hw_buffer.u64 = 0;
+	hw_buffer.s.addr = cvmx_ptr_to_phys(packet);
+	hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
+	hw_buffer.s.size = len;
+	hw_buffer.s.back = 0;
+
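+	/* With dontfree = 0 below, PKO returns the FPA buffer to its pool
+	 * after transmission, so the caller must not free it.
+	 */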
+	/* Build the PKO command */
+	pko_command.u64 = 0;
+	pko_command.s.subone0 = 1;
+	pko_command.s.dontfree = 0;
+	pko_command.s.segs = 1;
+	pko_command.s.total_bytes = len;
+	/* Send the packet to the output queue */
+
+	debug("%s: port: %d, queue: %d\n", __func__, priv->port, queue);
+	cvmx_pko_send_packet_prepare(priv->port, queue, 0);
+	rv = cvmx_pko_send_packet_finish(priv->port, queue, pko_command,
+					 hw_buffer, 0);
+	if (rv)
+		printf("Failed to send the packet rv=%d\n", rv);
+
+	return 0;
+}
+
+static int nic_xmit(struct udevice *dev, void *pkt, int pkt_len)
+{
+	struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+	void *fpa_buf = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
+
+	if (!oct_eth_info->enabled) {
+		oct_eth_info->enabled = 1;
+		octeon_eth_enable(dev);
+	}
+
+	/* The caller's buffer is not an FPA buffer, so copy the packet
+	 * into one before handing it to TX.
+	 */
+
+	if (oct_eth_info->packets_sent == 0 &&
+	    !octeon_has_feature(OCTEON_FEATURE_BGX))
+		cvm_oct_configure_rgmii_speed(dev);
+
+	if (!fpa_buf) {
+		printf("ERROR allocating buffer for packet!\n");
+		return -1;
+	}
+
+	memcpy(fpa_buf, pkt, pkt_len);
+#ifdef DEBUG_TX_PACKET
+	if (packet_tx_debug) {
+		printf("\nTX packet: interface: %d, index: %d\n",
+		       oct_eth_info->interface, oct_eth_info->index);
+		print_packet(pkt, pkt_len);
+	}
+#endif
+	cvm_oct_xmit(dev, fpa_buf, pkt_len);
+	oct_eth_info->packets_sent++;
+
+	return 0;
+}
+
+int nic_open(struct udevice *dev)
+{
+	octeon_eth_init(dev);
+
+	return 0;
+}
+
+static void octeon_eth_halt_bgx(struct udevice *dev,
+				cvmx_helper_interface_mode_t mode)
+{
+	union cvmx_bgxx_cmrx_config cmr_config;
+	union cvmx_bgxx_cmr_rx_adrx_cam cmr_cam;
+	struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+	int index = oct_eth_info->index;
+	int xiface = oct_eth_info->interface;
+	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
+
+	debug("%s(%s(%d.%d), %d)\n", __func__, dev->name, xiface, index, mode);
+
+	/* For RGMII we need to properly shut down the XCV interface */
+	if (cvmx_helper_bgx_is_rgmii(xiface, index)) {
+		debug("  Shut down XCV RGMII\n");
+		octeon_bgx_xcv_rgmii_enable(xi.interface, index, false);
+	} else {
+		cmr_config.u64 = csr_rd_node(xi.node,
+					     CVMX_BGXX_CMRX_CONFIG(index, xi.interface));
+		cmr_config.s.data_pkt_tx_en = 0;
+		cmr_config.s.data_pkt_rx_en = 0;
+		csr_wr_node(xi.node, CVMX_BGXX_CMRX_CONFIG(index, xi.interface),
+			    cmr_config.u64);
+
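+		/* Each LMAC owns eight RX CAM entries, so entry index * 8
+		 * is the first one for this LMAC; disabling it stops
+		 * unicast address matching.
+		 */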
+		cmr_cam.u64 = csr_rd_node(xi.node,
+					  CVMX_BGXX_CMR_RX_ADRX_CAM(index * 8, xi.interface));
+		cmr_cam.s.en = 0;
+		csr_wr_node(xi.node,
+			    CVMX_BGXX_CMR_RX_ADRX_CAM(index * 8, xi.interface),
+			    cmr_cam.u64);
+		oct_eth_info->last_bgx_mac = 0;
+		oct_eth_info->bgx_mac_set = 0;
+	}
+}
+
+/**
+ * Halts the specified Ethernet interface, preventing it from receiving any
+ * packets.
+ *
+ * @param dev - Ethernet device to shut down.
+ */
+void octeon_eth_halt(struct udevice *dev)
+{
+	struct octeon_eth_info *oct_eth_info = dev_get_priv(dev);
+	int index = oct_eth_info->index;
+	int interface = oct_eth_info->interface;
+	cvmx_helper_interface_mode_t mode;
+	union cvmx_gmxx_rxx_adr_ctl adr_ctl;
+	cvmx_gmxx_prtx_cfg_t gmx_cfg;
+	u64 tmp;
+
+	debug("%s(%s): Halting\n", __func__, dev->name);
+
+	oct_eth_info->enabled = 0;
+
+	mode = cvmx_helper_interface_get_mode(oct_eth_info->interface);
+	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
+		octeon_eth_halt_bgx(dev, mode);
+		return;
+	}
+
+	/* Stop SCC */
+	/* Disable reception on this port at the GMX block */
+	switch (mode) {
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		debug("  RGMII/GMII\n");
+		tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(oct_eth_info->interface));
+		tmp &= ~(1ull << index);
+		/* Disable the RGMII RX ports */
+		csr_wr(CVMX_ASXX_RX_PRT_EN(oct_eth_info->interface), tmp);
+		tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(oct_eth_info->interface));
+		tmp &= ~(1ull << index);
+		/* Disable the RGMII TX ports */
+		csr_wr(CVMX_ASXX_TX_PRT_EN(oct_eth_info->interface), tmp);
+		/* fall through */
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
+	case CVMX_HELPER_INTERFACE_MODE_XFI:
+	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
+	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
+	case CVMX_HELPER_INTERFACE_MODE_MIXED:
+	case CVMX_HELPER_INTERFACE_MODE_AGL:
+		/* Disable MAC filtering */
+		gmx_cfg.u64 = csr_rd(oct_eth_info->gmx_base + GMX_PRT_CFG);
+		csr_wr(oct_eth_info->gmx_base + GMX_PRT_CFG,
+		       gmx_cfg.u64 & ~1ull);
+		adr_ctl.u64 = 0;
+		adr_ctl.s.bcst = 1; /* Reject broadcast */
+		csr_wr(oct_eth_info->gmx_base + GMX_RX_ADR_CTL, adr_ctl.u64);
+		csr_wr(oct_eth_info->gmx_base + GMX_RX_ADR_CAM_EN, 0);
+		csr_wr(oct_eth_info->gmx_base + GMX_PRT_CFG, gmx_cfg.u64);
+		break;
+	default:
+		printf("%s: Unknown mode %d for interface 0x%x:%d\n", __func__,
+		       mode, interface, index);
+		break;
+	}
+}
+
+void nic_stop(struct udevice *dev)
+{
+	octeon_eth_halt(dev);
+}
+
+int nic_write_hwaddr(struct udevice *dev)
+{
+	cvm_oct_set_mac_address(dev);
+
+	return 0;
+}
+
+static const struct eth_ops octeon_nic_ops = {
+	.start = nic_open,
+	.stop = nic_stop,
+	.send = nic_xmit,
+	.recv = nic_recv,
+	.free_pkt = nic_free_pkt,
+	.write_hwaddr = nic_write_hwaddr,
+};
+
+static const struct udevice_id octeon_nic_ids[] = {
+	{ .compatible = "cavium,octeon-7890-bgx" },
+	{}
+};
+
+U_BOOT_DRIVER(octeon_nic) = {
+	.name = "octeon_nic",
+	.id = UCLASS_ETH,
+	.probe = octeon_nic_probe,
+	.of_match = octeon_nic_ids,
+	.ops = &octeon_nic_ops,
+	.priv_auto = sizeof(struct octeon_eth_info),
+};
diff --git a/drivers/net/octeon/octeon_mdio.c b/drivers/net/octeon/octeon_mdio.c
new file mode 100644
index 0000000..34ee809
--- /dev/null
+++ b/drivers/net/octeon/octeon_mdio.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018-2022 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <fdt_support.h>
+#include <log.h>
+#include <miiphy.h>
+#include <net.h>
+#include <linux/delay.h>
+
+#include <mach/cvmx-regs.h>
+#include <mach/cvmx-csr.h>
+#include <mach/octeon-model.h>
+#include <mach/octeon-feature.h>
+#include <mach/cvmx-smix-defs.h>
+#include <mach/cvmx-config.h>
+#include <mach/cvmx-helper.h>
+#include <mach/cvmx-helper-board.h>
+#include <mach/cvmx-mdio.h>
+
+#define CVMX_SMI_DRV_CTL   0x0001180000001828ull
+#define DEFAULT_MDIO_SPEED 2500000 /** 2.5 MHz default speed */
+
+/**
+ * cvmx_smi_drv_ctl
+ *
+ * Enables the SMI interface.
+ *
+ */
+union cvmx_smi_drv_ctl {
+	u64 u64;
+	struct cvmx_smi_drv_ctl_s {
+		u64 reserved_14_63 : 50;
+		u64 pctl : 6;
+		u64 reserved_6_7 : 2;
+		u64 nctl : 6;
+	} s;
+};
+
+struct octeon_mdiobus {
+	struct mii_dev *mii_dev;
+	/**
+	 * The local bus is in the lower 8 bits, followed by the remote bus in
+	 * the top 8 bits.  Bit 16 will be set if the bus is non-local.
+	 */
+	u32 bus_id;
+
+	int node;     /** Node number */
+	int speed;    /** Bus speed, normally 2.5 MHz */
+	int fdt_node; /** Node in FDT */
+	bool local;   /** true if local MDIO bus */
+};
+
+static int octeon_mdio_read(struct udevice *mdio_dev, int phy_addr,
+			    int dev_addr, int reg_addr)
+{
+	struct octeon_mdiobus *p = dev_get_priv(mdio_dev);
+	struct mii_dev *dev = p->mii_dev;
+	int value;
+
+	debug("%s(0x%p(%s): bus_id=%d phy_addr=%d, 0x%x, 0x%x) - ", __func__,
+	      dev, dev->name, p->bus_id, phy_addr, dev_addr, reg_addr);
+	if (IS_ENABLED(CONFIG_PHYLIB_10G) && dev_addr != MDIO_DEVAD_NONE) {
+		debug("clause 45 mode\n");
+		value = cvmx_mdio_45_read(p->bus_id & 0xff, phy_addr, dev_addr,
+					  reg_addr);
+	} else {
+		value = cvmx_mdio_read(p->bus_id & 0xff, phy_addr, reg_addr);
+	}
+
+	debug("Return value: 0x%x\n", value);
+	return value;
+}
+
+static int octeon_mdio_write(struct udevice *mdio_dev, int phy_addr,
+			     int dev_addr, int reg_addr, u16 value)
+{
+	struct octeon_mdiobus *p = dev_get_priv(mdio_dev);
+	struct mii_dev *dev = p->mii_dev;
+
+	debug("%s(0x%p(%s): bus_id=%d phy_addr=%d, 0x%x, 0x%x, 0x%x)\n",
+	      __func__, dev, dev->name, p->bus_id, phy_addr, dev_addr, reg_addr,
+	      value);
+
+	if (IS_ENABLED(CONFIG_PHYLIB_10G) && dev_addr != MDIO_DEVAD_NONE) {
+		debug("clause 45 mode\n");
+		return cvmx_mdio_45_write(p->bus_id & 0xff, phy_addr, dev_addr,
+					  reg_addr, value);
+	}
+
+	return cvmx_mdio_write(p->bus_id & 0xff, phy_addr, reg_addr, value);
+}
+
+/**
+ * Converts a MDIO register address to a bus number
+ *
+ * @param reg_addr	MDIO base register address
+ *
+ * @return	MDIO bus number or -1 if invalid address
+ */
+int octeon_mdio_reg_addr_to_bus(u64 reg_addr)
+{
+	int bus_base;
+	int bus;
+
+	/* Adjust the bus number based on the node number */
+	bus_base = cvmx_csr_addr_to_node(reg_addr) * 4;
+	reg_addr = cvmx_csr_addr_strip_node(reg_addr);
+
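+	/* Each node provides up to four SMI buses, so e.g. SMI1 on node 1
+	 * (register base 0x1180000003880) maps to bus 5.
+	 */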
+	switch (reg_addr) {
+	case 0x1180000001800:
+	case 0x1180000003800: /* 68XX/78XX address */
+		bus = 0;
+		break;
+	case 0x1180000001900:
+	case 0x1180000003880:
+		bus = 1;
+		break;
+	case 0x1180000003900:
+		bus = 2;
+		break;
+	case 0x1180000003980:
+		bus = 3;
+		break;
+	default:
+		printf("%s: Unknown register address 0x%llx\n", __func__,
+		       reg_addr);
+		return -1;
+	}
+	bus += bus_base;
+	debug("%s: address 0x%llx is bus %d\n", __func__, reg_addr, bus);
+	return bus;
+}
+
+static int octeon_mdio_probe(struct udevice *dev)
+{
+	struct octeon_mdiobus *p = dev_get_priv(dev);
+	union cvmx_smi_drv_ctl drv_ctl;
+	cvmx_smix_clk_t smi_clk;
+	u64 mdio_addr;
+	int bus;
+	u64 sclock;
+	u32 sample_dly;
+	u64 denom;
+
+	mdio_addr = dev_read_addr(dev);
+	debug("%s: Translated address: 0x%llx\n", __func__, mdio_addr);
+	bus = octeon_mdio_reg_addr_to_bus(mdio_addr);
+	p->bus_id = bus;
+	debug("%s: bus: %d\n", __func__, bus);
+
+	drv_ctl.u64 = csr_rd(CVMX_SMI_DRV_CTL);
+	drv_ctl.s.pctl = dev_read_u32_default(dev, "cavium,pctl-drive-strength",
+					      drv_ctl.s.pctl);
+	drv_ctl.s.nctl = dev_read_u32_default(dev, "cavium,nctl-drive-strength",
+					      drv_ctl.s.nctl);
+	debug("%s: Set MDIO PCTL drive strength to 0x%x and NCTL drive strength to 0x%x\n",
+	      __func__, drv_ctl.s.pctl, drv_ctl.s.nctl);
+	csr_wr(CVMX_SMI_DRV_CTL, drv_ctl.u64);
+
+	/* Set the bus speed, default is 2.5MHz */
+	p->speed = dev_read_u32_default(dev, "cavium,max-speed",
+					DEFAULT_MDIO_SPEED);
+	sclock = gd->bus_clk;
+	smi_clk.u64 = csr_rd(CVMX_SMIX_CLK(bus & 3));
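+	/* PHASE is half the MDC period in SCLK cycles; e.g. an 800 MHz
+	 * SCLK with the default 2.5 MHz MDC gives a phase of 160.
+	 */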
+	smi_clk.s.phase = sclock / (p->speed * 2);
+
+	/* Allow sample delay to be specified */
+	sample_dly = dev_read_u32_default(dev, "cavium,sample-delay", 0);
+	/* Only change the sample delay if it is set, otherwise use
+	 * the default value of 2.
+	 */
+	if (sample_dly) {
+		u32 sample;
+
+		denom = (sclock * 1000ULL) / sample_dly;
+		debug("%s: sclock: %llu, sample_dly: %u ps, denom: %llu\n",
+		      __func__, sclock, sample_dly, denom);
+		sample = (sclock + denom - 1) / denom;
+		debug("%s: sample: %u\n", __func__, smi_clk.s.sample);
+		if (sample < 2) {
+			printf("%s: warning: cavium,sample-delay %u ps is too small in device tree for %s\n",
+			       __func__, sample_dly, dev->name);
+			sample = 2;
+		}
+		if (sample > (2 * smi_clk.s.phase - 3)) {
+			printf("%s: warning: cavium,sample-delay %u ps is too large in device tree for %s\n",
+			       __func__, sample_dly, dev->name);
+			sample = 2 * smi_clk.s.phase - 3;
+		}
+		smi_clk.s.sample = sample & 0xf;
+		smi_clk.s.sample_hi = (sample >> 4) & 0xf;
+		debug("%s(%s): sample delay: %u ps (%d clocks)\n", __func__,
+		      dev->name, sample_dly, smi_clk.s.sample);
+	}
+	csr_wr(CVMX_SMIX_CLK(bus & 3), smi_clk.u64);
+
+	debug("mdio clock phase: %d clocks\n", smi_clk.s.phase);
+	csr_wr(CVMX_SMIX_CLK(bus & 3), smi_clk.u64);
+	debug("Enabling SMI interface %s\n", dev->name);
+	csr_wr(CVMX_SMIX_EN(bus & 3), 1);
+
+	/* Muxed MDIO bus support removed for now! */
+	return 0;
+}
+
+static const struct mdio_ops octeon_mdio_ops = {
+	.read = octeon_mdio_read,
+	.write = octeon_mdio_write,
+};
+
+static const struct udevice_id octeon_mdio_ids[] = {
+	{ .compatible = "cavium,octeon-3860-mdio" },
+	{}
+};
+
+U_BOOT_DRIVER(octeon_mdio) = {
+	.name = "octeon_mdio",
+	.id = UCLASS_MDIO,
+	.of_match = octeon_mdio_ids,
+	.probe = octeon_mdio_probe,
+	.ops = &octeon_mdio_ops,
+	.priv_auto = sizeof(struct octeon_mdiobus),
+};
diff --git a/drivers/net/octeontx/bgx.c b/drivers/net/octeontx/bgx.c
index a5c0c9f..b6592ff 100644
--- a/drivers/net/octeontx/bgx.c
+++ b/drivers/net/octeontx/bgx.c
@@ -1458,7 +1458,7 @@
 	int bgx_idx, node;
 	int inc = 1;
 
-	bgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+	bgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 				       PCI_REGION_MEM);
 	if (!bgx->reg_base) {
 		debug("No PCI region found\n");
diff --git a/drivers/net/octeontx/nic_main.c b/drivers/net/octeontx/nic_main.c
index 0f36f25..99886e3 100644
--- a/drivers/net/octeontx/nic_main.c
+++ b/drivers/net/octeontx/nic_main.c
@@ -713,7 +713,7 @@
 		return -ENOMEM;
 
 	/* MAP PF's configuration registers */
-	nic->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+	nic->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 				       PCI_REGION_MEM);
 	if (!nic->reg_base) {
 		printf("Cannot map config register space, aborting\n");
diff --git a/drivers/net/octeontx/nicvf_main.c b/drivers/net/octeontx/nicvf_main.c
index c30ba49..6e4d0a0 100644
--- a/drivers/net/octeontx/nicvf_main.c
+++ b/drivers/net/octeontx/nicvf_main.c
@@ -509,7 +509,7 @@
 	/* Enable TSO support */
 	nicvf->hw_tso = true;
 
-	nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+	nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 					 PCI_REGION_MEM);
 
 	debug("nicvf->reg_base: %p\n", nicvf->reg_base);
diff --git a/drivers/net/octeontx/smi.c b/drivers/net/octeontx/smi.c
index d70fa82..233c26f 100644
--- a/drivers/net/octeontx/smi.c
+++ b/drivers/net/octeontx/smi.c
@@ -322,7 +322,7 @@
 	u64 baseaddr;
 
 	debug("SMI PCI device: %x\n", bdf);
-	if (!dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM)) {
+	if (!dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE, PCI_REGION_MEM)) {
 		printf("Failed to map PCI region for bdf %x\n", bdf);
 		return -1;
 	}
diff --git a/drivers/net/octeontx2/cgx.c b/drivers/net/octeontx2/cgx.c
index d139029..c6ec320 100644
--- a/drivers/net/octeontx2/cgx.c
+++ b/drivers/net/octeontx2/cgx.c
@@ -253,7 +253,7 @@
 	struct cgx *cgx = dev_get_priv(dev);
 	int err;
 
-	cgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+	cgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 				       PCI_REGION_MEM);
 	cgx->dev = dev;
 	cgx->cgx_id = ((u64)(cgx->reg_base) >> 24) & 0x7;
diff --git a/drivers/net/octeontx2/rvu_af.c b/drivers/net/octeontx2/rvu_af.c
index d2f9654..0d3a9ff 100644
--- a/drivers/net/octeontx2/rvu_af.c
+++ b/drivers/net/octeontx2/rvu_af.c
@@ -127,7 +127,7 @@
 {
 	struct rvu_af *af_ptr = dev_get_priv(dev);
 
-	af_ptr->af_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+	af_ptr->af_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 					 PCI_REGION_MEM);
 	debug("%s RVU AF BAR %p\n", __func__, af_ptr->af_base);
 	af_ptr->dev = dev;
diff --git a/drivers/net/octeontx2/rvu_pf.c b/drivers/net/octeontx2/rvu_pf.c
index 4b00178..5f3ea1f 100644
--- a/drivers/net/octeontx2/rvu_pf.c
+++ b/drivers/net/octeontx2/rvu_pf.c
@@ -58,7 +58,8 @@
 
 	debug("%s: name: %s\n", __func__, dev->name);
 
-	rvu->pf_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_2, PCI_REGION_MEM);
+	rvu->pf_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_2, 0, 0,
+				      PCI_REGION_TYPE, PCI_REGION_MEM);
 	rvu->pfid = dev_seq(dev) + 1; // RVU PF's start from 1;
 	rvu->dev = dev;
 	if (!rvu_af_dev) {
diff --git a/drivers/net/pch_gbe.c b/drivers/net/pch_gbe.c
index f189524..ad7b5b8 100644
--- a/drivers/net/pch_gbe.c
+++ b/drivers/net/pch_gbe.c
@@ -449,7 +449,7 @@
 
 	priv->dev = dev;
 
-	iobase = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_1, PCI_REGION_MEM);
+	iobase = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_1, 0, 0, PCI_REGION_TYPE, PCI_REGION_MEM);
 
 	plat->iobase = (ulong)iobase;
 	priv->mac_regs = (struct pch_gbe_regs *)iobase;
diff --git a/drivers/nvme/nvme_pci.c b/drivers/nvme/nvme_pci.c
index 5f60fb88..36bf9c5 100644
--- a/drivers/nvme/nvme_pci.c
+++ b/drivers/nvme/nvme_pci.c
@@ -28,8 +28,8 @@
 	sprintf(ndev->vendor, "0x%.4x", pplat->vendor);
 
 	ndev->instance = trailing_strtol(udev->name);
-	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
-			PCI_REGION_MEM);
+	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0, 0, 0,
+				   PCI_REGION_TYPE, PCI_REGION_MEM);
 	return nvme_init(udev);
 }
 
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 47cd074..fd22034 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -67,6 +67,13 @@
 	  if available on a PCI Physical Function device and probe for
 	  applicable drivers.
 
+config PCI_ENHANCED_ALLOCATION
+	bool "Enable support for Enhanced Allocation of resources"
+	default y
+	help
+	  Enable support for Enhanced Allocation which can be used by supported
+	  devices in place of traditional BARs for allocation of resources.
+
 config PCI_ARID
         bool "Enable Alternate Routing-ID support for PCI"
         help
diff --git a/drivers/pci/pci-uclass.c b/drivers/pci/pci-uclass.c
index 33dda00..970ee1a 100644
--- a/drivers/pci/pci-uclass.c
+++ b/drivers/pci/pci-uclass.c
@@ -645,7 +645,11 @@
 		return log_msg_ret("probe", -EINVAL);
 	}
 
-	ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
+	if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION))
+		ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
+	else
+		ea_pos = 0;
+
 	if (ea_pos) {
 		dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8),
 				    &reg);
@@ -1013,7 +1017,22 @@
 
 		if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) &&
 		    type == PCI_REGION_MEM && upper_32_bits(pci_addr)) {
-			debug(" - beyond the 32-bit boundary, ignoring\n");
+			debug(" - pci_addr beyond the 32-bit boundary, ignoring\n");
+			continue;
+		}
+
+		if (!IS_ENABLED(CONFIG_PHYS_64BIT) && upper_32_bits(addr)) {
+			debug(" - addr beyond the 32-bit boundary, ignoring\n");
+			continue;
+		}
+
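+		/* Overflow-safe form of "start + size exceeds the address
+		 * space", written so that the sum itself cannot wrap.
+		 */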
+		if (~((pci_addr_t)0) - pci_addr < size) {
+			debug(" - PCI range exceeds max address, ignoring\n");
+			continue;
+		}
+
+		if (~((phys_addr_t)0) - addr < size) {
+			debug(" - phys range exceeds max address, ignoring\n");
 			continue;
 		}
 
@@ -1375,131 +1394,84 @@
 	dm_pci_write_config32(dev, bar, addr);
 }
 
-static int _dm_pci_bus_to_phys(struct udevice *ctlr,
-			       pci_addr_t bus_addr, unsigned long flags,
-			       unsigned long skip_mask, phys_addr_t *pa)
+phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
+			       size_t len, unsigned long mask,
+			       unsigned long flags)
 {
-	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
+	struct udevice *ctlr;
+	struct pci_controller *hose;
 	struct pci_region *res;
+	pci_addr_t offset;
 	int i;
 
-	if (hose->region_count == 0) {
-		*pa = bus_addr;
-		return 0;
-	}
+	/* The root controller has the region information */
+	ctlr = pci_get_controller(dev);
+	hose = dev_get_uclass_priv(ctlr);
+
+	if (hose->region_count == 0)
+		return bus_addr;
 
 	for (i = 0; i < hose->region_count; i++) {
 		res = &hose->regions[i];
 
-		if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
+		if ((res->flags & mask) != flags)
 			continue;
 
-		if (res->flags & skip_mask)
+		if (bus_addr < res->bus_start)
 			continue;
 
-		if (bus_addr >= res->bus_start &&
-		    (bus_addr - res->bus_start) < res->size) {
-			*pa = (bus_addr - res->bus_start + res->phys_start);
-			return 0;
-		}
-	}
-
-	return 1;
-}
-
-phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
-			       unsigned long flags)
-{
-	phys_addr_t phys_addr = 0;
-	struct udevice *ctlr;
-	int ret;
+		offset = bus_addr - res->bus_start;
+		if (offset >= res->size)
+			continue;
 
-	/* The root controller has the region information */
-	ctlr = pci_get_controller(dev);
+		if (len > res->size - offset)
+			continue;
 
-	/*
-	 * if PCI_REGION_MEM is set we do a two pass search with preference
-	 * on matches that don't have PCI_REGION_SYS_MEMORY set
-	 */
-	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
-		ret = _dm_pci_bus_to_phys(ctlr, bus_addr,
-					  flags, PCI_REGION_SYS_MEMORY,
-					  &phys_addr);
-		if (!ret)
-			return phys_addr;
+		return res->phys_start + offset;
 	}
 
-	ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr);
-
-	if (ret)
-		puts("pci_hose_bus_to_phys: invalid physical address\n");
-
-	return phys_addr;
+	puts("pci_hose_bus_to_phys: invalid physical address\n");
+	return 0;
 }
 
-static int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
-			       unsigned long flags, unsigned long skip_mask,
-			       pci_addr_t *ba)
+pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
+			      size_t len, unsigned long mask,
+			      unsigned long flags)
 {
-	struct pci_region *res;
 	struct udevice *ctlr;
-	pci_addr_t bus_addr;
-	int i;
 	struct pci_controller *hose;
+	struct pci_region *res;
+	phys_addr_t offset;
+	int i;
 
 	/* The root controller has the region information */
 	ctlr = pci_get_controller(dev);
 	hose = dev_get_uclass_priv(ctlr);
 
-	if (hose->region_count == 0) {
-		*ba = phys_addr;
-		return 0;
-	}
+	if (hose->region_count == 0)
+		return phys_addr;
 
 	for (i = 0; i < hose->region_count; i++) {
 		res = &hose->regions[i];
 
-		if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
+		if ((res->flags & mask) != flags)
 			continue;
 
-		if (res->flags & skip_mask)
+		if (phys_addr < res->phys_start)
 			continue;
 
-		bus_addr = phys_addr - res->phys_start + res->bus_start;
-
-		if (bus_addr >= res->bus_start &&
-		    (bus_addr - res->bus_start) < res->size) {
-			*ba = bus_addr;
-			return 0;
-		}
-	}
-
-	return 1;
-}
+		offset = phys_addr - res->phys_start;
+		if (offset >= res->size)
+			continue;
 
-pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
-			      unsigned long flags)
-{
-	pci_addr_t bus_addr = 0;
-	int ret;
+		if (len > res->size - offset)
+			continue;
 
-	/*
-	 * if PCI_REGION_MEM is set we do a two pass search with preference
-	 * on matches that don't have PCI_REGION_SYS_MEMORY set
-	 */
-	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
-		ret = _dm_pci_phys_to_bus(dev, phys_addr, flags,
-					  PCI_REGION_SYS_MEMORY, &bus_addr);
-		if (!ret)
-			return bus_addr;
+		return res->bus_start + offset;
 	}
 
-	ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr);
-
-	if (ret)
-		puts("pci_hose_phys_to_bus: invalid physical address\n");
-
-	return bus_addr;
+	puts("pci_hose_phys_to_bus: invalid physical address\n");
+	return 0;
 }
 
 static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off,
@@ -1533,8 +1505,9 @@
 	return addr;
 }
 
-static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags,
-			       int ea_off, struct pci_child_plat *pdata)
+static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, size_t offset,
+			       size_t len, int ea_off,
+			       struct pci_child_plat *pdata)
 {
 	int ea_cnt, i, entry_size;
 	int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2;
@@ -1576,14 +1549,18 @@
 		if (IS_ENABLED(CONFIG_PCI_SRIOV))
 			addr += dm_pci_map_ea_virt(dev, ea_off, pdata);
 
+		if (~((phys_addr_t)0) - addr < offset)
+			return NULL;
+
 		/* size ignored for now */
-		return map_physmem(addr, 0, flags);
+		return map_physmem(addr + offset, len, MAP_NOCACHE);
 	}
 
 	return 0;
 }
 
-void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
+void *dm_pci_map_bar(struct udevice *dev, int bar, size_t offset, size_t len,
+		     unsigned long mask, unsigned long flags)
 {
 	struct pci_child_plat *pdata = dev_get_parent_plat(dev);
 	struct udevice *udev = dev;
@@ -1606,21 +1583,29 @@
 	 * Incase of virtual functions, pdata will help read VF BEI
 	 * and EA entry size.
 	 */
-	ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
+	if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION))
+		ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
+	else
+		ea_off = 0;
+
 	if (ea_off)
-		return dm_pci_map_ea_bar(udev, bar, flags, ea_off, pdata);
+		return dm_pci_map_ea_bar(udev, bar, offset, len, ea_off, pdata);
 
 	/* read BAR address */
 	dm_pci_read_config32(udev, bar, &bar_response);
 	pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);
 
+	if (~((pci_addr_t)0) - pci_bus_addr < offset)
+		return NULL;
+
 	/*
-	 * Pass "0" as the length argument to pci_bus_to_virt.  The arg
-	 * isn't actually used on any platform because U-Boot assumes a static
-	 * linear mapping.  In the future, this could read the BAR size
-	 * and pass that as the size if needed.
+	 * Forward the length argument to dm_pci_bus_to_virt. The length will
+	 * be used to check that the entire address range has been declared as
+	 * a PCI range, but a better check would be to probe for the size of
+	 * the BAR and prevent overflow more locally.
 	 */
-	return dm_pci_bus_to_virt(udev, pci_bus_addr, flags, 0, MAP_NOCACHE);
+	return dm_pci_bus_to_virt(udev, pci_bus_addr + offset, len, mask, flags,
+				  MAP_NOCACHE);
 }
 
 static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8dba95a..a1e515c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -283,6 +283,12 @@
 	  can be used to access the SPI flash on platforms embedding
 	  this Macronix IP core.
 
+config NPCM_FIU_SPI
+	bool "FIU driver for Nuvoton NPCM SoC"
+	help
+	  This enables support for the Flash Interface Unit SPI controller
+	  in master mode.
+
 config NXP_FSPI
 	bool "NXP FlexSPI driver"
 	depends on SPI_MEM
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 7f43f84..06e81b4 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -47,6 +47,7 @@
 obj-$(CONFIG_MVEBU_A3700_SPI) += mvebu_a3700_spi.o
 obj-$(CONFIG_MXC_SPI) += mxc_spi.o
 obj-$(CONFIG_MXS_SPI) += mxs_spi.o
+obj-$(CONFIG_NPCM_FIU_SPI) += npcm_fiu_spi.o
 obj-$(CONFIG_NXP_FSPI) += nxp_fspi.o
 obj-$(CONFIG_ATCSPI200_SPI) += atcspi200_spi.o
 obj-$(CONFIG_OCTEON_SPI) += octeon_spi.o
diff --git a/drivers/spi/npcm_fiu_spi.c b/drivers/spi/npcm_fiu_spi.c
new file mode 100644
index 0000000..7000fe5
--- /dev/null
+++ b/drivers/spi/npcm_fiu_spi.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2022 Nuvoton Technology Corp.
+ * NPCM Flash Interface Unit (FIU) SPI master controller driver.
+ */
+
+#include <clk.h>
+#include <dm.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <linux/bitfield.h>
+#include <linux/log2.h>
+#include <linux/iopoll.h>
+
+#define DW_SIZE			4
+#define CHUNK_SIZE		16
+#define XFER_TIMEOUT		1000000
+
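+/*
+ * UMA (user-mode access) performs one firmware-controlled SPI transaction
+ * at a time, moving at most 16 data bytes through the four 32-bit
+ * UMA_DW0-3/UMA_DR0-3 data registers, hence CHUNK_SIZE above.
+ */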
+/* FIU UMA Configuration Register (UMA_CFG) */
+#define UMA_CFG_RDATSIZ_MASK	GENMASK(28, 24)
+#define UMA_CFG_DBSIZ_MASK	GENMASK(23, 21)
+#define UMA_CFG_WDATSIZ_MASK	GENMASK(20, 16)
+#define UMA_CFG_ADDSIZ_MASK	GENMASK(13, 11)
+#define UMA_CFG_RDBPCK_MASK	GENMASK(9, 8)
+#define UMA_CFG_DBPCK_MASK	GENMASK(7, 6)
+#define UMA_CFG_WDBPCK_MASK	GENMASK(5, 4)
+#define UMA_CFG_ADBPCK_MASK	GENMASK(3, 2)
+#define UMA_CFG_CMBPCK_MASK	GENMASK(1, 0)
+#define UMA_CFG_CMDSIZ_SHIFT	10
+
+/* FIU UMA Control and Status Register (UMA_CTS) */
+#define UMA_CTS_SW_CS		BIT(16)
+#define UMA_CTS_EXEC_DONE	BIT(0)
+#define UMA_CTS_RDYST		BIT(24)
+#define UMA_CTS_DEV_NUM_MASK	GENMASK(9, 8)
+
+struct npcm_fiu_regs {
+	unsigned int    drd_cfg;
+	unsigned int    dwr_cfg;
+	unsigned int    uma_cfg;
+	unsigned int    uma_cts;
+	unsigned int    uma_cmd;
+	unsigned int    uma_addr;
+	unsigned int    prt_cfg;
+	unsigned char	res1[4];
+	unsigned int    uma_dw0;
+	unsigned int    uma_dw1;
+	unsigned int    uma_dw2;
+	unsigned int    uma_dw3;
+	unsigned int    uma_dr0;
+	unsigned int    uma_dr1;
+	unsigned int    uma_dr2;
+	unsigned int    uma_dr3;
+	unsigned int    prt_cmd0;
+	unsigned int    prt_cmd1;
+	unsigned int    prt_cmd2;
+	unsigned int    prt_cmd3;
+	unsigned int    prt_cmd4;
+	unsigned int    prt_cmd5;
+	unsigned int    prt_cmd6;
+	unsigned int    prt_cmd7;
+	unsigned int    prt_cmd8;
+	unsigned int    prt_cmd9;
+	unsigned int    stuff[4];
+	unsigned int    fiu_cfg;
+};
+
+struct npcm_fiu_priv {
+	struct npcm_fiu_regs *regs;
+	struct clk clk;
+};
+
+static int npcm_fiu_spi_set_speed(struct udevice *bus, uint speed)
+{
+	struct npcm_fiu_priv *priv = dev_get_priv(bus);
+	int ret;
+
+	debug("%s: set speed %u\n", bus->name, speed);
+	ret = clk_set_rate(&priv->clk, speed);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int npcm_fiu_spi_set_mode(struct udevice *bus, uint mode)
+{
+	return 0;
+}
+
+static inline void activate_cs(struct npcm_fiu_regs *regs, int cs)
+{
+	writel(FIELD_PREP(UMA_CTS_DEV_NUM_MASK, cs), &regs->uma_cts);
+}
+
+static inline void deactivate_cs(struct npcm_fiu_regs *regs, int cs)
+{
+	writel(FIELD_PREP(UMA_CTS_DEV_NUM_MASK, cs) | UMA_CTS_SW_CS, &regs->uma_cts);
+}
+
+static int fiu_uma_read(struct udevice *bus, u8 *buf, u32 size)
+{
+	struct npcm_fiu_priv *priv = dev_get_priv(bus);
+	struct npcm_fiu_regs *regs = priv->regs;
+	u32 data_reg[4];
+	u32 val;
+	int ret;
+
+	/* Set data size */
+	writel(FIELD_PREP(UMA_CFG_RDATSIZ_MASK, size), &regs->uma_cfg);
+
+	/* Initiate the read */
+	writel(readl(&regs->uma_cts) | UMA_CTS_EXEC_DONE, &regs->uma_cts);
+
+	/* Wait for completion */
+	ret = readl_poll_timeout(&regs->uma_cts, val,
+				 !(val & UMA_CTS_EXEC_DONE), XFER_TIMEOUT);
+	if (ret) {
+		printf("npcm_fiu: read timeout\n");
+		return ret;
+	}
+
+	/* Copy data from data registers */
+	if (size)
+		data_reg[0] = readl(&regs->uma_dr0);
+	if (size > DW_SIZE)
+		data_reg[1] = readl(&regs->uma_dr1);
+	if (size > DW_SIZE * 2)
+		data_reg[2] = readl(&regs->uma_dr2);
+	if (size > DW_SIZE * 3)
+		data_reg[3] = readl(&regs->uma_dr3);
+	memcpy(buf, data_reg, size);
+
+	return 0;
+}
+
+static int fiu_uma_write(struct udevice *bus, const u8 *buf, u32 size)
+{
+	struct npcm_fiu_priv *priv = dev_get_priv(bus);
+	struct npcm_fiu_regs *regs = priv->regs;
+	u32 data_reg[4];
+	u32 val;
+	int ret;
+
+	/* Set data size */
+	writel(FIELD_PREP(UMA_CFG_WDATSIZ_MASK, size), &regs->uma_cfg);
+
+	/* Write data to data registers */
+	memcpy(data_reg, buf, size);
+	if (size)
+		writel(data_reg[0], &regs->uma_dw0);
+	if (size > DW_SIZE)
+		writel(data_reg[1], &regs->uma_dw1);
+	if (size > DW_SIZE * 2)
+		writel(data_reg[2], &regs->uma_dw2);
+	if (size > DW_SIZE * 3)
+		writel(data_reg[3], &regs->uma_dw3);
+
+	/* Initiate the transaction */
+	writel(readl(&regs->uma_cts) | UMA_CTS_EXEC_DONE, &regs->uma_cts);
+
+	/* Wait for completion */
+	ret = readl_poll_timeout(&regs->uma_cts, val,
+				 !(val & UMA_CTS_EXEC_DONE), XFER_TIMEOUT);
+	if (ret)
+		printf("npcm_fiu: write timeout\n");
+
+	return ret;
+}
+
+static int npcm_fiu_spi_xfer(struct udevice *dev, unsigned int bitlen,
+			     const void *dout, void *din, unsigned long flags)
+{
+	struct udevice *bus = dev->parent;
+	struct npcm_fiu_priv *priv = dev_get_priv(bus);
+	struct npcm_fiu_regs *regs = priv->regs;
+	struct dm_spi_slave_plat *slave_plat =
+			dev_get_parent_plat(dev);
+	const u8 *tx = dout;
+	u8 *rx = din;
+	int bytes = bitlen / 8;
+	int ret = 0;
+	int len;
+
+	if (flags & SPI_XFER_BEGIN)
+		activate_cs(regs, slave_plat->cs);
+
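+	/* UMA moves at most CHUNK_SIZE bytes per operation, so split the
+	 * transfer; CS stays asserted between chunks until SPI_XFER_END.
+	 */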
+	while (bytes) {
+		len = (bytes > CHUNK_SIZE) ? CHUNK_SIZE : bytes;
+		if (tx) {
+			ret = fiu_uma_write(bus, tx, len);
+			if (ret)
+				break;
+			tx += len;
+		} else {
+			ret = fiu_uma_read(bus, rx, len);
+			if (ret)
+				break;
+			rx += len;
+		}
+		bytes -= len;
+	}
+
+	if (flags & SPI_XFER_END)
+		deactivate_cs(regs, slave_plat->cs);
+
+	return ret;
+}
+
+static int npcm_fiu_uma_operation(struct npcm_fiu_priv *priv, const struct spi_mem_op *op,
+				  u32 addr, const u8 *tx, u8 *rx, u32 nbytes, bool started)
+{
+	struct npcm_fiu_regs *regs = priv->regs;
+	u32 uma_cfg = 0, val;
+	u32 data_reg[4];
+	int ret;
+
+	debug("fiu_uma: opcode 0x%x, dir %d, addr 0x%x, %d bytes\n",
+	      op->cmd.opcode, op->data.dir, addr, nbytes);
+	debug("         buswidth cmd:%d, addr:%d, dummy:%d, data:%d\n",
+	      op->cmd.buswidth, op->addr.buswidth, op->dummy.buswidth,
+	      op->data.buswidth);
+	debug("         size cmd:%d, addr:%d, dummy:%d, data:%d\n",
+	      1, op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
+	debug("         tx %p, rx %p\n", tx, rx);
+
+	if (!started) {
+		/* Send cmd/addr at the beginning of a transaction */
+		writel(op->cmd.opcode, &regs->uma_cmd);
+
+		uma_cfg |= FIELD_PREP(UMA_CFG_CMBPCK_MASK, ilog2(op->cmd.buswidth)) |
+			   (1 << UMA_CFG_CMDSIZ_SHIFT);
+		/* Configure addr bytes */
+		if (op->addr.nbytes) {
+			uma_cfg |= FIELD_PREP(UMA_CFG_ADBPCK_MASK, ilog2(op->addr.buswidth)) |
+				   FIELD_PREP(UMA_CFG_ADDSIZ_MASK, op->addr.nbytes);
+			writel(addr, &regs->uma_addr);
+		}
+		/* Configure dummy bytes */
+		if (op->dummy.nbytes)
+			uma_cfg |= FIELD_PREP(UMA_CFG_DBPCK_MASK, ilog2(op->dummy.buswidth)) |
+				   FIELD_PREP(UMA_CFG_DBSIZ_MASK, op->dummy.nbytes);
+	}
+	/* Set data bus width and data size */
+	if (op->data.dir == SPI_MEM_DATA_IN && nbytes)
+		uma_cfg |= FIELD_PREP(UMA_CFG_RDBPCK_MASK, ilog2(op->data.buswidth)) |
+			   FIELD_PREP(UMA_CFG_RDATSIZ_MASK, nbytes);
+	else if (op->data.dir == SPI_MEM_DATA_OUT && nbytes)
+		uma_cfg |= FIELD_PREP(UMA_CFG_WDBPCK_MASK, ilog2(op->data.buswidth)) |
+			   FIELD_PREP(UMA_CFG_WDATSIZ_MASK, nbytes);
+	writel(uma_cfg, &regs->uma_cfg);
+
+	if (op->data.dir == SPI_MEM_DATA_OUT && nbytes) {
+		memcpy(data_reg, tx, nbytes);
+
+		if (nbytes)
+			writel(data_reg[0], &regs->uma_dw0);
+		if (nbytes > DW_SIZE)
+			writel(data_reg[1], &regs->uma_dw1);
+		if (nbytes > DW_SIZE * 2)
+			writel(data_reg[2], &regs->uma_dw2);
+		if (nbytes > DW_SIZE * 3)
+			writel(data_reg[3], &regs->uma_dw3);
+	}
+	/* Initiate the transaction */
+	writel(readl(&regs->uma_cts) | UMA_CTS_EXEC_DONE, &regs->uma_cts);
+
+	/* Wait for completion */
+	ret = readl_poll_timeout(&regs->uma_cts, val,
+				 !(val & UMA_CTS_EXEC_DONE), XFER_TIMEOUT);
+	if (ret) {
+		printf("npcm_fiu: UMA op timeout\n");
+		return ret;
+	}
+
+	if (op->data.dir == SPI_MEM_DATA_IN && nbytes) {
+		if (nbytes)
+			data_reg[0] = readl(&regs->uma_dr0);
+		if (nbytes > DW_SIZE)
+			data_reg[1] = readl(&regs->uma_dr1);
+		if (nbytes > DW_SIZE * 2)
+			data_reg[2] = readl(&regs->uma_dr2);
+		if (nbytes > DW_SIZE * 3)
+			data_reg[3] = readl(&regs->uma_dr3);
+
+		memcpy(rx, data_reg, nbytes);
+	}
+
+	return 0;
+}
+
+static int npcm_fiu_exec_op(struct spi_slave *slave,
+			    const struct spi_mem_op *op)
+{
+	struct udevice *bus = slave->dev->parent;
+	struct npcm_fiu_priv *priv = dev_get_priv(bus);
+	struct npcm_fiu_regs *regs = priv->regs;
+	struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(slave->dev);
+	u32 bytes, len, addr;
+	const u8 *tx;
+	u8 *rx;
+	bool started = false;
+	int ret;
+
+	bytes = op->data.nbytes;
+	addr = (u32)op->addr.val;
+	if (!bytes) {
+		activate_cs(regs, slave_plat->cs);
+		ret = npcm_fiu_uma_operation(priv, op, addr, NULL, NULL, 0, false);
+		deactivate_cs(regs, slave_plat->cs);
+		return ret;
+	}
+
+	tx = op->data.buf.out;
+	rx = op->data.buf.in;
+	/*
+	 * Use SW-controlled CS for writes to extend the transaction and
+	 * keep the Write Enable state.
+	 * Use HW-controlled CS for reads to avoid clock and timing issues.
+	 */
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		activate_cs(regs, slave_plat->cs);
+	else
+		writel(FIELD_PREP(UMA_CTS_DEV_NUM_MASK, slave_plat->cs) | UMA_CTS_SW_CS,
+		       &regs->uma_cts);
+	while (bytes) {
+		len = (bytes > CHUNK_SIZE) ? CHUNK_SIZE : bytes;
+		ret = npcm_fiu_uma_operation(priv, op, addr, tx, rx, len, started);
+		if (ret)
+			return ret;
+
+		/* CS is kept low for a UMA write to extend the transaction */
+		if (op->data.dir == SPI_MEM_DATA_OUT)
+			started = true;
+
+		bytes -= len;
+		addr += len;
+		if (tx)
+			tx += len;
+		if (rx)
+			rx += len;
+	}
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		deactivate_cs(regs, slave_plat->cs);
+
+	return 0;
+}
+
+static int npcm_fiu_spi_probe(struct udevice *bus)
+{
+	struct npcm_fiu_priv *priv = dev_get_priv(bus);
+	int ret;
+
+	priv->regs = (struct npcm_fiu_regs *)dev_read_addr_ptr(bus);
+
+	ret = clk_get_by_index(bus, 0, &priv->clk);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
+	.exec_op = npcm_fiu_exec_op,
+};
+
+static const struct dm_spi_ops npcm_fiu_spi_ops = {
+	.xfer           = npcm_fiu_spi_xfer,
+	.set_speed      = npcm_fiu_spi_set_speed,
+	.set_mode       = npcm_fiu_spi_set_mode,
+	.mem_ops        = &npcm_fiu_mem_ops,
+};
+
+static const struct udevice_id npcm_fiu_spi_ids[] = {
+	{ .compatible = "nuvoton,npcm845-fiu" },
+	{ .compatible = "nuvoton,npcm750-fiu" },
+	{ }
+};
+
+U_BOOT_DRIVER(npcm_fiu_spi) = {
+	.name   = "npcm_fiu_spi",
+	.id     = UCLASS_SPI,
+	.of_match = npcm_fiu_spi_ids,
+	.ops    = &npcm_fiu_spi_ops,
+	.priv_auto = sizeof(struct npcm_fiu_priv),
+	.probe  = npcm_fiu_spi_probe,
+};
diff --git a/drivers/spi/octeon_spi.c b/drivers/spi/octeon_spi.c
index fcabc11..c2a7ee23 100644
--- a/drivers/spi/octeon_spi.c
+++ b/drivers/spi/octeon_spi.c
@@ -568,7 +568,7 @@
 		pci_dev_t bdf = dm_pci_get_bdf(dev);
 
 		debug("SPI PCI device: %x\n", bdf);
-		priv->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+		priv->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
 					    PCI_REGION_MEM);
 		/* Add base offset */
 		priv->base += 0x1000;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 4f711de..1ab3061 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -36,7 +36,8 @@
 		return ret;
 
 	hccr = (struct ehci_hccr *)dm_pci_map_bar(dev,
-			PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+			PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
+			PCI_REGION_MEM);
 	hcor = (struct ehci_hcor *)((uintptr_t) hccr +
 			HC_LENGTH(ehci_readl(&hccr->cr_capbase)));
 
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 6ddc9da..f061aec 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -18,7 +18,7 @@
 {
 	struct ohci_regs *regs;
 
-	regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+	regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE, PCI_REGION_MEM);
 	return ohci_register(dev, regs);
 }
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 630bc20..11f1c02 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -27,7 +27,8 @@
 	u32 cmd;
 
 	hccr = (struct xhci_hccr *)dm_pci_map_bar(dev,
-			PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+			PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE,
+			PCI_REGION_MEM);
 	if (!hccr) {
 		printf("xhci-pci init cannot map PCI mem bar\n");
 		return -EIO;
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 863c3fb..586263e 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -37,6 +37,15 @@
 	  This driver provides support for virtio based paravirtual device
 	  drivers over PCI.
 
+config VIRTIO_PCI_LEGACY
+	bool "PCI driver for legacy virtio devices"
+	depends on PCI
+	select VIRTIO
+	default VIRTIO_PCI
+	help
+	  This driver provides support for legacy virtio based paravirtual
+	  device drivers over PCI.
+
 config VIRTIO_SANDBOX
 	bool "Sandbox driver for virtio devices"
 	depends on SANDBOX
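
Boards that still need the legacy (pre-1.0) virtio-PCI transport must now have
the new symbol enabled; it defaults to the value of VIRTIO_PCI, so existing
configurations keep legacy support unless they opt out. In a board defconfig
this would look like:

CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PCI_LEGACY=y
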
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index dc88809..4c63a6c 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -5,7 +5,8 @@
 
 obj-y += virtio-uclass.o virtio_ring.o
 obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
-obj-$(CONFIG_VIRTIO_PCI) += virtio_pci_legacy.o virtio_pci_modern.o
+obj-$(CONFIG_VIRTIO_PCI) += virtio_pci_modern.o
+obj-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
 obj-$(CONFIG_VIRTIO_SANDBOX) += virtio_sandbox.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 03fa5cb..cf5dfb1 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -319,7 +319,8 @@
 	uc_priv->device = subdevice;
 	uc_priv->vendor = subvendor;
 
-	priv->ioaddr = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0, PCI_REGION_IO);
+	priv->ioaddr = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0, 0, 0,
+				      PCI_REGION_TYPE, PCI_REGION_IO);
 	if (!priv->ioaddr)
 		return -ENXIO;
 	debug("(%s): virtio legacy device reg base %04lx\n",
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index fd8a1f3..880a12c 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -94,6 +94,7 @@
  *
  * @common: pci transport device common register block base
  * @notify_base: pci transport device notify register block base
+ * @notify_len: pci transport device notify register block length
  * @device: pci transport device device-specific register block base
  * @device_len: pci transport device device-specific register block length
  * @notify_offset_multiplier: multiply queue_notify_off by this value
@@ -101,6 +102,7 @@
 struct virtio_pci_priv {
 	struct virtio_pci_common_cfg __iomem *common;
 	void __iomem *notify_base;
+	u32 notify_len;
 	void __iomem *device;
 	u32 device_len;
 	u32 notify_offset_multiplier;
@@ -114,7 +116,11 @@
 	__le16 w;
 	__le32 l;
 
-	WARN_ON(offset + len > priv->device_len);
+	if (!priv->device)
+		return -ENOSYS;
+
+	if (offset + len > priv->device_len)
+		return -EINVAL;
 
 	switch (len) {
 	case 1:
@@ -136,7 +142,7 @@
 		memcpy(buf + sizeof(l), &l, sizeof(l));
 		break;
 	default:
-		WARN_ON(true);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -150,7 +156,11 @@
 	__le16 w;
 	__le32 l;
 
-	WARN_ON(offset + len > priv->device_len);
+	if (!priv->device)
+		return -ENOSYS;
+
+	if (offset + len > priv->device_len)
+		return -EINVAL;
 
 	switch (len) {
 	case 1:
@@ -172,7 +182,7 @@
 		iowrite32(le32_to_cpu(l), priv->device + offset + sizeof(l));
 		break;
 	default:
-		WARN_ON(true);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -365,11 +375,19 @@
 	off = ioread16(&priv->common->queue_notify_off);
 
 	/*
+	 * Check that the effective offset is in bounds and leaves room for the
+	 * notification, which is just a single 16-bit value since
+	 * VIRTIO_F_NOTIFICATION_DATA isn't negotiated by the drivers.
+	 */
+	off *= priv->notify_offset_multiplier;
+	if (off > priv->notify_len - sizeof(u16))
+		return -EIO;
+
+	/*
 	 * We write the queue's selector into the notification register
 	 * to signal the other end
 	 */
-	iowrite16(vq->index,
-		  priv->notify_base + off * priv->notify_offset_multiplier);
+	iowrite16(vq->index, priv->notify_base + off);
 
 	return 0;
 }
@@ -379,28 +397,51 @@
  *
  * @udev:	the transport device
  * @cfg_type:	the VIRTIO_PCI_CAP_* value we seek
+ * @cap_size:	expected size of the capability
+ * @cap:	capability read from the config space
  *
 * Return: offset of the capability within the configuration space, or 0 if not found
  */
-static int virtio_pci_find_capability(struct udevice *udev, u8 cfg_type)
+static int virtio_pci_find_capability(struct udevice *udev, u8 cfg_type,
+				      size_t cap_size,
+				      struct virtio_pci_cap *cap)
 {
 	int pos;
 	int offset;
-	u8 type, bar;
+
+	assert(cap_size >= sizeof(struct virtio_pci_cap));
+	assert(cap_size <= PCI_CFG_SPACE_SIZE);
+
+	if (!cap)
+		return 0;
 
 	for (pos = dm_pci_find_capability(udev, PCI_CAP_ID_VNDR);
 	     pos > 0;
 	     pos = dm_pci_find_next_capability(udev, pos, PCI_CAP_ID_VNDR)) {
+		/* Ensure the capability is within bounds */
+		if (PCI_CFG_SPACE_SIZE - cap_size < pos)
+			return 0;
+
+		offset = pos + offsetof(struct virtio_pci_cap, cap_vndr);
+		dm_pci_read_config8(udev, offset, &cap->cap_vndr);
+		offset = pos + offsetof(struct virtio_pci_cap, cap_next);
+		dm_pci_read_config8(udev, offset, &cap->cap_next);
+		offset = pos + offsetof(struct virtio_pci_cap, cap_len);
+		dm_pci_read_config8(udev, offset, &cap->cap_len);
 		offset = pos + offsetof(struct virtio_pci_cap, cfg_type);
-		dm_pci_read_config8(udev, offset, &type);
+		dm_pci_read_config8(udev, offset, &cap->cfg_type);
 		offset = pos + offsetof(struct virtio_pci_cap, bar);
-		dm_pci_read_config8(udev, offset, &bar);
+		dm_pci_read_config8(udev, offset, &cap->bar);
+		offset = pos + offsetof(struct virtio_pci_cap, offset);
+		dm_pci_read_config32(udev, offset, &cap->offset);
+		offset = pos + offsetof(struct virtio_pci_cap, length);
+		dm_pci_read_config32(udev, offset, &cap->length);
 
 		/* Ignore structures with reserved BAR values */
-		if (bar > 0x5)
+		if (cap->bar > 0x5)
 			continue;
 
-		if (type == cfg_type)
+		if (cap->cfg_type == cfg_type)
 			return pos;
 	}
 
@@ -411,35 +452,24 @@
  * virtio_pci_map_capability - map base address of the capability
  *
  * @udev:	the transport device
- * @off:	offset of the configuration structure
+ * @cap:	capability to map
  *
  * Return: base address of the capability
  */
-static void __iomem *virtio_pci_map_capability(struct udevice *udev, int off)
+static void __iomem *virtio_pci_map_capability(struct udevice *udev,
+					       const struct virtio_pci_cap *cap)
 {
-	u8 bar;
-	u32 offset;
-	ulong base;
-	void __iomem *p;
-
-	if (!off)
-		return NULL;
-
-	offset = off + offsetof(struct virtio_pci_cap, bar);
-	dm_pci_read_config8(udev, offset, &bar);
-	offset = off + offsetof(struct virtio_pci_cap, offset);
-	dm_pci_read_config32(udev, offset, &offset);
-
 	/*
-	 * TODO: adding 64-bit BAR support
-	 *
-	 * Per spec, the BAR is permitted to be either 32-bit or 64-bit.
-	 * For simplicity, only read the BAR address as 32-bit.
+	 * Find the corresponding memory region that isn't system memory but is
+	 * writable.
 	 */
-	base = dm_pci_read_bar32(udev, bar);
-	p = (void __iomem *)base + offset;
+	unsigned long mask =
+			PCI_REGION_TYPE | PCI_REGION_SYS_MEMORY | PCI_REGION_RO;
+	unsigned long flags = PCI_REGION_MEM;
+	u8 *p = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0 + cap->bar, cap->offset,
+			       cap->length, mask, flags);
 
-	return p;
+	return (void __iomem *)p;
 }
 
 static int virtio_pci_bind(struct udevice *udev)
@@ -462,6 +492,7 @@
 	u16 subvendor;
 	u8 revision;
 	int common, notify, device;
+	struct virtio_pci_cap common_cap, notify_cap, device_cap;
 	int offset;
 
 	/* We only own devices >= 0x1040 and <= 0x107f: leave the rest. */
@@ -477,34 +508,60 @@
 	uc_priv->vendor = subvendor;
 
 	/* Check for a common config: if not, use legacy mode (bar 0) */
-	common = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_COMMON_CFG);
+	common = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_COMMON_CFG,
+					    sizeof(struct virtio_pci_cap),
+					    &common_cap);
 	if (!common) {
 		printf("(%s): leaving for legacy driver\n", udev->name);
 		return -ENODEV;
 	}
 
+	if (common_cap.length < sizeof(struct virtio_pci_common_cfg)) {
+		printf("(%s): virtio common config too small\n", udev->name);
+		return -EINVAL;
+	}
+
 	/* If common is there, notify should be too */
-	notify = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_NOTIFY_CFG);
+	notify = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_NOTIFY_CFG,
+					    sizeof(struct virtio_pci_notify_cap),
+					    &notify_cap);
 	if (!notify) {
 		printf("(%s): missing capabilities %i/%i\n", udev->name,
 		       common, notify);
 		return -EINVAL;
 	}
 
+	/* Map configuration structures */
+	priv->common = virtio_pci_map_capability(udev, &common_cap);
+	if (!priv->common) {
+		printf("(%s): could not map common config\n", udev->name);
+		return -EINVAL;
+	}
+
+	priv->notify_len = notify_cap.length;
+	priv->notify_base = virtio_pci_map_capability(udev, &notify_cap);
+	if (!priv->notify_base) {
+		printf("(%s): could not map notify config\n", udev->name);
+		return -EINVAL;
+	}
+
 	/*
 	 * Device capability is only mandatory for devices that have
 	 * device-specific configuration.
 	 */
-	device = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_DEVICE_CFG);
+	device = virtio_pci_find_capability(udev, VIRTIO_PCI_CAP_DEVICE_CFG,
+					    sizeof(struct virtio_pci_cap),
+					    &device_cap);
 	if (device) {
-		offset = notify + offsetof(struct virtio_pci_cap, length);
-		dm_pci_read_config32(udev, offset, &priv->device_len);
+		priv->device_len = device_cap.length;
+		priv->device = virtio_pci_map_capability(udev, &device_cap);
+		if (!priv->device) {
+			printf("(%s): could not map device config\n",
+			       udev->name);
+			return -EINVAL;
+		}
 	}
 
-	/* Map configuration structures */
-	priv->common = virtio_pci_map_capability(udev, common);
-	priv->notify_base = virtio_pci_map_capability(udev, notify);
-	priv->device = virtio_pci_map_capability(udev, device);
 	debug("(%p): common @ %p, notify base @ %p, device @ %p\n",
 	      udev, priv->common, priv->notify_base, priv->device);
 
diff --git a/include/configs/octeon_common.h b/include/configs/octeon_common.h
index 2e4bfd0..7e71c83 100644
--- a/include/configs/octeon_common.h
+++ b/include/configs/octeon_common.h
@@ -8,7 +8,7 @@
 #define __OCTEON_COMMON_H__
 
 #if defined(CONFIG_RAM_OCTEON)
-#define CONFIG_SYS_INIT_SP_OFFSET	0x20100000
+#define CONFIG_SYS_INIT_SP_OFFSET	0x20180000
 #else
 /* No DDR init -> run in L2 cache with limited resources */
 #define CONFIG_SYS_INIT_SP_OFFSET	0x00180000
diff --git a/include/configs/octeon_ebb7304.h b/include/configs/octeon_ebb7304.h
index 358db69..8c6c57b 100644
--- a/include/configs/octeon_ebb7304.h
+++ b/include/configs/octeon_ebb7304.h
@@ -16,4 +16,6 @@
 #define CONFIG_SYS_FLASH_CFI_WIDTH	FLASH_CFI_8BIT
 #define CONFIG_SYS_FLASH_EMPTY_INFO	/* flinfo indicates empty blocks */
 
+#define PHY_ANEG_TIMEOUT	8000	/* PHY needs a longer aneg time */
+
 #endif /* __CONFIG_H__ */
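
For context, include/phy.h defines PHY_ANEG_TIMEOUT (in milliseconds) only
when the board configuration has not already provided one, so defining it here
raises the autonegotiation timeout for this board. The guard in phy.h looks
roughly like this (assuming the upstream default of 4000 ms):

#ifndef PHY_ANEG_TIMEOUT
#define PHY_ANEG_TIMEOUT	4000	/* default autonegotiation timeout, ms */
#endif
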
diff --git a/include/miiphy.h b/include/miiphy.h
index 110921f..c66a184 100644
--- a/include/miiphy.h
+++ b/include/miiphy.h
@@ -189,6 +189,15 @@
 int dm_mdio_reset(struct udevice *mdio_dev);
 
 /**
+ * dm_phy_find_by_ofnode - Find PHY device by ofnode
+ *
+ * @phynode: PHY's ofnode
+ *
+ * Return: pointer to phy_device, or NULL on error
+ */
+struct phy_device *dm_phy_find_by_ofnode(ofnode phynode);
+
+/**
  * dm_mdio_phy_connect - Wrapper over phy_connect for DM MDIO
  *
 * @mdiodev: mdio device the PHY is accessible on
diff --git a/include/pci.h b/include/pci.h
index 5dbdcb0..d7ed35d 100644
--- a/include/pci.h
+++ b/include/pci.h
@@ -1313,26 +1313,30 @@
 u32 dm_pci_read_bar32(const struct udevice *dev, int barnum);
 
 /**
- * dm_pci_bus_to_phys() - convert a PCI bus address to a physical address
+ * dm_pci_bus_to_phys() - convert a PCI bus address range to a physical address
  *
  * @dev:	Device containing the PCI address
  * @addr:	PCI address to convert
+ * @len:	Length of the address range
+ * @mask:	Mask to match flags for the region type
  * @flags:	Flags for the region type (PCI_REGION_...)
  * Return: physical address corresponding to that PCI bus address
  */
-phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t addr,
-			       unsigned long flags);
+phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t addr, size_t len,
+			       unsigned long mask, unsigned long flags);
 
 /**
  * dm_pci_phys_to_bus() - convert a physical address to a PCI bus address
  *
  * @dev:	Device containing the bus address
  * @addr:	Physical address to convert
+ * @len:	Length of the address range
+ * @mask:	Mask to match flags for the region type
  * @flags:	Flags for the region type (PCI_REGION_...)
  * Return: PCI bus address corresponding to that physical address
  */
-pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t addr,
-			      unsigned long flags);
+pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t addr, size_t len,
+			      unsigned long mask, unsigned long flags);
 
 /**
  * dm_pci_map_bar() - get a virtual address associated with a BAR region
@@ -1346,10 +1350,14 @@
  *
  * @dev:	Device to check
  * @bar:	Bar register offset (PCI_BASE_ADDRESS_...)
+ * @offset:     Offset from the base to map
+ * @len:        Length to map
+ * @mask:       Mask to match flags for the region type
  * @flags:	Flags for the region type (PCI_REGION_...)
 * Return: pointer to the virtual address to use, or 0 on error
  */
-void *dm_pci_map_bar(struct udevice *dev, int bar, int flags);
+void *dm_pci_map_bar(struct udevice *dev, int bar, size_t offset, size_t len,
+		     unsigned long mask, unsigned long flags);
 
 /**
  * dm_pci_find_next_capability() - find a capability starting from an offset
@@ -1453,28 +1461,34 @@
 int dm_pci_flr(struct udevice *dev);
 
 #define dm_pci_virt_to_bus(dev, addr, flags) \
-	dm_pci_phys_to_bus(dev, (virt_to_phys(addr)), (flags))
-#define dm_pci_bus_to_virt(dev, addr, flags, len, map_flags) \
-	map_physmem(dm_pci_bus_to_phys(dev, (addr), (flags)), \
-		    (len), (map_flags))
+	dm_pci_phys_to_bus(dev, (virt_to_phys(addr)), 0, PCI_REGION_TYPE, (flags))
+#define dm_pci_bus_to_virt(dev, addr, len, mask, flags, map_flags)	\
+({									\
+	size_t _len = (len);						\
+	phys_addr_t phys_addr = dm_pci_bus_to_phys((dev), (addr), _len,	\
+						   (mask), (flags));	\
+	map_physmem(phys_addr, _len, (map_flags));			\
+})
 
 #define dm_pci_phys_to_mem(dev, addr) \
-	dm_pci_phys_to_bus((dev), (addr), PCI_REGION_MEM)
+	dm_pci_phys_to_bus((dev), (addr), 0, PCI_REGION_TYPE, PCI_REGION_MEM)
 #define dm_pci_mem_to_phys(dev, addr) \
-	dm_pci_bus_to_phys((dev), (addr), PCI_REGION_MEM)
+	dm_pci_bus_to_phys((dev), (addr), 0, PCI_REGION_TYPE, PCI_REGION_MEM)
 #define dm_pci_phys_to_io(dev, addr) \
-	dm_pci_phys_to_bus((dev), (addr), PCI_REGION_IO)
+	dm_pci_phys_to_bus((dev), (addr), 0, PCI_REGION_TYPE, PCI_REGION_IO)
 #define dm_pci_io_to_phys(dev, addr) \
-	dm_pci_bus_to_phys((dev), (addr), PCI_REGION_IO)
+	dm_pci_bus_to_phys((dev), (addr), 0, PCI_REGION_TYPE, PCI_REGION_IO)
 
 #define dm_pci_virt_to_mem(dev, addr) \
 	dm_pci_virt_to_bus((dev), (addr), PCI_REGION_MEM)
 #define dm_pci_mem_to_virt(dev, addr, len, map_flags) \
-	dm_pci_bus_to_virt((dev), (addr), PCI_REGION_MEM, (len), (map_flags))
+	dm_pci_bus_to_virt((dev), (addr), (len), PCI_REGION_TYPE, \
+			   PCI_REGION_MEM, (map_flags))
 #define dm_pci_virt_to_io(dev, addr) \
 	dm_pci_virt_to_bus((dev), (addr), PCI_REGION_IO)
 #define dm_pci_io_to_virt(dev, addr, len, map_flags) \
-	dm_pci_bus_to_virt((dev), (addr), PCI_REGION_IO, (len), (map_flags))
+	dm_pci_bus_to_virt((dev), (addr), (len), PCI_REGION_TYPE, \
+			   PCI_REGION_IO, (map_flags))
 
 /**
  * dm_pci_find_device() - find a device by vendor/device ID
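
The same offset/length/mask extension applies to the bus/phys translation
helpers, and the argument order of the dm_pci_bus_to_virt() macro changed as
part of this. A hedged sketch of updating a caller (MAP_NOCACHE stands in for
whatever map_physmem flags the caller used):

	/* Old: dm_pci_bus_to_virt(dev, addr, PCI_REGION_MEM, len, MAP_NOCACHE) */
	ptr = dm_pci_bus_to_virt(dev, bus_addr, len, PCI_REGION_TYPE,
				 PCI_REGION_MEM, MAP_NOCACHE);
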
diff --git a/net/mdio-uclass.c b/net/mdio-uclass.c
index 7593618..4401492 100644
--- a/net/mdio-uclass.c
+++ b/net/mdio-uclass.c
@@ -129,6 +129,28 @@
 	return 0;
 }
 
+struct phy_device *dm_phy_find_by_ofnode(ofnode phynode)
+{
+	struct mdio_perdev_priv *pdata;
+	struct udevice *mdiodev;
+	u32 phy_addr;
+
+	if (ofnode_read_u32(phynode, "reg", &phy_addr))
+		return NULL;
+
+	if (uclass_get_device_by_ofnode(UCLASS_MDIO,
+					ofnode_get_parent(phynode),
+					&mdiodev))
+		return NULL;
+
+	if (device_probe(mdiodev))
+		return NULL;
+
+	pdata = dev_get_uclass_priv(mdiodev);
+
+	return phy_find_by_mask(pdata->mii_bus, BIT(phy_addr));
+}
+
 struct phy_device *dm_mdio_phy_connect(struct udevice *mdiodev, int phyaddr,
 				       struct udevice *ethdev,
 				       phy_interface_t interface)
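
As a usage sketch (not part of this patch), an ethernet driver that carries a
"phy-handle" phandle could resolve its PHY through the new helper like this;
the dev and error-handling details are illustrative:

	struct ofnode_phandle_args args;
	struct phy_device *phydev;

	/* Resolve the PHY node referenced by this device's phy-handle */
	if (dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, &args))
		return -ENOENT;

	phydev = dm_phy_find_by_ofnode(args.node);
	if (!phydev)
		return -ENODEV;
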
diff --git a/test/dm/pci.c b/test/dm/pci.c
index 00e4440..70a736c 100644
--- a/test/dm/pci.c
+++ b/test/dm/pci.c
@@ -268,27 +268,27 @@
 	ut_asserteq(PCI_CAP_ID_EA_OFFSET, cap);
 
 	/* test swap case in BAR 1 */
-	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_0, 0);
+	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE, 0);
 	ut_assertnonnull(bar);
 	*(int *)bar = 2; /* swap upper/lower */
 
-	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_1, 0);
+	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_1, 0, 0, PCI_REGION_TYPE, 0);
 	ut_assertnonnull(bar);
 	strcpy(bar, "ea TEST");
 	unmap_sysmem(bar);
-	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_1, 0);
+	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_1, 0, 0, PCI_REGION_TYPE, 0);
 	ut_assertnonnull(bar);
 	ut_asserteq_str("EA test", bar);
 
 	/* test magic values in BARs2, 4;  BAR 3 is n/a */
-	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_2, 0);
+	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_2, 0, 0, PCI_REGION_TYPE, 0);
 	ut_assertnonnull(bar);
 	ut_asserteq(PCI_EA_BAR2_MAGIC, *(u32 *)bar);
 
-	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_3, 0);
+	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_3, 0, 0, PCI_REGION_TYPE, 0);
 	ut_assertnull(bar);
 
-	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_4, 0);
+	bar = dm_pci_map_bar(swap, PCI_BASE_ADDRESS_4, 0, 0, PCI_REGION_TYPE, 0);
 	ut_assertnonnull(bar);
 	ut_asserteq(PCI_EA_BAR4_MAGIC, *(u32 *)bar);
 
@@ -376,3 +376,109 @@
 	return 0;
 }
 DM_TEST(dm_test_pci_region_multi, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
+
+/*
+ * Test the translation of PCI bus addresses to physical addresses using the
+ * ranges from bus#1.
+ */
+static int dm_test_pci_bus_to_phys(struct unit_test_state *uts)
+{
+	unsigned long mask = PCI_REGION_TYPE;
+	unsigned long flags = PCI_REGION_MEM;
+	struct udevice *dev;
+	phys_addr_t phys_addr;
+
+	ut_assertok(dm_pci_bus_find_bdf(PCI_BDF(1, 0x08, 0), &dev));
+
+	/* Before any of the ranges. */
+	phys_addr = dm_pci_bus_to_phys(dev, 0x20000000, 0x400, mask, flags);
+	ut_asserteq(0, phys_addr);
+
+	/* Identity range: partial overlap, whole, start, mid, end */
+	phys_addr = dm_pci_bus_to_phys(dev, 0x2ffff000, 0x2000, mask, flags);
+	ut_asserteq(0, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x30000000, 0x2000, mask, flags);
+	ut_asserteq(0x30000000, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x30000000, 0x1000, mask, flags);
+	ut_asserteq(0x30000000, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x30000abc, 0x12, mask, flags);
+	ut_asserteq(0x30000abc, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x30000800, 0x1800, mask, flags);
+	ut_asserteq(0x30000800, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x30008000, 0x1801, mask, flags);
+	ut_asserteq(0, phys_addr);
+
+	/* Translated range: partial overlap, whole, start, mid, end */
+	phys_addr = dm_pci_bus_to_phys(dev, 0x30fff000, 0x2000, mask, flags);
+	ut_asserteq(0, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x31000000, 0x2000, mask, flags);
+	ut_asserteq(0x3e000000, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x31000000, 0x1000, mask, flags);
+	ut_asserteq(0x3e000000, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x31000abc, 0x12, mask, flags);
+	ut_asserteq(0x3e000abc, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x31000800, 0x1800, mask, flags);
+	ut_asserteq(0x3e000800, phys_addr);
+	phys_addr = dm_pci_bus_to_phys(dev, 0x31008000, 0x1801, mask, flags);
+	ut_asserteq(0, phys_addr);
+
+	/* Beyond all of the ranges. */
+	phys_addr = dm_pci_bus_to_phys(dev, 0x32000000, 0x400, mask, flags);
+	ut_asserteq(0, phys_addr);
+
+	return 0;
+}
+DM_TEST(dm_test_pci_bus_to_phys, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
+
+/*
+ * Test the translation of physical addresses to PCI bus addresses using the
+ * ranges from bus#1.
+ */
+static int dm_test_pci_phys_to_bus(struct unit_test_state *uts)
+{
+	unsigned long mask = PCI_REGION_TYPE;
+	unsigned long flags = PCI_REGION_MEM;
+	struct udevice *dev;
+	pci_addr_t pci_addr;
+
+	ut_assertok(dm_pci_bus_find_bdf(PCI_BDF(1, 0x08, 0), &dev));
+
+	/* Before any of the ranges. */
+	pci_addr = dm_pci_phys_to_bus(dev, 0x20000000, 0x400, mask, flags);
+	ut_asserteq(0, pci_addr);
+
+	/* Identity range: partial overlap, whole, start, mid, end */
+	pci_addr = dm_pci_phys_to_bus(dev, 0x2ffff000, 0x2000, mask, flags);
+	ut_asserteq(0, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x30000000, 0x2000, mask, flags);
+	ut_asserteq(0x30000000, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x30000000, 0x1000, mask, flags);
+	ut_asserteq(0x30000000, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x30000abc, 0x12, mask, flags);
+	ut_asserteq(0x30000abc, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x30000800, 0x1800, mask, flags);
+	ut_asserteq(0x30000800, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x30008000, 0x1801, mask, flags);
+	ut_asserteq(0, pci_addr);
+
+	/* Translated range: partial overlap, whole, start, mid, end */
+	pci_addr = dm_pci_phys_to_bus(dev, 0x3dfff000, 0x2000, mask, flags);
+	ut_asserteq(0, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x3e000000, 0x2000, mask, flags);
+	ut_asserteq(0x31000000, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x3e000000, 0x1000, mask, flags);
+	ut_asserteq(0x31000000, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x3e000abc, 0x12, mask, flags);
+	ut_asserteq(0x31000abc, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x3e000800, 0x1800, mask, flags);
+	ut_asserteq(0x31000800, pci_addr);
+	pci_addr = dm_pci_phys_to_bus(dev, 0x3e008000, 0x1801, mask, flags);
+	ut_asserteq(0, pci_addr);
+
+	/* Beyond all of the ranges. */
+	pci_addr = dm_pci_phys_to_bus(dev, 0x3f000000, 0x400, mask, flags);
+	ut_asserteq(0, pci_addr);
+
+	return 0;
+}
+DM_TEST(dm_test_pci_phys_to_bus, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);