Merge https://gitlab.denx.de/u-boot/custodians/u-boot-marvell

- Add basic Marvell/Cavium OcteonTX/TX2 support (Suneel)
- Infrastructure changes to PCI uclass to support these SoCs (Suneel)
- Add PCI, MMC & watchdog drivers for OcteonTX/TX2 (Suneel)
- Increase CONFIG_SYS_MALLOC_F_LEN for qemu-x86 (Stefan)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c5ebd59..80702c2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1737,6 +1737,24 @@
 	imply TPL_SYSRESET
 	imply USB_FUNCTION_FASTBOOT
 
+config ARCH_OCTEONTX
+	bool "Support OcteonTX SoCs"
+	select DM
+	select ARM64
+	select OF_CONTROL
+	select OF_LIVE
+	select BOARD_LATE_INIT
+	select SYS_CACHE_SHIFT_7
+
+config ARCH_OCTEONTX2
+	bool "Support OcteonTX2 SoCs"
+	select DM
+	select ARM64
+	select OF_CONTROL
+	select OF_LIVE
+	select BOARD_LATE_INIT
+	select SYS_CACHE_SHIFT_7
+
 config TARGET_THUNDERX_88XX
 	bool "Support ThunderX 88xx"
 	select ARM64
@@ -1835,6 +1853,10 @@
 
 source "arch/arm/mach-mvebu/Kconfig"
 
+source "arch/arm/mach-octeontx/Kconfig"
+
+source "arch/arm/mach-octeontx2/Kconfig"
+
 source "arch/arm/cpu/armv7/ls102xa/Kconfig"
 
 source "arch/arm/mach-imx/mx2/Kconfig"
@@ -1920,6 +1942,8 @@
 source "board/CarMediaLab/flea3/Kconfig"
 source "board/Marvell/aspenite/Kconfig"
 source "board/Marvell/gplugd/Kconfig"
+source "board/Marvell/octeontx/Kconfig"
+source "board/Marvell/octeontx2/Kconfig"
 source "board/armadeus/apf27/Kconfig"
 source "board/armltd/vexpress/Kconfig"
 source "board/armltd/vexpress64/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index bf3890e..28b523b 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -80,6 +80,8 @@
 machine-$(CONFIG_ARCH_SUNXI)		+= sunxi
 machine-$(CONFIG_ARCH_TEGRA)		+= tegra
 machine-$(CONFIG_ARCH_U8500)		+= u8500
+machine-$(CONFIG_ARCH_OCTEONTX)		+= octeontx
+machine-$(CONFIG_ARCH_OCTEONTX2)	+= octeontx2
 machine-$(CONFIG_ARCH_UNIPHIER)		+= uniphier
 machine-$(CONFIG_ARCH_VERSAL)		+= versal
 machine-$(CONFIG_ARCH_ZYNQ)		+= zynq
diff --git a/arch/arm/include/asm/arch-octeontx/board.h b/arch/arm/include/asm/arch-octeontx/board.h
new file mode 100644
index 0000000..c9fc399
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx/board.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __BOARD_H__
+#define __BOARD_H__
+
+#include <asm/arch/soc.h>
+
+#define MAX_LMAC_PER_BGX 4
+#define LMAC_CNT MAX_LMAC_PER_BGX
+
+#if defined(CONFIG_TARGET_OCTEONTX_81XX)
+
+/** Maximum number of BGX interfaces per CPU node */
+#define MAX_BGX_PER_NODE	3
+#define OCTEONTX_XCV	/* RGMII Interface */
+
+#elif defined(CONFIG_TARGET_OCTEONTX_83XX)
+
+/** Maximum number of BGX interfaces per CPU node */
+#define MAX_BGX_PER_NODE	4
+
+#endif
+
+/** Reg offsets */
+#define RST_BOOT	0x87E006001600ULL
+
+/** Structure definitions */
+
+/**
+ * Register (RSL) rst_boot
+ *
+ * RST Boot Register This register is not accessible through ROM scripts;
+ * see SCR_WRITE32_S[ADDR].
+ */
+union rst_boot {
+	u64 u;
+	struct rst_boot_s {
+		u64 rboot_pin                        : 1;
+		u64 rboot                            : 1;
+		u64 reserved_2_32                    : 31;
+		u64 pnr_mul                          : 6;
+		u64 reserved_39                      : 1;
+		u64 c_mul                            : 7;
+		u64 reserved_47_52                   : 6;
+		u64 gpio_ejtag                       : 1;
+		u64 mcp_jtagdis                      : 1;
+		u64 dis_scan                         : 1;
+		u64 dis_huk                          : 1;
+		u64 vrm_err                          : 1;
+		u64 jt_tstmode                       : 1;
+		u64 ckill_ppdis                      : 1;
+		u64 trusted_mode                     : 1;
+		u64 reserved_61_62                   : 2;
+		u64 chipkill                         : 1;
+	} s;
+	struct rst_boot_cn81xx {
+		u64 rboot_pin                        : 1;
+		u64 rboot                            : 1;
+		u64 lboot                            : 10;
+		u64 lboot_ext23                      : 6;
+		u64 lboot_ext45                      : 6;
+		u64 lboot_jtg                        : 1;
+		u64 lboot_ckill                      : 1;
+		u64 reserved_26_29                   : 4;
+		u64 lboot_oci                        : 3;
+		u64 pnr_mul                          : 6;
+		u64 reserved_39                      : 1;
+		u64 c_mul                            : 7;
+		u64 reserved_47_54                   : 8;
+		u64 dis_scan                         : 1;
+		u64 dis_huk                          : 1;
+		u64 vrm_err                          : 1;
+		u64 jt_tstmode                       : 1;
+		u64 ckill_ppdis                      : 1;
+		u64 trusted_mode                     : 1;
+		u64 ejtagdis                         : 1;
+		u64 jtcsrdis                         : 1;
+		u64 chipkill                         : 1;
+	} cn81xx;
+	struct rst_boot_cn83xx {
+		u64 rboot_pin                        : 1;
+		u64 rboot                            : 1;
+		u64 lboot                            : 10;
+		u64 lboot_ext23                      : 6;
+		u64 lboot_ext45                      : 6;
+		u64 lboot_jtg                        : 1;
+		u64 lboot_ckill                      : 1;
+		u64 lboot_pf_flr                     : 4;
+		u64 lboot_oci                        : 3;
+		u64 pnr_mul                          : 6;
+		u64 reserved_39                      : 1;
+		u64 c_mul                            : 7;
+		u64 reserved_47_54                   : 8;
+		u64 dis_scan                         : 1;
+		u64 dis_huk                          : 1;
+		u64 vrm_err                          : 1;
+		u64 jt_tstmode                       : 1;
+		u64 ckill_ppdis                      : 1;
+		u64 trusted_mode                     : 1;
+		u64 ejtagdis                         : 1;
+		u64 jtcsrdis                         : 1;
+		u64 chipkill                         : 1;
+	} cn83xx;
+};
+
+extern unsigned long fdt_base_addr;
+
+/** Function definitions */
+void mem_map_fill(void);
+int octeontx_board_has_pmp(void);
+const char *fdt_get_board_model(void);
+const char *fdt_get_board_serial(void);
+const char *fdt_get_board_revision(void);
+void fdt_parse_phy_info(void);
+void fdt_board_get_ethaddr(int bgx, int lmac, unsigned char *eth);
+void bgx_set_board_info(int bgx_id, int *mdio_bus, int *phy_addr,
+			bool *autoneg_dis, bool *lmac_reg, bool *lmac_enable);
+#endif /* __BOARD_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx/clock.h b/arch/arm/include/asm/arch-octeontx/clock.h
new file mode 100644
index 0000000..7bf600a
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx/clock.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __CLOCK_H__
+#define __CLOCK_H__
+
+/** System PLL reference clock */
+#define PLL_REF_CLK                     50000000        /* 50 MHz */
+#define NS_PER_REF_CLK_TICK             (1000000000 / PLL_REF_CLK)
+
+/**
+ * Returns the I/O clock speed in Hz
+ */
+u64 octeontx_get_io_clock(void);
+
+/**
+ * Returns the core clock speed in Hz
+ */
+u64 octeontx_get_core_clock(void);
+
+#endif /* __CLOCK_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx/csrs/csrs-mio_emm.h b/arch/arm/include/asm/arch-octeontx/csrs/csrs-mio_emm.h
new file mode 100644
index 0000000..a5a4740
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx/csrs/csrs-mio_emm.h
@@ -0,0 +1,1193 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_MIO_EMM_H__
+#define __CSRS_MIO_EMM_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * MIO_EMM.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Enumeration mio_emm_bar_e
+ *
+ * eMMC Base Address Register Enumeration Enumerates the base address
+ * registers.
+ */
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR0_CN8 (0x87e009000000ll)
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR0_CN8_SIZE 0x800000ull
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR0_CN9 (0x87e009000000ll)
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR0_CN9_SIZE 0x10000ull
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR4 (0x87e009f00000ll)
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration mio_emm_int_vec_e
+ *
+ * eMMC MSI-X Vector Enumeration Enumerates the MSI-X interrupt vectors.
+ */
+#define MIO_EMM_INT_VEC_E_DMA_INT_DONE (8)
+#define MIO_EMM_INT_VEC_E_DMA_INT_FIFO (7)
+#define MIO_EMM_INT_VEC_E_EMM_BUF_DONE (0)
+#define MIO_EMM_INT_VEC_E_EMM_CMD_DONE (1)
+#define MIO_EMM_INT_VEC_E_EMM_CMD_ERR (3)
+#define MIO_EMM_INT_VEC_E_EMM_DMA_DONE (2)
+#define MIO_EMM_INT_VEC_E_EMM_DMA_ERR (4)
+#define MIO_EMM_INT_VEC_E_EMM_SWITCH_DONE (5)
+#define MIO_EMM_INT_VEC_E_EMM_SWITCH_ERR (6)
+#define MIO_EMM_INT_VEC_E_NCB_FLT (9)
+#define MIO_EMM_INT_VEC_E_NCB_RAS (0xa)
+
+/**
+ * Register (RSL) mio_emm_access_wdog
+ *
+ * eMMC Access Watchdog Register
+ */
+union mio_emm_access_wdog {
+	u64 u;
+	struct mio_emm_access_wdog_s {
+		u64 clk_cnt                          : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct mio_emm_access_wdog_s cn; */
+};
+
+static inline u64 MIO_EMM_ACCESS_WDOG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_ACCESS_WDOG(void)
+{
+	return 0x20f0;
+}
+
+/**
+ * Register (RSL) mio_emm_buf_dat
+ *
+ * eMMC Data Buffer Access Register
+ */
+union mio_emm_buf_dat {
+	u64 u;
+	struct mio_emm_buf_dat_s {
+		u64 dat                              : 64;
+	} s;
+	/* struct mio_emm_buf_dat_s cn; */
+};
+
+static inline u64 MIO_EMM_BUF_DAT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_BUF_DAT(void)
+{
+	return 0x20e8;
+}
+
+/**
+ * Register (RSL) mio_emm_buf_idx
+ *
+ * eMMC Data Buffer Address Register
+ */
+union mio_emm_buf_idx {
+	u64 u;
+	struct mio_emm_buf_idx_s {
+		u64 offset                           : 6;
+		u64 buf_num                          : 1;
+		u64 reserved_7_15                    : 9;
+		u64 inc                              : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct mio_emm_buf_idx_s cn; */
+};
+
+static inline u64 MIO_EMM_BUF_IDX(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_BUF_IDX(void)
+{
+	return 0x20e0;
+}
+
+/**
+ * Register (RSL) mio_emm_calb
+ *
+ * eMMC Calibration Register This register initiates delay line
+ * characterization.
+ */
+union mio_emm_calb {
+	u64 u;
+	struct mio_emm_calb_s {
+		u64 start                            : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct mio_emm_calb_s cn; */
+};
+
+static inline u64 MIO_EMM_CALB(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_CALB(void)
+{
+	return 0x20c0;
+}
+
+/**
+ * Register (RSL) mio_emm_cfg
+ *
+ * eMMC Configuration Register
+ */
+union mio_emm_cfg {
+	u64 u;
+	struct mio_emm_cfg_s {
+		u64 bus_ena                          : 4;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct mio_emm_cfg_s cn; */
+};
+
+static inline u64 MIO_EMM_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_CFG(void)
+{
+	return 0x2000;
+}
+
+/**
+ * Register (RSL) mio_emm_cmd
+ *
+ * eMMC Command Register
+ */
+union mio_emm_cmd {
+	u64 u;
+	struct mio_emm_cmd_s {
+		u64 arg                              : 32;
+		u64 cmd_idx                          : 6;
+		u64 rtype_xor                        : 3;
+		u64 ctype_xor                        : 2;
+		u64 reserved_43_48                   : 6;
+		u64 offset                           : 6;
+		u64 dbuf                             : 1;
+		u64 reserved_56_58                   : 3;
+		u64 cmd_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 skip_busy                        : 1;
+		u64 reserved_63                      : 1;
+	} s;
+	/* struct mio_emm_cmd_s cn; */
+};
+
+static inline u64 MIO_EMM_CMD(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_CMD(void)
+{
+	return 0x2058;
+}
+
+/**
+ * Register (RSL) mio_emm_comp
+ *
+ * eMMC Compensation Register
+ */
+union mio_emm_comp {
+	u64 u;
+	struct mio_emm_comp_s {
+		u64 nctl                             : 3;
+		u64 reserved_3_7                     : 5;
+		u64 pctl                             : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct mio_emm_comp_s cn; */
+};
+
+static inline u64 MIO_EMM_COMP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_COMP(void)
+{
+	return 0x2040;
+}
+
+/**
+ * Register (RSL) mio_emm_debug
+ *
+ * eMMC Debug Register
+ */
+union mio_emm_debug {
+	u64 u;
+	struct mio_emm_debug_s {
+		u64 clk_on                           : 1;
+		u64 reserved_1_7                     : 7;
+		u64 cmd_sm                           : 4;
+		u64 data_sm                          : 4;
+		u64 dma_sm                           : 4;
+		u64 emmc_clk_disable                 : 1;
+		u64 rdsync_rst                       : 1;
+		u64 reserved_22_63                   : 42;
+	} s;
+	struct mio_emm_debug_cn96xxp1 {
+		u64 clk_on                           : 1;
+		u64 reserved_1_7                     : 7;
+		u64 cmd_sm                           : 4;
+		u64 data_sm                          : 4;
+		u64 dma_sm                           : 4;
+		u64 reserved_20_63                   : 44;
+	} cn96xxp1;
+	/* struct mio_emm_debug_s cn96xxp3; */
+	/* struct mio_emm_debug_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 MIO_EMM_DEBUG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DEBUG(void)
+{
+	return 0x20f8;
+}
+
+/**
+ * Register (RSL) mio_emm_dma
+ *
+ * eMMC External DMA Configuration Register
+ */
+union mio_emm_dma {
+	u64 u;
+	struct mio_emm_dma_s {
+		u64 card_addr                        : 32;
+		u64 block_cnt                        : 16;
+		u64 multi                            : 1;
+		u64 rw                               : 1;
+		u64 rel_wr                           : 1;
+		u64 thres                            : 6;
+		u64 dat_null                         : 1;
+		u64 sector                           : 1;
+		u64 dma_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 skip_busy                        : 1;
+		u64 extra_args                       : 1;
+	} s;
+	struct mio_emm_dma_cn8 {
+		u64 card_addr                        : 32;
+		u64 block_cnt                        : 16;
+		u64 multi                            : 1;
+		u64 rw                               : 1;
+		u64 rel_wr                           : 1;
+		u64 thres                            : 6;
+		u64 dat_null                         : 1;
+		u64 sector                           : 1;
+		u64 dma_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 skip_busy                        : 1;
+		u64 reserved_63                      : 1;
+	} cn8;
+	struct mio_emm_dma_cn9 {
+		u64 card_addr                        : 32;
+		u64 block_cnt                        : 16;
+		u64 multi                            : 1;
+		u64 rw                               : 1;
+		u64 reserved_50                      : 1;
+		u64 thres                            : 6;
+		u64 dat_null                         : 1;
+		u64 sector                           : 1;
+		u64 dma_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 skip_busy                        : 1;
+		u64 extra_args                       : 1;
+	} cn9;
+};
+
+static inline u64 MIO_EMM_DMA(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA(void)
+{
+	return 0x2050;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_adr
+ *
+ * eMMC DMA Address Register This register sets the address for eMMC/SD
+ * flash transfers to/from memory. Sixty-four-bit operations must be used
+ * to access this register. This register is updated by the DMA hardware
+ * and can be reloaded by the values placed in the MIO_EMM_DMA_FIFO_ADR.
+ */
+union mio_emm_dma_adr {
+	u64 u;
+	struct mio_emm_dma_adr_s {
+		u64 adr                              : 53;
+		u64 reserved_53_63                   : 11;
+	} s;
+	struct mio_emm_dma_adr_cn8 {
+		u64 adr                              : 49;
+		u64 reserved_49_63                   : 15;
+	} cn8;
+	/* struct mio_emm_dma_adr_s cn9; */
+};
+
+static inline u64 MIO_EMM_DMA_ADR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_ADR(void)
+{
+	return 0x188;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_arg
+ *
+ * eMMC External DMA Extra Arguments Register
+ */
+union mio_emm_dma_arg {
+	u64 u;
+	struct mio_emm_dma_arg_s {
+		u64 cmd23_args                       : 8;
+		u64 force_pgm                        : 1;
+		u64 context_id                       : 4;
+		u64 tag_req                          : 1;
+		u64 pack_cmd                         : 1;
+		u64 rel_wr                           : 1;
+		u64 alt_cmd                          : 6;
+		u64 skip_blk_cmd                     : 1;
+		u64 reserved_23_31                   : 9;
+		u64 alt_cmd_arg                      : 32;
+	} s;
+	/* struct mio_emm_dma_arg_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_ARG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_ARG(void)
+{
+	return 0x2090;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_cfg
+ *
+ * eMMC DMA Configuration Register This register controls the internal
+ * DMA engine used with the eMMC/SD flash controller. Sixty- four-bit
+ * operations must be used to access this register. This register is
+ * updated by the hardware DMA engine and can also be reloaded by writes
+ * to the MIO_EMM_DMA_FIFO_CMD register.
+ */
+union mio_emm_dma_cfg {
+	u64 u;
+	struct mio_emm_dma_cfg_s {
+		u64 reserved_0_35                    : 36;
+		u64 size                             : 20;
+		u64 endian                           : 1;
+		u64 swap8                            : 1;
+		u64 swap16                           : 1;
+		u64 swap32                           : 1;
+		u64 reserved_60                      : 1;
+		u64 clr                              : 1;
+		u64 rw                               : 1;
+		u64 en                               : 1;
+	} s;
+	/* struct mio_emm_dma_cfg_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_CFG(void)
+{
+	return 0x180;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_fifo_adr
+ *
+ * eMMC Internal DMA FIFO Address Register This register specifies the
+ * internal address that is loaded into the eMMC internal DMA FIFO. The
+ * FIFO is used to queue up operations for the
+ * MIO_EMM_DMA_CFG/MIO_EMM_DMA_ADR when the DMA completes successfully.
+ */
+union mio_emm_dma_fifo_adr {
+	u64 u;
+	struct mio_emm_dma_fifo_adr_s {
+		u64 reserved_0_2                     : 3;
+		u64 adr                              : 50;
+		u64 reserved_53_63                   : 11;
+	} s;
+	struct mio_emm_dma_fifo_adr_cn8 {
+		u64 reserved_0_2                     : 3;
+		u64 adr                              : 46;
+		u64 reserved_49_63                   : 15;
+	} cn8;
+	/* struct mio_emm_dma_fifo_adr_s cn9; */
+};
+
+static inline u64 MIO_EMM_DMA_FIFO_ADR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_FIFO_ADR(void)
+{
+	return 0x170;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_fifo_cfg
+ *
+ * eMMC Internal DMA FIFO Configuration Register This register controls
+ * DMA FIFO operations.
+ */
+union mio_emm_dma_fifo_cfg {
+	u64 u;
+	struct mio_emm_dma_fifo_cfg_s {
+		u64 count                            : 5;
+		u64 reserved_5_7                     : 3;
+		u64 int_lvl                          : 5;
+		u64 reserved_13_15                   : 3;
+		u64 clr                              : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct mio_emm_dma_fifo_cfg_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_FIFO_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_FIFO_CFG(void)
+{
+	return 0x160;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_fifo_cmd
+ *
+ * eMMC Internal DMA FIFO Command Register This register specifies a
+ * command that is loaded into the eMMC internal DMA FIFO.  The FIFO is
+ * used to queue up operations for the MIO_EMM_DMA_CFG/MIO_EMM_DMA_ADR
+ * when the DMA completes successfully. Writes to this register store
+ * both the MIO_EMM_DMA_FIFO_CMD and the MIO_EMM_DMA_FIFO_ADR contents
+ * into the FIFO and increment the MIO_EMM_DMA_FIFO_CFG[COUNT] field.
+ * Note: This register has a similar format to MIO_EMM_DMA_CFG with the
+ * exception that the EN and CLR fields are absent. These are supported
+ * in MIO_EMM_DMA_FIFO_CFG.
+ */
+union mio_emm_dma_fifo_cmd {
+	u64 u;
+	struct mio_emm_dma_fifo_cmd_s {
+		u64 reserved_0_35                    : 36;
+		u64 size                             : 20;
+		u64 endian                           : 1;
+		u64 swap8                            : 1;
+		u64 swap16                           : 1;
+		u64 swap32                           : 1;
+		u64 intdis                           : 1;
+		u64 reserved_61                      : 1;
+		u64 rw                               : 1;
+		u64 reserved_63                      : 1;
+	} s;
+	/* struct mio_emm_dma_fifo_cmd_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_FIFO_CMD(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_FIFO_CMD(void)
+{
+	return 0x178;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_int
+ *
+ * eMMC DMA Interrupt Register Sixty-four-bit operations must be used to
+ * access this register.
+ */
+union mio_emm_dma_int {
+	u64 u;
+	struct mio_emm_dma_int_s {
+		u64 done                             : 1;
+		u64 fifo                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct mio_emm_dma_int_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_INT(void)
+{
+	return 0x190;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_int_ena_w1c
+ *
+ * eMMC DMA Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union mio_emm_dma_int_ena_w1c {
+	u64 u;
+	struct mio_emm_dma_int_ena_w1c_s {
+		u64 done                             : 1;
+		u64 fifo                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct mio_emm_dma_int_ena_w1c_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_INT_ENA_W1C(void)
+{
+	return 0x1a8;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_int_ena_w1s
+ *
+ * eMMC DMA Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union mio_emm_dma_int_ena_w1s {
+	u64 u;
+	struct mio_emm_dma_int_ena_w1s_s {
+		u64 done                             : 1;
+		u64 fifo                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct mio_emm_dma_int_ena_w1s_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_INT_ENA_W1S(void)
+{
+	return 0x1a0;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_int_w1s
+ *
+ * eMMC DMA Interrupt Set Register This register sets interrupt bits.
+ */
+union mio_emm_dma_int_w1s {
+	u64 u;
+	struct mio_emm_dma_int_w1s_s {
+		u64 done                             : 1;
+		u64 fifo                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct mio_emm_dma_int_w1s_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_INT_W1S(void)
+{
+	return 0x198;
+}
+
+/**
+ * Register (RSL) mio_emm_int
+ *
+ * eMMC Interrupt Register
+ */
+union mio_emm_int {
+	u64 u;
+	struct mio_emm_int_s {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 ncb_flt                          : 1;
+		u64 ncb_ras                          : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	struct mio_emm_int_cn8 {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 reserved_7_63                    : 57;
+	} cn8;
+	/* struct mio_emm_int_s cn9; */
+};
+
+static inline u64 MIO_EMM_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_INT(void)
+{
+	return 0x2078;
+}
+
+/**
+ * Register (RSL) mio_emm_int_ena_w1c
+ *
+ * eMMC Interrupt Enable Clear Register This register clears interrupt
+ * enable bits.
+ */
+union mio_emm_int_ena_w1c {
+	u64 u;
+	struct mio_emm_int_ena_w1c_s {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 ncb_flt                          : 1;
+		u64 ncb_ras                          : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	struct mio_emm_int_ena_w1c_cn8 {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 reserved_7_63                    : 57;
+	} cn8;
+	/* struct mio_emm_int_ena_w1c_s cn9; */
+};
+
+static inline u64 MIO_EMM_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_INT_ENA_W1C(void)
+{
+	return 0x20b8;
+}
+
+/**
+ * Register (RSL) mio_emm_int_ena_w1s
+ *
+ * eMMC Interrupt Enable Set Register This register sets interrupt enable
+ * bits.
+ */
+union mio_emm_int_ena_w1s {
+	u64 u;
+	struct mio_emm_int_ena_w1s_s {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 ncb_flt                          : 1;
+		u64 ncb_ras                          : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	struct mio_emm_int_ena_w1s_cn8 {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 reserved_7_63                    : 57;
+	} cn8;
+	/* struct mio_emm_int_ena_w1s_s cn9; */
+};
+
+static inline u64 MIO_EMM_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_INT_ENA_W1S(void)
+{
+	return 0x20b0;
+}
+
+/**
+ * Register (RSL) mio_emm_int_w1s
+ *
+ * eMMC Interrupt Set Register This register sets interrupt bits.
+ */
+union mio_emm_int_w1s {
+	u64 u;
+	struct mio_emm_int_w1s_s {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 ncb_flt                          : 1;
+		u64 ncb_ras                          : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	struct mio_emm_int_w1s_cn8 {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 reserved_7_63                    : 57;
+	} cn8;
+	/* struct mio_emm_int_w1s_s cn9; */
+};
+
+static inline u64 MIO_EMM_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_INT_W1S(void)
+{
+	return 0x2080;
+}
+
+/**
+ * Register (RSL) mio_emm_io_ctl
+ *
+ * eMMC I/O Control Register
+ */
+union mio_emm_io_ctl {
+	u64 u;
+	struct mio_emm_io_ctl_s {
+		u64 slew                             : 1;
+		u64 reserved_1                       : 1;
+		u64 drive                            : 2;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct mio_emm_io_ctl_s cn; */
+};
+
+static inline u64 MIO_EMM_IO_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_IO_CTL(void)
+{
+	return 0x2040;
+}
+
+/**
+ * Register (RSL) mio_emm_mode#
+ *
+ * eMMC Operating Mode Register
+ */
+union mio_emm_modex {
+	u64 u;
+	struct mio_emm_modex_s {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 clk_swap                         : 1;
+		u64 reserved_37_39                   : 3;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 hs200_timing                     : 1;
+		u64 hs400_timing                     : 1;
+		u64 reserved_51_63                   : 13;
+	} s;
+	struct mio_emm_modex_cn8 {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 reserved_36_39                   : 4;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 reserved_49_63                   : 15;
+	} cn8;
+	struct mio_emm_modex_cn96xxp1 {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 reserved_36_39                   : 4;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 hs200_timing                     : 1;
+		u64 hs400_timing                     : 1;
+		u64 reserved_51_63                   : 13;
+	} cn96xxp1;
+	/* struct mio_emm_modex_s cn96xxp3; */
+	/* struct mio_emm_modex_s cnf95xx; */
+};
+
+static inline u64 MIO_EMM_MODEX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_MODEX(u64 a)
+{
+	return 0x2008 + 8 * a;
+}
+
+/**
+ * Register (RSL) mio_emm_msix_pba#
+ *
+ * eMMC MSI-X Pending Bit Array Registers This register is the MSI-X PBA
+ * table; the bit number is indexed by the MIO_EMM_INT_VEC_E enumeration.
+ */
+union mio_emm_msix_pbax {
+	u64 u;
+	struct mio_emm_msix_pbax_s {
+		u64 pend                             : 64;
+	} s;
+	/* struct mio_emm_msix_pbax_s cn; */
+};
+
+static inline u64 MIO_EMM_MSIX_PBAX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_MSIX_PBAX(u64 a)
+{
+	return 0xf0000 + 8 * a;
+}
+
+/**
+ * Register (RSL) mio_emm_msix_vec#_addr
+ *
+ * eMMC MSI-X Vector-Table Address Register This register is the MSI-X
+ * vector table, indexed by the MIO_EMM_INT_VEC_E enumeration.
+ */
+union mio_emm_msix_vecx_addr {
+	u64 u;
+	struct mio_emm_msix_vecx_addr_s {
+		u64 secvec                           : 1;
+		u64 reserved_1                       : 1;
+		u64 addr                             : 51;
+		u64 reserved_53_63                   : 11;
+	} s;
+	struct mio_emm_msix_vecx_addr_cn8 {
+		u64 secvec                           : 1;
+		u64 reserved_1                       : 1;
+		u64 addr                             : 47;
+		u64 reserved_49_63                   : 15;
+	} cn8;
+	/* struct mio_emm_msix_vecx_addr_s cn9; */
+};
+
+static inline u64 MIO_EMM_MSIX_VECX_ADDR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_MSIX_VECX_ADDR(u64 a)
+{
+	return 0 + 0x10 * a;
+}
+
+/**
+ * Register (RSL) mio_emm_msix_vec#_ctl
+ *
+ * eMMC MSI-X Vector-Table Control and Data Register This register is the
+ * MSI-X vector table, indexed by the MIO_EMM_INT_VEC_E enumeration.
+ */
+union mio_emm_msix_vecx_ctl {
+	u64 u;
+	struct mio_emm_msix_vecx_ctl_s {
+		u64 data                             : 32;
+		u64 mask                             : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	struct mio_emm_msix_vecx_ctl_cn8 {
+		u64 data                             : 20;
+		u64 reserved_20_31                   : 12;
+		u64 mask                             : 1;
+		u64 reserved_33_63                   : 31;
+	} cn8;
+	/* struct mio_emm_msix_vecx_ctl_s cn9; */
+};
+
+static inline u64 MIO_EMM_MSIX_VECX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_MSIX_VECX_CTL(u64 a)
+{
+	return 8 + 0x10 * a;
+}
+
+/**
+ * Register (RSL) mio_emm_rca
+ *
+ * eMMC Relative Card Address Register
+ */
+union mio_emm_rca {
+	u64 u;
+	struct mio_emm_rca_s {
+		u64 card_rca                         : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct mio_emm_rca_s cn; */
+};
+
+static inline u64 MIO_EMM_RCA(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_RCA(void)
+{
+	return 0x20a0;
+}
+
+/**
+ * Register (RSL) mio_emm_rsp_hi
+ *
+ * eMMC Response Data High Register
+ */
+union mio_emm_rsp_hi {
+	u64 u;
+	struct mio_emm_rsp_hi_s {
+		u64 dat                              : 64;
+	} s;
+	/* struct mio_emm_rsp_hi_s cn; */
+};
+
+static inline u64 MIO_EMM_RSP_HI(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_RSP_HI(void)
+{
+	return 0x2070;
+}
+
+/**
+ * Register (RSL) mio_emm_rsp_lo
+ *
+ * eMMC Response Data Low Register
+ */
+union mio_emm_rsp_lo {
+	u64 u;
+	struct mio_emm_rsp_lo_s {
+		u64 dat                              : 64;
+	} s;
+	/* struct mio_emm_rsp_lo_s cn; */
+};
+
+static inline u64 MIO_EMM_RSP_LO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_RSP_LO(void)
+{
+	return 0x2068;
+}
+
+/**
+ * Register (RSL) mio_emm_rsp_sts
+ *
+ * eMMC Response Status Register
+ */
+union mio_emm_rsp_sts {
+	u64 u;
+	struct mio_emm_rsp_sts_s {
+		u64 cmd_done                         : 1;
+		u64 cmd_idx                          : 6;
+		u64 cmd_type                         : 2;
+		u64 rsp_type                         : 3;
+		u64 rsp_val                          : 1;
+		u64 rsp_bad_sts                      : 1;
+		u64 rsp_crc_err                      : 1;
+		u64 rsp_timeout                      : 1;
+		u64 stp_val                          : 1;
+		u64 stp_bad_sts                      : 1;
+		u64 stp_crc_err                      : 1;
+		u64 stp_timeout                      : 1;
+		u64 rsp_busybit                      : 1;
+		u64 blk_crc_err                      : 1;
+		u64 blk_timeout                      : 1;
+		u64 dbuf                             : 1;
+		u64 reserved_24_27                   : 4;
+		u64 dbuf_err                         : 1;
+		u64 reserved_29_54                   : 26;
+		u64 acc_timeout                      : 1;
+		u64 dma_pend                         : 1;
+		u64 dma_val                          : 1;
+		u64 switch_val                       : 1;
+		u64 cmd_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 reserved_62_63                   : 2;
+	} s;
+	/* struct mio_emm_rsp_sts_s cn; */
+};
+
+static inline u64 MIO_EMM_RSP_STS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_RSP_STS(void)
+{
+	return 0x2060;
+}
+
+/**
+ * Register (RSL) mio_emm_sample
+ *
+ * eMMC Sampling Register
+ */
+union mio_emm_sample {
+	u64 u;
+	struct mio_emm_sample_s {
+		u64 dat_cnt                          : 10;
+		u64 reserved_10_15                   : 6;
+		u64 cmd_cnt                          : 10;
+		u64 reserved_26_63                   : 38;
+	} s;
+	/* struct mio_emm_sample_s cn; */
+};
+
+static inline u64 MIO_EMM_SAMPLE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_SAMPLE(void)
+{
+	return 0x2090;
+}
+
+/**
+ * Register (RSL) mio_emm_sts_mask
+ *
+ * eMMC Status Mask Register
+ */
+union mio_emm_sts_mask {
+	u64 u;
+	struct mio_emm_sts_mask_s {
+		u64 sts_msk                          : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct mio_emm_sts_mask_s cn; */
+};
+
+static inline u64 MIO_EMM_STS_MASK(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_STS_MASK(void)
+{
+	return 0x2098;
+}
+
+/**
+ * Register (RSL) mio_emm_switch
+ *
+ * eMMC Operating Mode Switch Register This register allows software to
+ * change eMMC related parameters associated with a specific BUS_ID.  The
+ * MIO_EMM_MODE() registers contain the current setting for each BUS.
+ * This register is also used to switch the [CLK_HI] and [CLK_LO]
+ * settings associated with the common EMMC_CLK.  These settings can only
+ * be changed when [BUS_ID] = 0.
+ */
+union mio_emm_switch {
+	u64 u;
+	struct mio_emm_switch_s {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 clk_swap                         : 1;
+		u64 reserved_37_39                   : 3;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 hs200_timing                     : 1;
+		u64 hs400_timing                     : 1;
+		u64 reserved_51_55                   : 5;
+		u64 switch_err2                      : 1;
+		u64 switch_err1                      : 1;
+		u64 switch_err0                      : 1;
+		u64 switch_exe                       : 1;
+		u64 bus_id                           : 2;
+		u64 reserved_62_63                   : 2;
+	} s;
+	struct mio_emm_switch_cn8 {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 reserved_36_39                   : 4;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 reserved_49_55                   : 7;
+		u64 switch_err2                      : 1;
+		u64 switch_err1                      : 1;
+		u64 switch_err0                      : 1;
+		u64 switch_exe                       : 1;
+		u64 bus_id                           : 2;
+		u64 reserved_62_63                   : 2;
+	} cn8;
+	struct mio_emm_switch_cn96xxp1 {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 reserved_36_39                   : 4;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 hs200_timing                     : 1;
+		u64 hs400_timing                     : 1;
+		u64 reserved_51_55                   : 5;
+		u64 switch_err2                      : 1;
+		u64 switch_err1                      : 1;
+		u64 switch_err0                      : 1;
+		u64 switch_exe                       : 1;
+		u64 bus_id                           : 2;
+		u64 reserved_62_63                   : 2;
+	} cn96xxp1;
+	/* struct mio_emm_switch_s cn96xxp3; */
+	/* struct mio_emm_switch_s cnf95xx; */
+};
+
+static inline u64 MIO_EMM_SWITCH(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_SWITCH(void)
+{
+	return 0x2048;
+}
+
+/**
+ * Register (RSL) mio_emm_tap
+ *
+ * eMMC TAP Delay Register This register indicates the delay line
+ * characteristics.
+ */
+union mio_emm_tap {
+	u64 u;
+	struct mio_emm_tap_s {
+		u64 delay                            : 8;
+		u64 reserved_8_63                    : 56;
+	} s;
+	/* struct mio_emm_tap_s cn; */
+};
+
+static inline u64 MIO_EMM_TAP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_TAP(void)
+{
+	return 0x20c8;
+}
+
+/**
+ * Register (RSL) mio_emm_timing
+ *
+ * eMMC Timing Register This register determines the number of tap delays
+ * the EMM_DAT, EMM_DS, and EMM_CMD lines are transmitted or received in
+ * relation to EMM_CLK. These values should only be changed when the eMMC
+ * bus is idle.
+ */
+union mio_emm_timing {
+	u64 u;
+	struct mio_emm_timing_s {
+		u64 data_out_tap                     : 6;
+		u64 reserved_6_15                    : 10;
+		u64 data_in_tap                      : 6;
+		u64 reserved_22_31                   : 10;
+		u64 cmd_out_tap                      : 6;
+		u64 reserved_38_47                   : 10;
+		u64 cmd_in_tap                       : 6;
+		u64 reserved_54_63                   : 10;
+	} s;
+	/* struct mio_emm_timing_s cn; */
+};
+
+static inline u64 MIO_EMM_TIMING(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_TIMING(void)
+{
+	return 0x20d0;
+}
+
+/**
+ * Register (RSL) mio_emm_wdog
+ *
+ * eMMC Watchdog Register
+ */
+union mio_emm_wdog {
+	u64 u;
+	struct mio_emm_wdog_s {
+		u64 clk_cnt                          : 26;
+		u64 reserved_26_63                   : 38;
+	} s;
+	/* struct mio_emm_wdog_s cn; */
+};
+
+static inline u64 MIO_EMM_WDOG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_WDOG(void)
+{
+	return 0x2088;
+}
+
+#endif /* __CSRS_MIO_EMM_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx/csrs/csrs-xcv.h b/arch/arm/include/asm/arch-octeontx/csrs/csrs-xcv.h
new file mode 100644
index 0000000..159f58a
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx/csrs/csrs-xcv.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_XCV_H__
+#define __CSRS_XCV_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * XCV.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Enumeration xcv_bar_e
+ *
+ * XCV Base Address Register Enumeration Enumerates the base address
+ * registers.
+ */
+#define XCV_BAR_E_XCVX_PF_BAR0(a) (0x87e0db000000ll + 0ll * (a))
+#define XCV_BAR_E_XCVX_PF_BAR0_SIZE 0x100000ull
+#define XCV_BAR_E_XCVX_PF_BAR4(a) (0x87e0dbf00000ll + 0ll * (a))
+#define XCV_BAR_E_XCVX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration xcv_int_vec_e
+ *
+ * XCV MSI-X Vector Enumeration Enumerates the MSI-X interrupt vectors.
+ */
+#define XCV_INT_VEC_E_INT (0)
+
+/**
+ * Register (RSL) xcv#_batch_crd_ret
+ *
+ * XCV Batch Credit Return Register
+ */
+union xcvx_batch_crd_ret {
+	u64 u;
+	struct xcvx_batch_crd_ret_s {
+		u64 crd_ret                          : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct xcvx_batch_crd_ret_s cn; */
+};
+
+static inline u64 XCVX_BATCH_CRD_RET(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_BATCH_CRD_RET(u64 a)
+{
+	return 0x100 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_comp_ctl
+ *
+ * XCV Compensation Controller Register This register controls
+ * programmable compensation.
+ */
+union xcvx_comp_ctl {
+	u64 u;
+	struct xcvx_comp_ctl_s {
+		u64 nctl_sat                         : 1;
+		u64 reserved_1_26                    : 26;
+		u64 nctl_lock                        : 1;
+		u64 reserved_28                      : 1;
+		u64 pctl_sat                         : 1;
+		u64 pctl_lock                        : 1;
+		u64 reserved_31                      : 1;
+		u64 drv_nctl                         : 5;
+		u64 reserved_37_39                   : 3;
+		u64 drv_pctl                         : 5;
+		u64 reserved_45_47                   : 3;
+		u64 cmp_nctl                         : 5;
+		u64 reserved_53_55                   : 3;
+		u64 cmp_pctl                         : 5;
+		u64 reserved_61_62                   : 2;
+		u64 drv_byp                          : 1;
+	} s;
+	/* struct xcvx_comp_ctl_s cn; */
+};
+
+static inline u64 XCVX_COMP_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_COMP_CTL(u64 a)
+{
+	return 0x20 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_ctl
+ *
+ * XCV Control Register This register contains the status control bits.
+ */
+union xcvx_ctl {
+	u64 u;
+	struct xcvx_ctl_s {
+		u64 speed                            : 2;
+		u64 lpbk_int                         : 1;
+		u64 lpbk_ext                         : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct xcvx_ctl_s cn; */
+};
+
+static inline u64 XCVX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_CTL(u64 a)
+{
+	return 0x30 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_dll_ctl
+ *
+ * XCV DLL Controller Register The RGMII timing specification requires
+ * that devices transmit clock and data synchronously. The specification
+ * requires external sources (namely the PC board trace routes) to
+ * introduce the appropriate 1.5 to 2.0 ns of delay.  To eliminate the
+ * need for the PC board delays, the RGMII interface has optional on-
+ * board DLLs for both transmit and receive. For correct operation, at
+ * most one of the transmitter, board, or receiver involved in an RGMII
+ * link should introduce delay. By default/reset, the RGMII receivers
+ * delay the received clock, and the RGMII transmitters do not delay the
+ * transmitted clock. Whether this default works as-is with a given link
+ * partner depends on the behavior of the link partner and the PC board.
+ * These are the possible modes of RGMII receive operation:  *
+ * XCV()_DLL_CTL[CLKRX_BYP] = 0 (reset value) - The RGMII receive
+ * interface introduces clock delay using its internal DLL. This mode is
+ * appropriate if neither the remote transmitter nor the PC board delays
+ * the clock.  * XCV()_DLL_CTL[CLKRX_BYP] = 1, [CLKRX_SET] = 0x0 - The
+ * RGMII receive interface introduces no clock delay. This mode is
+ * appropriate if either the remote transmitter or the PC board delays
+ * the clock.  These are the possible modes of RGMII transmit operation:
+ * * XCV()_DLL_CTL[CLKTX_BYP] = 1, [CLKTX_SET] = 0x0 (reset value) - The
+ * RGMII transmit interface introduces no clock delay. This mode is
+ * appropriate if either the remote receiver or the PC board delays the
+ * clock.  * XCV()_DLL_CTL[CLKTX_BYP] = 0 - The RGMII transmit interface
+ * introduces clock delay using its internal DLL. This mode is
+ * appropriate if neither the remote receiver nor the PC board delays the
+ * clock.
+ */
+union xcvx_dll_ctl {
+	u64 u;
+	struct xcvx_dll_ctl_s {
+		u64 refclk_sel                       : 2;
+		u64 reserved_2_7                     : 6;
+		u64 clktx_set                        : 7;
+		u64 clktx_byp                        : 1;
+		u64 clkrx_set                        : 7;
+		u64 clkrx_byp                        : 1;
+		u64 clk_set                          : 7;
+		u64 lock                             : 1;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct xcvx_dll_ctl_s cn; */
+};
+
+static inline u64 XCVX_DLL_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_DLL_CTL(u64 a)
+{
+	return 0x10 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_eco
+ *
+ * INTERNAL: XCV ECO Register
+ */
+union xcvx_eco {
+	u64 u;
+	struct xcvx_eco_s {
+		u64 eco_rw                           : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct xcvx_eco_s cn; */
+};
+
+static inline u64 XCVX_ECO(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_ECO(u64 a)
+{
+	return 0x200 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_inbnd_status
+ *
+ * XCV Inband Status Register This register contains RGMII inband status.
+ */
+union xcvx_inbnd_status {
+	u64 u;
+	struct xcvx_inbnd_status_s {
+		u64 link                             : 1;
+		u64 speed                            : 2;
+		u64 duplex                           : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct xcvx_inbnd_status_s cn; */
+};
+
+static inline u64 XCVX_INBND_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_INBND_STATUS(u64 a)
+{
+	return 0x80 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_int
+ *
+ * XCV Interrupt Register This register flags error for TX FIFO overflow,
+ * TX FIFO underflow and incomplete byte for 10/100 Mode. It also flags
+ * status change for link duplex, link speed and link up/down.
+ */
+union xcvx_int {
+	u64 u;
+	struct xcvx_int_s {
+		u64 link                             : 1;
+		u64 speed                            : 1;
+		u64 reserved_2                       : 1;
+		u64 duplex                           : 1;
+		u64 incomp_byte                      : 1;
+		u64 tx_undflw                        : 1;
+		u64 tx_ovrflw                        : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct xcvx_int_s cn; */
+};
+
+static inline u64 XCVX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_INT(u64 a)
+{
+	return 0x40 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_int_ena_w1c
+ *
+ * Loopback Error Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union xcvx_int_ena_w1c {
+	u64 u;
+	struct xcvx_int_ena_w1c_s {
+		u64 link                             : 1;
+		u64 speed                            : 1;
+		u64 reserved_2                       : 1;
+		u64 duplex                           : 1;
+		u64 incomp_byte                      : 1;
+		u64 tx_undflw                        : 1;
+		u64 tx_ovrflw                        : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct xcvx_int_ena_w1c_s cn; */
+};
+
+static inline u64 XCVX_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_INT_ENA_W1C(u64 a)
+{
+	return 0x50 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_int_ena_w1s
+ *
+ * Loopback Error Interrupt Enable Set Register This register sets
+ * interrupt enable bits.
+ */
+union xcvx_int_ena_w1s {
+	u64 u;
+	struct xcvx_int_ena_w1s_s {
+		u64 link                             : 1;
+		u64 speed                            : 1;
+		u64 reserved_2                       : 1;
+		u64 duplex                           : 1;
+		u64 incomp_byte                      : 1;
+		u64 tx_undflw                        : 1;
+		u64 tx_ovrflw                        : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct xcvx_int_ena_w1s_s cn; */
+};
+
+static inline u64 XCVX_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_INT_ENA_W1S(u64 a)
+{
+	return 0x58 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_int_w1s
+ *
+ * Loopback Error Interrupt Set Register This register sets interrupt
+ * bits.
+ */
+union xcvx_int_w1s {
+	u64 u;
+	struct xcvx_int_w1s_s {
+		u64 link                             : 1;
+		u64 speed                            : 1;
+		u64 reserved_2                       : 1;
+		u64 duplex                           : 1;
+		u64 incomp_byte                      : 1;
+		u64 tx_undflw                        : 1;
+		u64 tx_ovrflw                        : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct xcvx_int_w1s_s cn; */
+};
+
+static inline u64 XCVX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_INT_W1S(u64 a)
+{
+	return 0x48 + 0 * a;
+}
+
+/**
+ * Register (RSL) xcv#_msix_pba#
+ *
+ * XCV MSI-X Pending Bit Array Registers This register is the MSI-X PBA
+ * table; the bit number is indexed by the XCV_INT_VEC_E enumeration.
+ */
+union xcvx_msix_pbax {
+	u64 u;
+	struct xcvx_msix_pbax_s {
+		u64 pend                             : 64;
+	} s;
+	/* struct xcvx_msix_pbax_s cn; */
+};
+
+static inline u64 XCVX_MSIX_PBAX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_MSIX_PBAX(u64 a, u64 b)
+{
+	return 0xf0000 + 0 * a + 8 * b;
+}
+
+/**
+ * Register (RSL) xcv#_msix_vec#_addr
+ *
+ * XCV MSI-X Vector-Table Address Register This register is the MSI-X
+ * vector table, indexed by the XCV_INT_VEC_E enumeration.
+ */
+union xcvx_msix_vecx_addr {
+	u64 u;
+	struct xcvx_msix_vecx_addr_s {
+		u64 secvec                           : 1;
+		u64 reserved_1                       : 1;
+		u64 addr                             : 47;
+		u64 reserved_49_63                   : 15;
+	} s;
+	/* struct xcvx_msix_vecx_addr_s cn; */
+};
+
+static inline u64 XCVX_MSIX_VECX_ADDR(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_MSIX_VECX_ADDR(u64 a, u64 b)
+{
+	return 0 + 0 * a + 0x10 * b;
+}
+
+/**
+ * Register (RSL) xcv#_msix_vec#_ctl
+ *
+ * XCV MSI-X Vector-Table Control and Data Register This register is the
+ * MSI-X vector table, indexed by the XCV_INT_VEC_E enumeration.
+ */
+union xcvx_msix_vecx_ctl {
+	u64 u;
+	struct xcvx_msix_vecx_ctl_s {
+		u64 data                             : 20;
+		u64 reserved_20_31                   : 12;
+		u64 mask                             : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct xcvx_msix_vecx_ctl_s cn; */
+};
+
+static inline u64 XCVX_MSIX_VECX_CTL(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_MSIX_VECX_CTL(u64 a, u64 b)
+{
+	return 8 + 0 * a + 0x10 * b;
+}
+
+/**
+ * Register (RSL) xcv#_reset
+ *
+ * XCV Reset Registers This register controls reset.
+ */
+union xcvx_reset {
+	u64 u;
+	struct xcvx_reset_s {
+		u64 rx_dat_rst_n                     : 1;
+		u64 rx_pkt_rst_n                     : 1;
+		u64 tx_dat_rst_n                     : 1;
+		u64 tx_pkt_rst_n                     : 1;
+		u64 reserved_4_6                     : 3;
+		u64 comp                             : 1;
+		u64 reserved_8_10                    : 3;
+		u64 dllrst                           : 1;
+		u64 reserved_12_14                   : 3;
+		u64 clkrst                           : 1;
+		u64 reserved_16_62                   : 47;
+		u64 enable                           : 1;
+	} s;
+	/* struct xcvx_reset_s cn; */
+};
+
+static inline u64 XCVX_RESET(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 XCVX_RESET(u64 a)
+{
+	return 0 + 0 * a;
+}
+
+#endif /* __CSRS_XCV_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx/gpio.h b/arch/arm/include/asm/arch-octeontx/gpio.h
new file mode 100644
index 0000000..3943ffd
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx/gpio.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
diff --git a/arch/arm/include/asm/arch-octeontx/smc.h b/arch/arm/include/asm/arch-octeontx/smc.h
new file mode 100644
index 0000000..beff4d1
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx/smc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __SMC_H__
+#define __SMC_H__
+
+/* OcteonTX Service Calls version numbers */
+#define OCTEONTX_VERSION_MAJOR	0x1
+#define OCTEONTX_VERSION_MINOR	0x0
+
+/* x1 - node number */
+#define OCTEONTX_DRAM_SIZE	0xc2000301
+
+ssize_t smc_dram_size(unsigned int node);
+
+#endif /* __SMC_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx/soc.h b/arch/arm/include/asm/arch-octeontx/soc.h
new file mode 100644
index 0000000..dc081c7
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx/soc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __SOC_H__
+#define __SOC_H__
+
+/* Product PARTNUM */
+#define CN81XX	0xA2
+#define CN83XX	0xA3
+#define CN96XX	0xB2
+#define CN95XX	0xB3
+
+#define otx_is_altpkg()	read_alt_pkg()
+#define otx_is_soc(soc)	(read_partnum() == (soc))
+#define otx_is_board(model) (!strcmp(read_board_name(), model))
+#define otx_is_platform(platform) (read_platform() == (platform))
+
+enum platform {
+	PLATFORM_HW = 0,
+	PLATFORM_EMULATOR = 1,
+	PLATFORM_ASIM = 3,
+};
+
+int read_platform(void);
+u8 read_partnum(void);
+const char *read_board_name(void);
+bool read_alt_pkg(void);
+
+#endif /* __SOC_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/board.h b/arch/arm/include/asm/arch-octeontx2/board.h
new file mode 100644
index 0000000..1c9ec11
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/board.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __BOARD_H__
+#define __BOARD_H__
+
+#include <asm/arch/soc.h>
+
+/** Reg offsets */
+#define RST_BOOT		0x87E006001600ULL
+
+#define CPC_BOOT_OWNERX(a)	(0x86D000000160ULL + (8 * (a)))
+
+/** Structure definitions */
+/**
+ * Register (NCB32b) cpc_boot_owner#
+ *
+ * CPC Boot Owner Registers These registers control an external arbiter
+ * for the boot device (SPI/eMMC) across multiple external devices. There
+ * is a register for each requester: _ \<0\> - SCP          - reset on
+ * SCP reset _ \<1\> - MCP          - reset on MCP reset _ \<2\> - AP
+ * Secure    - reset on core reset _ \<3\> - AP Nonsecure - reset on core
+ * reset  These register is only writable to the corresponding
+ * requestor(s) permitted with CPC_PERMIT.
+ */
+union cpc_boot_ownerx {
+	u32 u;
+	struct cpc_boot_ownerx_s {
+		u32 boot_req		: 1;
+		u32 reserved_1_7	: 7;
+		u32 boot_wait		: 1;
+		u32 reserved_9_31	: 23;
+	} s;
+};
+
+/**
+ * Register (RSL) rst_boot
+ *
+ * RST Boot Register This register is not accessible through ROM scripts;
+ * see SCR_WRITE32_S[ADDR].
+ */
+union rst_boot {
+	u64 u;
+	struct rst_boot_s {
+		u64 rboot_pin                        : 1;
+		u64 rboot                            : 1;
+		u64 reserved_2_32                    : 31;
+		u64 pnr_mul                          : 6;
+		u64 reserved_39                      : 1;
+		u64 c_mul                            : 7;
+		u64 reserved_47_52                   : 6;
+		u64 gpio_ejtag                       : 1;
+		u64 mcp_jtagdis                      : 1;
+		u64 dis_scan                         : 1;
+		u64 dis_huk                          : 1;
+		u64 vrm_err                          : 1;
+		u64 jt_tstmode                       : 1;
+		u64 ckill_ppdis                      : 1;
+		u64 trusted_mode                     : 1;
+		u64 reserved_61_62                   : 2;
+		u64 chipkill                         : 1;
+	} s;
+	struct rst_boot_cn96xx {
+		u64 rboot_pin                        : 1;
+		u64 rboot                            : 1;
+		u64 reserved_2_23                    : 22;
+		u64 cpt_mul                          : 7;
+		u64 reserved_31_32                   : 2;
+		u64 pnr_mul                          : 6;
+		u64 reserved_39                      : 1;
+		u64 c_mul                            : 7;
+		u64 reserved_47_52                   : 6;
+		u64 gpio_ejtag                       : 1;
+		u64 mcp_jtagdis                      : 1;
+		u64 dis_scan                         : 1;
+		u64 dis_huk                          : 1;
+		u64 vrm_err                          : 1;
+		u64 reserved_58_59                   : 2;
+		u64 trusted_mode                     : 1;
+		u64 scp_jtagdis                      : 1;
+		u64 jtagdis                          : 1;
+		u64 chipkill                         : 1;
+	} cn96xx;
+	struct rst_boot_cnf95xx {
+		u64 rboot_pin                        : 1;
+		u64 rboot                            : 1;
+		u64 reserved_2_7                     : 6;
+		u64 bphy_mul                         : 7;
+		u64 reserved_15                      : 1;
+		u64 dsp_mul                          : 7;
+		u64 reserved_23                      : 1;
+		u64 cpt_mul                          : 7;
+		u64 reserved_31_32                   : 2;
+		u64 pnr_mul                          : 6;
+		u64 reserved_39                      : 1;
+		u64 c_mul                            : 7;
+		u64 reserved_47_52                   : 6;
+		u64 gpio_ejtag                       : 1;
+		u64 mcp_jtagdis                      : 1;
+		u64 dis_scan                         : 1;
+		u64 dis_huk                          : 1;
+		u64 vrm_err                          : 1;
+		u64 reserved_58_59                   : 2;
+		u64 trusted_mode                     : 1;
+		u64 scp_jtagdis                      : 1;
+		u64 jtagdis                          : 1;
+		u64 chipkill                         : 1;
+	} cnf95xx;
+};
+
+extern unsigned long fdt_base_addr;
+
+/** Function definitions */
+void mem_map_fill(void);
+int fdt_get_board_mac_cnt(void);
+u64 fdt_get_board_mac_addr(void);
+const char *fdt_get_board_model(void);
+const char *fdt_get_board_serial(void);
+const char *fdt_get_board_revision(void);
+void octeontx2_board_get_mac_addr(u8 index, u8 *mac_addr);
+void board_acquire_flash_arb(bool acquire);
+void cgx_intf_shutdown(void);
+
+#endif /* __BOARD_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/clock.h b/arch/arm/include/asm/arch-octeontx2/clock.h
new file mode 100644
index 0000000..7be8852
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/clock.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __CLOCK_H__
+#define __CLOCK_H__
+
+/** System PLL reference clock */
+#define PLL_REF_CLK                     50000000        /* 50 MHz */
+#define NS_PER_REF_CLK_TICK             (1000000000 / PLL_REF_CLK)
+
+/**
+ * Returns the I/O clock speed in Hz
+ */
+u64 octeontx_get_io_clock(void);
+
+/**
+ * Returns the core clock speed in Hz
+ */
+u64 octeontx_get_core_clock(void);
+
+#endif /* __CLOCK_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/csrs/csrs-cgx.h b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-cgx.h
new file mode 100644
index 0000000..34e7db3
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-cgx.h
@@ -0,0 +1,7851 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_CGX_H__
+#define __CSRS_CGX_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * CGX.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Enumeration cgx_bar_e
+ *
+ * CGX Base Address Register Enumeration Enumerates the base address
+ * registers.
+ */
+#define CGX_BAR_E_CGXX_PF_BAR0(a) (0x87e0e0000000ll + 0x1000000ll * (a))
+#define CGX_BAR_E_CGXX_PF_BAR0_SIZE 0x100000ull
+#define CGX_BAR_E_CGXX_PF_BAR4(a) (0x87e0e0400000ll + 0x1000000ll * (a))
+#define CGX_BAR_E_CGXX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration cgx_int_vec_e
+ *
+ * CGX MSI-X Vector Enumeration Enumeration the MSI-X interrupt vectors.
+ */
+#define CGX_INT_VEC_E_CMRX_INT(a) (0 + 9 * (a))
+#define CGX_INT_VEC_E_CMRX_SW(a) (0x26 + (a))
+#define CGX_INT_VEC_E_CMR_MEM_INT (0x24)
+#define CGX_INT_VEC_E_GMPX_GMI_RX_INT(a) (5 + 9 * (a))
+#define CGX_INT_VEC_E_GMPX_GMI_TX_INT(a) (6 + 9 * (a))
+#define CGX_INT_VEC_E_GMPX_GMI_WOL_INT(a) (7 + 9 * (a))
+#define CGX_INT_VEC_E_GMPX_PCS_INT(a) (4 + 9 * (a))
+#define CGX_INT_VEC_E_SMUX_RX_INT(a) (2 + 9 * (a))
+#define CGX_INT_VEC_E_SMUX_RX_WOL_INT(a) (8 + 9 * (a))
+#define CGX_INT_VEC_E_SMUX_TX_INT(a) (3 + 9 * (a))
+#define CGX_INT_VEC_E_SPUX_INT(a) (1 + 9 * (a))
+#define CGX_INT_VEC_E_SW (0x25)
+
+/**
+ * Enumeration cgx_lmac_types_e
+ *
+ * CGX LMAC Type Enumeration Enumerates the LMAC Types that CGX supports.
+ */
+#define CGX_LMAC_TYPES_E_FIFTYG_R (8)
+#define CGX_LMAC_TYPES_E_FORTYG_R (4)
+#define CGX_LMAC_TYPES_E_HUNDREDG_R (9)
+#define CGX_LMAC_TYPES_E_QSGMII (6)
+#define CGX_LMAC_TYPES_E_RGMII (5)
+#define CGX_LMAC_TYPES_E_RXAUI (2)
+#define CGX_LMAC_TYPES_E_SGMII (0)
+#define CGX_LMAC_TYPES_E_TENG_R (3)
+#define CGX_LMAC_TYPES_E_TWENTYFIVEG_R (7)
+#define CGX_LMAC_TYPES_E_USXGMII (0xa)
+#define CGX_LMAC_TYPES_E_XAUI (1)
+
+/**
+ * Enumeration cgx_opcode_e
+ *
+ * INTERNAL: CGX Error Opcode Enumeration  Enumerates the error opcodes
+ * created by CGX and presented to NCSI/NIX.
+ */
+#define CGX_OPCODE_E_RE_FCS (7)
+#define CGX_OPCODE_E_RE_FCS_RCV (8)
+#define CGX_OPCODE_E_RE_JABBER (2)
+#define CGX_OPCODE_E_RE_NONE (0)
+#define CGX_OPCODE_E_RE_PARTIAL (1)
+#define CGX_OPCODE_E_RE_RX_CTL (0xb)
+#define CGX_OPCODE_E_RE_SKIP (0xc)
+#define CGX_OPCODE_E_RE_TERMINATE (9)
+
+/**
+ * Enumeration cgx_spu_br_train_cst_e
+ *
+ * INTERNAL: CGX Training Coefficient Status Enumeration  2-bit status
+ * for each coefficient as defined in IEEE 802.3, Table 72-5.
+ */
+#define CGX_SPU_BR_TRAIN_CST_E_MAXIMUM (3)
+#define CGX_SPU_BR_TRAIN_CST_E_MINIMUM (2)
+#define CGX_SPU_BR_TRAIN_CST_E_NOT_UPDATED (0)
+#define CGX_SPU_BR_TRAIN_CST_E_UPDATED (1)
+
+/**
+ * Enumeration cgx_spu_br_train_cup_e
+ *
+ * INTERNAL:CGX Training Coefficient Enumeration  2-bit command for each
+ * coefficient as defined in IEEE 802.3, Table 72-4.
+ */
+#define CGX_SPU_BR_TRAIN_CUP_E_DECREMENT (1)
+#define CGX_SPU_BR_TRAIN_CUP_E_HOLD (0)
+#define CGX_SPU_BR_TRAIN_CUP_E_INCREMENT (2)
+#define CGX_SPU_BR_TRAIN_CUP_E_RSV_CMD (3)
+
+/**
+ * Enumeration cgx_usxgmii_rate_e
+ *
+ * CGX USXGMII Rate Enumeration Enumerates the USXGMII sub-port type
+ * rate, CGX()_SPU()_CONTROL1[USXGMII_RATE].  Selecting a rate higher
+ * than the maximum allowed for a given port sub-type (specified by
+ * CGX()_SPU()_CONTROL1[USXGMII_TYPE]), e.g., selecting ::RATE_2HG (2.5
+ * Gbps) for CGX_USXGMII_TYPE_E::SXGMII_2G, will cause unpredictable
+ * behavior. USXGMII hardware-based autonegotiation may change this
+ * setting.
+ */
+#define CGX_USXGMII_RATE_E_RATE_100M (1)
+#define CGX_USXGMII_RATE_E_RATE_10G (5)
+#define CGX_USXGMII_RATE_E_RATE_10M (0)
+#define CGX_USXGMII_RATE_E_RATE_1G (2)
+#define CGX_USXGMII_RATE_E_RATE_20G (6)
+#define CGX_USXGMII_RATE_E_RATE_2HG (3)
+#define CGX_USXGMII_RATE_E_RATE_5G (4)
+#define CGX_USXGMII_RATE_E_RSV_RATE (7)
+
+/**
+ * Enumeration cgx_usxgmii_type_e
+ *
+ * CGX USXGMII Port Sub-Type Enumeration Enumerates the USXGMII sub-port
+ * type, CGX()_SPU()_CONTROL1[USXGMII_TYPE].  The description indicates
+ * the maximum rate and the maximum number of ports (LMACs) for each sub-
+ * type. The minimum rate for any port is 10M. The rate selection for
+ * each LMAC is made using CGX()_SPU()_CONTROL1[USXGMII_RATE] and the
+ * number of active ports/LMACs is implicitly determined by the value
+ * given to CGX()_CMR()_CONFIG[ENABLE] for each LMAC.  Selecting a rate
+ * higher than the maximum allowed for a given port sub-type or enabling
+ * more LMACs than the maximum allowed for a given port sub-type will
+ * cause unpredictable behavior.
+ */
+#define CGX_USXGMII_TYPE_E_DXGMII_10G (3)
+#define CGX_USXGMII_TYPE_E_DXGMII_20G (5)
+#define CGX_USXGMII_TYPE_E_DXGMII_5G (4)
+#define CGX_USXGMII_TYPE_E_QXGMII_10G (7)
+#define CGX_USXGMII_TYPE_E_QXGMII_20G (6)
+#define CGX_USXGMII_TYPE_E_SXGMII_10G (0)
+#define CGX_USXGMII_TYPE_E_SXGMII_2G (2)
+#define CGX_USXGMII_TYPE_E_SXGMII_5G (1)
+
+/**
+ * Structure cgx_spu_br_lane_train_status_s
+ *
+ * INTERNAL:CGX Lane Training Status Structure  This is the group of lane
+ * status bits for a single lane in the BASE-R PMD status register (MDIO
+ * address 1.151) as defined in IEEE 802.3ba-2010, Table 45-55.
+ */
+union cgx_spu_br_lane_train_status_s {
+	u32 u;
+	struct cgx_spu_br_lane_train_status_s_s {
+		u32 rx_trained                       : 1;
+		u32 frame_lock                       : 1;
+		u32 training                         : 1;
+		u32 training_failure                 : 1;
+		u32 reserved_4_31                    : 28;
+	} s;
+	/* struct cgx_spu_br_lane_train_status_s_s cn; */
+};
+
+/**
+ * Structure cgx_spu_br_train_cup_s
+ *
+ * INTERNAL:CGX Lane Training Coefficient Structure  This is the
+ * coefficient update field of the BASE-R link training packet as defined
+ * in IEEE 802.3, Table 72-4.
+ */
+union cgx_spu_br_train_cup_s {
+	u32 u;
+	struct cgx_spu_br_train_cup_s_s {
+		u32 pre_cup                          : 2;
+		u32 main_cup                         : 2;
+		u32 post_cup                         : 2;
+		u32 reserved_6_11                    : 6;
+		u32 init                             : 1;
+		u32 preset                           : 1;
+		u32 reserved_14_31                   : 18;
+	} s;
+	struct cgx_spu_br_train_cup_s_cn {
+		u32 pre_cup                          : 2;
+		u32 main_cup                         : 2;
+		u32 post_cup                         : 2;
+		u32 reserved_6_11                    : 6;
+		u32 init                             : 1;
+		u32 preset                           : 1;
+		u32 reserved_14_15                   : 2;
+		u32 reserved_16_31                   : 16;
+	} cn;
+};
+
+/**
+ * Structure cgx_spu_br_train_rep_s
+ *
+ * INTERNAL:CGX Training Report Structure  This is the status report
+ * field of the BASE-R link training packet as defined in IEEE 802.3,
+ * Table 72-5.
+ */
+union cgx_spu_br_train_rep_s {
+	u32 u;
+	struct cgx_spu_br_train_rep_s_s {
+		u32 pre_cst                          : 2;
+		u32 main_cst                         : 2;
+		u32 post_cst                         : 2;
+		u32 reserved_6_14                    : 9;
+		u32 rx_ready                         : 1;
+		u32 reserved_16_31                   : 16;
+	} s;
+	/* struct cgx_spu_br_train_rep_s_s cn; */
+};
+
+/**
+ * Structure cgx_spu_sds_cu_s
+ *
+ * INTERNAL: CGX Training Coefficient Structure  This structure is
+ * similar to CGX_SPU_BR_TRAIN_CUP_S format, but with reserved fields
+ * removed and [RCVR_READY] field added.
+ */
+union cgx_spu_sds_cu_s {
+	u32 u;
+	struct cgx_spu_sds_cu_s_s {
+		u32 pre_cu                           : 2;
+		u32 main_cu                          : 2;
+		u32 post_cu                          : 2;
+		u32 initialize                       : 1;
+		u32 preset                           : 1;
+		u32 rcvr_ready                       : 1;
+		u32 reserved_9_31                    : 23;
+	} s;
+	/* struct cgx_spu_sds_cu_s_s cn; */
+};
+
+/**
+ * Structure cgx_spu_sds_skew_status_s
+ *
+ * CGX Skew Status Structure Provides receive skew information detected
+ * for a physical SerDes lane when it is assigned to a multilane
+ * LMAC/LPCS. Contents are valid when RX deskew is done for the
+ * associated LMAC/LPCS.
+ */
+union cgx_spu_sds_skew_status_s {
+	u32 u;
+	struct cgx_spu_sds_skew_status_s_s {
+		u32 am_timestamp                     : 12;
+		u32 reserved_12_15                   : 4;
+		u32 am_lane_id                       : 5;
+		u32 reserved_21_22                   : 2;
+		u32 lane_skew                        : 7;
+		u32 reserved_30_31                   : 2;
+	} s;
+	/* struct cgx_spu_sds_skew_status_s_s cn; */
+};
+
+/**
+ * Structure cgx_spu_sds_sr_s
+ *
+ * INTERNAL: CGX Lane Training Coefficient Structure  Similar to
+ * CGX_SPU_BR_TRAIN_REP_S format, but with reserved and RX ready fields
+ * removed.
+ */
+union cgx_spu_sds_sr_s {
+	u32 u;
+	struct cgx_spu_sds_sr_s_s {
+		u32 pre_status                       : 2;
+		u32 main_status                      : 2;
+		u32 post_status                      : 2;
+		u32 reserved_6_31                    : 26;
+	} s;
+	/* struct cgx_spu_sds_sr_s_s cn; */
+};
+
+/**
+ * Register (RSL) cgx#_active_pc
+ *
+ * CGX ACTIVE PC Register This register counts the conditional clocks for
+ * power management.
+ */
+union cgxx_active_pc {
+	u64 u;
+	struct cgxx_active_pc_s {
+		u64 cnt                              : 64;
+	} s;
+	/* struct cgxx_active_pc_s cn; */
+};
+
+static inline u64 CGXX_ACTIVE_PC(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_ACTIVE_PC(void)
+{
+	return 0x2010;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_activity
+ *
+ * CGX CMR Activity Registers
+ */
+union cgxx_cmrx_activity {
+	u64 u;
+	struct cgxx_cmrx_activity_s {
+		u64 act_tx_lo                        : 1;
+		u64 act_tx_hi                        : 1;
+		u64 pause_tx                         : 1;
+		u64 act_rx_lo                        : 1;
+		u64 act_rx_hi                        : 1;
+		u64 pause_rx                         : 1;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct cgxx_cmrx_activity_s cn; */
+};
+
+static inline u64 CGXX_CMRX_ACTIVITY(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_ACTIVITY(u64 a)
+{
+	return 0x5f8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_config
+ *
+ * CGX CMR Configuration Registers Logical MAC/PCS configuration
+ * registers; one per LMAC. The maximum number of LMACs (and maximum LMAC
+ * ID) that can be enabled by these registers is limited by
+ * CGX()_CMR_RX_LMACS[LMACS] and CGX()_CMR_TX_LMACS[LMACS].  Internal:
+ * \<pre\> Example configurations:   ------------------------------------
+ * ---------------------------------------   Configuration
+ * LMACS  Register             [ENABLE]    [LMAC_TYPE]   ----------------
+ * -----------------------------------------------------------
+ * 1x50G+1x25G+1xSGMII     4      CGXn_CMR0_CONFIG     1           8
+ * CGXn_CMR1_CONFIG     0           --
+ * CGXn_CMR2_CONFIG     1           7
+ * CGXn_CMR3_CONFIG     1           0   ---------------------------------
+ * ------------------------------------------   USXGMII
+ * 1-4    CGXn_CMR0_CONFIG     1           a
+ * CGXn_CMR1_CONFIG     1           a
+ * CGXn_CMR2_CONFIG     1           a
+ * CGXn_CMR3_CONFIG     1           a   ---------------------------------
+ * ------------------------------------------   1x100GBASE-R4           1
+ * CGXn_CMR0_CONFIG     1           9
+ * CGXn_CMR1_CONFIG     0           --
+ * CGXn_CMR2_CONFIG     0           --
+ * CGXn_CMR3_CONFIG     0           --   --------------------------------
+ * -------------------------------------------   2x50GBASE-R2
+ * 2      CGXn_CMR0_CONFIG     1           8
+ * CGXn_CMR1_CONFIG     1           8
+ * CGXn_CMR2_CONFIG     0           --
+ * CGXn_CMR3_CONFIG     0           --   --------------------------------
+ * -------------------------------------------   4x25GBASE-R
+ * 4      CGXn_CMR0_CONFIG     1           7
+ * CGXn_CMR1_CONFIG     1           7
+ * CGXn_CMR2_CONFIG     1           7
+ * CGXn_CMR3_CONFIG     1           7   ---------------------------------
+ * ------------------------------------------   QSGMII                  4
+ * CGXn_CMR0_CONFIG     1           6
+ * CGXn_CMR1_CONFIG     1           6
+ * CGXn_CMR2_CONFIG     1           6
+ * CGXn_CMR3_CONFIG     1           6   ---------------------------------
+ * ------------------------------------------   1x40GBASE-R4            1
+ * CGXn_CMR0_CONFIG     1           4
+ * CGXn_CMR1_CONFIG     0           --
+ * CGXn_CMR2_CONFIG     0           --
+ * CGXn_CMR3_CONFIG     0           --   --------------------------------
+ * -------------------------------------------   4x10GBASE-R
+ * 4      CGXn_CMR0_CONFIG     1           3
+ * CGXn_CMR1_CONFIG     1           3
+ * CGXn_CMR2_CONFIG     1           3
+ * CGXn_CMR3_CONFIG     1           3   ---------------------------------
+ * ------------------------------------------   2xRXAUI                 2
+ * CGXn_CMR0_CONFIG     1           2
+ * CGXn_CMR1_CONFIG     1           2
+ * CGXn_CMR2_CONFIG     0           --
+ * CGXn_CMR3_CONFIG     0           --   --------------------------------
+ * -------------------------------------------   1x10GBASE-X/XAUI/DXAUI
+ * 1      CGXn_CMR0_CONFIG     1           1
+ * CGXn_CMR1_CONFIG     0           --
+ * CGXn_CMR2_CONFIG     0           --
+ * CGXn_CMR3_CONFIG     0           --   --------------------------------
+ * -------------------------------------------   4xSGMII/1000BASE-X
+ * 4      CGXn_CMR0_CONFIG     1           0
+ * CGXn_CMR1_CONFIG     1           0
+ * CGXn_CMR2_CONFIG     1           0
+ * CGXn_CMR3_CONFIG     1           0   ---------------------------------
+ * ------------------------------------------ \</pre\>
+ */
+union cgxx_cmrx_config {
+	u64 u;
+	struct cgxx_cmrx_config_s {
+		u64 lane_to_sds                      : 8;
+		u64 reserved_8_39                    : 32;
+		u64 lmac_type                        : 4;
+		u64 unused                           : 8;
+		u64 int_beat_gen                     : 1;
+		u64 data_pkt_tx_en                   : 1;
+		u64 data_pkt_rx_en                   : 1;
+		u64 enable                           : 1;
+		u64 x2p_select                       : 3;
+		u64 p2x_select                       : 3;
+		u64 reserved_62_63                   : 2;
+	} s;
+	/* struct cgxx_cmrx_config_s cn; */
+};
+
+static inline u64 CGXX_CMRX_CONFIG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_CONFIG(u64 a)
+{
+	return 0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_int
+ *
+ * CGX CMR Interrupt Register
+ */
+union cgxx_cmrx_int {
+	u64 u;
+	struct cgxx_cmrx_int_s {
+		u64 pause_drp                        : 1;
+		u64 overflw                          : 1;
+		u64 nic_nxc                          : 1;
+		u64 nix0_nxc                         : 1;
+		u64 nix1_nxc                         : 1;
+		u64 nix0_e_nxc                       : 1;
+		u64 nix1_e_nxc                       : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_cmrx_int_s cn; */
+};
+
+static inline u64 CGXX_CMRX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_INT(u64 a)
+{
+	return 0x40 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_int_ena_w1c
+ *
+ * CGX CMR Interrupt Enable Clear Register This register clears interrupt
+ * enable bits.
+ */
+union cgxx_cmrx_int_ena_w1c {
+	u64 u;
+	struct cgxx_cmrx_int_ena_w1c_s {
+		u64 pause_drp                        : 1;
+		u64 overflw                          : 1;
+		u64 nic_nxc                          : 1;
+		u64 nix0_nxc                         : 1;
+		u64 nix1_nxc                         : 1;
+		u64 nix0_e_nxc                       : 1;
+		u64 nix1_e_nxc                       : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_cmrx_int_ena_w1c_s cn; */
+};
+
+static inline u64 CGXX_CMRX_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_INT_ENA_W1C(u64 a)
+{
+	return 0x50 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_int_ena_w1s
+ *
+ * CGX CMR Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union cgxx_cmrx_int_ena_w1s {
+	u64 u;
+	struct cgxx_cmrx_int_ena_w1s_s {
+		u64 pause_drp                        : 1;
+		u64 overflw                          : 1;
+		u64 nic_nxc                          : 1;
+		u64 nix0_nxc                         : 1;
+		u64 nix1_nxc                         : 1;
+		u64 nix0_e_nxc                       : 1;
+		u64 nix1_e_nxc                       : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_cmrx_int_ena_w1s_s cn; */
+};
+
+static inline u64 CGXX_CMRX_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_INT_ENA_W1S(u64 a)
+{
+	return 0x58 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_int_w1s
+ *
+ * CGX CMR Interrupt Set Register This register sets interrupt bits.
+ */
+union cgxx_cmrx_int_w1s {
+	u64 u;
+	struct cgxx_cmrx_int_w1s_s {
+		u64 pause_drp                        : 1;
+		u64 overflw                          : 1;
+		u64 nic_nxc                          : 1;
+		u64 nix0_nxc                         : 1;
+		u64 nix1_nxc                         : 1;
+		u64 nix0_e_nxc                       : 1;
+		u64 nix1_e_nxc                       : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_cmrx_int_w1s_s cn; */
+};
+
+static inline u64 CGXX_CMRX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_INT_W1S(u64 a)
+{
+	return 0x48 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_led_timing
+ *
+ * CGX MAC LED Activity Timing Registers
+ */
+union cgxx_cmrx_led_timing {
+	u64 u;
+	struct cgxx_cmrx_led_timing_s {
+		u64 extension                        : 8;
+		u64 reserved_8_63                    : 56;
+	} s;
+	/* struct cgxx_cmrx_led_timing_s cn; */
+};
+
+static inline u64 CGXX_CMRX_LED_TIMING(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_LED_TIMING(u64 a)
+{
+	return 0x5f0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_prt_cbfc_ctl
+ *
+ * CGX CMR LMAC PFC Control Registers See CGX()_CMR()_RX_LOGL_XOFF[XOFF].
+ */
+union cgxx_cmrx_prt_cbfc_ctl {
+	u64 u;
+	struct cgxx_cmrx_prt_cbfc_ctl_s {
+		u64 reserved_0_15                    : 16;
+		u64 phys_bp                          : 16;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_cmrx_prt_cbfc_ctl_s cn; */
+};
+
+static inline u64 CGXX_CMRX_PRT_CBFC_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_PRT_CBFC_CTL(u64 a)
+{
+	return 0x608 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_bp_drop
+ *
+ * CGX Receive Backpressure Drop Register
+ */
+union cgxx_cmrx_rx_bp_drop {
+	u64 u;
+	struct cgxx_cmrx_rx_bp_drop_s {
+		u64 mark                             : 7;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_cmrx_rx_bp_drop_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_BP_DROP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_BP_DROP(u64 a)
+{
+	return 0xd8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_bp_off
+ *
+ * CGX Receive Backpressure Off Register
+ */
+union cgxx_cmrx_rx_bp_off {
+	u64 u;
+	struct cgxx_cmrx_rx_bp_off_s {
+		u64 mark                             : 7;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_cmrx_rx_bp_off_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_BP_OFF(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_BP_OFF(u64 a)
+{
+	return 0xe8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_bp_on
+ *
+ * CGX Receive Backpressure On Register
+ */
+union cgxx_cmrx_rx_bp_on {
+	u64 u;
+	struct cgxx_cmrx_rx_bp_on_s {
+		u64 mark                             : 13;
+		u64 reserved_13_63                   : 51;
+	} s;
+	/* struct cgxx_cmrx_rx_bp_on_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_BP_ON(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_BP_ON(u64 a)
+{
+	return 0xe0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_bp_status
+ *
+ * CGX CMR Receive Backpressure Status Registers
+ */
+union cgxx_cmrx_rx_bp_status {
+	u64 u;
+	struct cgxx_cmrx_rx_bp_status_s {
+		u64 bp                               : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_cmrx_rx_bp_status_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_BP_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_BP_STATUS(u64 a)
+{
+	return 0xf0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_dmac_ctl0
+ *
+ * CGX CMR Receive DMAC Address-Control0 Register DMAC CAM control
+ * register for use by X2P/NIX bound traffic. Received packets are only
+ * passed to X2P/NIX when the DMAC0 filter result is ACCEPT and STEERING0
+ * filter result is PASS. See also CGX()_CMR_RX_DMAC()_CAM0 and
+ * CGX()_CMR_RX_STEERING0().  Internal: "* ALGORITHM Here is some pseudo
+ * code that represents the address filter behavior. \<pre\>
+ * dmac_addr_filter(uint8 prt, uint48 dmac) { for (lmac=0, lmac\<4,
+ * lmac++) {   if (is_bcst(dmac))                               //
+ * broadcast accept     return (CGX()_CMR(lmac)_RX_DMAC_CTL0[BCST_ACCEPT]
+ * ? ACCEPT : REJECT);   if (is_mcst(dmac) &&
+ * CGX()_CMR(lmac)_RX_DMAC_CTL0[MCST_MODE] == 0)   // multicast reject
+ * return REJECT;   if (is_mcst(dmac) &&
+ * CGX()_CMR(lmac)_RX_DMAC_CTL0[MCST_MODE] == 1)   // multicast accept
+ * return ACCEPT;   else        // DMAC CAM filter     cam_hit = 0;   for
+ * (i=0; i\<32; i++) {     cam = CGX()_CMR_RX_DMAC(i)_CAM0;     if
+ * (cam[EN] && cam[ID] == lmac && cam[ADR] == dmac) {       cam_hit = 1;
+ * break;     }   }   if (cam_hit) {     return
+ * (CGX()_CMR(lmac)_RX_DMAC_CTL0[CAM_ACCEPT] ? ACCEPT : REJECT);   else
+ * return (CGX()_CMR(lmac)_RX_DMAC_CTL0[CAM_ACCEPT] ? REJECT : ACCEPT);
+ * } } \</pre\>"
+ */
+union cgxx_cmrx_rx_dmac_ctl0 {
+	u64 u;
+	struct cgxx_cmrx_rx_dmac_ctl0_s {
+		u64 bcst_accept                      : 1;
+		u64 mcst_mode                        : 2;
+		u64 cam_accept                       : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct cgxx_cmrx_rx_dmac_ctl0_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_DMAC_CTL0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_DMAC_CTL0(u64 a)
+{
+	return 0x1f8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_dmac_ctl1
+ *
+ * CGX CMR Receive DMAC Address-Control1 Register DMAC CAM control
+ * register for use by NCSI bound traffic. Received packets are only
+ * passed to NCSI when the DMAC1 filter result is ACCEPT and STEERING1
+ * filter result is PASS. See also CGX()_CMR_RX_DMAC()_CAM1 and
+ * CGX()_CMR_RX_STEERING1(). For use with the LMAC associated with NCSI;
+ * see CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID].  Internal: ALGORITHM: See
+ * CGX()_CMR()_RX_DMAC_CTL0.
+ */
+union cgxx_cmrx_rx_dmac_ctl1 {
+	u64 u;
+	struct cgxx_cmrx_rx_dmac_ctl1_s {
+		u64 bcst_accept                      : 1;
+		u64 mcst_mode                        : 2;
+		u64 cam_accept                       : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct cgxx_cmrx_rx_dmac_ctl1_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_DMAC_CTL1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_DMAC_CTL1(u64 a)
+{
+	return 0x3f8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_fifo_len
+ *
+ * CGX CMR Receive Fifo Length Registers
+ */
+union cgxx_cmrx_rx_fifo_len {
+	u64 u;
+	struct cgxx_cmrx_rx_fifo_len_s {
+		u64 fifo_len                         : 14;
+		u64 busy                             : 1;
+		u64 fifo_len_e                       : 14;
+		u64 busy_e                           : 1;
+		u64 reserved_30_63                   : 34;
+	} s;
+	/* struct cgxx_cmrx_rx_fifo_len_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_FIFO_LEN(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_FIFO_LEN(u64 a)
+{
+	return 0x108 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_id_map
+ *
+ * CGX CMR Receive ID Map Register These registers set the RX LMAC ID
+ * mapping for X2P/NIX.
+ */
+union cgxx_cmrx_rx_id_map {
+	u64 u;
+	struct cgxx_cmrx_rx_id_map_s {
+		u64 pknd                             : 6;
+		u64 unused                           : 2;
+		u64 rid                              : 7;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct cgxx_cmrx_rx_id_map_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_ID_MAP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_ID_MAP(u64 a)
+{
+	return 0x60 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_logl_xoff
+ *
+ * CGX CMR Receive Logical XOFF Registers
+ */
+union cgxx_cmrx_rx_logl_xoff {
+	u64 u;
+	struct cgxx_cmrx_rx_logl_xoff_s {
+		u64 xoff                             : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_cmrx_rx_logl_xoff_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_LOGL_XOFF(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_LOGL_XOFF(u64 a)
+{
+	return 0xf8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_logl_xon
+ *
+ * CGX CMR Receive Logical XON Registers
+ */
+union cgxx_cmrx_rx_logl_xon {
+	u64 u;
+	struct cgxx_cmrx_rx_logl_xon_s {
+		u64 xon                              : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_cmrx_rx_logl_xon_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_LOGL_XON(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_LOGL_XON(u64 a)
+{
+	return 0x100 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_merge_stat0
+ *
+ * CGX RX Preemption Status Register 0
+ */
+union cgxx_cmrx_rx_merge_stat0 {
+	u64 u;
+	struct cgxx_cmrx_rx_merge_stat0_s {
+		u64 fa_err_cnt                       : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_merge_stat0_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_MERGE_STAT0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_MERGE_STAT0(u64 a)
+{
+	return 0x138 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_merge_stat1
+ *
+ * CGX RX Preemption Status Register 1
+ */
+union cgxx_cmrx_rx_merge_stat1 {
+	u64 u;
+	struct cgxx_cmrx_rx_merge_stat1_s {
+		u64 fs_err_cnt                       : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_merge_stat1_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_MERGE_STAT1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_MERGE_STAT1(u64 a)
+{
+	return 0x140 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_merge_stat2
+ *
+ * CGX RX Preemption Status Register 2
+ */
+union cgxx_cmrx_rx_merge_stat2 {
+	u64 u;
+	struct cgxx_cmrx_rx_merge_stat2_s {
+		u64 fa_ok_cnt                        : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_merge_stat2_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_MERGE_STAT2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_MERGE_STAT2(u64 a)
+{
+	return 0x148 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_merge_stat3
+ *
+ * CGX RX Preemption Status Register 3
+ */
+union cgxx_cmrx_rx_merge_stat3 {
+	u64 u;
+	struct cgxx_cmrx_rx_merge_stat3_s {
+		u64 ff_cnt                           : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_merge_stat3_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_MERGE_STAT3(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_MERGE_STAT3(u64 a)
+{
+	return 0x150 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_merge_stat4
+ *
+ * CGX RX Preemption Status Register 4
+ */
+union cgxx_cmrx_rx_merge_stat4 {
+	u64 u;
+	struct cgxx_cmrx_rx_merge_stat4_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_merge_stat4_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_MERGE_STAT4(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_MERGE_STAT4(u64 a)
+{
+	return 0x158 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_pause_drop_time
+ *
+ * CGX CMR Receive Pause Drop-Time Register
+ */
+union cgxx_cmrx_rx_pause_drop_time {
+	u64 u;
+	struct cgxx_cmrx_rx_pause_drop_time_s {
+		u64 pause_time                       : 16;
+		u64 pause_time_e                     : 16;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_cmrx_rx_pause_drop_time_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_PAUSE_DROP_TIME(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_PAUSE_DROP_TIME(u64 a)
+{
+	return 0x68 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat0
+ *
+ * CGX Receive Status Register 0 These registers provide a count of
+ * received packets that meet the following conditions: * are not
+ * recognized as ERROR packets(any OPCODE). * are not recognized as PAUSE
+ * packets. * are not dropped due FIFO full status. * are not dropped due
+ * DMAC0 or STEERING0 filtering.  Internal: "This pseudo code represents
+ * the RX STAT0 through STAT8 accounting: \<pre\> If (errored)   incr
+ * RX_STAT8 else if (ctrl packet, i.e. Pause/PFC)   incr RX_STAT2,3 else
+ * if (fifo full drop)   incr RX_STAT6,7 else if (DMAC0/VLAN0 filter
+ * drop)   incr RX_STAT4,5 if not a filter+decision else   incr
+ * RX_STAT0,1 end \</pre\>"
+ */
+union cgxx_cmrx_rx_stat0 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat0_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat0_s cn; */
+};
+
+static inline u64 CGXX_CMRX_RX_STAT0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT0(u64 a)
+{
+	return 0x70 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat1
+ *
+ * CGX Receive Status Register 1 These registers provide a count of
+ * octets of received packets.
+ */
+union cgxx_cmrx_rx_stat1 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat1_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat1_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_rx_stat1 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_RX_STAT1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT1(u64 a)
+{
+	return 0x78 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat2
+ *
+ * CGX Receive Status Register 2 These registers provide a count of
+ * received packets that meet the following conditions: * are not
+ * recognized as ERROR packets(any OPCODE). * are recognized as PAUSE
+ * packets.  Pause packets can be optionally dropped or forwarded based
+ * on
+ * CGX()_SMU()_RX_FRM_CTL[CTL_DRP]/CGX()_GMP_GMI_RX()_FRM_CTL[CTL_DRP].
+ * This count increments regardless of whether the packet is dropped.
+ */
+union cgxx_cmrx_rx_stat2 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat2_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat2_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_rx_stat2 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_RX_STAT2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT2(u64 a)
+{
+	return 0x80 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat3
+ *
+ * CGX Receive Status Register 3 These registers provide a count of
+ * octets of received PAUSE and control packets.
+ */
+union cgxx_cmrx_rx_stat3 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat3_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat3_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_rx_stat3 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_RX_STAT3(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT3(u64 a)
+{
+	return 0x88 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat4
+ *
+ * CGX Receive Status Register 4 These registers provide a count of
+ * received packets that meet the following conditions: * are not
+ * recognized as ERROR packets(any OPCODE). * are not recognized as PAUSE
+ * packets. * are not dropped due FIFO full status. * are dropped due
+ * DMAC0 or STEERING0 filtering.  16B packets or smaller (20B in case of
+ * FCS strip) as the result of truncation or other means are not dropped
+ * by CGX (unless filter and decision is also asserted) and will never
+ * appear in this count. Should the MAC signal to the CMR that the packet
+ * be filtered upon decision before the end of packet, then STAT4 and
+ * STAT5 will not be updated.
+ */
+union cgxx_cmrx_rx_stat4 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat4_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat4_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_rx_stat4 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_RX_STAT4(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT4(u64 a)
+{
+	return 0x90 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat5
+ *
+ * CGX Receive Status Register 5 These registers provide a count of
+ * octets of filtered DMAC0 or VLAN STEERING0 packets.
+ */
+union cgxx_cmrx_rx_stat5 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat5_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat5_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_rx_stat5 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_RX_STAT5(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT5(u64 a)
+{
+	return 0x98 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat6
+ *
+ * CGX Receive Status Register 6 These registers provide a count of
+ * received packets that meet the following conditions: * are not
+ * recognized as ERROR packets(any OPCODE). * are not recognized as PAUSE
+ * packets. * are dropped due FIFO full status.  They do not count any
+ * packet that is truncated at the point of overflow and sent on to the
+ * NIX. The truncated packet will be marked with error and increment
+ * STAT8. These registers count all entire packets dropped by the FIFO
+ * for a given LMAC.
+ */
+union cgxx_cmrx_rx_stat6 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat6_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat6_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_rx_stat6 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_RX_STAT6(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT6(u64 a)
+{
+	return 0xa0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat7
+ *
+ * CGX Receive Status Register 7 These registers provide a count of
+ * octets of received packets that were dropped due to a full receive
+ * FIFO.
+ */
+union cgxx_cmrx_rx_stat7 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat7_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat7_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_rx_stat7 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_RX_STAT7(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT7(u64 a)
+{
+	return 0xa8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat8
+ *
+ * CGX Receive Status Register 8 These registers provide a count of
+ * received packets that meet the following conditions:  * are recognized
+ * as ERROR packets(any OPCODE).
+ */
+union cgxx_cmrx_rx_stat8 {
+	u64 u;
+	struct cgxx_cmrx_rx_stat8_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat8_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_rx_stat8 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_RX_STAT8(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT8(u64 a)
+{
+	return 0xb0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_rx_stat_pri#_xoff
+ *
+ * CGX CMR RX XON to XOFF transition Registers
+ */
+union cgxx_cmrx_rx_stat_prix_xoff {
+	u64 u;
+	struct cgxx_cmrx_rx_stat_prix_xoff_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_rx_stat_prix_xoff_s cn; */
+};
+
+/*
+ * Byte offset of cgx#_cmr#_rx_stat_pri#_xoff for indices @a (stride
+ * 0x40000) and @b (stride 8).
+ */
+static inline u64 CGXX_CMRX_RX_STAT_PRIX_XOFF(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_RX_STAT_PRIX_XOFF(u64 a, u64 b)
+{
+	return 0x7c0 + 0x40000 * a + 8 * b;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_scratch#
+ *
+ * CGX CMR Scratch Registers
+ */
+union cgxx_cmrx_scratchx {
+	u64 u;
+	struct cgxx_cmrx_scratchx_s {
+		u64 scratch                          : 64;
+	} s;
+	/* struct cgxx_cmrx_scratchx_s cn; */
+};
+
+/*
+ * Byte offset of cgx#_cmr#_scratch# for indices @a (stride 0x40000)
+ * and @b (stride 8).
+ */
+static inline u64 CGXX_CMRX_SCRATCHX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_SCRATCHX(u64 a, u64 b)
+{
+	return 0x1050 + 0x40000 * a + 8 * b;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_sw_int
+ *
+ * CGX CMR Interrupt Register
+ */
+union cgxx_cmrx_sw_int {
+	u64 u;
+	struct cgxx_cmrx_sw_int_s {
+		u64 sw_set                           : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_cmrx_sw_int_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_sw_int for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_SW_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_SW_INT(u64 a)
+{
+	return 0x180 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_sw_int_ena_w1c
+ *
+ * CGX CMR Interrupt Enable Clear Register This register clears interrupt
+ * enable bits.
+ */
+union cgxx_cmrx_sw_int_ena_w1c {
+	u64 u;
+	struct cgxx_cmrx_sw_int_ena_w1c_s {
+		u64 sw_set                           : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_cmrx_sw_int_ena_w1c_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_sw_int_ena_w1c for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_SW_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_SW_INT_ENA_W1C(u64 a)
+{
+	return 0x190 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_sw_int_ena_w1s
+ *
+ * CGX CMR Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union cgxx_cmrx_sw_int_ena_w1s {
+	u64 u;
+	struct cgxx_cmrx_sw_int_ena_w1s_s {
+		u64 sw_set                           : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_cmrx_sw_int_ena_w1s_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_sw_int_ena_w1s for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_SW_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_SW_INT_ENA_W1S(u64 a)
+{
+	return 0x198 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_sw_int_w1s
+ *
+ * CGX CMR Interrupt Set Register This register sets interrupt bits.
+ */
+union cgxx_cmrx_sw_int_w1s {
+	u64 u;
+	struct cgxx_cmrx_sw_int_w1s_s {
+		u64 sw_set                           : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_cmrx_sw_int_w1s_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_sw_int_w1s for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_SW_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_SW_INT_W1S(u64 a)
+{
+	return 0x188 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_channel
+ *
+ * CGX CMR Transmit-Channels Registers
+ */
+union cgxx_cmrx_tx_channel {
+	u64 u;
+	struct cgxx_cmrx_tx_channel_s {
+		u64 msk                              : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_cmrx_tx_channel_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_channel for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_CHANNEL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_CHANNEL(u64 a)
+{
+	return 0x600 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_fifo_len
+ *
+ * CGX CMR Transmit Fifo Length Registers
+ */
+union cgxx_cmrx_tx_fifo_len {
+	u64 u;
+	struct cgxx_cmrx_tx_fifo_len_s {
+		u64 fifo_len                         : 14;
+		u64 lmac_idle                        : 1;
+		u64 fifo_e_len                       : 14;
+		u64 lmac_e_idle                      : 1;
+		u64 reserved_30_63                   : 34;
+	} s;
+	/* struct cgxx_cmrx_tx_fifo_len_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_fifo_len for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_FIFO_LEN(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_FIFO_LEN(u64 a)
+{
+	return 0x618 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_hg2_status
+ *
+ * CGX CMR Transmit HiGig2 Status Registers
+ */
+union cgxx_cmrx_tx_hg2_status {
+	u64 u;
+	struct cgxx_cmrx_tx_hg2_status_s {
+		u64 lgtim2go                         : 16;
+		u64 xof                              : 16;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_cmrx_tx_hg2_status_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_hg2_status for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_HG2_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_HG2_STATUS(u64 a)
+{
+	return 0x610 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_merge_stat0
+ *
+ * CGX TX Preemption Status Register 0
+ */
+union cgxx_cmrx_tx_merge_stat0 {
+	u64 u;
+	struct cgxx_cmrx_tx_merge_stat0_s {
+		u64 ff_cnt                           : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_merge_stat0_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_merge_stat0 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_MERGE_STAT0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_MERGE_STAT0(u64 a)
+{
+	return 0x160 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_ovr_bp
+ *
+ * CGX CMR Transmit-Channels Backpressure Override Registers
+ */
+union cgxx_cmrx_tx_ovr_bp {
+	u64 u;
+	struct cgxx_cmrx_tx_ovr_bp_s {
+		u64 tx_chan_bp                       : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_cmrx_tx_ovr_bp_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_ovr_bp for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_OVR_BP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_OVR_BP(u64 a)
+{
+	return 0x620 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat0
+ *
+ * CGX CMR Transmit Statistics Registers 0
+ */
+union cgxx_cmrx_tx_stat0 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat0_s {
+		u64 xscol                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat0_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat0 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT0(u64 a)
+{
+	return 0x700 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat1
+ *
+ * CGX CMR Transmit Statistics Registers 1
+ */
+union cgxx_cmrx_tx_stat1 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat1_s {
+		u64 xsdef                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat1_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat1 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT1(u64 a)
+{
+	return 0x708 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat10
+ *
+ * CGX CMR Transmit Statistics Registers 10
+ */
+union cgxx_cmrx_tx_stat10 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat10_s {
+		u64 hist4                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat10_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat10 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT10(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT10(u64 a)
+{
+	return 0x750 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat11
+ *
+ * CGX CMR Transmit Statistics Registers 11
+ */
+union cgxx_cmrx_tx_stat11 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat11_s {
+		u64 hist5                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat11_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat11 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT11(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT11(u64 a)
+{
+	return 0x758 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat12
+ *
+ * CGX CMR Transmit Statistics Registers 12
+ */
+union cgxx_cmrx_tx_stat12 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat12_s {
+		u64 hist6                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat12_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat12 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT12(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT12(u64 a)
+{
+	return 0x760 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat13
+ *
+ * CGX CMR Transmit Statistics Registers 13
+ */
+union cgxx_cmrx_tx_stat13 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat13_s {
+		u64 hist7                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat13_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat13 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT13(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT13(u64 a)
+{
+	return 0x768 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat14
+ *
+ * CGX CMR Transmit Statistics Registers 14
+ */
+union cgxx_cmrx_tx_stat14 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat14_s {
+		u64 bcst                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat14_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat14 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT14(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT14(u64 a)
+{
+	return 0x770 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat15
+ *
+ * CGX CMR Transmit Statistics Registers 15
+ */
+union cgxx_cmrx_tx_stat15 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat15_s {
+		u64 mcst                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat15_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat15 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT15(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT15(u64 a)
+{
+	return 0x778 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat16
+ *
+ * CGX CMR Transmit Statistics Registers 16
+ */
+union cgxx_cmrx_tx_stat16 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat16_s {
+		u64 undflw                           : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat16_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat16 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT16(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT16(u64 a)
+{
+	return 0x780 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat17
+ *
+ * CGX CMR Transmit Statistics Registers 17
+ */
+union cgxx_cmrx_tx_stat17 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat17_s {
+		u64 ctl                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat17_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat17 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT17(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT17(u64 a)
+{
+	return 0x788 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat2
+ *
+ * CGX CMR Transmit Statistics Registers 2
+ */
+union cgxx_cmrx_tx_stat2 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat2_s {
+		u64 mcol                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat2_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat2 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT2(u64 a)
+{
+	return 0x710 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat3
+ *
+ * CGX CMR Transmit Statistics Registers 3
+ */
+union cgxx_cmrx_tx_stat3 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat3_s {
+		u64 scol                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat3_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat3 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT3(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT3(u64 a)
+{
+	return 0x718 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat4
+ *
+ * CGX CMR Transmit Statistics Registers 4
+ */
+union cgxx_cmrx_tx_stat4 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat4_s {
+		u64 octs                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat4_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat4 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT4(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT4(u64 a)
+{
+	return 0x720 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat5
+ *
+ * CGX CMR Transmit Statistics Registers 5
+ */
+union cgxx_cmrx_tx_stat5 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat5_s {
+		u64 pkts                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat5_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat5 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT5(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT5(u64 a)
+{
+	return 0x728 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat6
+ *
+ * CGX CMR Transmit Statistics Registers 6
+ */
+union cgxx_cmrx_tx_stat6 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat6_s {
+		u64 hist0                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat6_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat6 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT6(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT6(u64 a)
+{
+	return 0x730 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat7
+ *
+ * CGX CMR Transmit Statistics Registers 7
+ */
+union cgxx_cmrx_tx_stat7 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat7_s {
+		u64 hist1                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat7_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat7 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT7(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT7(u64 a)
+{
+	return 0x738 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat8
+ *
+ * CGX CMR Transmit Statistics Registers 8
+ */
+union cgxx_cmrx_tx_stat8 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat8_s {
+		u64 hist2                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat8_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat8 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT8(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT8(u64 a)
+{
+	return 0x740 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat9
+ *
+ * CGX CMR Transmit Statistics Registers 9
+ */
+union cgxx_cmrx_tx_stat9 {
+	u64 u;
+	struct cgxx_cmrx_tx_stat9_s {
+		u64 hist3                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat9_s cn; */
+};
+
+/* Byte offset of cgx#_cmr#_tx_stat9 for index @a (stride 0x40000). */
+static inline u64 CGXX_CMRX_TX_STAT9(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT9(u64 a)
+{
+	return 0x748 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr#_tx_stat_pri#_xoff
+ *
+ * CGX CMR TX XON to XOFF transition Registers
+ */
+union cgxx_cmrx_tx_stat_prix_xoff {
+	u64 u;
+	struct cgxx_cmrx_tx_stat_prix_xoff_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmrx_tx_stat_prix_xoff_s cn; */
+};
+
+/*
+ * Byte offset of cgx#_cmr#_tx_stat_pri#_xoff for indices @a (stride
+ * 0x40000) and @b (stride 8).
+ */
+static inline u64 CGXX_CMRX_TX_STAT_PRIX_XOFF(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMRX_TX_STAT_PRIX_XOFF(u64 a, u64 b)
+{
+	return 0x800 + 0x40000 * a + 8 * b;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_bad
+ *
+ * CGX CMR Bad Registers
+ */
+union cgxx_cmr_bad {
+	u64 u;
+	struct cgxx_cmr_bad_s {
+		u64 rxb_nxl                          : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_cmr_bad_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_bad CSR. */
+static inline u64 CGXX_CMR_BAD(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_BAD(void)
+{
+	return 0x1020;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_chan_msk_and
+ *
+ * CGX CMR Backpressure Channel Mask AND Registers
+ */
+union cgxx_cmr_chan_msk_and {
+	u64 u;
+	struct cgxx_cmr_chan_msk_and_s {
+		u64 msk_and                          : 64;
+	} s;
+	/* struct cgxx_cmr_chan_msk_and_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_chan_msk_and CSR. */
+static inline u64 CGXX_CMR_CHAN_MSK_AND(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_CHAN_MSK_AND(void)
+{
+	return 0x110;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_chan_msk_or
+ *
+ * CGX Backpressure Channel Mask OR Registers
+ */
+union cgxx_cmr_chan_msk_or {
+	u64 u;
+	struct cgxx_cmr_chan_msk_or_s {
+		u64 msk_or                           : 64;
+	} s;
+	/* struct cgxx_cmr_chan_msk_or_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_chan_msk_or CSR. */
+static inline u64 CGXX_CMR_CHAN_MSK_OR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_CHAN_MSK_OR(void)
+{
+	return 0x118;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_eco
+ *
+ * INTERNAL: CGX ECO Registers
+ */
+union cgxx_cmr_eco {
+	u64 u;
+	struct cgxx_cmr_eco_s {
+		u64 eco_rw                           : 32;
+		u64 eco_ro                           : 32;
+	} s;
+	/* struct cgxx_cmr_eco_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_eco CSR. */
+static inline u64 CGXX_CMR_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_ECO(void)
+{
+	return 0x1028;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_global_config
+ *
+ * CGX CMR Global Configuration Register These registers configure the
+ * global CMR, PCS, and MAC.
+ */
+union cgxx_cmr_global_config {
+	u64 u;
+	struct cgxx_cmr_global_config_s {
+		u64 pmux_sds_sel                     : 1;
+		u64 cgx_clk_enable                   : 1;
+		u64 cmr_x2p_reset                    : 3;
+		u64 interleave_mode                  : 1;
+		u64 fcs_strip                        : 1;
+		u64 ncsi_lmac_id                     : 2;
+		u64 cmr_ncsi_drop                    : 1;
+		u64 cmr_ncsi_reset                   : 1;
+		u64 cmr_ncsi_tag_cnt                 : 13;
+		u64 cmr_clken_ovrd                   : 1;
+		u64 reserved_25_63                   : 39;
+	} s;
+	/* struct cgxx_cmr_global_config_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_global_config CSR. */
+static inline u64 CGXX_CMR_GLOBAL_CONFIG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_GLOBAL_CONFIG(void)
+{
+	return 8;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_mem_int
+ *
+ * CGX CMR Memory Interrupt Register
+ */
+union cgxx_cmr_mem_int {
+	u64 u;
+	struct cgxx_cmr_mem_int_s {
+		u64 gmp_in_overfl                    : 1;
+		u64 smu_in_overfl                    : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct cgxx_cmr_mem_int_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_mem_int CSR. */
+static inline u64 CGXX_CMR_MEM_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_MEM_INT(void)
+{
+	return 0x10;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_mem_int_ena_w1c
+ *
+ * CGX CMR Memory Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union cgxx_cmr_mem_int_ena_w1c {
+	u64 u;
+	struct cgxx_cmr_mem_int_ena_w1c_s {
+		u64 gmp_in_overfl                    : 1;
+		u64 smu_in_overfl                    : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct cgxx_cmr_mem_int_ena_w1c_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_mem_int_ena_w1c CSR. */
+static inline u64 CGXX_CMR_MEM_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_MEM_INT_ENA_W1C(void)
+{
+	return 0x20;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_mem_int_ena_w1s
+ *
+ * CGX CMR Memory Interrupt Enable Set Register This register sets
+ * interrupt enable bits.
+ */
+union cgxx_cmr_mem_int_ena_w1s {
+	u64 u;
+	struct cgxx_cmr_mem_int_ena_w1s_s {
+		u64 gmp_in_overfl                    : 1;
+		u64 smu_in_overfl                    : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct cgxx_cmr_mem_int_ena_w1s_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_mem_int_ena_w1s CSR. */
+static inline u64 CGXX_CMR_MEM_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_MEM_INT_ENA_W1S(void)
+{
+	return 0x28;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_mem_int_w1s
+ *
+ * CGX CMR Memory Interrupt Set Register This register sets interrupt
+ * bits.
+ */
+union cgxx_cmr_mem_int_w1s {
+	u64 u;
+	struct cgxx_cmr_mem_int_w1s_s {
+		u64 gmp_in_overfl                    : 1;
+		u64 smu_in_overfl                    : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct cgxx_cmr_mem_int_w1s_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_mem_int_w1s CSR. */
+static inline u64 CGXX_CMR_MEM_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_MEM_INT_W1S(void)
+{
+	return 0x18;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_nic_nxc_adr
+ *
+ * CGX CMR NIC NXC Exception Registers
+ */
+union cgxx_cmr_nic_nxc_adr {
+	u64 u;
+	struct cgxx_cmr_nic_nxc_adr_s {
+		u64 channel                          : 12;
+		u64 lmac_id                          : 4;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_cmr_nic_nxc_adr_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_nic_nxc_adr CSR. */
+static inline u64 CGXX_CMR_NIC_NXC_ADR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_NIC_NXC_ADR(void)
+{
+	return 0x1030;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_nix0_nxc_adr
+ *
+ * CGX CMR NIX0 NXC Exception Registers
+ */
+union cgxx_cmr_nix0_nxc_adr {
+	u64 u;
+	struct cgxx_cmr_nix0_nxc_adr_s {
+		u64 channel                          : 12;
+		u64 lmac_id                          : 4;
+		u64 channel_e                        : 12;
+		u64 lmac_e_id                        : 4;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_cmr_nix0_nxc_adr_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_nix0_nxc_adr CSR. */
+static inline u64 CGXX_CMR_NIX0_NXC_ADR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_NIX0_NXC_ADR(void)
+{
+	return 0x1038;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_nix1_nxc_adr
+ *
+ * CGX CMR NIX1 NXC Exception Registers
+ */
+union cgxx_cmr_nix1_nxc_adr {
+	u64 u;
+	struct cgxx_cmr_nix1_nxc_adr_s {
+		u64 channel                          : 12;
+		u64 lmac_id                          : 4;
+		u64 channel_e                        : 12;
+		u64 lmac_e_id                        : 4;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_cmr_nix1_nxc_adr_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_nix1_nxc_adr CSR. */
+static inline u64 CGXX_CMR_NIX1_NXC_ADR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_NIX1_NXC_ADR(void)
+{
+	return 0x1040;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_p2x#_count
+ *
+ * CGX P2X Activity Register
+ */
+union cgxx_cmr_p2xx_count {
+	u64 u;
+	struct cgxx_cmr_p2xx_count_s {
+		u64 p2x_cnt                          : 64;
+	} s;
+	/* struct cgxx_cmr_p2xx_count_s cn; */
+};
+
+/* Byte offset of cgx#_cmr_p2x#_count for index @a (stride 0x1000). */
+static inline u64 CGXX_CMR_P2XX_COUNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_P2XX_COUNT(u64 a)
+{
+	return 0x168 + 0x1000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_dmac#_cam0
+ *
+ * CGX CMR Receive CAM Registers These registers provide access to the 32
+ * DMAC CAM0 entries in CGX, for use by X2P/NIX bound traffic.
+ */
+union cgxx_cmr_rx_dmacx_cam0 {
+	u64 u;
+	struct cgxx_cmr_rx_dmacx_cam0_s {
+		u64 adr                              : 48;
+		u64 en                               : 1;
+		u64 id                               : 2;
+		u64 reserved_51_63                   : 13;
+	} s;
+	/* struct cgxx_cmr_rx_dmacx_cam0_s cn; */
+};
+
+/* Byte offset of cgx#_cmr_rx_dmac#_cam0 for CAM entry @a (stride 8). */
+static inline u64 CGXX_CMR_RX_DMACX_CAM0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_DMACX_CAM0(u64 a)
+{
+	return 0x200 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_dmac#_cam1
+ *
+ * CGX CMR Receive CAM Registers These registers provide access to the 32
+ * DMAC CAM entries in CGX for use by NCSI bound traffic. See
+ * CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID] and CGX()_CMR_RX_STEERING1()
+ * registers.
+ */
+union cgxx_cmr_rx_dmacx_cam1 {
+	u64 u;
+	struct cgxx_cmr_rx_dmacx_cam1_s {
+		u64 adr                              : 48;
+		u64 en                               : 1;
+		u64 id                               : 2;
+		u64 reserved_51_63                   : 13;
+	} s;
+	/* struct cgxx_cmr_rx_dmacx_cam1_s cn; */
+};
+
+/* Byte offset of cgx#_cmr_rx_dmac#_cam1 for CAM entry @a (stride 8). */
+static inline u64 CGXX_CMR_RX_DMACX_CAM1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_DMACX_CAM1(u64 a)
+{
+	return 0x400 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_lmacs
+ *
+ * CGX CMR Receive Logical MACs Registers
+ */
+union cgxx_cmr_rx_lmacs {
+	u64 u;
+	struct cgxx_cmr_rx_lmacs_s {
+		u64 lmacs                            : 3;
+		u64 reserved_3_63                    : 61;
+	} s;
+	/* struct cgxx_cmr_rx_lmacs_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_rx_lmacs CSR. */
+static inline u64 CGXX_CMR_RX_LMACS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_LMACS(void)
+{
+	return 0x128;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_ovr_bp
+ *
+ * CGX CMR Receive-Ports Backpressure Override Registers Per-LMAC
+ * backpressure override register. For SMU, CGX()_CMR_RX_OVR_BP[EN]\<0\>
+ * must be set to one and CGX()_CMR_RX_OVR_BP[BP]\<0\> must be cleared to
+ * zero (to forcibly disable hardware-automatic 802.3 PAUSE packet
+ * generation) with the HiGig2 Protocol when
+ * CGX()_SMU()_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated
+ * by CGX()_SMU()_TX_CTL[HG_EN]=1 and CGX()_SMU()_RX_UDD_SKP[LEN]=16).
+ * Hardware can only auto-generate backpressure through HiGig2 messages
+ * (optionally, when CGX()_SMU()_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2
+ * protocol.
+ */
+union cgxx_cmr_rx_ovr_bp {
+	u64 u;
+	struct cgxx_cmr_rx_ovr_bp_s {
+		u64 ign_fifo_bp                      : 4;
+		u64 bp                               : 4;
+		u64 en                               : 4;
+		u64 reserved_12_63                   : 52;
+	} s;
+	/* struct cgxx_cmr_rx_ovr_bp_s cn; */
+};
+
+/* Byte offset of the (unindexed) cgx#_cmr_rx_ovr_bp CSR. */
+static inline u64 CGXX_CMR_RX_OVR_BP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_OVR_BP(void)
+{
+	return 0x130;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_stat10
+ *
+ * CGX Receive Status Register 10 These registers provide a count of
+ * octets of filtered DMAC1 or VLAN STEERING1 packets.
+ */
+union cgxx_cmr_rx_stat10 {
+	u64 u;
+	struct cgxx_cmr_rx_stat10_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmr_rx_stat10_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STAT10(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STAT10(void)
+{
+	return 0xc0;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_stat11
+ *
+ * CGX Receive Status Register 11 This registers provides a count of
+ * packets dropped at the NCSI interface. This includes drops due to
+ * CGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_DROP] or NCSI FIFO full. The count of
+ * dropped NCSI packets is not accounted for in any other stats
+ * registers.
+ */
+union cgxx_cmr_rx_stat11 {
+	u64 u;
+	struct cgxx_cmr_rx_stat11_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmr_rx_stat11_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STAT11(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STAT11(void)
+{
+	return 0xc8;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_stat12
+ *
+ * CGX Receive Status Register 12 This register provide a count of octets
+ * of dropped at the NCSI interface.
+ */
+union cgxx_cmr_rx_stat12 {
+	u64 u;
+	struct cgxx_cmr_rx_stat12_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmr_rx_stat12_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STAT12(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STAT12(void)
+{
+	return 0xd0;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_stat9
+ *
+ * CGX Receive Status Register 9 These registers provide a count of all
+ * received packets that were dropped by the DMAC1 or VLAN STEERING1
+ * filter. Packets that are dropped by the DMAC1 or VLAN STEERING1
+ * filters are counted here regardless of whether they were ERR packets,
+ * but does not include those reported in CGX()_CMR()_RX_STAT6. 16B
+ * packets or smaller (20B in case of FCS strip) as the result of
+ * truncation or other means are not dropped by CGX (unless filter and
+ * decision is also asserted) and will never appear in this count. Should
+ * the MAC signal to the CMR that the packet be filtered upon decision
+ * before the end of packet, then STAT9 and STAT10 will not be updated.
+ */
+union cgxx_cmr_rx_stat9 {
+	u64 u;
+	struct cgxx_cmr_rx_stat9_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_cmr_rx_stat9_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STAT9(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STAT9(void)
+{
+	return 0xb8;
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_steering0#
+ *
+ * CGX CMR Receive Steering0 Registers These registers, along with
+ * CGX()_CMR_RX_STEERING_VETYPE0(), provide eight filters for identifying
+ * and steering receive traffic to X2P/NIX. Received packets are only
+ * passed to X2P/NIX when the DMAC0 filter result is ACCEPT and STEERING0
+ * filter result is PASS. See also CGX()_CMR()_RX_DMAC_CTL0.  Internal:
+ * "* ALGORITHM \<pre\> rx_steering(uint48 pkt_dmac, uint16 pkt_etype,
+ * uint16 pkt_vlan_id) {    for (int i = 0; i \< 8; i++) {       steer =
+ * CGX()_CMR_RX_STEERING0(i);       vetype =
+ * CGX()_CMR_RX_STEERING_VETYPE0(i);       if (steer[MCST_EN] ||
+ * steer[DMAC_EN] || vetype[VLAN_EN] || vetype[VLAN_TAG_EN]) {
+ * // Filter is enabled.          if (   (!steer[MCST_EN] ||
+ * is_mcst(pkt_dmac))              && (!steer[DMAC_EN] || pkt_dmac ==
+ * steer[DMAC])              && (!vetype[VLAN_EN] || pkt_vlan_id ==
+ * vetype[VLAN_ID])              && (!vetype[VLAN_TAG_EN] || pkt_etype ==
+ * vetype[VLAN_ETYPE]) )          {             // Filter match (all
+ * enabled matching criteria are met).             return steer[PASS];
+ * }       }    }    return CGX()_CMR_RX_STEERING_DEFAULT0[PASS]; // No
+ * match } \</pre\>"
+ */
+union cgxx_cmr_rx_steering0x {
+	u64 u;
+	struct cgxx_cmr_rx_steering0x_s {
+		u64 dmac                             : 48; /* [47:0] DMAC match value */
+		u64 dmac_en                          : 1; /* [48] enable DMAC compare */
+		u64 mcst_en                          : 1; /* [49] enable multicast compare */
+		u64 pass                             : 1; /* [50] steer result on match */
+		u64 reserved_51_63                   : 13;
+	} s;
+	/* struct cgxx_cmr_rx_steering0x_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STEERING0X(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STEERING0X(u64 a)
+{
+	return 0x300 + 8 * a; /* filter index a = 0..7 */
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_steering1#
+ *
+ * CGX CMR Receive Steering1 Registers These registers, along with
+ * CGX()_CMR_RX_STEERING_VETYPE1(), provide eight filters for identifying
+ * and steering NCSI receive traffic. Received packets are only passed to
+ * NCSI when the DMAC1 filter result is ACCEPT and STEERING1 filter
+ * result is PASS. See also CGX()_CMR_RX_DMAC()_CAM1 and
+ * CGX()_CMR_RX_STEERING1(). For use with the LMAC associated with NCSI.
+ * See CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID].  Internal: ALGORITHM: See
+ * CGX()_CMR_RX_STEERING0().
+ */
+union cgxx_cmr_rx_steering1x {
+	u64 u;
+	struct cgxx_cmr_rx_steering1x_s {
+		u64 dmac                             : 48; /* [47:0] DMAC match value */
+		u64 dmac_en                          : 1; /* [48] enable DMAC compare */
+		u64 mcst_en                          : 1; /* [49] enable multicast compare */
+		u64 pass                             : 1; /* [50] steer result on match */
+		u64 reserved_51_63                   : 13;
+	} s;
+	/* struct cgxx_cmr_rx_steering1x_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STEERING1X(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STEERING1X(u64 a)
+{
+	return 0x500 + 8 * a; /* filter index a = 0..7 */
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_steering_default0
+ *
+ * CGX CMR Receive Steering Default0 Destination Register For determining
+ * destination of traffic that does not meet matching algorithm described
+ * in registers CGX()_CMR_RX_STEERING0() and
+ * CGX()_CMR_RX_STEERING_VETYPE0(). All 16B packets or smaller (20B in
+ * case of FCS strip) as the result of truncation will steer to default
+ * destination
+ */
+union cgxx_cmr_rx_steering_default0 {
+	u64 u;
+	struct cgxx_cmr_rx_steering_default0_s {
+		u64 pass                             : 1; /* [0] default steering result */
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_cmr_rx_steering_default0_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STEERING_DEFAULT0(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STEERING_DEFAULT0(void)
+{
+	return 0x3f0; /* register byte offset */
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_steering_default1
+ *
+ * CGX CMR Receive Steering Default1 Destination Register For use with
+ * the lmac_id associated with NCSI. See
+ * CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID]. For determining destination of
+ * traffic that does not meet matching algorithm described in registers
+ * CGX()_CMR_RX_STEERING1() and CGX()_CMR_RX_STEERING_VETYPE1(). All 16B
+ * packets or smaller (20B in case of FCS strip) as the result of
+ * truncation will steer to default destination
+ */
+union cgxx_cmr_rx_steering_default1 {
+	u64 u;
+	struct cgxx_cmr_rx_steering_default1_s {
+		u64 pass                             : 1; /* [0] default steering result */
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_cmr_rx_steering_default1_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STEERING_DEFAULT1(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STEERING_DEFAULT1(void)
+{
+	return 0x5e0; /* register byte offset */
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_steering_vetype0#
+ *
+ * CGX CMR Receive VLAN Ethertype0 Register These registers, along with
+ * CGX()_CMR_RX_STEERING0(), provide eight filters for identifying and
+ * steering X2P/NIX receive traffic.
+ */
+union cgxx_cmr_rx_steering_vetype0x {
+	u64 u;
+	struct cgxx_cmr_rx_steering_vetype0x_s {
+		u64 vlan_etype                       : 16; /* [15:0] Ethertype match value */
+		u64 vlan_tag_en                      : 1; /* [16] enable Ethertype compare */
+		u64 vlan_id                          : 12; /* [28:17] VLAN ID match value */
+		u64 vlan_en                          : 1; /* [29] enable VLAN ID compare */
+		u64 reserved_30_63                   : 34;
+	} s;
+	/* struct cgxx_cmr_rx_steering_vetype0x_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STEERING_VETYPE0X(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STEERING_VETYPE0X(u64 a)
+{
+	return 0x380 + 8 * a; /* filter index a = 0..7 */
+}
+
+/**
+ * Register (RSL) cgx#_cmr_rx_steering_vetype1#
+ *
+ * CGX CMR Receive VLAN Ethertype1 Register For use with the lmac_id
+ * associated with NCSI. See CGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID]. These
+ * registers, along with CGX()_CMR_RX_STEERING1(), provide eight filters
+ * for identifying and steering NCSI receive traffic.
+ */
+union cgxx_cmr_rx_steering_vetype1x {
+	u64 u;
+	struct cgxx_cmr_rx_steering_vetype1x_s {
+		u64 vlan_etype                       : 16; /* [15:0] Ethertype match value */
+		u64 vlan_tag_en                      : 1; /* [16] enable Ethertype compare */
+		u64 vlan_id                          : 12; /* [28:17] VLAN ID match value */
+		u64 vlan_en                          : 1; /* [29] enable VLAN ID compare */
+		u64 reserved_30_63                   : 34;
+	} s;
+	/* struct cgxx_cmr_rx_steering_vetype1x_s cn; */
+};
+
+static inline u64 CGXX_CMR_RX_STEERING_VETYPE1X(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_RX_STEERING_VETYPE1X(u64 a)
+{
+	return 0x580 + 8 * a; /* filter index a = 0..7 */
+}
+
+/**
+ * Register (RSL) cgx#_cmr_tx_lmacs
+ *
+ * CGX CMR Transmit Logical MACs Registers This register sets the number
+ * of LMACs allowed on the TX interface. The value is important for
+ * defining the partitioning of the transmit FIFO.
+ */
+union cgxx_cmr_tx_lmacs {
+	u64 u;
+	struct cgxx_cmr_tx_lmacs_s {
+		u64 lmacs                            : 3; /* [2:0] number of TX LMACs */
+		u64 reserved_3_63                    : 61;
+	} s;
+	/* struct cgxx_cmr_tx_lmacs_s cn; */
+};
+
+static inline u64 CGXX_CMR_TX_LMACS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_TX_LMACS(void)
+{
+	return 0x1000; /* register byte offset */
+}
+
+/**
+ * Register (RSL) cgx#_cmr_x2p#_count
+ *
+ * CGX X2P Activity Register
+ */
+union cgxx_cmr_x2px_count {
+	u64 u;
+	struct cgxx_cmr_x2px_count_s {
+		u64 x2p_cnt                          : 64; /* [63:0] X2P activity count */
+	} s;
+	/* struct cgxx_cmr_x2px_count_s cn; */
+};
+
+static inline u64 CGXX_CMR_X2PX_COUNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CMR_X2PX_COUNT(u64 a)
+{
+	return 0x170 + 0x1000 * a; /* instance stride 0x1000 */
+}
+
+/**
+ * Register (RSL) cgx#_const
+ *
+ * CGX CONST Registers This register contains constants for software
+ * discovery.
+ */
+union cgxx_const {
+	u64 u;
+	struct cgxx_const_s {
+		u64 tx_fifosz                        : 24; /* [23:0] TX FIFO size */
+		u64 lmacs                            : 8; /* [31:24] number of LMACs */
+		u64 rx_fifosz                        : 24; /* [55:32] RX FIFO size */
+		u64 reserved_56_63                   : 8;
+	} s;
+	/* struct cgxx_const_s cn; */
+};
+
+static inline u64 CGXX_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CONST(void)
+{
+	return 0x2000; /* register byte offset */
+}
+
+/**
+ * Register (RSL) cgx#_const1
+ *
+ * CGX CONST1 Registers This register contains constants for software
+ * discovery.
+ */
+union cgxx_const1 {
+	u64 u;
+	struct cgxx_const1_s {
+		u64 types                            : 11; /* [10:0] */
+		u64 res_types                        : 21; /* [31:11] */
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_const1_s cn; */
+};
+
+static inline u64 CGXX_CONST1(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_CONST1(void)
+{
+	return 0x2008; /* register byte offset */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi#_rx_wol_ctrl0
+ *
+ * CGX GMP GMI RX Wake-on-LAN Control 0 Registers
+ */
+union cgxx_gmp_gmix_rx_wol_ctrl0 {
+	u64 u;
+	struct cgxx_gmp_gmix_rx_wol_ctrl0_s {
+		u64 dmac                             : 48; /* [47:0] WOL DMAC match value */
+		u64 pswd_len                         : 4; /* [51:48] pswd length */
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct cgxx_gmp_gmix_rx_wol_ctrl0_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL0(u64 a)
+{
+	return 0x38a00 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi#_rx_wol_ctrl1
+ *
+ * CGX GMP GMI RX Wake-on-LAN Control 1 Registers
+ */
+union cgxx_gmp_gmix_rx_wol_ctrl1 {
+	u64 u;
+	struct cgxx_gmp_gmix_rx_wol_ctrl1_s {
+		u64 pswd                             : 64; /* [63:0] WOL password */
+	} s;
+	/* struct cgxx_gmp_gmix_rx_wol_ctrl1_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMIX_RX_WOL_CTRL1(u64 a)
+{
+	return 0x38a08 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi#_tx_eee
+ *
+ * INTERNAL: CGX GMP GMI TX EEE Configure Registers  Reserved. Internal:
+ * These registers control when GMP GMI TX requests to enter or exit
+ * LPI. Those registers take effect only when EEE is supported and
+ * enabled for a given LMAC.
+ */
+union cgxx_gmp_gmix_tx_eee {
+	u64 u;
+	struct cgxx_gmp_gmix_tx_eee_s {
+		u64 idle_thresh                      : 28; /* [27:0] */
+		u64 reserved_28                      : 1;
+		u64 force_lpi                        : 1; /* [29] */
+		u64 wakeup                           : 1; /* [30] */
+		u64 auto_lpi                         : 1; /* [31] */
+		u64 idle_cnt                         : 28; /* [59:32] */
+		u64 tx_lpi                           : 1; /* [60] */
+		u64 tx_lpi_wait                      : 1; /* [61] */
+		u64 sync_status_lpi_enable           : 1; /* [62] */
+		u64 reserved_63                      : 1;
+	} s;
+	/* struct cgxx_gmp_gmix_tx_eee_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMIX_TX_EEE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMIX_TX_EEE(u64 a)
+{
+	return 0x38800 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi#_tx_eee_cfg1
+ *
+ * INTERNAL: CGX GMP GMI TX EEE Configure More Configuration Registers
+ * Reserved. Internal: Controls the GMP exiting of LPI and starting to
+ * send data.
+ */
+union cgxx_gmp_gmix_tx_eee_cfg1 {
+	u64 u;
+	struct cgxx_gmp_gmix_tx_eee_cfg1_s {
+		u64 wake2data_time                   : 24; /* [23:0] */
+		u64 reserved_24_35                   : 12;
+		u64 tx_eee_enable                    : 1; /* [36] */
+		u64 reserved_37_39                   : 3;
+		u64 sync2lpi_time                    : 21; /* [60:40] */
+		u64 reserved_61_63                   : 3;
+	} s;
+	struct cgxx_gmp_gmix_tx_eee_cfg1_cn {
+		u64 wake2data_time                   : 24;
+		u64 reserved_24_31                   : 8;
+		u64 reserved_32_35                   : 4;
+		u64 tx_eee_enable                    : 1;
+		u64 reserved_37_39                   : 3;
+		u64 sync2lpi_time                    : 21;
+		u64 reserved_61_63                   : 3;
+	} cn; /* same field positions as _s; reserved range split differently */
+};
+
+static inline u64 CGXX_GMP_GMIX_TX_EEE_CFG1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMIX_TX_EEE_CFG1(u64 a)
+{
+	return 0x38808 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi#_wol_int
+ *
+ * CGX GMP GMI RX WOL Interrupt Registers These registers allow WOL
+ * interrupts to be sent to the control processor.
+ */
+union cgxx_gmp_gmix_wol_int {
+	u64 u;
+	struct cgxx_gmp_gmix_wol_int_s {
+		u64 wol_rcvd                         : 1; /* [0] WOL received */
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_gmp_gmix_wol_int_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMIX_WOL_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMIX_WOL_INT(u64 a)
+{
+	return 0x38a80 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi#_wol_int_ena_w1c
+ *
+ * CGX GMP GMI RX WOL Interrupt Enable Clear Registers This register
+ * clears interrupt enable bits.
+ */
+union cgxx_gmp_gmix_wol_int_ena_w1c {
+	u64 u;
+	struct cgxx_gmp_gmix_wol_int_ena_w1c_s {
+		u64 wol_rcvd                         : 1; /* [0] WOL received (enable clear) */
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_gmp_gmix_wol_int_ena_w1c_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1C(u64 a)
+{
+	return 0x38a90 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi#_wol_int_ena_w1s
+ *
+ * CGX GMP GMI RX WOL Interrupt Enable Set Registers This register sets
+ * interrupt enable bits.
+ */
+union cgxx_gmp_gmix_wol_int_ena_w1s {
+	u64 u;
+	struct cgxx_gmp_gmix_wol_int_ena_w1s_s {
+		u64 wol_rcvd                         : 1; /* [0] WOL received (enable set) */
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_gmp_gmix_wol_int_ena_w1s_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMIX_WOL_INT_ENA_W1S(u64 a)
+{
+	return 0x38a98 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi#_wol_int_w1s
+ *
+ * CGX GMP GMI RX WOL Interrupt Set Registers This register sets
+ * interrupt bits.
+ */
+union cgxx_gmp_gmix_wol_int_w1s {
+	u64 u;
+	struct cgxx_gmp_gmix_wol_int_w1s_s {
+		u64 wol_rcvd                         : 1; /* [0] WOL received (set) */
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_gmp_gmix_wol_int_w1s_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMIX_WOL_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMIX_WOL_INT_W1S(u64 a)
+{
+	return 0x38a88 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_prt#_cfg
+ *
+ * CGX GMP GMI LMAC Configuration Registers This register controls the
+ * configuration of the LMAC.
+ */
+union cgxx_gmp_gmi_prtx_cfg {
+	u64 u;
+	struct cgxx_gmp_gmi_prtx_cfg_s {
+		u64 reserved_0                       : 1;
+		u64 speed                            : 1; /* [1] */
+		u64 duplex                           : 1; /* [2] */
+		u64 slottime                         : 1; /* [3] */
+		u64 reserved_4_7                     : 4;
+		u64 speed_msb                        : 1; /* [8] */
+		u64 reserved_9_11                    : 3;
+		u64 rx_idle                          : 1; /* [12] */
+		u64 tx_idle                          : 1; /* [13] */
+		u64 reserved_14_63                   : 50;
+	} s;
+	/* struct cgxx_gmp_gmi_prtx_cfg_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_PRTX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_PRTX_CFG(u64 a)
+{
+	return 0x38020 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_decision
+ *
+ * CGX GMP Packet-Decision Registers This register specifies the byte
+ * count used to determine when to accept or to filter a packet. As each
+ * byte in a packet is received by GMI, the L2 byte count is compared
+ * against [CNT]. In normal operation, the L2 header begins after the
+ * PREAMBLE + SFD (CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] = 1) and any
+ * optional UDD skip data (CGX()_GMP_GMI_RX()_UDD_SKP[LEN]).  Internal:
+ * Notes: As each byte in a packet is received by GMI, the L2 byte count
+ * is compared against the [CNT].  The L2 byte count is the number of
+ * bytes from the beginning of the L2 header (DMAC).  In normal
+ * operation, the L2 header begins after the PREAMBLE+SFD
+ * (CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK]=1) and any optional UDD skip data
+ * (CGX()_GMP_GMI_RX()_UDD_SKP[LEN]). When
+ * CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are
+ * prepended to the packet and would require UDD skip length to account
+ * for them.  Full Duplex: _   L2 Size \<  [CNT] - Accept packet. No
+ * filtering is applied. _   L2 Size \>= [CNT] - Apply filter. Accept
+ * packet based on PAUSE packet filter.  Half Duplex: _   L2 Size \<
+ * [CNT] - Drop packet. Packet is unconditionally dropped. _   L2 Size
+ * \>= [CNT] - Accept packet.  where L2_size = MAX(0, total_packet_size -
+ * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] -
+ * ((CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK]==1)*8)).
+ */
+union cgxx_gmp_gmi_rxx_decision {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_decision_s {
+		u64 cnt                              : 5; /* [4:0] L2 byte-count threshold */
+		u64 reserved_5_63                    : 59;
+	} s;
+	/* struct cgxx_gmp_gmi_rxx_decision_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_DECISION(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_DECISION(u64 a)
+{
+	return 0x38040 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_frm_chk
+ *
+ * CGX GMP Frame Check Registers
+ */
+union cgxx_gmp_gmi_rxx_frm_chk {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_frm_chk_s {
+		u64 minerr                           : 1; /* [0] */
+		u64 carext                           : 1; /* [1] */
+		u64 reserved_2                       : 1;
+		u64 jabber                           : 1; /* [3] */
+		u64 fcserr                           : 1; /* [4] */
+		u64 reserved_5_6                     : 2;
+		u64 rcverr                           : 1; /* [7] */
+		u64 skperr                           : 1; /* [8] */
+		u64 reserved_9_63                    : 55;
+	} s;
+	/* struct cgxx_gmp_gmi_rxx_frm_chk_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_FRM_CHK(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_FRM_CHK(u64 a)
+{
+	return 0x38030 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_frm_ctl
+ *
+ * CGX GMP Frame Control Registers This register controls the handling of
+ * the frames. The [CTL_BCK] and [CTL_DRP] bits control how the hardware
+ * handles incoming PAUSE packets. The most common modes of operation: _
+ * [CTL_BCK] = 1, [CTL_DRP] = 1: hardware handles everything. _ [CTL_BCK]
+ * = 0, [CTL_DRP] = 0: software sees all PAUSE frames. _ [CTL_BCK] = 0,
+ * [CTL_DRP] = 1: all PAUSE frames are completely ignored.  These control
+ * bits should be set to [CTL_BCK] = 0, [CTL_DRP] = 0 in half-duplex
+ * mode. Since PAUSE packets only apply to full duplex operation, any
+ * PAUSE packet would constitute an exception which should be handled by
+ * the processing cores. PAUSE packets should not be forwarded.
+ * Internal: Notes: [PRE_STRP]: When [PRE_CHK] is set (indicating that
+ * the PREAMBLE will be sent), [PRE_STRP] determines if the PREAMBLE+SFD
+ * bytes are thrown away or sent to the Octane core as part of the
+ * packet. In either mode, the PREAMBLE+SFD bytes are not counted toward
+ * the packet size when checking against the MIN and MAX bounds.
+ * Furthermore, the bytes are skipped when locating the start of the L2
+ * header for DMAC and Control frame recognition.
+ */
+union cgxx_gmp_gmi_rxx_frm_ctl {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_frm_ctl_s {
+		u64 pre_chk                          : 1; /* [0] */
+		u64 pre_strp                         : 1; /* [1] */
+		u64 ctl_drp                          : 1; /* [2] */
+		u64 ctl_bck                          : 1; /* [3] */
+		u64 ctl_mcst                         : 1; /* [4] */
+		u64 ctl_smac                         : 1; /* [5] */
+		u64 pre_free                         : 1; /* [6] */
+		u64 reserved_7_8                     : 2;
+		u64 pre_align                        : 1; /* [9] */
+		u64 null_dis                         : 1; /* [10] */
+		u64 reserved_11                      : 1;
+		u64 ptp_mode                         : 1; /* [12] */
+		u64 rx_fc_type                       : 1; /* [13] */
+		u64 reserved_14_63                   : 50;
+	} s;
+	struct cgxx_gmp_gmi_rxx_frm_ctl_cn {
+		u64 pre_chk                          : 1;
+		u64 pre_strp                         : 1;
+		u64 ctl_drp                          : 1;
+		u64 ctl_bck                          : 1;
+		u64 ctl_mcst                         : 1;
+		u64 ctl_smac                         : 1;
+		u64 pre_free                         : 1;
+		u64 reserved_7                       : 1;
+		u64 reserved_8                       : 1;
+		u64 pre_align                        : 1;
+		u64 null_dis                         : 1;
+		u64 reserved_11                      : 1;
+		u64 ptp_mode                         : 1;
+		u64 rx_fc_type                       : 1;
+		u64 reserved_14_63                   : 50;
+	} cn; /* same field positions as _s; reserved range split differently */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_FRM_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_FRM_CTL(u64 a)
+{
+	return 0x38028 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_ifg
+ *
+ * CGX GMI Minimum Interframe-Gap Cycles Registers This register
+ * specifies the minimum number of interframe-gap (IFG) cycles between
+ * packets.
+ */
+union cgxx_gmp_gmi_rxx_ifg {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_ifg_s {
+		u64 ifg                              : 4; /* [3:0] min IFG cycles */
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct cgxx_gmp_gmi_rxx_ifg_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_IFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_IFG(u64 a)
+{
+	return 0x38058 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_int
+ *
+ * CGX GMP GMI RX Interrupt Registers These registers allow interrupts to
+ * be sent to the control processor. * Exception conditions \<10:0\> can
+ * also set the rcv/opcode in the received packet's work-queue entry.
+ * CGX()_GMP_GMI_RX()_FRM_CHK provides a bit mask for configuring which
+ * conditions set the error. In half duplex operation, the expectation is
+ * that collisions will appear as either MINERR or CAREXT errors.
+ * Internal: Notes: (1) exception conditions 10:0 can also set the
+ * rcv/opcode in the received packet's workQ entry.  The
+ * CGX()_GMP_GMI_RX()_FRM_CHK register provides a bit mask for
+ * configuring which conditions set the error.  (2) in half duplex
+ * operation, the expectation is that collisions will appear as either
+ * MINERR or CAREXT errors.  (3) JABBER An RX jabber error indicates
+ * that a packet was received which is longer than the maximum allowed
+ * packet as defined by the system.  GMI will truncate the packet at the
+ * JABBER count. Failure to do so could lead to system instability.  (4)
+ * NIBERR This error is illegal at 1000Mbs speeds
+ * (CGX()_GMP_GMI_PRT()_CFG[SPEED]==0) and will never assert.  (5) MINERR
+ * total frame DA+SA+TL+DATA+PAD+FCS \< 64  (6) ALNERR Indicates that the
+ * packet received was not an integer number of bytes.  If FCS checking
+ * is enabled, ALNERR will only assert if the FCS is bad.  If FCS
+ * checking is disabled, ALNERR will assert in all non-integer frame
+ * cases.  (7) Collisions Collisions can only occur in half-duplex mode.
+ * A collision is assumed by the receiver when the slottime
+ * (CGX()_GMP_GMI_PRT()_CFG[SLOTTIME]) is not satisfied.  In 10/100 mode,
+ * this will result in a frame \< SLOTTIME.  In 1000 mode, it could
+ * result either in frame \< SLOTTIME or a carrier extend error with the
+ * SLOTTIME.  These conditions are visible by... . transfer ended before
+ * slottime COLDET . carrier extend error           CAREXT  (A) LENERR
+ * Length errors occur when the received packet does not match the length
+ * field.  LENERR is only checked for packets between 64 and 1500 bytes.
+ * For untagged frames, the length must exact match.  For tagged frames
+ * the length or length+4 must match.  (B) PCTERR checks that the frame
+ * begins with a valid PREAMBLE sequence. Does not check the number of
+ * PREAMBLE cycles.  (C) OVRERR *DON'T PUT IN HRM* OVRERR is an
+ * architectural assertion check internal to GMI to make sure no
+ * assumption was violated.  In a correctly operating system, this
+ * interrupt can never fire. GMI has an internal arbiter which selects
+ * which of four ports to buffer in the main RX FIFO.  If we normally
+ * buffer eight bytes, then each port will typically push a tick every
+ * eight cycles if the packet interface is going as fast as possible.  If
+ * there are four ports, they push every two cycles.  So that's the
+ * assumption.  That the inbound module will always be able to consume
+ * the tick before another is produced.  If that doesn't happen that's
+ * when OVRERR will assert."
+ */
+union cgxx_gmp_gmi_rxx_int {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_int_s {
+		u64 minerr                           : 1; /* [0] */
+		u64 carext                           : 1; /* [1] */
+		u64 jabber                           : 1; /* [2] */
+		u64 fcserr                           : 1; /* [3] */
+		u64 rcverr                           : 1; /* [4] */
+		u64 skperr                           : 1; /* [5] */
+		u64 ovrerr                           : 1; /* [6] */
+		u64 pcterr                           : 1; /* [7] */
+		u64 rsverr                           : 1; /* [8] */
+		u64 falerr                           : 1; /* [9] */
+		u64 coldet                           : 1; /* [10] */
+		u64 ifgerr                           : 1; /* [11] */
+		u64 reserved_12_63                   : 52;
+	} s;
+	struct cgxx_gmp_gmi_rxx_int_cn {
+		u64 minerr                           : 1;
+		u64 carext                           : 1;
+		u64 jabber                           : 1;
+		u64 fcserr                           : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 ovrerr                           : 1;
+		u64 pcterr                           : 1;
+		u64 rsverr                           : 1;
+		u64 falerr                           : 1;
+		u64 coldet                           : 1;
+		u64 ifgerr                           : 1;
+		u64 reserved_12_15                   : 4;
+		u64 reserved_16_63                   : 48;
+	} cn; /* same field positions as _s; reserved range split differently */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_INT(u64 a)
+{
+	return 0x38000 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_int_ena_w1c
+ *
+ * CGX GMP GMI RX Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union cgxx_gmp_gmi_rxx_int_ena_w1c {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_int_ena_w1c_s {
+		u64 minerr                           : 1; /* [0] */
+		u64 carext                           : 1; /* [1] */
+		u64 jabber                           : 1; /* [2] */
+		u64 fcserr                           : 1; /* [3] */
+		u64 rcverr                           : 1; /* [4] */
+		u64 skperr                           : 1; /* [5] */
+		u64 ovrerr                           : 1; /* [6] */
+		u64 pcterr                           : 1; /* [7] */
+		u64 rsverr                           : 1; /* [8] */
+		u64 falerr                           : 1; /* [9] */
+		u64 coldet                           : 1; /* [10] */
+		u64 ifgerr                           : 1; /* [11] */
+		u64 reserved_12_63                   : 52;
+	} s;
+	struct cgxx_gmp_gmi_rxx_int_ena_w1c_cn {
+		u64 minerr                           : 1;
+		u64 carext                           : 1;
+		u64 jabber                           : 1;
+		u64 fcserr                           : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 ovrerr                           : 1;
+		u64 pcterr                           : 1;
+		u64 rsverr                           : 1;
+		u64 falerr                           : 1;
+		u64 coldet                           : 1;
+		u64 ifgerr                           : 1;
+		u64 reserved_12_15                   : 4;
+		u64 reserved_16_63                   : 48;
+	} cn; /* same field positions as _s; reserved range split differently */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1C(u64 a)
+{
+	return 0x38010 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_int_ena_w1s
+ *
+ * CGX GMP GMI RX Interrupt Enable Set Registers This register sets
+ * interrupt enable bits.
+ */
+union cgxx_gmp_gmi_rxx_int_ena_w1s {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_int_ena_w1s_s {
+		u64 minerr                           : 1; /* [0] */
+		u64 carext                           : 1; /* [1] */
+		u64 jabber                           : 1; /* [2] */
+		u64 fcserr                           : 1; /* [3] */
+		u64 rcverr                           : 1; /* [4] */
+		u64 skperr                           : 1; /* [5] */
+		u64 ovrerr                           : 1; /* [6] */
+		u64 pcterr                           : 1; /* [7] */
+		u64 rsverr                           : 1; /* [8] */
+		u64 falerr                           : 1; /* [9] */
+		u64 coldet                           : 1; /* [10] */
+		u64 ifgerr                           : 1; /* [11] */
+		u64 reserved_12_63                   : 52;
+	} s;
+	struct cgxx_gmp_gmi_rxx_int_ena_w1s_cn {
+		u64 minerr                           : 1;
+		u64 carext                           : 1;
+		u64 jabber                           : 1;
+		u64 fcserr                           : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 ovrerr                           : 1;
+		u64 pcterr                           : 1;
+		u64 rsverr                           : 1;
+		u64 falerr                           : 1;
+		u64 coldet                           : 1;
+		u64 ifgerr                           : 1;
+		u64 reserved_12_15                   : 4;
+		u64 reserved_16_63                   : 48;
+	} cn; /* same field positions as _s; reserved range split differently */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_INT_ENA_W1S(u64 a)
+{
+	return 0x38018 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_int_w1s
+ *
+ * CGX GMP GMI RX Interrupt Set Registers This register sets interrupt
+ * bits.
+ */
+union cgxx_gmp_gmi_rxx_int_w1s {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_int_w1s_s {
+		u64 minerr                           : 1; /* [0] */
+		u64 carext                           : 1; /* [1] */
+		u64 jabber                           : 1; /* [2] */
+		u64 fcserr                           : 1; /* [3] */
+		u64 rcverr                           : 1; /* [4] */
+		u64 skperr                           : 1; /* [5] */
+		u64 ovrerr                           : 1; /* [6] */
+		u64 pcterr                           : 1; /* [7] */
+		u64 rsverr                           : 1; /* [8] */
+		u64 falerr                           : 1; /* [9] */
+		u64 coldet                           : 1; /* [10] */
+		u64 ifgerr                           : 1; /* [11] */
+		u64 reserved_12_63                   : 52;
+	} s;
+	struct cgxx_gmp_gmi_rxx_int_w1s_cn {
+		u64 minerr                           : 1;
+		u64 carext                           : 1;
+		u64 jabber                           : 1;
+		u64 fcserr                           : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 ovrerr                           : 1;
+		u64 pcterr                           : 1;
+		u64 rsverr                           : 1;
+		u64 falerr                           : 1;
+		u64 coldet                           : 1;
+		u64 ifgerr                           : 1;
+		u64 reserved_12_15                   : 4;
+		u64 reserved_16_63                   : 48;
+	} cn; /* same field positions as _s; reserved range split differently */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_INT_W1S(u64 a)
+{
+	return 0x38008 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_jabber
+ *
+ * CGX GMP Maximum Packet-Size Registers This register specifies the
+ * maximum size for packets, beyond which the GMI truncates.
+ */
+union cgxx_gmp_gmi_rxx_jabber {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_jabber_s {
+		u64 cnt                              : 16; /* [15:0] max packet size before truncation */
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_gmi_rxx_jabber_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_JABBER(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_JABBER(u64 a)
+{
+	return 0x38038 + 0x40000 * a; /* instance stride 0x40000 */
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_rx#_udd_skp
+ *
+ * CGX GMP GMI User-Defined Data Skip Registers This register specifies
+ * the amount of user-defined data (UDD) added before the start of the
+ * L2C data.  Internal: Notes: (1) The skip bytes are part of the packet
+ * and will be handled by NIX.  (2) The system can determine if the UDD
+ * bytes are included in the FCS check by using the FCSSEL field - if the
+ * FCS check is enabled.  (3) Assume that the preamble/sfd is always at
+ * the start of the frame - even before UDD bytes.  In most cases, there
+ * will be no preamble in these cases since it will be packet interface
+ * in direct communication to another packet interface (MAC to MAC)
+ * without a PHY involved.  (4) We can still do address filtering and
+ * control packet filtering if the user desires.  (5)
+ * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] must be 0 in half-duplex operation
+ * unless CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear.  If
+ * CGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear, then
+ * CGX()_GMP_GMI_RX()_UDD_SKP[LEN] will normally be 8.  (6) In all cases,
+ * the UDD bytes will be sent down the packet interface as part of the
+ * packet.  The UDD bytes are never stripped from the actual packet.
+ */
+union cgxx_gmp_gmi_rxx_udd_skp {
+	u64 u;
+	struct cgxx_gmp_gmi_rxx_udd_skp_s {
+		u64 len                              : 7;
+		u64 reserved_7                       : 1;
+		u64 fcssel                           : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	/* struct cgxx_gmp_gmi_rxx_udd_skp_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_RXX_UDD_SKP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_RXX_UDD_SKP(u64 a)
+{
+	return 0x38048 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_smac#
+ *
+ * CGX GMI SMAC Registers
+ */
+union cgxx_gmp_gmi_smacx {
+	u64 u;
+	struct cgxx_gmp_gmi_smacx_s {
+		u64 smac                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_gmp_gmi_smacx_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_SMACX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_SMACX(u64 a)
+{
+	return 0x38230 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_append
+ *
+ * CGX GMI TX Append Control Registers
+ */
+union cgxx_gmp_gmi_txx_append {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_append_s {
+		u64 preamble                         : 1;
+		u64 pad                              : 1;
+		u64 fcs                              : 1;
+		u64 force_fcs                        : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_append_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_APPEND(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_APPEND(u64 a)
+{
+	return 0x38218 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_burst
+ *
+ * CGX GMI TX Burst-Counter Registers
+ */
+union cgxx_gmp_gmi_txx_burst {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_burst_s {
+		u64 burst                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_burst_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_BURST(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_BURST(u64 a)
+{
+	return 0x38228 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_ctl
+ *
+ * CGX GMI Transmit Control Registers
+ */
+union cgxx_gmp_gmi_txx_ctl {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_ctl_s {
+		u64 xscol_en                         : 1;
+		u64 xsdef_en                         : 1;
+		u64 tx_fc_type                       : 1;
+		u64 link_drain                       : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_ctl_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_CTL(u64 a)
+{
+	return 0x38270 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_int
+ *
+ * CGX GMI TX Interrupt Registers
+ */
+union cgxx_gmp_gmi_txx_int {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_int_s {
+		u64 undflw                           : 1;
+		u64 xscol                            : 1;
+		u64 xsdef                            : 1;
+		u64 late_col                         : 1;
+		u64 ptp_lost                         : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	struct cgxx_gmp_gmi_txx_int_cn {
+		u64 undflw                           : 1;
+		u64 xscol                            : 1;
+		u64 xsdef                            : 1;
+		u64 late_col                         : 1;
+		u64 ptp_lost                         : 1;
+		u64 reserved_5_7                     : 3;
+		u64 reserved_8                       : 1;
+		u64 reserved_9_63                    : 55;
+	} cn;
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_INT(u64 a)
+{
+	return 0x38500 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_int_ena_w1c
+ *
+ * CGX GMI TX Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union cgxx_gmp_gmi_txx_int_ena_w1c {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_int_ena_w1c_s {
+		u64 undflw                           : 1;
+		u64 xscol                            : 1;
+		u64 xsdef                            : 1;
+		u64 late_col                         : 1;
+		u64 ptp_lost                         : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	struct cgxx_gmp_gmi_txx_int_ena_w1c_cn {
+		u64 undflw                           : 1;
+		u64 xscol                            : 1;
+		u64 xsdef                            : 1;
+		u64 late_col                         : 1;
+		u64 ptp_lost                         : 1;
+		u64 reserved_5_7                     : 3;
+		u64 reserved_8                       : 1;
+		u64 reserved_9_63                    : 55;
+	} cn;
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1C(u64 a)
+{
+	return 0x38510 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_int_ena_w1s
+ *
+ * CGX GMI TX Interrupt Enable Set Registers This register sets interrupt
+ * enable bits.
+ */
+union cgxx_gmp_gmi_txx_int_ena_w1s {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_int_ena_w1s_s {
+		u64 undflw                           : 1;
+		u64 xscol                            : 1;
+		u64 xsdef                            : 1;
+		u64 late_col                         : 1;
+		u64 ptp_lost                         : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	struct cgxx_gmp_gmi_txx_int_ena_w1s_cn {
+		u64 undflw                           : 1;
+		u64 xscol                            : 1;
+		u64 xsdef                            : 1;
+		u64 late_col                         : 1;
+		u64 ptp_lost                         : 1;
+		u64 reserved_5_7                     : 3;
+		u64 reserved_8                       : 1;
+		u64 reserved_9_63                    : 55;
+	} cn;
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_INT_ENA_W1S(u64 a)
+{
+	return 0x38518 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_int_w1s
+ *
+ * CGX GMI TX Interrupt Set Registers This register sets interrupt bits.
+ */
+union cgxx_gmp_gmi_txx_int_w1s {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_int_w1s_s {
+		u64 undflw                           : 1;
+		u64 xscol                            : 1;
+		u64 xsdef                            : 1;
+		u64 late_col                         : 1;
+		u64 ptp_lost                         : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	struct cgxx_gmp_gmi_txx_int_w1s_cn {
+		u64 undflw                           : 1;
+		u64 xscol                            : 1;
+		u64 xsdef                            : 1;
+		u64 late_col                         : 1;
+		u64 ptp_lost                         : 1;
+		u64 reserved_5_7                     : 3;
+		u64 reserved_8                       : 1;
+		u64 reserved_9_63                    : 55;
+	} cn;
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_INT_W1S(u64 a)
+{
+	return 0x38508 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_min_pkt
+ *
+ * CGX GMI TX Minimum-Size-Packet Registers
+ */
+union cgxx_gmp_gmi_txx_min_pkt {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_min_pkt_s {
+		u64 min_size                         : 8;
+		u64 reserved_8_63                    : 56;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_min_pkt_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_MIN_PKT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_MIN_PKT(u64 a)
+{
+	return 0x38240 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_pause_pkt_interval
+ *
+ * CGX GMI TX PAUSE-Packet Transmission-Interval Registers This register
+ * specifies how often PAUSE packets are sent. Internal: Notes: Choosing
+ * proper values of CGX()_GMP_GMI_TX()_PAUSE_PKT_TIME[PTIME] and
+ * CGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to
+ * the system designer.  It is suggested that TIME be much greater than
+ * INTERVAL and CGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] be set.  This allows
+ * a periodic refresh of the PAUSE count and then when the backpressure
+ * condition is lifted, a PAUSE packet with TIME==0 will be sent
+ * indicating that Octane is ready for additional data.  If the system
+ * chooses to not set CGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they
+ * satisfy the following rule:  _ INTERVAL \<= TIME - (largest_pkt_size
+ * + IFG + pause_pkt_size)  where largest_pkt_size is that largest packet
+ * that the system can send (normally 1518B), IFG is the interframe gap
+ * and pause_pkt_size is the size of the PAUSE packet (normally 64B).
+ */
+union cgxx_gmp_gmi_txx_pause_pkt_interval {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_pause_pkt_interval_s {
+		u64 interval                         : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_pause_pkt_interval_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(u64 a)
+{
+	return 0x38248 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_pause_pkt_time
+ *
+ * CGX GMI TX PAUSE Packet PAUSE-Time Registers
+ */
+union cgxx_gmp_gmi_txx_pause_pkt_time {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_pause_pkt_time_s {
+		u64 ptime                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_pause_pkt_time_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(u64 a)
+{
+	return 0x38238 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_pause_togo
+ *
+ * CGX GMI TX Time-to-Backpressure Registers
+ */
+union cgxx_gmp_gmi_txx_pause_togo {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_pause_togo_s {
+		u64 ptime                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_pause_togo_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_PAUSE_TOGO(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_PAUSE_TOGO(u64 a)
+{
+	return 0x38258 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_pause_zero
+ *
+ * CGX GMI TX PAUSE-Zero-Enable Registers
+ */
+union cgxx_gmp_gmi_txx_pause_zero {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_pause_zero_s {
+		u64 send                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_pause_zero_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_PAUSE_ZERO(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_PAUSE_ZERO(u64 a)
+{
+	return 0x38260 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_sgmii_ctl
+ *
+ * CGX SGMII Control Registers
+ */
+union cgxx_gmp_gmi_txx_sgmii_ctl {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_sgmii_ctl_s {
+		u64 align                            : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_sgmii_ctl_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_SGMII_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_SGMII_CTL(u64 a)
+{
+	return 0x38300 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_slot
+ *
+ * CGX GMI TX Slottime Counter Registers
+ */
+union cgxx_gmp_gmi_txx_slot {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_slot_s {
+		u64 slot                             : 10;
+		u64 reserved_10_63                   : 54;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_slot_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_SLOT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_SLOT(u64 a)
+{
+	return 0x38220 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_soft_pause
+ *
+ * CGX GMI TX Software PAUSE Registers
+ */
+union cgxx_gmp_gmi_txx_soft_pause {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_soft_pause_s {
+		u64 ptime                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_soft_pause_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_SOFT_PAUSE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_SOFT_PAUSE(u64 a)
+{
+	return 0x38250 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx#_thresh
+ *
+ * CGX GMI TX Threshold Registers
+ */
+union cgxx_gmp_gmi_txx_thresh {
+	u64 u;
+	struct cgxx_gmp_gmi_txx_thresh_s {
+		u64 cnt                              : 11;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct cgxx_gmp_gmi_txx_thresh_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TXX_THRESH(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TXX_THRESH(u64 a)
+{
+	return 0x38210 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx_col_attempt
+ *
+ * CGX TX Collision Attempts Before Dropping Frame Registers
+ */
+union cgxx_gmp_gmi_tx_col_attempt {
+	u64 u;
+	struct cgxx_gmp_gmi_tx_col_attempt_s {
+		u64 limit                            : 5;
+		u64 reserved_5_63                    : 59;
+	} s;
+	/* struct cgxx_gmp_gmi_tx_col_attempt_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TX_COL_ATTEMPT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TX_COL_ATTEMPT(void)
+{
+	return 0x39010;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx_ifg
+ *
+ * CGX GMI TX Interframe-Gap Cycles Registers Consider the following when
+ * programming IFG1 and IFG2: * For 10/100/1000 Mb/s half-duplex systems
+ * that require IEEE 802.3 compatibility, IFG1 must be in the range of
+ * 1-8, [IFG2] must be in the range of 4-12, and the [IFG1] + [IFG2] sum
+ * must be 12. * For 10/100/1000 Mb/s full-duplex systems that require
+ * IEEE 802.3 compatibility, IFG1 must be in the range of 1-11, [IFG2]
+ * must be in the range of 1-11, and the [IFG1] + [IFG2] sum must be 12.
+ * For all other systems, IFG1 and IFG2 can be any value in the range of
+ * 1-15, allowing for a total possible IFG sum of 2-30.
+ */
+union cgxx_gmp_gmi_tx_ifg {
+	u64 u;
+	struct cgxx_gmp_gmi_tx_ifg_s {
+		u64 ifg1                             : 4;
+		u64 ifg2                             : 4;
+		u64 reserved_8_63                    : 56;
+	} s;
+	/* struct cgxx_gmp_gmi_tx_ifg_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TX_IFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TX_IFG(void)
+{
+	return 0x39000;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx_jam
+ *
+ * CGX GMI TX JAM Pattern Registers This register provides the pattern
+ * used in JAM bytes.
+ */
+union cgxx_gmp_gmi_tx_jam {
+	u64 u;
+	struct cgxx_gmp_gmi_tx_jam_s {
+		u64 jam                              : 8;
+		u64 reserved_8_63                    : 56;
+	} s;
+	/* struct cgxx_gmp_gmi_tx_jam_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TX_JAM(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TX_JAM(void)
+{
+	return 0x39008;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx_lfsr
+ *
+ * CGX GMI TX LFSR Registers This register shows the contents of the
+ * linear feedback shift register (LFSR), which is used to implement
+ * truncated binary exponential backoff.
+ */
+union cgxx_gmp_gmi_tx_lfsr {
+	u64 u;
+	struct cgxx_gmp_gmi_tx_lfsr_s {
+		u64 lfsr                             : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_gmi_tx_lfsr_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TX_LFSR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TX_LFSR(void)
+{
+	return 0x39028;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx_pause_pkt_dmac
+ *
+ * CGX TX PAUSE-Packet DMAC-Field Registers
+ */
+union cgxx_gmp_gmi_tx_pause_pkt_dmac {
+	u64 u;
+	struct cgxx_gmp_gmi_tx_pause_pkt_dmac_s {
+		u64 dmac                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_gmp_gmi_tx_pause_pkt_dmac_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(void)
+{
+	return 0x39018;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_gmi_tx_pause_pkt_type
+ *
+ * CGX GMI TX PAUSE-Packet-PTYPE Field Registers This register provides
+ * the PTYPE field that is placed in outbound PAUSE packets.
+ */
+union cgxx_gmp_gmi_tx_pause_pkt_type {
+	u64 u;
+	struct cgxx_gmp_gmi_tx_pause_pkt_type_s {
+		u64 ptype                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_gmi_tx_pause_pkt_type_s cn; */
+};
+
+static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(void)
+{
+	return 0x39020;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_misc#_cfg
+ *
+ * CGX GMP PCS Miscellaneous Control Registers This register contains
+ * general configuration that should not need to be changed from reset
+ * settings.  Internal: Per lmac diagnostic and chicken bits.
+ */
+union cgxx_gmp_miscx_cfg {
+	u64 u;
+	struct cgxx_gmp_miscx_cfg_s {
+		u64 tx_eee_quiet_credit_mode         : 1;
+		u64 tx_eee_wait_gmi_fast_idle        : 1;
+		u64 tx_qsgmii_port0_init             : 1;
+		u64 tx_eee_rx_sync_status_enable     : 1;
+		u64 pcs_alt_an                       : 1;
+		u64 reserved_5_7                     : 3;
+		u64 rx_pcs_sync_signal_detect        : 1;
+		u64 rx_pcs_sync_timeout              : 1;
+		u64 rx_pcs_eee_mode_enable           : 1;
+		u64 rx_pcs_lpi_enable                : 1;
+		u64 rx_pcs_802_rx_k                  : 1;
+		u64 rx_pcs_alt_qlb2i                 : 1;
+		u64 reserved_14_15                   : 2;
+		u64 rx_cgp_gser_throttle             : 1;
+		u64 rx_cgp_edet_filter               : 1;
+		u64 rx_cgp_edet_qlm_val              : 1;
+		u64 reserved_19_63                   : 45;
+	} s;
+	/* struct cgxx_gmp_miscx_cfg_s cn; */
+};
+
+static inline u64 CGXX_GMP_MISCX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_MISCX_CFG(u64 a)
+{
+	return 0x34000 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs#_an_expansion
+ *
+ * CGX GMP PCS AN Expansion register Register 6 AN status
+ */
+union cgxx_gmp_pcsx_an_expansion {
+	u64 u;
+	struct cgxx_gmp_pcsx_an_expansion_s {
+		u64 reserved_0                       : 1;
+		u64 page_received                    : 1;
+		u64 next_page_able                   : 1;
+		u64 reserved_3_63                    : 61;
+	} s;
+	/* struct cgxx_gmp_pcsx_an_expansion_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCSX_AN_EXPANSION(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCSX_AN_EXPANSION(u64 a)
+{
+	return 0x30a60 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs#_an_lp_abil_np
+ *
+ * CGX GMP PCS AN Link Partner Ability Next Page Register 8 This register
+ * contains the advertised ability of the link partners Next Page. The
+ * definition for this register is provided in 32.5.4.2 for changes to
+ * 28.2.4.1.4.
+ */
+union cgxx_gmp_pcsx_an_lp_abil_np {
+	u64 u;
+	struct cgxx_gmp_pcsx_an_lp_abil_np_s {
+		u64 m_u                              : 11;
+		u64 toggle                           : 1;
+		u64 ack2                             : 1;
+		u64 mp                               : 1;
+		u64 ack                              : 1;
+		u64 np                               : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcsx_an_lp_abil_np_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCSX_AN_LP_ABIL_NP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCSX_AN_LP_ABIL_NP(u64 a)
+{
+	return 0x30a80 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs#_an_np_tx
+ *
+ * CGX GMP PCS AN Next Page Transmit Register 7 Software programs this
+ * register with the contents of the AN message next page or unformatted
+ * next page link code word to be transmitted during autonegotiation.
+ * Next page exchange occurs after the base link code words have been
+ * exchanged if either end of the link segment sets the NP bit to 1,
+ * indicating that it has at least one next page to send. Once initiated,
+ * next page exchange continues until both ends of the link segment set
+ * their NP bits to 0. Both sides must be NP capable to use NP exchanges.
+ */
+union cgxx_gmp_pcsx_an_np_tx {
+	u64 u;
+	struct cgxx_gmp_pcsx_an_np_tx_s {
+		u64 m_u                              : 11;
+		u64 toggle                           : 1;
+		u64 ack2                             : 1;
+		u64 mp                               : 1;
+		u64 ack                              : 1;
+		u64 np                               : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcsx_an_np_tx_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCSX_AN_NP_TX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCSX_AN_NP_TX(u64 a)
+{
+	return 0x30a70 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs#_dbg_control
+ *
+ * CGX PCS Debug Control Registers
+ */
+union cgxx_gmp_pcsx_dbg_control {
+	u64 u;
+	struct cgxx_gmp_pcsx_dbg_control_s {
+		u64 us_clk_period                    : 7;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_gmp_pcsx_dbg_control_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCSX_DBG_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCSX_DBG_CONTROL(u64 a)
+{
+	return 0x31000 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs#_rx_eee_wake
+ *
+ * INTERNAL: CGX GMP PCS  RX EEE Wake Error Counter  Registers  Reserved.
+ * Internal: This register is used by PHY types that support EEE to count
+ * wake time faults where the PHY fails to complete its normal wake
+ * sequence within the time required for the specific PHY type. The
+ * definition of the fault event to be counted is defined for each PHY
+ * and may occur during a refresh or a wake-up as defined by the PHY.
+ * This 16-bit counter shall be reset to all zeros upon execution of the
+ * PCS reset. This counter shall be held at all ones in the case of
+ * overflow.
+ */
+union cgxx_gmp_pcsx_rx_eee_wake {
+	u64 u;
+	struct cgxx_gmp_pcsx_rx_eee_wake_s {
+		u64 error_counter                    : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcsx_rx_eee_wake_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCSX_RX_EEE_WAKE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCSX_RX_EEE_WAKE(u64 a)
+{
+	return 0x30910 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs#_rx_lpi_timing
+ *
+ * INTERNAL: CGX GMP PCS  RX EEE LPI Timing Parameters Registers
+ * Reserved. Internal: Receiver LPI timing parameters Tqr, Twr and Twtf.
+ */
+union cgxx_gmp_pcsx_rx_lpi_timing {
+	u64 u;
+	struct cgxx_gmp_pcsx_rx_lpi_timing_s {
+		u64 twtf                             : 18;
+		u64 reserved_18_19                   : 2;
+		u64 twr                              : 12;
+		u64 tqr                              : 20;
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct cgxx_gmp_pcsx_rx_lpi_timing_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCSX_RX_LPI_TIMING(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCSX_RX_LPI_TIMING(u64 a)
+{
+	return 0x30900 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs#_status1
+ *
+ * CGX GMP PCS Status 1 Register PCS LPI Status, Link OK.  Register 3.1
+ */
+union cgxx_gmp_pcsx_status1 {
+	u64 u;
+	struct cgxx_gmp_pcsx_status1_s {
+		u64 reserved_0_1                     : 2;
+		u64 receive_link_status              : 1;
+		u64 reserved_3_7                     : 5;
+		u64 rx_lpi_indication                : 1;
+		u64 tx_lpi_indication                : 1;
+		u64 rx_lpi_received                  : 1;
+		u64 tx_lpi_received                  : 1;
+		u64 reserved_12_63                   : 52;
+	} s;
+	/* struct cgxx_gmp_pcsx_status1_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCSX_STATUS1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCSX_STATUS1(u64 a)
+{
+	return 0x30880 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs#_tx_lpi_timing
+ *
+ * INTERNAL: CGX GMP GMI  TX EEE LPI Timing Parameters Registers
+ * Reserved. Internal: Transmitter LPI timing parameters Tsl, Tql and
+ * Tul.
+ */
+union cgxx_gmp_pcsx_tx_lpi_timing {
+	u64 u;
+	struct cgxx_gmp_pcsx_tx_lpi_timing_s {
+		u64 tql                              : 19;
+		u64 reserved_19_31                   : 13;
+		u64 tul                              : 12;
+		u64 reserved_44_47                   : 4;
+		u64 tsl                              : 12;
+		u64 reserved_60_63                   : 4;
+	} s;
+	/* struct cgxx_gmp_pcsx_tx_lpi_timing_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCSX_TX_LPI_TIMING(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCSX_TX_LPI_TIMING(u64 a)
+{
+	return 0x30800 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_an#_adv
+ *
+ * CGX GMP PCS Autonegotiation Advertisement Registers
+ */
+union cgxx_gmp_pcs_anx_adv {
+	u64 u;
+	struct cgxx_gmp_pcs_anx_adv_s {
+		u64 reserved_0_4                     : 5;
+		u64 fd                               : 1;
+		u64 hfd                              : 1;
+		u64 pause                            : 2;
+		u64 reserved_9_11                    : 3;
+		u64 rem_flt                          : 2;
+		u64 reserved_14                      : 1;
+		u64 np                               : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcs_anx_adv_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_ANX_ADV(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCS_ANX_ADV(u64 a)
+{
+	return 0x30010 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_an#_ext_st
+ *
+ * CGX GMP PCS Autonegotiation Extended Status Registers
+ */
+union cgxx_gmp_pcs_anx_ext_st {
+	u64 u;
+	struct cgxx_gmp_pcs_anx_ext_st_s {
+		u64 reserved_0_11                    : 12;
+		u64 thou_thd                         : 1;
+		u64 thou_tfd                         : 1;
+		u64 thou_xhd                         : 1;
+		u64 thou_xfd                         : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcs_anx_ext_st_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_ANX_EXT_ST(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCS_ANX_EXT_ST(u64 a)
+{
+	return 0x30028 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_an#_lp_abil
+ *
+ * CGX GMP PCS Autonegotiation Link Partner Ability Registers This is the
+ * autonegotiation link partner ability register 5 as per IEEE 802.3,
+ * Clause 37.
+ */
+union cgxx_gmp_pcs_anx_lp_abil {
+	u64 u;
+	struct cgxx_gmp_pcs_anx_lp_abil_s {
+		u64 reserved_0_4                     : 5;
+		u64 fd                               : 1;
+		u64 hfd                              : 1;
+		u64 pause                            : 2;
+		u64 reserved_9_11                    : 3;
+		u64 rem_flt                          : 2;
+		u64 ack                              : 1;
+		u64 np                               : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcs_anx_lp_abil_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_ANX_LP_ABIL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCS_ANX_LP_ABIL(u64 a)
+{
+	return 0x30018 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_an#_results
+ *
+ * CGX GMP PCS Autonegotiation Results Registers This register is not
+ * valid when CGX()_GMP_PCS_MISC()_CTL[AN_OVRD] is set to 1. If
+ * CGX()_GMP_PCS_MISC()_CTL[AN_OVRD] is set to 0 and
+ * CGX()_GMP_PCS_AN()_RESULTS[AN_CPT] is set to 1, this register is
+ * valid.
+ */
+union cgxx_gmp_pcs_anx_results {
+	u64 u;
+	struct cgxx_gmp_pcs_anx_results_s {
+		u64 link_ok                          : 1;
+		u64 dup                              : 1;
+		u64 an_cpt                           : 1;
+		u64 spd                              : 2;
+		u64 pause                            : 2;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_gmp_pcs_anx_results_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_ANX_RESULTS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCS_ANX_RESULTS(u64 a)
+{
+	return 0x30020 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_int#
+ *
+ * CGX GMP PCS Interrupt Registers
+ */
+union cgxx_gmp_pcs_intx {
+	u64 u;
+	struct cgxx_gmp_pcs_intx_s {
+		u64 lnkspd                           : 1;
+		u64 xmit                             : 1;
+		u64 an_err                           : 1;
+		u64 txfifu                           : 1;
+		u64 txfifo                           : 1;
+		u64 txbad                            : 1;
+		u64 rxerr                            : 1;
+		u64 rxbad                            : 1;
+		u64 rxlock                           : 1;
+		u64 an_bad                           : 1;
+		u64 sync_bad                         : 1;
+		u64 dup                              : 1;
+		u64 dbg_sync                         : 1;
+		u64 reserved_13_15                   : 3;
+		u64 an_page_received                 : 1;
+		u64 an_complete                      : 1;
+		u64 reserved_18_19                   : 2;
+		u64 eee_tx_change                    : 1;
+		u64 eee_rx_change                    : 1;
+		u64 eee_rx_link_fail                 : 1;
+		u64 reserved_23_63                   : 41;
+	} s;
+	/* struct cgxx_gmp_pcs_intx_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_INTX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCS_INTX(u64 a)
+{
+	return 0x30080 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_int#_ena_w1c
+ *
+ * CGX GMP PCS Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union cgxx_gmp_pcs_intx_ena_w1c {
+	u64 u;
+	struct cgxx_gmp_pcs_intx_ena_w1c_s {
+		u64 lnkspd                           : 1;
+		u64 xmit                             : 1;
+		u64 an_err                           : 1;
+		u64 txfifu                           : 1;
+		u64 txfifo                           : 1;
+		u64 txbad                            : 1;
+		u64 rxerr                            : 1;
+		u64 rxbad                            : 1;
+		u64 rxlock                           : 1;
+		u64 an_bad                           : 1;
+		u64 sync_bad                         : 1;
+		u64 dup                              : 1;
+		u64 dbg_sync                         : 1;
+		u64 reserved_13_15                   : 3;
+		u64 an_page_received                 : 1;
+		u64 an_complete                      : 1;
+		u64 reserved_18_19                   : 2;
+		u64 eee_tx_change                    : 1;
+		u64 eee_rx_change                    : 1;
+		u64 eee_rx_link_fail                 : 1;
+		u64 reserved_23_63                   : 41;
+	} s;
+	/* struct cgxx_gmp_pcs_intx_ena_w1c_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_INTX_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_GMP_PCS_INTX_ENA_W1C(u64 a)
+{
+	return 0x30090 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_int#_ena_w1s
+ *
+ * CGX GMP PCS Interrupt Enable Set Registers This register sets
+ * interrupt enable bits.
+ */
+union cgxx_gmp_pcs_intx_ena_w1s {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_intx_ena_w1s_s {
+		u64 lnkspd                           : 1;
+		u64 xmit                             : 1;
+		u64 an_err                           : 1;
+		u64 txfifu                           : 1;
+		u64 txfifo                           : 1;
+		u64 txbad                            : 1;
+		u64 rxerr                            : 1;
+		u64 rxbad                            : 1;
+		u64 rxlock                           : 1;
+		u64 an_bad                           : 1;
+		u64 sync_bad                         : 1;
+		u64 dup                              : 1;
+		u64 dbg_sync                         : 1;
+		u64 reserved_13_15                   : 3;
+		u64 an_page_received                 : 1;
+		u64 an_complete                      : 1;
+		u64 reserved_18_19                   : 2;
+		u64 eee_tx_change                    : 1;
+		u64 eee_rx_change                    : 1;
+		u64 eee_rx_link_fail                 : 1;
+		u64 reserved_23_63                   : 41;
+	} s;
+	/* struct cgxx_gmp_pcs_intx_ena_w1s_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_INTX_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_INTX_ENA_W1S(u64 a)
+{
+	return 0x30098 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_int#_w1s
+ *
+ * CGX GMP PCS Interrupt Set Registers This register sets interrupt bits.
+ */
+union cgxx_gmp_pcs_intx_w1s {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_intx_w1s_s {
+		u64 lnkspd                           : 1;
+		u64 xmit                             : 1;
+		u64 an_err                           : 1;
+		u64 txfifu                           : 1;
+		u64 txfifo                           : 1;
+		u64 txbad                            : 1;
+		u64 rxerr                            : 1;
+		u64 rxbad                            : 1;
+		u64 rxlock                           : 1;
+		u64 an_bad                           : 1;
+		u64 sync_bad                         : 1;
+		u64 dup                              : 1;
+		u64 dbg_sync                         : 1;
+		u64 reserved_13_15                   : 3;
+		u64 an_page_received                 : 1;
+		u64 an_complete                      : 1;
+		u64 reserved_18_19                   : 2;
+		u64 eee_tx_change                    : 1;
+		u64 eee_rx_change                    : 1;
+		u64 eee_rx_link_fail                 : 1;
+		u64 reserved_23_63                   : 41;
+	} s;
+	/* struct cgxx_gmp_pcs_intx_w1s_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_INTX_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_INTX_W1S(u64 a)
+{
+	return 0x30088 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_link#_timer
+ *
+ * CGX GMP PCS Link Timer Registers This is the 1.6 ms nominal link timer
+ * register.
+ */
+union cgxx_gmp_pcs_linkx_timer {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_linkx_timer_s {
+		u64 count                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcs_linkx_timer_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_LINKX_TIMER(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_LINKX_TIMER(u64 a)
+{
+	return 0x30040 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_misc#_ctl
+ *
+ * CGX GMP SGMII Miscellaneous Control Registers Internal: SGMII bit [12]
+ * is really a misnomer, it is a decode  of pi_qlm_cfg pins to indicate
+ * SGMII or 1000Base-X modes.  Note: The SGMII AN Advertisement Register
+ * above will be sent during Auto Negotiation if [MAC_PHY] is set (1=PHY
+ * mode). If the bit is not set (0=MAC mode), the tx_Config_Reg\<14\>
+ * becomes ACK bit and tx_Config_Reg\<0\> is always 1. All other bits in
+ * tx_Config_Reg sent will be 0. The PHY dictates the Auto Negotiation
+ * results.
+ */
+union cgxx_gmp_pcs_miscx_ctl {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_miscx_ctl_s {
+		u64 samp_pt                          : 7;
+		u64 an_ovrd                          : 1;
+		u64 mode                             : 1;
+		u64 mac_phy                          : 1;
+		u64 loopbck2                         : 1;
+		u64 gmxeno                           : 1;
+		u64 reserved_12                      : 1;
+		u64 disp_en                          : 1;
+		u64 reserved_14_15                   : 2;
+		u64 qsgmii_comma_wd                  : 16;
+		u64 qsgmii_comma_wd_en               : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* Chip-specific view: same fields, only the reserved split differs. */
+	struct cgxx_gmp_pcs_miscx_ctl_cn {
+		u64 samp_pt                          : 7;
+		u64 an_ovrd                          : 1;
+		u64 mode                             : 1;
+		u64 mac_phy                          : 1;
+		u64 loopbck2                         : 1;
+		u64 gmxeno                           : 1;
+		u64 reserved_12                      : 1;
+		u64 disp_en                          : 1;
+		u64 reserved_14_15                   : 2;
+		u64 qsgmii_comma_wd                  : 16;
+		u64 qsgmii_comma_wd_en               : 1;
+		u64 reserved_33_35                   : 3;
+		u64 reserved_36_63                   : 28;
+	} cn;
+};
+
+static inline u64 CGXX_GMP_PCS_MISCX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_MISCX_CTL(u64 a)
+{
+	return 0x30078 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_mr#_control
+ *
+ * CGX GMP PCS Control Registers
+ */
+union cgxx_gmp_pcs_mrx_control {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_mrx_control_s {
+		u64 reserved_0_4                     : 5;
+		u64 uni                              : 1;
+		u64 spdmsb                           : 1;
+		u64 coltst                           : 1;
+		u64 dup                              : 1;
+		u64 rst_an                           : 1;
+		u64 reserved_10                      : 1;
+		u64 pwr_dn                           : 1;
+		u64 an_en                            : 1;
+		u64 spdlsb                           : 1;
+		u64 loopbck1                         : 1;
+		u64 reset                            : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcs_mrx_control_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_MRX_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_MRX_CONTROL(u64 a)
+{
+	return 0x30000 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_mr#_status
+ *
+ * CGX GMP PCS Status Registers Bits \<15:9\> in this register indicate
+ * the ability to operate when CGX()_GMP_PCS_MISC()_CTL[MAC_PHY] is set
+ * to MAC mode. Bits \<15:9\> are always read as 0, indicating that the
+ * chip cannot operate in the corresponding modes. The field [RM_FLT] is
+ * a 'don't care' when the selected mode is SGMII/QSGMII.
+ */
+union cgxx_gmp_pcs_mrx_status {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_mrx_status_s {
+		u64 extnd                            : 1;
+		u64 reserved_1                       : 1;
+		u64 lnk_st                           : 1;
+		u64 an_abil                          : 1;
+		u64 rm_flt                           : 1;
+		u64 an_cpt                           : 1;
+		u64 prb_sup                          : 1;
+		u64 reserved_7                       : 1;
+		u64 ext_st                           : 1;
+		u64 hun_t2hd                         : 1;
+		u64 hun_t2fd                         : 1;
+		u64 ten_hd                           : 1;
+		u64 ten_fd                           : 1;
+		u64 hun_xhd                          : 1;
+		u64 hun_xfd                          : 1;
+		u64 hun_t4                           : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcs_mrx_status_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_MRX_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_MRX_STATUS(u64 a)
+{
+	return 0x30008 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_rx#_states
+ *
+ * CGX GMP PCS RX State-Machines States Registers
+ */
+union cgxx_gmp_pcs_rxx_states {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_rxx_states_s {
+		u64 an_st                            : 4;
+		u64 an_bad                           : 1;
+		u64 sync                             : 4;
+		u64 sync_bad                         : 1;
+		u64 rx_st                            : 5;
+		u64 rx_bad                           : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcs_rxx_states_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_RXX_STATES(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_RXX_STATES(u64 a)
+{
+	return 0x30058 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_rx#_sync
+ *
+ * CGX GMP PCS Code Group Synchronization Registers
+ */
+union cgxx_gmp_pcs_rxx_sync {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_rxx_sync_s {
+		u64 bit_lock                         : 1;
+		u64 sync                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct cgxx_gmp_pcs_rxx_sync_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_RXX_SYNC(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_RXX_SYNC(u64 a)
+{
+	return 0x30050 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_sgm#_an_adv
+ *
+ * CGX GMP PCS SGMII Autonegotiation Advertisement Registers This is the
+ * SGMII autonegotiation advertisement register (sent out as
+ * tx_Config_Reg\<15:0\> as defined in IEEE 802.3 clause 37). This
+ * register is sent during autonegotiation if
+ * CGX()_GMP_PCS_MISC()_CTL[MAC_PHY] is set (1 = PHY mode). If the bit is
+ * not set (0 = MAC mode), then tx_Config_Reg\<14\> becomes ACK bit and
+ * tx_Config_Reg\<0\> is always 1. All other bits in tx_Config_Reg sent
+ * will be 0. The PHY dictates the autonegotiation results.
+ */
+union cgxx_gmp_pcs_sgmx_an_adv {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_sgmx_an_adv_s {
+		u64 one                              : 1;
+		u64 reserved_1_9                     : 9;
+		u64 speed                            : 2;
+		u64 dup                              : 1;
+		u64 reserved_13                      : 1;
+		u64 ack                              : 1;
+		u64 link                             : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_gmp_pcs_sgmx_an_adv_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_SGMX_AN_ADV(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_SGMX_AN_ADV(u64 a)
+{
+	return 0x30068 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_sgm#_lp_adv
+ *
+ * CGX GMP PCS SGMII Link-Partner-Advertisement Registers This is the
+ * SGMII link partner advertisement register (received as
+ * rx_Config_Reg\<15:0\> as defined in IEEE 802.3 clause 37).
+ */
+union cgxx_gmp_pcs_sgmx_lp_adv {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_sgmx_lp_adv_s {
+		u64 one                              : 1;
+		u64 reserved_1_9                     : 9;
+		u64 speed                            : 2;
+		u64 dup                              : 1;
+		u64 reserved_13_14                   : 2;
+		u64 link                             : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* Chip-specific view: same layout, reserved bits 13-14 split up. */
+	struct cgxx_gmp_pcs_sgmx_lp_adv_cn {
+		u64 one                              : 1;
+		u64 reserved_1_9                     : 9;
+		u64 speed                            : 2;
+		u64 dup                              : 1;
+		u64 reserved_13                      : 1;
+		u64 reserved_14                      : 1;
+		u64 link                             : 1;
+		u64 reserved_16_63                   : 48;
+	} cn;
+};
+
+static inline u64 CGXX_GMP_PCS_SGMX_LP_ADV(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_SGMX_LP_ADV(u64 a)
+{
+	return 0x30070 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_tx#_states
+ *
+ * CGX GMP PCS TX State-Machines States Registers
+ */
+union cgxx_gmp_pcs_txx_states {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_txx_states_s {
+		u64 ord_st                           : 4;
+		u64 tx_bad                           : 1;
+		u64 xmit                             : 2;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_gmp_pcs_txx_states_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_TXX_STATES(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_TXX_STATES(u64 a)
+{
+	return 0x30060 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_gmp_pcs_tx_rx#_polarity
+ *
+ * CGX GMP PCS TX/RX Polarity Registers
+ * CGX()_GMP_PCS_TX_RX()_POLARITY[AUTORXPL] shows correct polarity needed
+ * on the link receive path after code group synchronization is achieved.
+ * When LMAC_TYPE=QSGMII, only lane 0 polarity data and settings are
+ * relevant and settings for lanes 1, 2 and 3 are unused.
+ */
+union cgxx_gmp_pcs_tx_rxx_polarity {
+	u64 u; /* entire register value */
+	struct cgxx_gmp_pcs_tx_rxx_polarity_s {
+		u64 txplrt                           : 1;
+		u64 rxplrt                           : 1;
+		u64 autorxpl                         : 1;
+		u64 rxovrd                           : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct cgxx_gmp_pcs_tx_rxx_polarity_s cn; */
+};
+
+static inline u64 CGXX_GMP_PCS_TX_RXX_POLARITY(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_GMP_PCS_TX_RXX_POLARITY(u64 a)
+{
+	return 0x30048 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_msix_pba#
+ *
+ * CGX MSI-X Pending Bit Array Registers This register is the MSI-X PBA
+ * table, the bit number is indexed by the CGX_INT_VEC_E enumeration.
+ */
+union cgxx_msix_pbax {
+	u64 u; /* entire register value */
+	struct cgxx_msix_pbax_s {
+		u64 pend                             : 64;
+	} s;
+	/* struct cgxx_msix_pbax_s cn; */
+};
+
+static inline u64 CGXX_MSIX_PBAX(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of PBA word @a (8 bytes per 64-bit word). */
+static inline u64 CGXX_MSIX_PBAX(u64 a)
+{
+	return 0xf0000 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_msix_vec#_addr
+ *
+ * CGX MSI-X Vector Table Address Registers This register is the MSI-X
+ * vector table, indexed by the CGX_INT_VEC_E enumeration.
+ */
+union cgxx_msix_vecx_addr {
+	u64 u; /* entire register value */
+	struct cgxx_msix_vecx_addr_s {
+		u64 secvec                           : 1;
+		u64 reserved_1                       : 1;
+		u64 addr                             : 51;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct cgxx_msix_vecx_addr_s cn; */
+};
+
+static inline u64 CGXX_MSIX_VECX_ADDR(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of the address entry for MSI-X vector @a (16-byte stride). */
+static inline u64 CGXX_MSIX_VECX_ADDR(u64 a)
+{
+	return 0 + 0x10 * a;
+}
+
+/**
+ * Register (RSL) cgx#_msix_vec#_ctl
+ *
+ * CGX MSI-X Vector Table Control and Data Registers This register is the
+ * MSI-X vector table, indexed by the CGX_INT_VEC_E enumeration.
+ */
+union cgxx_msix_vecx_ctl {
+	u64 u; /* entire register value */
+	struct cgxx_msix_vecx_ctl_s {
+		u64 data                             : 32;
+		u64 mask                             : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct cgxx_msix_vecx_ctl_s cn; */
+};
+
+static inline u64 CGXX_MSIX_VECX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of the control entry for MSI-X vector @a (16-byte stride). */
+static inline u64 CGXX_MSIX_VECX_CTL(u64 a)
+{
+	return 8 + 0x10 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_bp_test
+ *
+ * INTERNAL: CGX SMU TX Backpressure Test Registers
+ */
+union cgxx_smux_bp_test {
+	u64 u; /* entire register value */
+	struct cgxx_smux_bp_test_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 8;
+		u64 reserved_24_47                   : 24;
+		u64 enable                           : 4;
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct cgxx_smux_bp_test_s cn; */
+};
+
+static inline u64 CGXX_SMUX_BP_TEST(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_BP_TEST(u64 a)
+{
+	return 0x20230 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_cbfc_ctl
+ *
+ * CGX SMU PFC Control Registers Internal: INTERNAL: XOFF for a specific
+ * class/channel \<i\> is XOFF\<i\> = ([PHYS_EN]\<i\> & cmr_rx_phys_bp) |
+ * ([LOGL_EN]\<i\> & cmr_rx_logl_xoff\<i\>).
+ */
+union cgxx_smux_cbfc_ctl {
+	u64 u; /* entire register value */
+	struct cgxx_smux_cbfc_ctl_s {
+		u64 rx_en                            : 1;
+		u64 tx_en                            : 1;
+		u64 drp_en                           : 1;
+		u64 bck_en                           : 1;
+		u64 reserved_4_31                    : 28;
+		u64 logl_en                          : 16;
+		u64 phys_en                          : 16;
+	} s;
+	/* struct cgxx_smux_cbfc_ctl_s cn; */
+};
+
+static inline u64 CGXX_SMUX_CBFC_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_CBFC_CTL(u64 a)
+{
+	return 0x20218 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_ctrl
+ *
+ * CGX SMU Control Registers
+ */
+union cgxx_smux_ctrl {
+	u64 u; /* entire register value */
+	struct cgxx_smux_ctrl_s {
+		u64 rx_idle                          : 1;
+		u64 tx_idle                          : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct cgxx_smux_ctrl_s cn; */
+};
+
+static inline u64 CGXX_SMUX_CTRL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_CTRL(u64 a)
+{
+	return 0x20200 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_ext_loopback
+ *
+ * CGX SMU External Loopback Registers In loopback mode, the IFG1+IFG2 of
+ * local and remote parties must match exactly; otherwise loopback FIFO
+ * will overrun: CGX()_SMU()_TX_INT[LB_OVRFLW].
+ */
+union cgxx_smux_ext_loopback {
+	u64 u; /* entire register value */
+	struct cgxx_smux_ext_loopback_s {
+		u64 thresh                           : 6;
+		u64 reserved_6_7                     : 2;
+		u64 depth                            : 6;
+		u64 reserved_14_15                   : 2;
+		u64 en                               : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct cgxx_smux_ext_loopback_s cn; */
+};
+
+static inline u64 CGXX_SMUX_EXT_LOOPBACK(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_EXT_LOOPBACK(u64 a)
+{
+	return 0x20208 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_hg2_control
+ *
+ * CGX SMU HiGig2 Control Registers HiGig2 TX- and RX-enable are normally
+ * set together for HiGig2 messaging. Setting just the TX or RX bit
+ * results in only the HG2 message transmit or receive capability.
+ * Setting [PHYS_EN] and [LOGL_EN] to 1 allows link PAUSE or backpressure
+ * to NIX as per the received HiGig2 message. Setting these fields to 0
+ * disables link PAUSE and backpressure to NIX in response to received
+ * messages.  CGX()_SMU()_TX_CTL[HG_EN] must be set (to enable HiGig)
+ * whenever either [HG2TX_EN] or [HG2RX_EN] are set.
+ * CGX()_SMU()_RX_UDD_SKP[LEN] must be set to 16 (to select HiGig2)
+ * whenever either [HG2TX_EN] or [HG2RX_EN] are set.
+ * CGX()_CMR_RX_OVR_BP[EN]\<0\> must be set and
+ * CGX()_CMR_RX_OVR_BP[BP]\<0\> must be cleared to 0 (to forcibly disable
+ * hardware-automatic 802.3 PAUSE packet generation) with the HiGig2
+ * Protocol when [HG2TX_EN] = 0. (The HiGig2 protocol is indicated by
+ * CGX()_SMU()_TX_CTL[HG_EN] = 1 and CGX()_SMU()_RX_UDD_SKP[LEN]=16.)
+ * Hardware can only autogenerate backpressure via HiGig2 messages
+ * (optionally, when [HG2TX_EN] = 1) with the HiGig2 protocol.
+ */
+union cgxx_smux_hg2_control {
+	u64 u; /* entire register value */
+	struct cgxx_smux_hg2_control_s {
+		u64 logl_en                          : 16;
+		u64 phys_en                          : 1;
+		u64 hg2rx_en                         : 1;
+		u64 hg2tx_en                         : 1;
+		u64 reserved_19_63                   : 45;
+	} s;
+	/* struct cgxx_smux_hg2_control_s cn; */
+};
+
+static inline u64 CGXX_SMUX_HG2_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_HG2_CONTROL(u64 a)
+{
+	return 0x20210 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_mmsi_ctl_sta
+ *
+ * CGX SMU MAC Merge Service Interface (MMSI) Control/Status Registers
+ * MMSI control and status registers for frame preemption mode. Refer to
+ * IEEE 802.3br, Clause 99.
+ */
+union cgxx_smux_mmsi_ctl_sta {
+	u64 u; /* entire register value */
+	struct cgxx_smux_mmsi_ctl_sta_s {
+		u64 p_en                             : 1;
+		u64 dis_v                            : 1;
+		u64 afs                              : 2;
+		u64 v_sta                            : 3;
+		u64 tx_pactive                       : 1;
+		u64 reserved_8_31                    : 24;
+		u64 v_time                           : 24;
+		u64 reserved_56_63                   : 8;
+	} s;
+	/* struct cgxx_smux_mmsi_ctl_sta_s cn; */
+};
+
+static inline u64 CGXX_SMUX_MMSI_CTL_STA(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_MMSI_CTL_STA(u64 a)
+{
+	return 0x20220 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_bad_col_ctrl
+ *
+ * CGX SMU RX Bad Column Control Registers
+ */
+union cgxx_smux_rx_bad_col_ctrl {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_bad_col_ctrl_s {
+		u64 lane_rxc                         : 16;
+		u64 state                            : 3;
+		u64 val                              : 1;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct cgxx_smux_rx_bad_col_ctrl_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_BAD_COL_CTRL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_BAD_COL_CTRL(u64 a)
+{
+	return 0x20060 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_bad_col_data_hi
+ *
+ * CGX SMU RX Bad Column High Registers
+ */
+union cgxx_smux_rx_bad_col_data_hi {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_bad_col_data_hi_s {
+		u64 lane_rxd                         : 64;
+	} s;
+	/* struct cgxx_smux_rx_bad_col_data_hi_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_HI(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_HI(u64 a)
+{
+	return 0x20058 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_bad_col_data_lo
+ *
+ * CGX SMU RX Bad Column Low Registers
+ */
+union cgxx_smux_rx_bad_col_data_lo {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_bad_col_data_lo_s {
+		u64 lane_rxd                         : 64;
+	} s;
+	/* struct cgxx_smux_rx_bad_col_data_lo_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_LO(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_BAD_COL_DATA_LO(u64 a)
+{
+	return 0x20050 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_ctl
+ *
+ * CGX SMU RX Control Registers
+ */
+union cgxx_smux_rx_ctl {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_ctl_s {
+		u64 status                           : 2;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct cgxx_smux_rx_ctl_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_CTL(u64 a)
+{
+	return 0x20048 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_decision
+ *
+ * CGX SMU Packet Decision Registers This register specifies the byte
+ * count used to determine when to accept or to filter a packet. As each
+ * byte in a packet is received by CGX, the L2 byte count (i.e. the
+ * number of bytes from the beginning of the L2 header (DMAC)) is
+ * compared against CNT. In normal operation, the L2 header begins after
+ * the PREAMBLE + SFD (CGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 1) and any
+ * optional UDD skip data (CGX()_SMU()_RX_UDD_SKP[LEN]).
+ */
+union cgxx_smux_rx_decision {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_decision_s {
+		u64 cnt                              : 5;
+		u64 reserved_5_63                    : 59;
+	} s;
+	/* struct cgxx_smux_rx_decision_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_DECISION(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_DECISION(u64 a)
+{
+	return 0x20038 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_frm_chk
+ *
+ * CGX SMU RX Frame Check Registers The CSRs provide the enable bits for
+ * a subset of errors passed to CMR encoded.
+ */
+union cgxx_smux_rx_frm_chk {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_frm_chk_s {
+		u64 reserved_0_2                     : 3;
+		u64 jabber                           : 1;
+		u64 fcserr_d                         : 1;
+		u64 fcserr_c                         : 1;
+		u64 reserved_6                       : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	/* struct cgxx_smux_rx_frm_chk_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_FRM_CHK(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_FRM_CHK(u64 a)
+{
+	return 0x20028 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_frm_ctl
+ *
+ * CGX SMU RX Frame Control Registers This register controls the handling
+ * of the frames. The [CTL_BCK] and [CTL_DRP] bits control how the
+ * hardware handles incoming PAUSE packets. The most common modes of
+ * operation: _ [CTL_BCK] = 1, [CTL_DRP] = 1: hardware handles everything
+ * _ [CTL_BCK] = 0, [CTL_DRP] = 0: software sees all PAUSE frames _
+ * [CTL_BCK] = 0, [CTL_DRP] = 1: all PAUSE frames are completely ignored
+ * These control bits should be set to [CTL_BCK] = 0, [CTL_DRP] = 0 in
+ * half-duplex mode. Since PAUSE packets only apply to full duplex
+ * operation, any PAUSE packet would constitute an exception which should
+ * be handled by the processing cores. PAUSE packets should not be
+ * forwarded.
+ */
+union cgxx_smux_rx_frm_ctl {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_frm_ctl_s {
+		u64 pre_chk                          : 1;
+		u64 pre_strp                         : 1;
+		u64 ctl_drp                          : 1;
+		u64 ctl_bck                          : 1;
+		u64 ctl_mcst                         : 1;
+		u64 ctl_smac                         : 1;
+		u64 reserved_6_11                    : 6;
+		u64 ptp_mode                         : 1;
+		u64 reserved_13_63                   : 51;
+	} s;
+	/* struct cgxx_smux_rx_frm_ctl_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_FRM_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_FRM_CTL(u64 a)
+{
+	return 0x20020 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_int
+ *
+ * CGX SMU Receive Interrupt Registers SMU Interrupt Register. Internal:
+ * Exception conditions \<9\> and \<4:0\> can also set the rcv/opcode in
+ * the received packet's work queue entry. CGX()_SMU()_RX_FRM_CHK
+ * provides a bit mask for configuring which conditions set the error.
+ */
+union cgxx_smux_rx_int {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_int_s {
+		u64 jabber                           : 1;
+		u64 fcserr                           : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 pcterr                           : 1;
+		u64 rsverr                           : 1;
+		u64 loc_fault                        : 1;
+		u64 rem_fault                        : 1;
+		u64 bad_seq                          : 1;
+		u64 bad_term                         : 1;
+		u64 hg2fld                           : 1;
+		u64 hg2cc                            : 1;
+		u64 badver                           : 1;
+		u64 badrsp                           : 1;
+		u64 reserved_14_63                   : 50;
+	} s;
+	/* struct cgxx_smux_rx_int_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_INT(u64 a)
+{
+	return 0x20000 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_int_ena_w1c
+ *
+ * CGX SMU Receive Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union cgxx_smux_rx_int_ena_w1c {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_int_ena_w1c_s {
+		u64 jabber                           : 1;
+		u64 fcserr                           : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 pcterr                           : 1;
+		u64 rsverr                           : 1;
+		u64 loc_fault                        : 1;
+		u64 rem_fault                        : 1;
+		u64 bad_seq                          : 1;
+		u64 bad_term                         : 1;
+		u64 hg2fld                           : 1;
+		u64 hg2cc                            : 1;
+		u64 badver                           : 1;
+		u64 badrsp                           : 1;
+		u64 reserved_14_63                   : 50;
+	} s;
+	/* struct cgxx_smux_rx_int_ena_w1c_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_INT_ENA_W1C(u64 a)
+{
+	return 0x20010 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_int_ena_w1s
+ *
+ * CGX SMU Receive Interrupt Enable Set Registers This register sets
+ * interrupt enable bits.
+ */
+union cgxx_smux_rx_int_ena_w1s {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_int_ena_w1s_s {
+		u64 jabber                           : 1;
+		u64 fcserr                           : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 pcterr                           : 1;
+		u64 rsverr                           : 1;
+		u64 loc_fault                        : 1;
+		u64 rem_fault                        : 1;
+		u64 bad_seq                          : 1;
+		u64 bad_term                         : 1;
+		u64 hg2fld                           : 1;
+		u64 hg2cc                            : 1;
+		u64 badver                           : 1;
+		u64 badrsp                           : 1;
+		u64 reserved_14_63                   : 50;
+	} s;
+	/* struct cgxx_smux_rx_int_ena_w1s_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_INT_ENA_W1S(u64 a)
+{
+	return 0x20018 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_int_w1s
+ *
+ * CGX SMU Receive Interrupt Set Registers This register sets interrupt
+ * bits.
+ */
+union cgxx_smux_rx_int_w1s {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_int_w1s_s {
+		u64 jabber                           : 1;
+		u64 fcserr                           : 1;
+		u64 rcverr                           : 1;
+		u64 skperr                           : 1;
+		u64 pcterr                           : 1;
+		u64 rsverr                           : 1;
+		u64 loc_fault                        : 1;
+		u64 rem_fault                        : 1;
+		u64 bad_seq                          : 1;
+		u64 bad_term                         : 1;
+		u64 hg2fld                           : 1;
+		u64 hg2cc                            : 1;
+		u64 badver                           : 1;
+		u64 badrsp                           : 1;
+		u64 reserved_14_63                   : 50;
+	} s;
+	/* struct cgxx_smux_rx_int_w1s_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_INT_W1S(u64 a)
+{
+	return 0x20008 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_jabber
+ *
+ * CGX SMU Maximum Packet-Size Registers This register specifies the
+ * maximum size for packets, beyond which the SMU truncates. Internal:
+ * JABBER[CNT] is checked against the packet that arrives from SPU.  The
+ * checking is performed before preamble is stripped or PTP is inserted.
+ * If present, preamble is counted as eight bytes of the incoming packet.
+ */
+union cgxx_smux_rx_jabber {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_jabber_s {
+		u64 cnt                              : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_smux_rx_jabber_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_JABBER(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_JABBER(u64 a)
+{
+	return 0x20030 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_udd_skp
+ *
+ * CGX SMU User-Defined Data Skip Registers Internal: (1) The skip bytes
+ * are part of the packet and will be sent down the NCB packet interface
+ * and will be handled by NIX.  (2) The system can determine if the UDD
+ * bytes are included in the FCS check by using the FCSSEL field if the
+ * FCS check is enabled.  (3) Assume that the preamble/sfd is always at
+ * the start of the frame even before UDD bytes.  In most cases, there
+ * will be no preamble in these cases since it will be packet interface
+ * in direct communication to another packet interface (MAC to MAC)
+ * without a PHY involved.  (4) We can still do address filtering and
+ * control packet filtering if the user desires.  (5) In all cases, the
+ * UDD bytes will be sent down the packet interface as part of the
+ * packet.  The UDD bytes are never stripped from the actual packet.
+ */
+union cgxx_smux_rx_udd_skp {
+	u64 u; /* entire register value */
+	struct cgxx_smux_rx_udd_skp_s {
+		u64 len                              : 7;
+		u64 reserved_7                       : 1;
+		u64 fcssel                           : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	/* struct cgxx_smux_rx_udd_skp_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_UDD_SKP(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset of this CSR for index @a (stride 0x40000 per instance). */
+static inline u64 CGXX_SMUX_RX_UDD_SKP(u64 a)
+{
+	return 0x20040 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_wol_ctrl0
+ *
+ * CGX SMU RX Wake-on-LAN Control 0 Registers
+ */
+union cgxx_smux_rx_wol_ctrl0 {
+	u64 u;
+	struct cgxx_smux_rx_wol_ctrl0_s {
+		u64 dmac                             : 48;
+		u64 pswd_len                         : 4;
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct cgxx_smux_rx_wol_ctrl0_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_WOL_CTRL0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_RX_WOL_CTRL0(u64 a)
+{
+	return 0x20068 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_wol_ctrl1
+ *
+ * CGX SMU RX Wake-on-LAN Control 1 Registers
+ */
+union cgxx_smux_rx_wol_ctrl1 {
+	u64 u;
+	struct cgxx_smux_rx_wol_ctrl1_s {
+		u64 pswd                             : 64;
+	} s;
+	/* struct cgxx_smux_rx_wol_ctrl1_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_WOL_CTRL1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_RX_WOL_CTRL1(u64 a)
+{
+	return 0x20070 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_wol_int
+ *
+ * CGX SMU RX WOL Interrupt Registers These registers allow WOL
+ * interrupts to be sent to the control processor.
+ */
+union cgxx_smux_rx_wol_int {
+	u64 u;
+	struct cgxx_smux_rx_wol_int_s {
+		u64 wol_rcvd                         : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_smux_rx_wol_int_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_WOL_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_RX_WOL_INT(u64 a)
+{
+	return 0x20078 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_wol_int_ena_w1c
+ *
+ * CGX SMU RX WOL Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union cgxx_smux_rx_wol_int_ena_w1c {
+	u64 u;
+	struct cgxx_smux_rx_wol_int_ena_w1c_s {
+		u64 wol_rcvd                         : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_smux_rx_wol_int_ena_w1c_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1C(u64 a)
+{
+	return 0x20088 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_wol_int_ena_w1s
+ *
+ * CGX SMU RX WOL Interrupt Enable Set Registers This register sets
+ * interrupt enable bits.
+ */
+union cgxx_smux_rx_wol_int_ena_w1s {
+	u64 u;
+	struct cgxx_smux_rx_wol_int_ena_w1s_s {
+		u64 wol_rcvd                         : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_smux_rx_wol_int_ena_w1s_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_RX_WOL_INT_ENA_W1S(u64 a)
+{
+	return 0x20090 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_rx_wol_int_w1s
+ *
+ * CGX SMU RX WOL Interrupt Set Registers This register sets interrupt
+ * bits.
+ */
+union cgxx_smux_rx_wol_int_w1s {
+	u64 u;
+	struct cgxx_smux_rx_wol_int_w1s_s {
+		u64 wol_rcvd                         : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_smux_rx_wol_int_w1s_s cn; */
+};
+
+static inline u64 CGXX_SMUX_RX_WOL_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_RX_WOL_INT_W1S(u64 a)
+{
+	return 0x20080 + 0x40000 * a;
+}
+
+/*
+ * NOTE(review): generated CSR layouts -- bit-field order/widths come
+ * from the hardware CSR database; accessors return the byte offset for
+ * instance 'a' (stride 0x40000, presumably the LMAC index -- confirm).
+ */
+/**
+ * Register (RSL) cgx#_smu#_smac
+ *
+ * CGX SMU SMAC Registers
+ */
+union cgxx_smux_smac {
+	u64 u;
+	struct cgxx_smux_smac_s {
+		u64 smac                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_smux_smac_s cn; */
+};
+
+static inline u64 CGXX_SMUX_SMAC(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_SMAC(u64 a)
+{
+	return 0x20108 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_append
+ *
+ * CGX SMU TX Append Control Registers For more details on the
+ * interactions between FCS and PAD, see also the description of
+ * CGX()_SMU()_TX_MIN_PKT[MIN_SIZE].
+ */
+union cgxx_smux_tx_append {
+	u64 u;
+	struct cgxx_smux_tx_append_s {
+		u64 preamble                         : 1;
+		u64 pad                              : 1;
+		u64 fcs_d                            : 1;
+		u64 fcs_c                            : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct cgxx_smux_tx_append_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_APPEND(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_APPEND(u64 a)
+{
+	return 0x20100 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_ctl
+ *
+ * CGX SMU Transmit Control Registers
+ */
+union cgxx_smux_tx_ctl {
+	u64 u;
+	struct cgxx_smux_tx_ctl_s {
+		u64 dic_en                           : 1;
+		u64 uni_en                           : 1;
+		u64 x4a_dis                          : 1;
+		u64 mia_en                           : 1;
+		u64 ls                               : 2;
+		u64 ls_byp                           : 1;
+		u64 l2p_bp_conv                      : 1;
+		u64 hg_en                            : 1;
+		u64 hg_pause_hgi                     : 2;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct cgxx_smux_tx_ctl_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_CTL(u64 a)
+{
+	return 0x20178 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_dack
+ *
+ * CGX SMU TX Drop Counters Registers
+ */
+union cgxx_smux_tx_dack {
+	u64 u;
+	struct cgxx_smux_tx_dack_s {
+		u64 dpi_sdrop_ack                    : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_smux_tx_dack_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_DACK(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_DACK(u64 a)
+{
+	return 0x201b0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_dcnt
+ *
+ * CGX SMU TX Drop Counters Registers
+ */
+union cgxx_smux_tx_dcnt {
+	u64 u;
+	struct cgxx_smux_tx_dcnt_s {
+		u64 dpi_sdrop_cnt                    : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_smux_tx_dcnt_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_DCNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_DCNT(u64 a)
+{
+	return 0x201a8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_eee
+ *
+ * INTERNAL: CGX SMU TX EEE Configure Registers  Resvered. Internal:
+ * These registers control when SMU TX requests to enter or exist LPI.
+ * Those registers take effect only when EEE is supported and enabled for
+ * a given LMAC.
+ */
+union cgxx_smux_tx_eee {
+	u64 u;
+	struct cgxx_smux_tx_eee_s {
+		u64 idle_thresh                      : 28;
+		u64 reserved_28                      : 1;
+		u64 force_lpi                        : 1;
+		u64 wakeup                           : 1;
+		u64 auto_lpi                         : 1;
+		u64 idle_cnt                         : 28;
+		u64 reserved_60_61                   : 2;
+		u64 tx_lpi_wake                      : 1;
+		u64 tx_lpi                           : 1;
+	} s;
+	/* struct cgxx_smux_tx_eee_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_EEE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_EEE(u64 a)
+{
+	return 0x20190 + 0x40000 * a;
+}
+
+/*
+ * NOTE(review): generated CSR layouts; do not reorder bit-fields.
+ * Accessors return byte offsets for instance 'a' (0x40000 stride,
+ * presumably the LMAC index -- confirm against the HRM).
+ */
+/**
+ * Register (RSL) cgx#_smu#_tx_eee_timer_status
+ *
+ * INTERNAL: CGX SMU TX EEE TIMER STATUS Registers  Reserved. Internal:
+ * These registers configure SMU TX EEE timing parameters.
+ */
+union cgxx_smux_tx_eee_timer_status {
+	u64 u;
+	struct cgxx_smux_tx_eee_timer_status_s {
+		u64 lpi_wake_cnt                     : 16;
+		u64 reserved_16_30                   : 15;
+		u64 wake_timer_done                  : 1;
+		u64 link_ok_cnt                      : 30;
+		u64 reserved_62                      : 1;
+		u64 link_timer_done                  : 1;
+	} s;
+	/* struct cgxx_smux_tx_eee_timer_status_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_EEE_TIMER_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_EEE_TIMER_STATUS(u64 a)
+{
+	return 0x201a0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_eee_timing
+ *
+ * INTERNAL: CGX SMU TX EEE TIMING Parameter Registers  Reserved.
+ * Internal: These registers configure SMU TX EEE timing parameters.
+ */
+union cgxx_smux_tx_eee_timing {
+	u64 u;
+	struct cgxx_smux_tx_eee_timing_s {
+		u64 w_sys_tx_min                     : 16;
+		u64 reserved_16_31                   : 16;
+		u64 link_ok_min                      : 30;
+		u64 reserved_62_63                   : 2;
+	} s;
+	/* struct cgxx_smux_tx_eee_timing_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_EEE_TIMING(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_EEE_TIMING(u64 a)
+{
+	return 0x20198 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_ifg
+ *
+ * CGX SMU TX Interframe-Gap Cycles Registers Programming IFG1 and IFG2:
+ * * For XAUI/RXAUI/10G/25G/40G/50G/100G systems that require IEEE 802.3
+ * compatibility, the [IFG1]+[IFG2] sum must be 12. * In loopback mode,
+ * the [IFG1]+[IFG2] of local and remote parties must match exactly;
+ * otherwise loopback FIFO will overrun: CGX()_SMU()_TX_INT[LB_OVRFLW]. *
+ * When CGX()_SMU()_TX_CTL[DIC_EN] is set, [IFG1]+[IFG2] sum must be at
+ * least 8. The behavior of smaller values is un-determined. * When
+ * CGX()_SMU()_TX_CTL[DIC_EN] is cleared, the minimum value of
+ * [IFG1]+[IFG2] is 1 for 40G/50G/100G LMAC_TYPE configurations and 5 for
+ * all other values. The behavior of smaller values is un-determined.
+ * Internal: When CGX()_SMU()_TX_CTL[DIC_EN] is set, SMU TX treats
+ * ([IFG1]+[IFG2]) \< 8 as 8 for 40G/50G/100G MACs and ([IFG1]+[IFG2]) \<
+ * 8 as 8 for other MACs. When CGX()_SMU()_TX_CTL[DIC_EN] is cleared, SMU
+ * TX can work correctly with any IFG1 and IFG2.
+ */
+union cgxx_smux_tx_ifg {
+	u64 u;
+	struct cgxx_smux_tx_ifg_s {
+		u64 ifg1                             : 4;
+		u64 ifg2                             : 4;
+		u64 mia_amt                          : 2;
+		u64 reserved_10_15                   : 6;
+		u64 mia_cnt                          : 8;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct cgxx_smux_tx_ifg_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_IFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_IFG(u64 a)
+{
+	return 0x20160 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_int
+ *
+ * CGX SMU TX Interrupt Registers
+ */
+union cgxx_smux_tx_int {
+	u64 u;
+	struct cgxx_smux_tx_int_s {
+		u64 undflw                           : 1;
+		u64 xchange                          : 1;
+		u64 fake_commit                      : 1;
+		u64 lb_undflw                        : 1;
+		u64 lb_ovrflw                        : 1;
+		u64 dpi_sdrop                        : 1;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct cgxx_smux_tx_int_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_INT(u64 a)
+{
+	return 0x20140 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_int_ena_w1c
+ *
+ * CGX SMU TX Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union cgxx_smux_tx_int_ena_w1c {
+	u64 u;
+	struct cgxx_smux_tx_int_ena_w1c_s {
+		u64 undflw                           : 1;
+		u64 xchange                          : 1;
+		u64 fake_commit                      : 1;
+		u64 lb_undflw                        : 1;
+		u64 lb_ovrflw                        : 1;
+		u64 dpi_sdrop                        : 1;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct cgxx_smux_tx_int_ena_w1c_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_INT_ENA_W1C(u64 a)
+{
+	return 0x20150 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_int_ena_w1s
+ *
+ * CGX SMU TX Interrupt Enable Set Registers This register sets interrupt
+ * enable bits.
+ */
+union cgxx_smux_tx_int_ena_w1s {
+	u64 u;
+	struct cgxx_smux_tx_int_ena_w1s_s {
+		u64 undflw                           : 1;
+		u64 xchange                          : 1;
+		u64 fake_commit                      : 1;
+		u64 lb_undflw                        : 1;
+		u64 lb_ovrflw                        : 1;
+		u64 dpi_sdrop                        : 1;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct cgxx_smux_tx_int_ena_w1s_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_INT_ENA_W1S(u64 a)
+{
+	return 0x20158 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_int_w1s
+ *
+ * CGX SMU TX Interrupt Set Registers This register sets interrupt bits.
+ */
+union cgxx_smux_tx_int_w1s {
+	u64 u;
+	struct cgxx_smux_tx_int_w1s_s {
+		u64 undflw                           : 1;
+		u64 xchange                          : 1;
+		u64 fake_commit                      : 1;
+		u64 lb_undflw                        : 1;
+		u64 lb_ovrflw                        : 1;
+		u64 dpi_sdrop                        : 1;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct cgxx_smux_tx_int_w1s_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_INT_W1S(u64 a)
+{
+	return 0x20148 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_min_pkt
+ *
+ * CGX SMU TX Minimum-Size-Packet Registers Internal: [MIN_SIZE] less
+ * than 16 will be ignored by hardware which will use 16 instead.
+ */
+union cgxx_smux_tx_min_pkt {
+	u64 u;
+	struct cgxx_smux_tx_min_pkt_s {
+		u64 min_size                         : 8;
+		u64 reserved_8_63                    : 56;
+	} s;
+	/* struct cgxx_smux_tx_min_pkt_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_MIN_PKT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_MIN_PKT(u64 a)
+{
+	return 0x20118 + 0x40000 * a;
+}
+
+/*
+ * NOTE(review): generated layouts for the SMU TX PAUSE/threshold CSRs;
+ * accessors return byte offsets for instance 'a' (0x40000 stride,
+ * presumably the LMAC index -- confirm against the HRM).
+ */
+/**
+ * Register (RSL) cgx#_smu#_tx_pause_pkt_dmac
+ *
+ * CGX SMU TX PAUSE-Packet DMAC-Field Registers This register provides
+ * the DMAC value that is placed in outbound PAUSE packets.
+ */
+union cgxx_smux_tx_pause_pkt_dmac {
+	u64 u;
+	struct cgxx_smux_tx_pause_pkt_dmac_s {
+		u64 dmac                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_smux_tx_pause_pkt_dmac_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_PAUSE_PKT_DMAC(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_PAUSE_PKT_DMAC(u64 a)
+{
+	return 0x20168 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_pause_pkt_interval
+ *
+ * CGX SMU TX PAUSE-Packet Transmission-Interval Registers This register
+ * specifies how often PAUSE packets are sent.
+ */
+union cgxx_smux_tx_pause_pkt_interval {
+	u64 u;
+	struct cgxx_smux_tx_pause_pkt_interval_s {
+		u64 interval                         : 16;
+		u64 hg2_intra_interval               : 16;
+		u64 hg2_intra_en                     : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct cgxx_smux_tx_pause_pkt_interval_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_PAUSE_PKT_INTERVAL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_PAUSE_PKT_INTERVAL(u64 a)
+{
+	return 0x20120 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_pause_pkt_time
+ *
+ * CGX SMU TX PAUSE Packet Time Registers
+ */
+union cgxx_smux_tx_pause_pkt_time {
+	u64 u;
+	struct cgxx_smux_tx_pause_pkt_time_s {
+		u64 p_time                           : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_smux_tx_pause_pkt_time_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TIME(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TIME(u64 a)
+{
+	return 0x20110 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_pause_pkt_type
+ *
+ * CGX SMU TX PAUSE-Packet P_TYPE-Field Registers This register provides
+ * the P_TYPE field that is placed in outbound PAUSE packets.
+ */
+union cgxx_smux_tx_pause_pkt_type {
+	u64 u;
+	struct cgxx_smux_tx_pause_pkt_type_s {
+		u64 p_type                           : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_smux_tx_pause_pkt_type_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TYPE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_PAUSE_PKT_TYPE(u64 a)
+{
+	return 0x20170 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_pause_togo
+ *
+ * CGX SMU TX Time-to-Backpressure Registers
+ */
+union cgxx_smux_tx_pause_togo {
+	u64 u;
+	struct cgxx_smux_tx_pause_togo_s {
+		u64 p_time                           : 16;
+		u64 msg_time                         : 16;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_smux_tx_pause_togo_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_PAUSE_TOGO(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_PAUSE_TOGO(u64 a)
+{
+	return 0x20130 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_pause_zero
+ *
+ * CGX SMU TX PAUSE Zero Registers
+ */
+union cgxx_smux_tx_pause_zero {
+	u64 u;
+	struct cgxx_smux_tx_pause_zero_s {
+		u64 send                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_smux_tx_pause_zero_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_PAUSE_ZERO(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_PAUSE_ZERO(u64 a)
+{
+	return 0x20138 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_soft_pause
+ *
+ * CGX SMU TX Soft PAUSE Registers
+ */
+union cgxx_smux_tx_soft_pause {
+	u64 u;
+	struct cgxx_smux_tx_soft_pause_s {
+		u64 p_time                           : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_smux_tx_soft_pause_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_SOFT_PAUSE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_SOFT_PAUSE(u64 a)
+{
+	return 0x20128 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_smu#_tx_thresh
+ *
+ * CGX SMU TX Threshold Registers
+ */
+union cgxx_smux_tx_thresh {
+	u64 u;
+	struct cgxx_smux_tx_thresh_s {
+		u64 cnt                              : 12;
+		u64 reserved_12_15                   : 4;
+		u64 dpi_thresh                       : 5;
+		u64 reserved_21_23                   : 3;
+		u64 dpi_depth                        : 5;
+		u64 reserved_29_31                   : 3;
+		u64 ecnt                             : 12;
+		u64 reserved_44_63                   : 20;
+	} s;
+	/* struct cgxx_smux_tx_thresh_s cn; */
+};
+
+static inline u64 CGXX_SMUX_TX_THRESH(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SMUX_TX_THRESH(u64 a)
+{
+	return 0x20180 + 0x40000 * a;
+}
+
+/*
+ * NOTE(review): SPU autonegotiation CSRs (generated); field order maps
+ * IEEE 802.3 clause 73 link code word bits -- do not reorder. Accessors
+ * return byte offsets for instance 'a' (0x40000 stride, presumably the
+ * LMAC index -- confirm against the HRM).
+ */
+/**
+ * Register (RSL) cgx#_spu#_an_adv
+ *
+ * CGX SPU Autonegotiation Advertisement Registers Software programs this
+ * register with the contents of the AN-link code word base page to be
+ * transmitted during autonegotiation. (See IEEE 802.3 section 73.6 for
+ * details.) Any write operations to this register prior to completion of
+ * autonegotiation, as indicated by CGX()_SPU()_AN_STATUS[AN_COMPLETE],
+ * should be followed by a renegotiation in order for the new values to
+ * take effect. Renegotiation is initiated by setting
+ * CGX()_SPU()_AN_CONTROL[AN_RESTART]. Once autonegotiation has
+ * completed, software can examine this register along with
+ * CGX()_SPU()_AN_LP_BASE to determine the highest common denominator
+ * technology.
+ */
+union cgxx_spux_an_adv {
+	u64 u;
+	struct cgxx_spux_an_adv_s {
+		u64 s                                : 5;
+		u64 e                                : 5;
+		u64 pause                            : 1;
+		u64 asm_dir                          : 1;
+		u64 xnp_able                         : 1;
+		u64 rf                               : 1;
+		u64 ack                              : 1;
+		u64 np                               : 1;
+		u64 t                                : 5;
+		u64 a1g_kx                           : 1;
+		u64 a10g_kx4                         : 1;
+		u64 a10g_kr                          : 1;
+		u64 a40g_kr4                         : 1;
+		u64 a40g_cr4                         : 1;
+		u64 a100g_cr10                       : 1;
+		u64 a100g_kp4                        : 1;
+		u64 a100g_kr4                        : 1;
+		u64 a100g_cr4                        : 1;
+		u64 a25g_krs_crs                     : 1;
+		u64 a25g_kr_cr                       : 1;
+		u64 arsv                             : 12;
+		u64 a25g_rs_fec_req                  : 1;
+		u64 a25g_br_fec_req                  : 1;
+		u64 fec_able                         : 1;
+		u64 fec_req                          : 1;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_spux_an_adv_s cn; */
+};
+
+static inline u64 CGXX_SPUX_AN_ADV(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_AN_ADV(u64 a)
+{
+	return 0x10198 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_an_bp_status
+ *
+ * CGX SPU Autonegotiation Backplane Ethernet & BASE-R Copper Status
+ * Registers The contents of this register are updated during
+ * autonegotiation and are valid when CGX()_SPU()_AN_STATUS[AN_COMPLETE]
+ * is set. At that time, one of the port type bits will be set depending
+ * on the AN priority resolution. The port types are listed in order of
+ * decreasing priority. If a BASE-R type is negotiated then [FEC] or
+ * [RS_FEC] will be set to indicate whether/which FEC operation has been
+ * negotiated and will be clear otherwise.
+ */
+union cgxx_spux_an_bp_status {
+	u64 u;
+	struct cgxx_spux_an_bp_status_s {
+		u64 bp_an_able                       : 1;
+		u64 n1g_kx                           : 1;
+		u64 n10g_kx4                         : 1;
+		u64 n10g_kr                          : 1;
+		u64 n25g_kr1                         : 1;
+		u64 n25g_cr1                         : 1;
+		u64 n25g_krs_crs                     : 1;
+		u64 n25g_kr_cr                       : 1;
+		u64 n40g_kr4                         : 1;
+		u64 n40g_cr4                         : 1;
+		u64 n50g_kr2                         : 1;
+		u64 n50g_cr2                         : 1;
+		u64 n100g_cr10                       : 1;
+		u64 n100g_kp4                        : 1;
+		u64 n100g_kr4                        : 1;
+		u64 n100g_cr4                        : 1;
+		u64 fec                              : 1;
+		u64 rs_fec                           : 1;
+		u64 reserved_18_63                   : 46;
+	} s;
+	/* struct cgxx_spux_an_bp_status_s cn; */
+};
+
+static inline u64 CGXX_SPUX_AN_BP_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_AN_BP_STATUS(u64 a)
+{
+	return 0x101b8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_an_control
+ *
+ * CGX SPU Autonegotiation Control Registers
+ */
+union cgxx_spux_an_control {
+	u64 u;
+	struct cgxx_spux_an_control_s {
+		u64 reserved_0_8                     : 9;
+		u64 an_restart                       : 1;
+		u64 reserved_10_11                   : 2;
+		u64 an_en                            : 1;
+		u64 xnp_en                           : 1;
+		u64 reserved_14                      : 1;
+		u64 an_reset                         : 1;
+		u64 an_arb_link_chk_en               : 1;
+		u64 usx_an_arb_link_chk_en           : 1;
+		u64 reserved_18_63                   : 46;
+	} s;
+	/* struct cgxx_spux_an_control_s cn; */
+};
+
+static inline u64 CGXX_SPUX_AN_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_AN_CONTROL(u64 a)
+{
+	return 0x10188 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_an_lp_base
+ *
+ * CGX SPU Autonegotiation Link-Partner Base-Page Ability Registers This
+ * register captures the contents of the latest AN link code word base
+ * page received from the link partner during autonegotiation. (See IEEE
+ * 802.3 section 73.6 for details.) CGX()_SPU()_AN_STATUS[PAGE_RX] is set
+ * when this register is updated by hardware.
+ */
+union cgxx_spux_an_lp_base {
+	u64 u;
+	struct cgxx_spux_an_lp_base_s {
+		u64 s                                : 5;
+		u64 e                                : 5;
+		u64 pause                            : 1;
+		u64 asm_dir                          : 1;
+		u64 xnp_able                         : 1;
+		u64 rf                               : 1;
+		u64 ack                              : 1;
+		u64 np                               : 1;
+		u64 t                                : 5;
+		u64 a1g_kx                           : 1;
+		u64 a10g_kx4                         : 1;
+		u64 a10g_kr                          : 1;
+		u64 a40g_kr4                         : 1;
+		u64 a40g_cr4                         : 1;
+		u64 a100g_cr10                       : 1;
+		u64 a100g_kp4                        : 1;
+		u64 a100g_kr4                        : 1;
+		u64 a100g_cr4                        : 1;
+		u64 a25g_krs_crs                     : 1;
+		u64 a25g_kr_cr                       : 1;
+		u64 arsv                             : 12;
+		u64 a25g_rs_fec_req                  : 1;
+		u64 a25g_br_fec_req                  : 1;
+		u64 fec_able                         : 1;
+		u64 fec_req                          : 1;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_spux_an_lp_base_s cn; */
+};
+
+static inline u64 CGXX_SPUX_AN_LP_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_AN_LP_BASE(u64 a)
+{
+	return 0x101a0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_an_lp_xnp
+ *
+ * CGX SPU Autonegotiation Link Partner Extended Next Page Ability
+ * Registers This register captures the contents of the latest next page
+ * code word received from the link partner during autonegotiation, if
+ * any. See IEEE 802.3 section 73.7.7 for details.
+ */
+union cgxx_spux_an_lp_xnp {
+	u64 u;
+	struct cgxx_spux_an_lp_xnp_s {
+		u64 m_u                              : 11;
+		u64 toggle                           : 1;
+		u64 ack2                             : 1;
+		u64 mp                               : 1;
+		u64 ack                              : 1;
+		u64 np                               : 1;
+		u64 u                                : 32;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_spux_an_lp_xnp_s cn; */
+};
+
+static inline u64 CGXX_SPUX_AN_LP_XNP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_AN_LP_XNP(u64 a)
+{
+	return 0x101b0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_an_status
+ *
+ * CGX SPU Autonegotiation Status Registers
+ */
+union cgxx_spux_an_status {
+	u64 u;
+	struct cgxx_spux_an_status_s {
+		u64 lp_an_able                       : 1;
+		u64 reserved_1                       : 1;
+		u64 link_status                      : 1;
+		u64 an_able                          : 1;
+		u64 rmt_flt                          : 1;
+		u64 an_complete                      : 1;
+		u64 page_rx                          : 1;
+		u64 xnp_stat                         : 1;
+		u64 reserved_8                       : 1;
+		u64 prl_flt                          : 1;
+		u64 reserved_10_63                   : 54;
+	} s;
+	/* struct cgxx_spux_an_status_s cn; */
+};
+
+static inline u64 CGXX_SPUX_AN_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_AN_STATUS(u64 a)
+{
+	return 0x10190 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_an_xnp_tx
+ *
+ * CGX SPU Autonegotiation Extended Next Page Transmit Registers Software
+ * programs this register with the contents of the AN message next page
+ * or unformatted next page link code word to be transmitted during
+ * autonegotiation. Next page exchange occurs after the base link code
+ * words have been exchanged if either end of the link segment sets the
+ * NP bit to 1, indicating that it has at least one next page to send.
+ * Once initiated, next page exchange continues until both ends of the
+ * link segment set their NP bits to 0. See IEEE 802.3 section 73.7.7 for
+ * details.
+ */
+union cgxx_spux_an_xnp_tx {
+	u64 u;
+	struct cgxx_spux_an_xnp_tx_s {
+		u64 m_u                              : 11;
+		u64 toggle                           : 1;
+		u64 ack2                             : 1;
+		u64 mp                               : 1;
+		u64 ack                              : 1;
+		u64 np                               : 1;
+		u64 u                                : 32;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct cgxx_spux_an_xnp_tx_s cn; */
+};
+
+static inline u64 CGXX_SPUX_AN_XNP_TX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_AN_XNP_TX(u64 a)
+{
+	return 0x101a8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_algn_status
+ *
+ * CGX SPU Multilane BASE-R PCS Alignment-Status Registers This register
+ * implements the IEEE 802.3 multilane BASE-R PCS alignment status 1-4
+ * registers (3.50-3.53). It is valid only when the LPCS type is
+ * 40GBASE-R, 50GBASE-R, 100GBASE-R, (CGX()_CMR()_CONFIG[LMAC_TYPE] =
+ * CGX_LMAC_TYPES_E::FORTYG_R,FIFTYG_R,HUNDREDG_R), and always returns
+ * 0x0 for all other LPCS types. Service interfaces (lanes) 19-0 (100G)
+ * and 3-0 (all others) are mapped to PCS lanes 19-0 or 3-0 via
+ * CGX()_SPU()_BR_LANE_MAP()[LN_MAPPING]. For 100G, logical lane 0 fans
+ * out to service interfaces 0-4, logical lane 1 fans out to service
+ * interfaces 5-9, ... etc. For all other modes, logical lanes and
+ * service interfaces are identical. Logical interfaces (lanes) map to
+ * SerDes lanes via CGX()_CMR()_CONFIG[LANE_TO_SDS] (programmable).
+ */
+union cgxx_spux_br_algn_status {
+	u64 u;
+	struct cgxx_spux_br_algn_status_s {
+		u64 block_lock                       : 20;
+		u64 reserved_20_29                   : 10;
+		u64 alignd                           : 1;
+		u64 reserved_31_40                   : 10;
+		u64 marker_lock                      : 20;
+		u64 reserved_61_63                   : 3;
+	} s;
+	/* struct cgxx_spux_br_algn_status_s cn; */
+};
+
+static inline u64 CGXX_SPUX_BR_ALGN_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_ALGN_STATUS(u64 a)
+{
+	return 0x10050 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_lane_map#
+ *
+ * CGX SPU 40,50,100GBASE-R Lane-Mapping Registers This register
+ * implements the IEEE 802.3 lane 0-19 mapping registers (3.400-3.403).
+ * It is valid only when the LPCS type is 40GBASE-R, 50GBASE-R,
+ * 100GBASE-R, USXGMII (CGX()_CMR()_CONFIG[LMAC_TYPE]), and always
+ * returns 0x0 for all other LPCS types. The LNx_MAPPING field for each
+ * programmed PCS lane (called service interface in 802.3) is valid when
+ * that lane has achieved alignment marker lock on the receive side (i.e.
+ * the associated CGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), and is
+ * invalid otherwise. When valid, it returns the actual detected receive
+ * PCS lane number based on the received alignment marker contents
+ * received on that service interface.  In RS-FEC mode the LNx_MAPPING
+ * field is valid when that lane has achieved alignment marker lock on
+ * the receive side (i.e. the associated
+ * CGX()_SPU()_RSFEC_STATUS[AMPS_LOCK] = 1), and is invalid otherwise.
+ * When valid, it returns the actual detected receive FEC lane number
+ * based on the received alignment marker contents received on that
+ * logical lane therefore expect for RS-FEC that LNx_MAPPING = x.  The
+ * mapping is flexible because IEEE 802.3 allows multilane BASE-R receive
+ * lanes to be re-ordered. Note that for the transmit side, each logical
+ * lane is mapped to a physical SerDes lane based on the programming of
+ * CGX()_CMR()_CONFIG[LANE_TO_SDS]. For the receive side,
+ * CGX()_CMR()_CONFIG[LANE_TO_SDS] specifies the logical lane to physical
+ * SerDes lane mapping, and this register specifies the service interface
+ * (or lane) to PCS lane mapping.
+ */
+union cgxx_spux_br_lane_mapx {
+	u64 u;
+	struct cgxx_spux_br_lane_mapx_s {
+		u64 ln_mapping                       : 6;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct cgxx_spux_br_lane_mapx_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_LANE_MAPX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_LANE_MAPX(u64 a, u64 b)
+{
+	return 0x10600 + 0x40000 * a + 8 * b; /* 0x40000/SPU, 8 bytes/lane */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_pmd_control
+ *
+ * CGX SPU BASE-R PMD Control Registers
+ */
+union cgxx_spux_br_pmd_control {
+	u64 u;
+	struct cgxx_spux_br_pmd_control_s {
+		u64 train_restart                    : 1;
+		u64 train_en                         : 1;
+		u64 use_lane_poly                    : 1;
+		u64 max_wait_disable                 : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	struct cgxx_spux_br_pmd_control_cn96xx {
+		u64 train_restart                    : 1;
+		u64 train_en                         : 1;
+		u64 use_lane_poly                    : 1;
+		u64 reserved_3_63                    : 61;
+	} cn96xx;
+	/* struct cgxx_spux_br_pmd_control_s cnf95xxp1; - same as common layout */
+	/* struct cgxx_spux_br_pmd_control_cn96xx cnf95xxp2; - same as cn96xx */
+};
+
+static inline u64 CGXX_SPUX_BR_PMD_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_PMD_CONTROL(u64 a)
+{
+	return 0x100a8 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_pmd_ld_cup
+ *
+ * INTERNAL:CGX SPU BASE-R PMD Local Device Coefficient Update Registers
+ * This register implements MDIO register 1.154 of 802.3-2012 Section 5
+ * CL45 for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note
+ * that for 10G, 25G LN0_ only is used.  It implements  MDIO registers
+ * 1.1300-1.1303 for all other BASE-R modes (40G, 50G, 100G) per
+ * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used.  The
+ * fields in this register are read/write even though they are specified
+ * as read-only in 802.3.  The register is automatically cleared at the
+ * start of training. When link training is in progress, each field
+ * reflects the contents of the coefficient update field in the
+ * associated lane's outgoing training frame.  If
+ * CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is set, then this register
+ * must be updated by software during link training and hardware updates
+ * are disabled. If CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is clear,
+ * this register is automatically updated by hardware, and it should not
+ * be written by software. The lane fields in this register are indexed
+ * by logical PCS lane ID.
+ */
+union cgxx_spux_br_pmd_ld_cup {
+	u64 u;
+	struct cgxx_spux_br_pmd_ld_cup_s {
+		u64 ln0_cup                          : 16;
+		u64 ln1_cup                          : 16;
+		u64 ln2_cup                          : 16;
+		u64 ln3_cup                          : 16;
+	} s;
+	/* struct cgxx_spux_br_pmd_ld_cup_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_PMD_LD_CUP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_PMD_LD_CUP(u64 a)
+{
+	return 0x100c8 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_pmd_ld_rep
+ *
+ * INTERNAL:CGX SPU BASE-R PMD Local Device Status Report Registers  This
+ * register implements MDIO register 1.155 of 802.3-2012 Section 5 CL45
+ * for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note that
+ * for 10G, 25G LN0_ only is used.  It implements  MDIO registers
+ * 1.1400-1.1403 for all other BASE-R modes (40G, 50G, 100G) per
+ * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used.  The
+ * fields in this register are read/write even though they are specified
+ * as read-only in 802.3.  The register is automatically cleared at the
+ * start of training. Each field reflects the contents of the status
+ * report field in the associated lane's outgoing training frame.  If
+ * CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is set, then this register
+ * must be updated by software during link training and hardware updates
+ * are disabled. If CGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is clear,
+ * this register is automatically updated by hardware, and it should not
+ * be written by software. The lane fields in this register are indexed
+ * by logical PCS lane ID.
+ */
+union cgxx_spux_br_pmd_ld_rep {
+	u64 u;
+	struct cgxx_spux_br_pmd_ld_rep_s {
+		u64 ln0_rep                          : 16;
+		u64 ln1_rep                          : 16;
+		u64 ln2_rep                          : 16;
+		u64 ln3_rep                          : 16;
+	} s;
+	/* struct cgxx_spux_br_pmd_ld_rep_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_PMD_LD_REP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_PMD_LD_REP(u64 a)
+{
+	return 0x100d0 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_pmd_lp_cup
+ *
+ * INTERNAL:CGX SPU BASE-R PMD Link Partner Coefficient Update Registers
+ * This register implements MDIO register 1.152 of 802.3-2012 Section 5
+ * CL45 for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note
+ * that for 10G, 25G LN0_ only is used.  It implements  MDIO registers
+ * 1.1100-1.1103 for all other BASE-R modes (40G, 50G, 100G) per
+ * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used.  The
+ * register is automatically cleared at the start of training. Each field
+ * reflects the contents of the coefficient update field in the lane's
+ * most recently received training frame. This register should not be
+ * written when link training is enabled, i.e. when
+ * CGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set. The lane fields in this
+ * register are indexed by logical PCS lane ID.
+ */
+union cgxx_spux_br_pmd_lp_cup {
+	u64 u;
+	struct cgxx_spux_br_pmd_lp_cup_s {
+		u64 ln0_cup                          : 16;
+		u64 ln1_cup                          : 16;
+		u64 ln2_cup                          : 16;
+		u64 ln3_cup                          : 16;
+	} s;
+	/* struct cgxx_spux_br_pmd_lp_cup_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_PMD_LP_CUP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_PMD_LP_CUP(u64 a)
+{
+	return 0x100b8 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_pmd_lp_rep
+ *
+ * INTERNAL:CGX SPU BASE-R PMD Link Partner Status Report Registers  This
+ * register implements MDIO register 1.153 of 802.3-2012 Section 5 CL45
+ * for 10GBASE-R and of 802.3by-2016 CL45 for 25GBASE-R. Note that
+ * for 10G, 25G LN0_ only is used.  It implements  MDIO registers
+ * 1.1200-1.1203 for all other BASE-R modes (40G, 50G, 100G) per
+ * 802.3bj-2014 CL45. Note that for 50G LN0_ and LN1_ only are used.  The
+ * register is automatically cleared at the start of training. Each field
+ * reflects the contents of the status report field in the lane's
+ * most recently received training frame. This register should not be
+ * written when link training is enabled, i.e. when
+ * CGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set. The lane fields in this
+ * register are indexed by logical PCS lane ID.
+ */
+union cgxx_spux_br_pmd_lp_rep {
+	u64 u;
+	struct cgxx_spux_br_pmd_lp_rep_s {
+		u64 ln0_rep                          : 16;
+		u64 ln1_rep                          : 16;
+		u64 ln2_rep                          : 16;
+		u64 ln3_rep                          : 16;
+	} s;
+	/* struct cgxx_spux_br_pmd_lp_rep_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_PMD_LP_REP(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_PMD_LP_REP(u64 a)
+{
+	return 0x100c0 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_pmd_status
+ *
+ * INTERNAL:CGX SPU BASE-R PMD Status Registers  The lane fields in this
+ * register are indexed by logical PCS lane ID. The lane 0 field (LN0_*)
+ * is valid for 10GBASE-R, 25GBASE-R, 40GBASE-R, 50GBASE-R and
+ * 100GBASE-R. The lane 1 field (LN1_*) is valid for 40GBASE-R, 50GBASE-R
+ * and 100GBASE-R. The remaining fields (LN2_*, LN3_*) are only valid for
+ * 40GBASE-R and 100GBASE-R.
+ */
+union cgxx_spux_br_pmd_status {
+	u64 u;
+	struct cgxx_spux_br_pmd_status_s {
+		u64 ln0_train_status                 : 4;
+		u64 ln1_train_status                 : 4;
+		u64 ln2_train_status                 : 4;
+		u64 ln3_train_status                 : 4;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_br_pmd_status_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_PMD_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_PMD_STATUS(u64 a)
+{
+	return 0x100b0 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_status1
+ *
+ * CGX SPU BASE-R Status 1 Registers
+ */
+union cgxx_spux_br_status1 {
+	u64 u;
+	struct cgxx_spux_br_status1_s {
+		u64 blk_lock                         : 1;
+		u64 hi_ber                           : 1;
+		u64 prbs31                           : 1;
+		u64 prbs9                            : 1;
+		u64 reserved_4_11                    : 8;
+		u64 rcv_lnk                          : 1;
+		u64 reserved_13_63                   : 51;
+	} s;
+	/* struct cgxx_spux_br_status1_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_STATUS1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_STATUS1(u64 a)
+{
+	return 0x10030 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_status2
+ *
+ * CGX SPU BASE-R Status 2 Registers This register implements a
+ * combination of the following IEEE 802.3 registers: * BASE-R PCS status
+ * 2 (MDIO address 3.33). * BASE-R BER high-order counter (MDIO address
+ * 3.44). * Errored-blocks high-order counter (MDIO address 3.45).  Note
+ * that the relative locations of some fields have been moved from IEEE
+ * 802.3 in order to make the register layout more software friendly: the
+ * BER counter high-order and low-order bits from sections 3.44 and 3.33
+ * have been combined into the contiguous, 22-bit [BER_CNT] field;
+ * likewise, the errored-blocks counter high-order and low-order bits
+ * from section 3.45 have been combined into the contiguous, 22-bit
+ * [ERR_BLKS] field.
+ */
+union cgxx_spux_br_status2 {
+	u64 u;
+	struct cgxx_spux_br_status2_s {
+		u64 reserved_0_13                    : 14;
+		u64 latched_ber                      : 1;
+		u64 latched_lock                     : 1;
+		u64 ber_cnt                          : 22;
+		u64 reserved_38_39                   : 2;
+		u64 err_blks                         : 22;
+		u64 reserved_62_63                   : 2;
+	} s;
+	/* struct cgxx_spux_br_status2_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_STATUS2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_STATUS2(u64 a)
+{
+	return 0x10038 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_tp_control
+ *
+ * CGX SPU BASE-R Test-Pattern Control Registers Refer to the test
+ * pattern methodology described in 802.3 sections 49.2.8 and 82.2.10.
+ */
+union cgxx_spux_br_tp_control {
+	u64 u;
+	struct cgxx_spux_br_tp_control_s {
+		u64 dp_sel                           : 1;
+		u64 tp_sel                           : 1;
+		u64 rx_tp_en                         : 1;
+		u64 tx_tp_en                         : 1;
+		u64 prbs31_tx                        : 1;
+		u64 prbs31_rx                        : 1;
+		u64 prbs9_tx                         : 1;
+		u64 scramble_tp                      : 2;
+		u64 pr_tp_data_type                  : 1;
+		u64 reserved_10_63                   : 54;
+	} s;
+	/* struct cgxx_spux_br_tp_control_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_TP_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_TP_CONTROL(u64 a)
+{
+	return 0x10040 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_tp_err_cnt
+ *
+ * CGX SPU BASE-R Test-Pattern Error-Count Registers This register
+ * provides the BASE-R PCS test-pattern error counter.
+ */
+union cgxx_spux_br_tp_err_cnt {
+	u64 u;
+	struct cgxx_spux_br_tp_err_cnt_s {
+		u64 err_cnt                          : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_br_tp_err_cnt_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_TP_ERR_CNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_TP_ERR_CNT(u64 a)
+{
+	return 0x10048 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_tp_seed_a
+ *
+ * CGX SPU BASE-R Test-Pattern Seed A Registers Refer to the test pattern
+ * methodology described in 802.3 sections 49.2.8 and 82.2.10.
+ */
+union cgxx_spux_br_tp_seed_a {
+	u64 u;
+	struct cgxx_spux_br_tp_seed_a_s {
+		u64 tp_seed_a                        : 58;
+		u64 reserved_58_63                   : 6;
+	} s;
+	/* struct cgxx_spux_br_tp_seed_a_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_TP_SEED_A(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_TP_SEED_A(u64 a)
+{
+	return 0x10060 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_br_tp_seed_b
+ *
+ * CGX SPU BASE-R Test-Pattern Seed B Registers Refer to the test pattern
+ * methodology described in 802.3 sections 49.2.8 and 82.2.10.
+ */
+union cgxx_spux_br_tp_seed_b {
+	u64 u;
+	struct cgxx_spux_br_tp_seed_b_s {
+		u64 tp_seed_b                        : 58;
+		u64 reserved_58_63                   : 6;
+	} s;
+	/* struct cgxx_spux_br_tp_seed_b_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BR_TP_SEED_B(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BR_TP_SEED_B(u64 a)
+{
+	return 0x10068 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_bx_status
+ *
+ * CGX SPU BASE-X Status Registers
+ */
+union cgxx_spux_bx_status {
+	u64 u;
+	struct cgxx_spux_bx_status_s {
+		u64 lsync                            : 4;
+		u64 reserved_4_10                    : 7;
+		u64 pattst                           : 1;
+		u64 alignd                           : 1;
+		u64 reserved_13_63                   : 51;
+	} s;
+	/* struct cgxx_spux_bx_status_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_BX_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_BX_STATUS(u64 a)
+{
+	return 0x10028 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_control1
+ *
+ * CGX SPU Control 1 Registers
+ */
+union cgxx_spux_control1 {
+	u64 u;
+	struct cgxx_spux_control1_s {
+		u64 reserved_0_1                     : 2;
+		u64 spd                              : 4;
+		u64 spdsel0                          : 1;
+		u64 reserved_7_10                    : 4;
+		u64 lo_pwr                           : 1;
+		u64 reserved_12                      : 1;
+		u64 spdsel1                          : 1;
+		u64 loopbck                          : 1;
+		u64 reset                            : 1;
+		u64 usxgmii_type                     : 3;
+		u64 usxgmii_rate                     : 3;
+		u64 disable_am                       : 1;
+		u64 reserved_23_63                   : 41;
+	} s;
+	struct cgxx_spux_control1_cn96xxp1 {
+		u64 reserved_0_1                     : 2;
+		u64 spd                              : 4;
+		u64 spdsel0                          : 1;
+		u64 reserved_7_10                    : 4;
+		u64 lo_pwr                           : 1;
+		u64 reserved_12                      : 1;
+		u64 spdsel1                          : 1;
+		u64 loopbck                          : 1;
+		u64 reset                            : 1;
+		u64 usxgmii_type                     : 3;
+		u64 usxgmii_rate                     : 3;
+		u64 reserved_22_63                   : 42;
+	} cn96xxp1;
+	/* struct cgxx_spux_control1_s cn96xxp3; - same as common layout */
+	/* struct cgxx_spux_control1_cn96xxp1 cnf95xxp1; - same as cn96xxp1 */
+	struct cgxx_spux_control1_cnf95xxp2 {
+		u64 reserved_0_1                     : 2;
+		u64 spd                              : 4;
+		u64 spdsel0                          : 1;
+		u64 reserved_7_10                    : 4;
+		u64 lo_pwr                           : 1;
+		u64 reserved_12                      : 1;
+		u64 spdsel1                          : 1;
+		u64 loopbck                          : 1;
+		u64 reset                            : 1;
+		u64 usxgmii_type                     : 3;
+		u64 usxgmii_rate                     : 3;
+		u64 reserved_22                      : 1;
+		u64 reserved_23_63                   : 41;
+	} cnf95xxp2;
+};
+
+static inline u64 CGXX_SPUX_CONTROL1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_CONTROL1(u64 a)
+{
+	return 0x10000 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_control2
+ *
+ * CGX SPU Control 2 Registers
+ */
+union cgxx_spux_control2 {
+	u64 u;
+	struct cgxx_spux_control2_s {
+		u64 pcs_type                         : 4;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct cgxx_spux_control2_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_CONTROL2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_CONTROL2(u64 a)
+{
+	return 0x10018 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_fec_abil
+ *
+ * CGX SPU Forward Error Correction Ability Registers
+ */
+union cgxx_spux_fec_abil {
+	u64 u;
+	struct cgxx_spux_fec_abil_s {
+		u64 fec_abil                         : 1;
+		u64 err_abil                         : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct cgxx_spux_fec_abil_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_FEC_ABIL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_FEC_ABIL(u64 a)
+{
+	return 0x100d8 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_fec_control
+ *
+ * CGX SPU Forward Error Correction Control Registers
+ */
+union cgxx_spux_fec_control {
+	u64 u;
+	struct cgxx_spux_fec_control_s {
+		u64 fec_en                           : 2;
+		u64 err_en                           : 1;
+		u64 fec_byp_ind_en                   : 1;
+		u64 fec_byp_cor_en                   : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	/* struct cgxx_spux_fec_control_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_FEC_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_FEC_CONTROL(u64 a)
+{
+	return 0x100e0 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_fec_ln#_rsfec_err
+ *
+ * CGX SPU Reed-Solomon FEC Symbol Error Counter for FEC Lanes 0-3
+ * Registers This register is valid only when Reed-Solomon FEC is
+ * enabled. The symbol error counters are defined in 802.3 section
+ * 91.6.11 (for 100G and extended to 50G) and 802.3by-2016 section
+ * 108.6.9 (for 25G and extended to USXGMII). The counter is reset to all
+ * zeros when the register is read, and held at all ones in case of
+ * overflow.  The reset operation takes precedence over the increment
+ * operation; if the register is read on the same clock cycle as an
+ * increment operation, the counter is reset to all zeros and the
+ * increment operation is lost. The counters are writable for test
+ * purposes, rather than read-only as specified in IEEE 802.3.
+ */
+union cgxx_spux_fec_lnx_rsfec_err {
+	u64 u;
+	struct cgxx_spux_fec_lnx_rsfec_err_s {
+		u64 symb_err_cnt                     : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_spux_fec_lnx_rsfec_err_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_FEC_LNX_RSFEC_ERR(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_FEC_LNX_RSFEC_ERR(u64 a, u64 b)
+{
+	return 0x10900 + 0x40000 * a + 8 * b; /* 0x40000/SPU, 8 bytes/lane */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_int
+ *
+ * CGX SPU Interrupt Registers
+ */
+union cgxx_spux_int {
+	u64 u;
+	struct cgxx_spux_int_s {
+		u64 rx_link_up                       : 1;
+		u64 rx_link_down                     : 1;
+		u64 err_blk                          : 1;
+		u64 bitlckls                         : 1;
+		u64 synlos                           : 1;
+		u64 algnlos                          : 1;
+		u64 dbg_sync                         : 1;
+		u64 bip_err                          : 1;
+		u64 fec_corr                         : 1;
+		u64 fec_uncorr                       : 1;
+		u64 an_page_rx                       : 1;
+		u64 an_link_good                     : 1;
+		u64 an_complete                      : 1;
+		u64 training_done                    : 1;
+		u64 training_failure                 : 1;
+		u64 fec_align_status                 : 1;
+		u64 rsfec_corr                       : 1;
+		u64 rsfec_uncorr                     : 1;
+		u64 hi_ser                           : 1;
+		u64 usx_an_lnk_st                    : 1;
+		u64 usx_an_cpt                       : 1;
+		u64 reserved_21_63                   : 43;
+	} s;
+	/* struct cgxx_spux_int_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_INT(u64 a)
+{
+	return 0x10220 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_int_ena_w1c
+ *
+ * CGX SPU Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union cgxx_spux_int_ena_w1c {
+	u64 u;
+	struct cgxx_spux_int_ena_w1c_s {
+		u64 rx_link_up                       : 1;
+		u64 rx_link_down                     : 1;
+		u64 err_blk                          : 1;
+		u64 bitlckls                         : 1;
+		u64 synlos                           : 1;
+		u64 algnlos                          : 1;
+		u64 dbg_sync                         : 1;
+		u64 bip_err                          : 1;
+		u64 fec_corr                         : 1;
+		u64 fec_uncorr                       : 1;
+		u64 an_page_rx                       : 1;
+		u64 an_link_good                     : 1;
+		u64 an_complete                      : 1;
+		u64 training_done                    : 1;
+		u64 training_failure                 : 1;
+		u64 fec_align_status                 : 1;
+		u64 rsfec_corr                       : 1;
+		u64 rsfec_uncorr                     : 1;
+		u64 hi_ser                           : 1;
+		u64 usx_an_lnk_st                    : 1;
+		u64 usx_an_cpt                       : 1;
+		u64 reserved_21_63                   : 43;
+	} s;
+	/* struct cgxx_spux_int_ena_w1c_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_INT_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_INT_ENA_W1C(u64 a)
+{
+	return 0x10230 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_int_ena_w1s
+ *
+ * CGX SPU Interrupt Enable Set Registers This register sets interrupt
+ * enable bits.
+ */
+union cgxx_spux_int_ena_w1s {
+	u64 u;
+	struct cgxx_spux_int_ena_w1s_s {
+		u64 rx_link_up                       : 1;
+		u64 rx_link_down                     : 1;
+		u64 err_blk                          : 1;
+		u64 bitlckls                         : 1;
+		u64 synlos                           : 1;
+		u64 algnlos                          : 1;
+		u64 dbg_sync                         : 1;
+		u64 bip_err                          : 1;
+		u64 fec_corr                         : 1;
+		u64 fec_uncorr                       : 1;
+		u64 an_page_rx                       : 1;
+		u64 an_link_good                     : 1;
+		u64 an_complete                      : 1;
+		u64 training_done                    : 1;
+		u64 training_failure                 : 1;
+		u64 fec_align_status                 : 1;
+		u64 rsfec_corr                       : 1;
+		u64 rsfec_uncorr                     : 1;
+		u64 hi_ser                           : 1;
+		u64 usx_an_lnk_st                    : 1;
+		u64 usx_an_cpt                       : 1;
+		u64 reserved_21_63                   : 43;
+	} s;
+	/* struct cgxx_spux_int_ena_w1s_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_INT_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_INT_ENA_W1S(u64 a)
+{
+	return 0x10238 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_int_w1s
+ *
+ * CGX SPU Interrupt Set Registers This register sets interrupt bits.
+ */
+union cgxx_spux_int_w1s {
+	u64 u;
+	struct cgxx_spux_int_w1s_s {
+		u64 rx_link_up                       : 1;
+		u64 rx_link_down                     : 1;
+		u64 err_blk                          : 1;
+		u64 bitlckls                         : 1;
+		u64 synlos                           : 1;
+		u64 algnlos                          : 1;
+		u64 dbg_sync                         : 1;
+		u64 bip_err                          : 1;
+		u64 fec_corr                         : 1;
+		u64 fec_uncorr                       : 1;
+		u64 an_page_rx                       : 1;
+		u64 an_link_good                     : 1;
+		u64 an_complete                      : 1;
+		u64 training_done                    : 1;
+		u64 training_failure                 : 1;
+		u64 fec_align_status                 : 1;
+		u64 rsfec_corr                       : 1;
+		u64 rsfec_uncorr                     : 1;
+		u64 hi_ser                           : 1;
+		u64 usx_an_lnk_st                    : 1;
+		u64 usx_an_cpt                       : 1;
+		u64 reserved_21_63                   : 43;
+	} s;
+	/* struct cgxx_spux_int_w1s_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_INT_W1S(u64 a)
+{
+	return 0x10228 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_ln#_br_bip_err_cnt
+ *
+ * CGX SPU 40,50,100GBASE-R BIP Error-Counter Registers This register
+ * implements the IEEE 802.3 BIP error-counter registers for PCS lanes
+ * 0-19 (3.200-3.203). It is valid only when the LPCS type is 40GBASE-R,
+ * 50GBASE-R, 100GBASE-R, (CGX()_CMR()_CONFIG[LMAC_TYPE]), and always
+ * returns 0x0 for all other LPCS types. The counters are indexed by the
+ * RX PCS lane number based on the alignment marker detected on each lane
+ * and captured in CGX()_SPU()_BR_LANE_MAP(). Each counter counts the BIP
+ * errors for its PCS lane, and is held at all ones in case of overflow.
+ * The counters are reset to all zeros when this register is read by
+ * software.  The reset operation takes precedence over the increment
+ * operation; if the register is read on the same clock cycle as an
+ * increment operation, the counter is reset to all zeros and the
+ * increment operation is lost. The counters are writable for test
+ * purposes, rather than read-only as specified in IEEE 802.3.
+ */
+union cgxx_spux_lnx_br_bip_err_cnt {
+	u64 u;
+	struct cgxx_spux_lnx_br_bip_err_cnt_s {
+		u64 bip_err_cnt                      : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_lnx_br_bip_err_cnt_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_LNX_BR_BIP_ERR_CNT(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_LNX_BR_BIP_ERR_CNT(u64 a, u64 b)
+{
+	return 0x10500 + 0x40000 * a + 8 * b; /* 0x40000/SPU, 8 bytes/lane */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_ln#_fec_corr_blks
+ *
+ * CGX SPU FEC Corrected-Blocks Counters 0-19 Registers This register is
+ * valid only when the LPCS type is BASE-R
+ * (CGX()_CMR()_CONFIG[LMAC_TYPE]) and applies to BASE-R FEC and Reed-
+ * Solomon FEC (RS-FEC). When BASE-R FEC is enabled, the FEC corrected-
+ * block counters are defined in IEEE 802.3 section 74.8.4.1. Each
+ * corrected-blocks counter increments by one for a corrected FEC block,
+ * i.e. an FEC block that has been received with invalid parity on the
+ * associated PCS lane and has been corrected by the FEC decoder. The
+ * counter is reset to all zeros when the register is read, and held at
+ * all ones in case of overflow.  The reset operation takes precedence
+ * over the increment operation; if the register is read on the same
+ * clock cycle as an increment operation, the counter is reset to all
+ * zeros and the increment operation is lost. The counters are writable
+ * for test purposes, rather than read-only as specified in IEEE 802.3.
+ */
+union cgxx_spux_lnx_fec_corr_blks {
+	u64 u;
+	struct cgxx_spux_lnx_fec_corr_blks_s {
+		u64 ln_corr_blks                     : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_spux_lnx_fec_corr_blks_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_LNX_FEC_CORR_BLKS(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_LNX_FEC_CORR_BLKS(u64 a, u64 b)
+{
+	return 0x10700 + 0x40000 * a + 8 * b; /* 0x40000/SPU, 8 bytes/lane */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_ln#_fec_uncorr_blks
+ *
+ * CGX SPU FEC Uncorrected-Blocks Counters 0-19 Registers This register
+ * is valid only when the LPCS type is BASE-R
+ * (CGX()_CMR()_CONFIG[LMAC_TYPE]) and applies to BASE-R FEC and Reed-
+ * Solomon FEC (RS-FEC). When BASE-R FEC is enabled, the FEC corrected-
+ * block counters are defined in IEEE 802.3 section 74.8.4.2. Each
+ * uncorrected-blocks counter increments by one for an uncorrected FEC
+ * block, i.e. an FEC block that has been received with invalid parity on
+ * the associated PCS lane and has not been corrected by the FEC decoder.
+ * The counter is reset to all zeros when the register is read, and held
+ * at all ones in case of overflow.  The reset operation takes precedence
+ * over the increment operation; if the register is read on the same
+ * clock cycle as an increment operation, the counter is reset to all
+ * zeros and the increment operation is lost. The counters are writable
+ * for test purposes, rather than read-only as specified in IEEE 802.3.
+ */
+union cgxx_spux_lnx_fec_uncorr_blks {
+	u64 u;
+	struct cgxx_spux_lnx_fec_uncorr_blks_s {
+		u64 ln_uncorr_blks                   : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_spux_lnx_fec_uncorr_blks_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_LNX_FEC_UNCORR_BLKS(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_LNX_FEC_UNCORR_BLKS(u64 a, u64 b)
+{
+	return 0x10800 + 0x40000 * a + 8 * b; /* 0x40000/SPU, 8 bytes/lane */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_lpcs_states
+ *
+ * CGX SPU BASE-X Transmit/Receive States Registers
+ */
+union cgxx_spux_lpcs_states {
+	u64 u;
+	struct cgxx_spux_lpcs_states_s {
+		u64 deskew_sm                        : 3;
+		u64 reserved_3                       : 1;
+		u64 deskew_am_found                  : 20;
+		u64 bx_rx_sm                         : 2;
+		u64 reserved_26_27                   : 2;
+		u64 br_rx_sm                         : 3;
+		u64 reserved_31_63                   : 33;
+	} s;
+	/* struct cgxx_spux_lpcs_states_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_LPCS_STATES(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_LPCS_STATES(u64 a)
+{
+	return 0x10208 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_misc_control
+ *
+ * CGX SPU Miscellaneous Control Registers "* RX logical PCS lane
+ * polarity vector \<3:0\> = [XOR_RXPLRT]\<3:0\> ^ {4{[RXPLRT]}}. * TX
+ * logical PCS lane polarity vector \<3:0\> = [XOR_TXPLRT]\<3:0\> ^
+ * {4{[TXPLRT]}}.  In short, keep [RXPLRT] and [TXPLRT] cleared, and use
+ * [XOR_RXPLRT] and [XOR_TXPLRT] fields to define the polarity per
+ * logical PCS lane. Only bit 0 of vector is used for 10GBASE-R, and only
+ * bits 1:0 of vector are used for RXAUI."
+ */
+union cgxx_spux_misc_control {
+	u64 u;
+	struct cgxx_spux_misc_control_s {
+		u64 txplrt                           : 1;
+		u64 rxplrt                           : 1;
+		u64 xor_txplrt                       : 4;
+		u64 xor_rxplrt                       : 4;
+		u64 intlv_rdisp                      : 1;
+		u64 skip_after_term                  : 1;
+		u64 rx_packet_dis                    : 1;
+		u64 rx_edet_signal_ok                : 1;
+		u64 reserved_14_63                   : 50;
+	} s;
+	/* struct cgxx_spux_misc_control_s cn; - no chip-specific variant */
+};
+
+static inline u64 CGXX_SPUX_MISC_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_MISC_CONTROL(u64 a)
+{
+	return 0x10218 + 0x40000 * a; /* 0x40000-byte stride per SPU index */
+}
+
+/**
+ * Register (RSL) cgx#_spu#_rsfec_corr
+ *
+ * CGX SPU Reed-Solomon FEC Corrected Codeword Counter Register This
+ * register implements the IEEE 802.3 RS-FEC corrected codewords counter
+ * described in 802.3 section 91.6.8 (for 100G and extended to 50G) and
+ * 802.3by-2016 section 108.6.7 (for 25G and extended to USXGMII).
+ */
+union cgxx_spux_rsfec_corr {
+	u64 u;
+	struct cgxx_spux_rsfec_corr_s {
+		u64 cw_cnt                           : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_spux_rsfec_corr_s cn; */
+};
+
+static inline u64 CGXX_SPUX_RSFEC_CORR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_RSFEC_CORR(u64 a)
+{
+	return 0x10088 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_rsfec_status
+ *
+ * CGX SPU Reed-Solomon FEC Status Registers This register implements the
+ * IEEE 802.3 RS-FEC status and lane mapping registers as described in
+ * 802.3 section 91.6 (for 100G and extended to 50G) and 802.3by-2016
+ * section 108.6 (for 25G and extended to USXGMII).
+ */
+union cgxx_spux_rsfec_status {
+	u64 u;
+	struct cgxx_spux_rsfec_status_s {
+		u64 fec_lane_mapping                 : 8;
+		u64 fec_align_status                 : 1;
+		u64 amps_lock                        : 4;
+		u64 hi_ser                           : 1;
+		u64 fec_byp_ind_abil                 : 1;
+		u64 fec_byp_cor_abil                 : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_rsfec_status_s cn; */
+};
+
+static inline u64 CGXX_SPUX_RSFEC_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_RSFEC_STATUS(u64 a)
+{
+	return 0x10080 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_rsfec_uncorr
+ *
+ * CGX SPU Reed-Solomon FEC Uncorrected Codeword Counter Register This
+ * register implements the IEEE 802.3 RS-FEC uncorrected codewords
+ * counter described in 802.3 section 91.6.9 (for 100G and extended to
+ * 50G) and 802.3by-2016 section 108.6.8 (for 25G and extended to
+ * USXGMII).
+ */
+union cgxx_spux_rsfec_uncorr {
+	u64 u;
+	struct cgxx_spux_rsfec_uncorr_s {
+		u64 cw_cnt                           : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_spux_rsfec_uncorr_s cn; */
+};
+
+static inline u64 CGXX_SPUX_RSFEC_UNCORR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_RSFEC_UNCORR(u64 a)
+{
+	return 0x10090 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_rx_eee_wake
+ *
+ * INTERNAL: CGX SPU  RX EEE Wake Error Counter  Registers  Reserved.
+ * Internal: A counter that is incremented each time that the LPI receive
+ * state diagram enters the RX_WTF state indicating that a wake time
+ * fault has been detected.
+ */
+union cgxx_spux_rx_eee_wake {
+	u64 u;
+	struct cgxx_spux_rx_eee_wake_s {
+		u64 wtf_error_counter                : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_rx_eee_wake_s cn; */
+};
+
+static inline u64 CGXX_SPUX_RX_EEE_WAKE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_RX_EEE_WAKE(u64 a)
+{
+	return 0x103e0 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_rx_lpi_timing
+ *
+ * INTERNAL: CGX SPU RX EEE LPI Timing Parameters Registers  Reserved.
+ * Internal: This register specifies receiver LPI timing parameters Tqr,
+ * Twr and Twtf.
+ */
+union cgxx_spux_rx_lpi_timing {
+	u64 u;
+	struct cgxx_spux_rx_lpi_timing_s {
+		u64 twtf                             : 20;
+		u64 twr                              : 20;
+		u64 tqr                              : 20;
+		u64 reserved_60_61                   : 2;
+		u64 rx_lpi_fw                        : 1;
+		u64 rx_lpi_en                        : 1;
+	} s;
+	/* struct cgxx_spux_rx_lpi_timing_s cn; */
+};
+
+static inline u64 CGXX_SPUX_RX_LPI_TIMING(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_RX_LPI_TIMING(u64 a)
+{
+	return 0x103c0 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_rx_lpi_timing2
+ *
+ * INTERNAL: CGX SPU RX EEE LPI Timing2 Parameters Registers  Reserved.
+ * Internal: This register specifies receiver LPI timing parameters
+ * hold_off_timer.
+ */
+union cgxx_spux_rx_lpi_timing2 {
+	u64 u;
+	struct cgxx_spux_rx_lpi_timing2_s {
+		u64 hold_off_timer                   : 20;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct cgxx_spux_rx_lpi_timing2_s cn; */
+};
+
+static inline u64 CGXX_SPUX_RX_LPI_TIMING2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_RX_LPI_TIMING2(u64 a)
+{
+	return 0x10420 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_rx_mrk_cnt
+ *
+ * CGX SPU Receiver Marker Interval Count Control Registers
+ */
+union cgxx_spux_rx_mrk_cnt {
+	u64 u;
+	struct cgxx_spux_rx_mrk_cnt_s {
+		u64 mrk_cnt                          : 20;
+		u64 reserved_20_43                   : 24;
+		u64 by_mrk_100g                      : 1;
+		u64 reserved_45_47                   : 3;
+		u64 ram_mrk_cnt                      : 8;
+		u64 reserved_56_63                   : 8;
+	} s;
+	/* struct cgxx_spux_rx_mrk_cnt_s cn; */
+};
+
+static inline u64 CGXX_SPUX_RX_MRK_CNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_RX_MRK_CNT(u64 a)
+{
+	return 0x103a0 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_spd_abil
+ *
+ * CGX SPU PCS Speed Ability Registers
+ */
+union cgxx_spux_spd_abil {
+	u64 u;
+	struct cgxx_spux_spd_abil_s {
+		u64 tengb                            : 1;
+		u64 tenpasst                         : 1;
+		u64 usxgmii                          : 1;
+		u64 twentyfivegb                     : 1;
+		u64 fortygb                          : 1;
+		u64 fiftygb                          : 1;
+		u64 hundredgb                        : 1;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct cgxx_spux_spd_abil_s cn; */
+};
+
+static inline u64 CGXX_SPUX_SPD_ABIL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_SPD_ABIL(u64 a)
+{
+	return 0x10010 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_status1
+ *
+ * CGX SPU Status 1 Registers
+ */
+union cgxx_spux_status1 {
+	u64 u;
+	struct cgxx_spux_status1_s {
+		u64 reserved_0                       : 1;
+		u64 lpable                           : 1;
+		u64 rcv_lnk                          : 1;
+		u64 reserved_3_6                     : 4;
+		u64 flt                              : 1;
+		u64 rx_lpi_indication                : 1;
+		u64 tx_lpi_indication                : 1;
+		u64 rx_lpi_received                  : 1;
+		u64 tx_lpi_received                  : 1;
+		u64 reserved_12_63                   : 52;
+	} s;
+	/* struct cgxx_spux_status1_s cn; */
+};
+
+static inline u64 CGXX_SPUX_STATUS1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_STATUS1(u64 a)
+{
+	return 0x10008 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_status2
+ *
+ * CGX SPU Status 2 Registers
+ */
+union cgxx_spux_status2 {
+	u64 u;
+	struct cgxx_spux_status2_s {
+		u64 tengb_r                          : 1;
+		u64 tengb_x                          : 1;
+		u64 tengb_w                          : 1;
+		u64 tengb_t                          : 1;
+		u64 usxgmii_r                        : 1;
+		u64 twentyfivegb_r                   : 1;
+		u64 fortygb_r                        : 1;
+		u64 fiftygb_r                        : 1;
+		u64 hundredgb_r                      : 1;
+		u64 reserved_9                       : 1;
+		u64 rcvflt                           : 1;
+		u64 xmtflt                           : 1;
+		u64 reserved_12_13                   : 2;
+		u64 dev                              : 2;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_status2_s cn; */
+};
+
+static inline u64 CGXX_SPUX_STATUS2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_STATUS2(u64 a)
+{
+	return 0x10020 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_tx_lpi_timing
+ *
+ * INTERNAL: CGX SPU TX EEE LPI Timing Parameters Registers  Reserved.
+ * Internal: Transmit LPI timing parameters Tsl, Tql and Tul
+ */
+union cgxx_spux_tx_lpi_timing {
+	u64 u;
+	struct cgxx_spux_tx_lpi_timing_s {
+		u64 tql                              : 19;
+		u64 reserved_19_31                   : 13;
+		u64 tul                              : 12;
+		u64 reserved_44_47                   : 4;
+		u64 tsl                              : 12;
+		u64 reserved_60                      : 1;
+		u64 tx_lpi_ignore_twl                : 1;
+		u64 tx_lpi_fw                        : 1;
+		u64 tx_lpi_en                        : 1;
+	} s;
+	/* struct cgxx_spux_tx_lpi_timing_s cn; */
+};
+
+static inline u64 CGXX_SPUX_TX_LPI_TIMING(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_TX_LPI_TIMING(u64 a)
+{
+	return 0x10400 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_tx_lpi_timing2
+ *
+ * INTERNAL: CGX SPU TX EEE LPI Timing2 Parameters Registers  Reserved.
+ * Internal: This register specifies transmit LPI timer parameters.
+ */
+union cgxx_spux_tx_lpi_timing2 {
+	u64 u;
+	struct cgxx_spux_tx_lpi_timing2_s {
+		u64 t1u                              : 8;
+		u64 reserved_8_11                    : 4;
+		u64 twl                              : 12;
+		u64 reserved_24_31                   : 8;
+		u64 twl2                             : 12;
+		u64 reserved_44_47                   : 4;
+		u64 tbyp                             : 12;
+		u64 reserved_60_63                   : 4;
+	} s;
+	/* struct cgxx_spux_tx_lpi_timing2_s cn; */
+};
+
+static inline u64 CGXX_SPUX_TX_LPI_TIMING2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_TX_LPI_TIMING2(u64 a)
+{
+	return 0x10440 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_tx_mrk_cnt
+ *
+ * CGX SPU Transmitter Marker Interval Count Control Registers
+ */
+union cgxx_spux_tx_mrk_cnt {
+	u64 u;
+	struct cgxx_spux_tx_mrk_cnt_s {
+		u64 mrk_cnt                          : 20;
+		u64 reserved_20_43                   : 24;
+		u64 by_mrk_100g                      : 1;
+		u64 reserved_45_47                   : 3;
+		u64 ram_mrk_cnt                      : 8;
+		u64 reserved_56_63                   : 8;
+	} s;
+	/* struct cgxx_spux_tx_mrk_cnt_s cn; */
+};
+
+static inline u64 CGXX_SPUX_TX_MRK_CNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_TX_MRK_CNT(u64 a)
+{
+	return 0x10380 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_usx_an_adv
+ *
+ * CGX SPU USXGMII Autonegotiation Advertisement Registers Software
+ * programs this register with the contents of the AN-link code word base
+ * page to be transmitted during autonegotiation. Any write operations to
+ * this register prior to completion of autonegotiation should be
+ * followed by a renegotiation in order for the new values to take
+ * effect. Once autonegotiation has completed, software can examine this
+ * register along with CGX()_SPU()_USX_AN_LP_ABIL to determine the highest
+ * common denominator technology. The format for this register is from
+ * USXGMII Multiport specification section 1.1.2 Table 2.
+ */
+union cgxx_spux_usx_an_adv {
+	u64 u;
+	struct cgxx_spux_usx_an_adv_s {
+		u64 set                              : 1;
+		u64 reserved_1_6                     : 6;
+		u64 eee_clk_stop_abil                : 1;
+		u64 eee_abil                         : 1;
+		u64 spd                              : 3;
+		u64 dplx                             : 1;
+		u64 reserved_13_14                   : 2;
+		u64 lnk_st                           : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_usx_an_adv_s cn; */
+};
+
+static inline u64 CGXX_SPUX_USX_AN_ADV(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_USX_AN_ADV(u64 a)
+{
+	return 0x101d0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_usx_an_control
+ *
+ * CGX SPU USXGMII Autonegotiation Control Register
+ */
+union cgxx_spux_usx_an_control {
+	u64 u;
+	struct cgxx_spux_usx_an_control_s {
+		u64 reserved_0_8                     : 9;
+		u64 rst_an                           : 1;
+		u64 reserved_10_11                   : 2;
+		u64 an_en                            : 1;
+		u64 reserved_13_14                   : 2;
+		u64 an_reset                         : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_usx_an_control_s cn; */
+};
+
+static inline u64 CGXX_SPUX_USX_AN_CONTROL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_USX_AN_CONTROL(u64 a)
+{
+	return 0x101c0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_usx_an_expansion
+ *
+ * CGX SPU USXGMII Autonegotiation Expansion Register This register is
+ * only used to signal page reception.
+ */
+union cgxx_spux_usx_an_expansion {
+	u64 u;
+	struct cgxx_spux_usx_an_expansion_s {
+		u64 reserved_0                       : 1;
+		u64 an_page_received                 : 1;
+		u64 next_page_able                   : 1;
+		u64 reserved_3_63                    : 61;
+	} s;
+	/* struct cgxx_spux_usx_an_expansion_s cn; */
+};
+
+static inline u64 CGXX_SPUX_USX_AN_EXPANSION(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_USX_AN_EXPANSION(u64 a)
+{
+	return 0x101e0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_usx_an_flow_ctrl
+ *
+ * CGX SPU USXGMII Flow Control Registers This register is used by
+ * software to affect USXGMII AN hardware behavior.
+ */
+union cgxx_spux_usx_an_flow_ctrl {
+	u64 u;
+	struct cgxx_spux_usx_an_flow_ctrl_s {
+		u64 start_idle_detect                : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct cgxx_spux_usx_an_flow_ctrl_s cn; */
+};
+
+static inline u64 CGXX_SPUX_USX_AN_FLOW_CTRL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_USX_AN_FLOW_CTRL(u64 a)
+{
+	return 0x101e8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_usx_an_link_timer
+ *
+ * CGX SPU USXGMII Link Timer Registers This is the link timer register.
+ */
+union cgxx_spux_usx_an_link_timer {
+	u64 u;
+	struct cgxx_spux_usx_an_link_timer_s {
+		u64 count                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_usx_an_link_timer_s cn; */
+};
+
+static inline u64 CGXX_SPUX_USX_AN_LINK_TIMER(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_USX_AN_LINK_TIMER(u64 a)
+{
+	return 0x101f0 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_usx_an_lp_abil
+ *
+ * CGX SPU USXGMII Autonegotiation Link-Partner Advertisement Registers
+ * This register captures the contents of the latest AN link code word
+ * base page received from the link partner during autonegotiation. This
+ * is register 5 per IEEE 802.3, Clause 37.
+ * CGX()_SPU()_USX_AN_EXPANSION[AN_PAGE_RECEIVED] is set when this
+ * register is updated by hardware.
+ */
+union cgxx_spux_usx_an_lp_abil {
+	u64 u;
+	struct cgxx_spux_usx_an_lp_abil_s {
+		u64 set                              : 1;
+		u64 reserved_1_6                     : 6;
+		u64 eee_clk_stop_abil                : 1;
+		u64 eee_abil                         : 1;
+		u64 spd                              : 3;
+		u64 dplx                             : 1;
+		u64 reserved_13_14                   : 2;
+		u64 lnk_st                           : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct cgxx_spux_usx_an_lp_abil_s cn; */
+};
+
+static inline u64 CGXX_SPUX_USX_AN_LP_ABIL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_USX_AN_LP_ABIL(u64 a)
+{
+	return 0x101d8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu#_usx_an_status
+ *
+ * CGX SPU USXGMII Autonegotiation Status Register
+ */
+union cgxx_spux_usx_an_status {
+	u64 u;
+	struct cgxx_spux_usx_an_status_s {
+		u64 extnd                            : 1;
+		u64 reserved_1                       : 1;
+		u64 lnk_st                           : 1;
+		u64 an_abil                          : 1;
+		u64 rmt_flt                          : 1;
+		u64 an_cpt                           : 1;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct cgxx_spux_usx_an_status_s cn; */
+};
+
+static inline u64 CGXX_SPUX_USX_AN_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPUX_USX_AN_STATUS(u64 a)
+{
+	return 0x101c8 + 0x40000 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu_dbg_control
+ *
+ * CGX SPU Debug Control Registers
+ */
+union cgxx_spu_dbg_control {
+	u64 u;
+	struct cgxx_spu_dbg_control_s {
+		u64 marker_rxp                       : 15;
+		u64 reserved_15                      : 1;
+		u64 scramble_dis                     : 1;
+		u64 reserved_17_18                   : 2;
+		u64 br_pmd_train_soft_en             : 1;
+		u64 reserved_20_27                   : 8;
+		u64 timestamp_norm_dis               : 1;
+		u64 an_nonce_match_dis               : 1;
+		u64 br_ber_mon_dis                   : 1;
+		u64 rf_cw_mon_erly_restart_dis       : 1;
+		u64 us_clk_period                    : 12;
+		u64 ms_clk_period                    : 12;
+		u64 reserved_56_63                   : 8;
+	} s;
+	struct cgxx_spu_dbg_control_cn96xxp1 {
+		u64 marker_rxp                       : 15;
+		u64 reserved_15                      : 1;
+		u64 scramble_dis                     : 1;
+		u64 reserved_17_18                   : 2;
+		u64 br_pmd_train_soft_en             : 1;
+		u64 reserved_20_27                   : 8;
+		u64 timestamp_norm_dis               : 1;
+		u64 an_nonce_match_dis               : 1;
+		u64 br_ber_mon_dis                   : 1;
+		u64 reserved_31                      : 1;
+		u64 us_clk_period                    : 12;
+		u64 ms_clk_period                    : 12;
+		u64 reserved_56_63                   : 8;
+	} cn96xxp1;
+	/* struct cgxx_spu_dbg_control_s cn96xxp3; */
+	/* struct cgxx_spu_dbg_control_cn96xxp1 cnf95xxp1; */
+	/* struct cgxx_spu_dbg_control_s cnf95xxp2; */
+};
+
+static inline u64 CGXX_SPU_DBG_CONTROL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPU_DBG_CONTROL(void)
+{
+	return 0x10300;
+}
+
+/**
+ * Register (RSL) cgx#_spu_sds#_skew_status
+ *
+ * CGX SPU SerDes Lane Skew Status Registers This register provides
+ * SerDes lane skew status. One register per physical SerDes lane.
+ */
+union cgxx_spu_sdsx_skew_status {
+	u64 u;
+	struct cgxx_spu_sdsx_skew_status_s {
+		u64 skew_status                      : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct cgxx_spu_sdsx_skew_status_s cn; */
+};
+
+static inline u64 CGXX_SPU_SDSX_SKEW_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPU_SDSX_SKEW_STATUS(u64 a)
+{
+	return 0x10340 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu_sds#_states
+ *
+ * CGX SPU SerDes States Registers This register provides SerDes lane
+ * states. One register per physical SerDes lane.
+ */
+union cgxx_spu_sdsx_states {
+	u64 u;
+	struct cgxx_spu_sdsx_states_s {
+		u64 bx_sync_sm                       : 4;
+		u64 br_sh_cnt                        : 11;
+		u64 br_block_lock                    : 1;
+		u64 br_sh_invld_cnt                  : 7;
+		u64 reserved_23                      : 1;
+		u64 fec_sync_cnt                     : 4;
+		u64 fec_block_sync                   : 1;
+		u64 reserved_29                      : 1;
+		u64 an_rx_sm                         : 2;
+		u64 an_arb_sm                        : 3;
+		u64 reserved_35                      : 1;
+		u64 train_lock_bad_markers           : 3;
+		u64 train_lock_found_1st_marker      : 1;
+		u64 train_frame_lock                 : 1;
+		u64 train_code_viol                  : 1;
+		u64 train_sm                         : 3;
+		u64 reserved_45_47                   : 3;
+		u64 am_lock_sm                       : 2;
+		u64 am_lock_invld_cnt                : 2;
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct cgxx_spu_sdsx_states_s cn; */
+};
+
+static inline u64 CGXX_SPU_SDSX_STATES(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPU_SDSX_STATES(u64 a)
+{
+	return 0x10360 + 8 * a;
+}
+
+/**
+ * Register (RSL) cgx#_spu_usxgmii_control
+ *
+ * CGX SPU Common USXGMII Control Register This register is the common
+ * control register that enables USXGMII Mode. The fields in this
+ * register are preserved across any LMAC soft-resets. For an LMAC in
+ * soft- reset state in USXGMII mode, the CGX will transmit Remote Fault
+ * BASE-R blocks.
+ */
+union cgxx_spu_usxgmii_control {
+	u64 u;
+	struct cgxx_spu_usxgmii_control_s {
+		u64 enable                           : 1;
+		u64 usxgmii_type                     : 3;
+		u64 sds_id                           : 2;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct cgxx_spu_usxgmii_control_s cn; */
+};
+
+static inline u64 CGXX_SPU_USXGMII_CONTROL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 CGXX_SPU_USXGMII_CONTROL(void)
+{
+	return 0x10920;
+}
+
+#endif /* __CSRS_CGX_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/csrs/csrs-lmt.h b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-lmt.h
new file mode 100644
index 0000000..625470b
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-lmt.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_LMT_H__
+#define __CSRS_LMT_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * LMT.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Register (RVU_PFVF_BAR2) lmt_lf_lmtcancel
+ *
+ * RVU VF LMT Cancel Register
+ */
+union lmt_lf_lmtcancel {
+	u64 u;
+	struct lmt_lf_lmtcancel_s {
+		u64 data                             : 64;
+	} s;
+	/* struct lmt_lf_lmtcancel_s cn; */
+};
+
+static inline u64 LMT_LF_LMTCANCEL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 LMT_LF_LMTCANCEL(void)
+{
+	return 0x400;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) lmt_lf_lmtline#
+ *
+ * RVU VF LMT Line Registers
+ */
+union lmt_lf_lmtlinex {
+	u64 u;
+	struct lmt_lf_lmtlinex_s {
+		u64 data                             : 64;
+	} s;
+	/* struct lmt_lf_lmtlinex_s cn; */
+};
+
+static inline u64 LMT_LF_LMTLINEX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 LMT_LF_LMTLINEX(u64 a)
+{
+	return 0 + 8 * a;
+}
+
+#endif /* __CSRS_LMT_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/csrs/csrs-mio_emm.h b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-mio_emm.h
new file mode 100644
index 0000000..a5a4740
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-mio_emm.h
@@ -0,0 +1,1193 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_MIO_EMM_H__
+#define __CSRS_MIO_EMM_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * MIO_EMM.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Enumeration mio_emm_bar_e
+ *
+ * eMMC Base Address Register Enumeration Enumerates the base address
+ * registers.
+ */
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR0_CN8 (0x87e009000000ll)
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR0_CN8_SIZE 0x800000ull
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR0_CN9 (0x87e009000000ll)
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR0_CN9_SIZE 0x10000ull
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR4 (0x87e009f00000ll)
+#define MIO_EMM_BAR_E_MIO_EMM_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration mio_emm_int_vec_e
+ *
+ * eMMC MSI-X Vector Enumeration Enumerates the MSI-X interrupt vectors.
+ */
+#define MIO_EMM_INT_VEC_E_DMA_INT_DONE (8)
+#define MIO_EMM_INT_VEC_E_DMA_INT_FIFO (7)
+#define MIO_EMM_INT_VEC_E_EMM_BUF_DONE (0)
+#define MIO_EMM_INT_VEC_E_EMM_CMD_DONE (1)
+#define MIO_EMM_INT_VEC_E_EMM_CMD_ERR (3)
+#define MIO_EMM_INT_VEC_E_EMM_DMA_DONE (2)
+#define MIO_EMM_INT_VEC_E_EMM_DMA_ERR (4)
+#define MIO_EMM_INT_VEC_E_EMM_SWITCH_DONE (5)
+#define MIO_EMM_INT_VEC_E_EMM_SWITCH_ERR (6)
+#define MIO_EMM_INT_VEC_E_NCB_FLT (9)
+#define MIO_EMM_INT_VEC_E_NCB_RAS (0xa)
+
+/**
+ * Register (RSL) mio_emm_access_wdog
+ *
+ * eMMC Access Watchdog Register
+ */
+union mio_emm_access_wdog {
+	u64 u;
+	struct mio_emm_access_wdog_s {
+		u64 clk_cnt                          : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct mio_emm_access_wdog_s cn; */
+};
+
+static inline u64 MIO_EMM_ACCESS_WDOG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_ACCESS_WDOG(void)
+{
+	return 0x20f0;
+}
+
+/**
+ * Register (RSL) mio_emm_buf_dat
+ *
+ * eMMC Data Buffer Access Register
+ */
+union mio_emm_buf_dat {
+	u64 u;
+	struct mio_emm_buf_dat_s {
+		u64 dat                              : 64;
+	} s;
+	/* struct mio_emm_buf_dat_s cn; */
+};
+
+static inline u64 MIO_EMM_BUF_DAT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_BUF_DAT(void)
+{
+	return 0x20e8;
+}
+
+/**
+ * Register (RSL) mio_emm_buf_idx
+ *
+ * eMMC Data Buffer Address Register
+ */
+union mio_emm_buf_idx {
+	u64 u;
+	struct mio_emm_buf_idx_s {
+		u64 offset                           : 6;
+		u64 buf_num                          : 1;
+		u64 reserved_7_15                    : 9;
+		u64 inc                              : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct mio_emm_buf_idx_s cn; */
+};
+
+static inline u64 MIO_EMM_BUF_IDX(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_BUF_IDX(void)
+{
+	return 0x20e0;
+}
+
+/**
+ * Register (RSL) mio_emm_calb
+ *
+ * eMMC Calibration Register This register initiates delay line
+ * characterization.
+ */
+union mio_emm_calb {
+	u64 u;
+	struct mio_emm_calb_s {
+		u64 start                            : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct mio_emm_calb_s cn; */
+};
+
+static inline u64 MIO_EMM_CALB(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_CALB(void)
+{
+	return 0x20c0;
+}
+
+/**
+ * Register (RSL) mio_emm_cfg
+ *
+ * eMMC Configuration Register
+ */
+union mio_emm_cfg {
+	u64 u;
+	struct mio_emm_cfg_s {
+		u64 bus_ena                          : 4;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct mio_emm_cfg_s cn; */
+};
+
+static inline u64 MIO_EMM_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_CFG(void)
+{
+	return 0x2000;
+}
+
+/**
+ * Register (RSL) mio_emm_cmd
+ *
+ * eMMC Command Register
+ */
+union mio_emm_cmd {
+	u64 u;
+	struct mio_emm_cmd_s {
+		u64 arg                              : 32;
+		u64 cmd_idx                          : 6;
+		u64 rtype_xor                        : 3;
+		u64 ctype_xor                        : 2;
+		u64 reserved_43_48                   : 6;
+		u64 offset                           : 6;
+		u64 dbuf                             : 1;
+		u64 reserved_56_58                   : 3;
+		u64 cmd_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 skip_busy                        : 1;
+		u64 reserved_63                      : 1;
+	} s;
+	/* struct mio_emm_cmd_s cn; */
+};
+
+static inline u64 MIO_EMM_CMD(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_CMD(void)
+{
+	return 0x2058;
+}
+
+/**
+ * Register (RSL) mio_emm_comp
+ *
+ * eMMC Compensation Register
+ */
+union mio_emm_comp {
+	u64 u;
+	struct mio_emm_comp_s {
+		u64 nctl                             : 3;
+		u64 reserved_3_7                     : 5;
+		u64 pctl                             : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct mio_emm_comp_s cn; */
+};
+
+static inline u64 MIO_EMM_COMP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_COMP(void)
+{
+	return 0x2040;
+}
+
+/**
+ * Register (RSL) mio_emm_debug
+ *
+ * eMMC Debug Register
+ */
+union mio_emm_debug {
+	u64 u;
+	struct mio_emm_debug_s {
+		u64 clk_on                           : 1;
+		u64 reserved_1_7                     : 7;
+		u64 cmd_sm                           : 4;
+		u64 data_sm                          : 4;
+		u64 dma_sm                           : 4;
+		u64 emmc_clk_disable                 : 1;
+		u64 rdsync_rst                       : 1;
+		u64 reserved_22_63                   : 42;
+	} s;
+	struct mio_emm_debug_cn96xxp1 {
+		u64 clk_on                           : 1;
+		u64 reserved_1_7                     : 7;
+		u64 cmd_sm                           : 4;
+		u64 data_sm                          : 4;
+		u64 dma_sm                           : 4;
+		u64 reserved_20_63                   : 44;
+	} cn96xxp1;
+	/* struct mio_emm_debug_s cn96xxp3; */
+	/* struct mio_emm_debug_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 MIO_EMM_DEBUG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DEBUG(void)
+{
+	return 0x20f8;
+}
+
+/**
+ * Register (RSL) mio_emm_dma
+ *
+ * eMMC External DMA Configuration Register
+ */
+union mio_emm_dma {
+	u64 u;
+	struct mio_emm_dma_s {
+		u64 card_addr                        : 32;
+		u64 block_cnt                        : 16;
+		u64 multi                            : 1;
+		u64 rw                               : 1;
+		u64 rel_wr                           : 1;
+		u64 thres                            : 6;
+		u64 dat_null                         : 1;
+		u64 sector                           : 1;
+		u64 dma_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 skip_busy                        : 1;
+		u64 extra_args                       : 1;
+	} s;
+	struct mio_emm_dma_cn8 {
+		u64 card_addr                        : 32;
+		u64 block_cnt                        : 16;
+		u64 multi                            : 1;
+		u64 rw                               : 1;
+		u64 rel_wr                           : 1;
+		u64 thres                            : 6;
+		u64 dat_null                         : 1;
+		u64 sector                           : 1;
+		u64 dma_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 skip_busy                        : 1;
+		u64 reserved_63                      : 1;
+	} cn8;
+	struct mio_emm_dma_cn9 {
+		u64 card_addr                        : 32;
+		u64 block_cnt                        : 16;
+		u64 multi                            : 1;
+		u64 rw                               : 1;
+		u64 reserved_50                      : 1;
+		u64 thres                            : 6;
+		u64 dat_null                         : 1;
+		u64 sector                           : 1;
+		u64 dma_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 skip_busy                        : 1;
+		u64 extra_args                       : 1;
+	} cn9;
+};
+
+static inline u64 MIO_EMM_DMA(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA(void)
+{
+	return 0x2050;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_adr
+ *
+ * eMMC DMA Address Register This register sets the address for eMMC/SD
+ * flash transfers to/from memory. Sixty-four-bit operations must be used
+ * to access this register. This register is updated by the DMA hardware
+ * and can be reloaded by the values placed in the MIO_EMM_DMA_FIFO_ADR.
+ */
+union mio_emm_dma_adr {
+	u64 u;
+	struct mio_emm_dma_adr_s {
+		u64 adr                              : 53;
+		u64 reserved_53_63                   : 11;
+	} s;
+	struct mio_emm_dma_adr_cn8 {
+		u64 adr                              : 49;
+		u64 reserved_49_63                   : 15;
+	} cn8;
+	/* struct mio_emm_dma_adr_s cn9; */
+};
+
+static inline u64 MIO_EMM_DMA_ADR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_ADR(void)
+{
+	return 0x188;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_arg
+ *
+ * eMMC External DMA Extra Arguments Register
+ */
+union mio_emm_dma_arg {
+	u64 u;
+	struct mio_emm_dma_arg_s {
+		u64 cmd23_args                       : 8;
+		u64 force_pgm                        : 1;
+		u64 context_id                       : 4;
+		u64 tag_req                          : 1;
+		u64 pack_cmd                         : 1;
+		u64 rel_wr                           : 1;
+		u64 alt_cmd                          : 6;
+		u64 skip_blk_cmd                     : 1;
+		u64 reserved_23_31                   : 9;
+		u64 alt_cmd_arg                      : 32;
+	} s;
+	/* struct mio_emm_dma_arg_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_ARG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_ARG(void)
+{
+	return 0x2090;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_cfg
+ *
+ * eMMC DMA Configuration Register This register controls the internal
+ * DMA engine used with the eMMC/SD flash controller. Sixty- four-bit
+ * operations must be used to access this register. This register is
+ * updated by the hardware DMA engine and can also be reloaded by writes
+ * to the MIO_EMM_DMA_FIFO_CMD register.
+ */
+union mio_emm_dma_cfg {
+	u64 u;
+	struct mio_emm_dma_cfg_s {
+		u64 reserved_0_35                    : 36;
+		u64 size                             : 20;
+		u64 endian                           : 1;
+		u64 swap8                            : 1;
+		u64 swap16                           : 1;
+		u64 swap32                           : 1;
+		u64 reserved_60                      : 1;
+		u64 clr                              : 1;
+		u64 rw                               : 1;
+		u64 en                               : 1;
+	} s;
+	/* struct mio_emm_dma_cfg_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_DMA_CFG CSR. */
+static inline u64 MIO_EMM_DMA_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_CFG(void)
+{
+	return 0x180;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_fifo_adr
+ *
+ * eMMC Internal DMA FIFO Address Register This register specifies the
+ * internal address that is loaded into the eMMC internal DMA FIFO. The
+ * FIFO is used to queue up operations for the
+ * MIO_EMM_DMA_CFG/MIO_EMM_DMA_ADR when the DMA completes successfully.
+ */
+union mio_emm_dma_fifo_adr {
+	u64 u;
+	struct mio_emm_dma_fifo_adr_s {
+		u64 reserved_0_2                     : 3;
+		u64 adr                              : 50;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* CN8XXX layout: FIFO address field is narrower (46 bits). */
+	struct mio_emm_dma_fifo_adr_cn8 {
+		u64 reserved_0_2                     : 3;
+		u64 adr                              : 46;
+		u64 reserved_49_63                   : 15;
+	} cn8;
+	/* struct mio_emm_dma_fifo_adr_s cn9; */
+};
+
+/* Byte offset of the MIO_EMM_DMA_FIFO_ADR CSR. */
+static inline u64 MIO_EMM_DMA_FIFO_ADR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_FIFO_ADR(void)
+{
+	return 0x170;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_fifo_cfg
+ *
+ * eMMC Internal DMA FIFO Configuration Register This register controls
+ * DMA FIFO operations.
+ */
+union mio_emm_dma_fifo_cfg {
+	u64 u;
+	struct mio_emm_dma_fifo_cfg_s {
+		u64 count                            : 5;
+		u64 reserved_5_7                     : 3;
+		u64 int_lvl                          : 5;
+		u64 reserved_13_15                   : 3;
+		u64 clr                              : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct mio_emm_dma_fifo_cfg_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_DMA_FIFO_CFG CSR. */
+static inline u64 MIO_EMM_DMA_FIFO_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_FIFO_CFG(void)
+{
+	return 0x160;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_fifo_cmd
+ *
+ * eMMC Internal DMA FIFO Command Register This register specifies a
+ * command that is loaded into the eMMC internal DMA FIFO.  The FIFO is
+ * used to queue up operations for the MIO_EMM_DMA_CFG/MIO_EMM_DMA_ADR
+ * when the DMA completes successfully. Writes to this register store
+ * both the MIO_EMM_DMA_FIFO_CMD and the MIO_EMM_DMA_FIFO_ADR contents
+ * into the FIFO and increment the MIO_EMM_DMA_FIFO_CFG[COUNT] field.
+ * Note: This register has a similar format to MIO_EMM_DMA_CFG with the
+ * exception that the EN and CLR fields are absent. These are supported
+ * in MIO_EMM_DMA_FIFO_CFG.
+ */
+union mio_emm_dma_fifo_cmd {
+	u64 u;
+	struct mio_emm_dma_fifo_cmd_s {
+		u64 reserved_0_35                    : 36;
+		u64 size                             : 20;
+		u64 endian                           : 1;
+		u64 swap8                            : 1;
+		u64 swap16                           : 1;
+		u64 swap32                           : 1;
+		u64 intdis                           : 1;
+		u64 reserved_61                      : 1;
+		u64 rw                               : 1;
+		u64 reserved_63                      : 1;
+	} s;
+	/* struct mio_emm_dma_fifo_cmd_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_DMA_FIFO_CMD CSR. */
+static inline u64 MIO_EMM_DMA_FIFO_CMD(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_FIFO_CMD(void)
+{
+	return 0x178;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_int
+ *
+ * eMMC DMA Interrupt Register Sixty-four-bit operations must be used to
+ * access this register.
+ */
+/* Status register; the W1S/ENA_W1C/ENA_W1S companions below share this
+ * two-bit (DONE, FIFO) layout.
+ */
+union mio_emm_dma_int {
+	u64 u;
+	struct mio_emm_dma_int_s {
+		u64 done                             : 1;
+		u64 fifo                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct mio_emm_dma_int_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_INT(void)
+{
+	return 0x190;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_int_ena_w1c
+ *
+ * eMMC DMA Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union mio_emm_dma_int_ena_w1c {
+	u64 u;
+	struct mio_emm_dma_int_ena_w1c_s {
+		u64 done                             : 1;
+		u64 fifo                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct mio_emm_dma_int_ena_w1c_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_INT_ENA_W1C(void)
+{
+	return 0x1a8;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_int_ena_w1s
+ *
+ * eMMC DMA Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union mio_emm_dma_int_ena_w1s {
+	u64 u;
+	struct mio_emm_dma_int_ena_w1s_s {
+		u64 done                             : 1;
+		u64 fifo                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct mio_emm_dma_int_ena_w1s_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_INT_ENA_W1S(void)
+{
+	return 0x1a0;
+}
+
+/**
+ * Register (RSL) mio_emm_dma_int_w1s
+ *
+ * eMMC DMA Interrupt Set Register This register sets interrupt bits.
+ */
+union mio_emm_dma_int_w1s {
+	u64 u;
+	struct mio_emm_dma_int_w1s_s {
+		u64 done                             : 1;
+		u64 fifo                             : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct mio_emm_dma_int_w1s_s cn; */
+};
+
+static inline u64 MIO_EMM_DMA_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_DMA_INT_W1S(void)
+{
+	return 0x198;
+}
+
+/**
+ * Register (RSL) mio_emm_int
+ *
+ * eMMC Interrupt Register
+ */
+/* The CN8XXX variants of this family lack the NCB_FLT/NCB_RAS bits
+ * (bits 7-8) present in the generic/CN9 layout.
+ */
+union mio_emm_int {
+	u64 u;
+	struct mio_emm_int_s {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 ncb_flt                          : 1;
+		u64 ncb_ras                          : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	struct mio_emm_int_cn8 {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 reserved_7_63                    : 57;
+	} cn8;
+	/* struct mio_emm_int_s cn9; */
+};
+
+static inline u64 MIO_EMM_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_INT(void)
+{
+	return 0x2078;
+}
+
+/**
+ * Register (RSL) mio_emm_int_ena_w1c
+ *
+ * eMMC Interrupt Enable Clear Register This register clears interrupt
+ * enable bits.
+ */
+union mio_emm_int_ena_w1c {
+	u64 u;
+	struct mio_emm_int_ena_w1c_s {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 ncb_flt                          : 1;
+		u64 ncb_ras                          : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	struct mio_emm_int_ena_w1c_cn8 {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 reserved_7_63                    : 57;
+	} cn8;
+	/* struct mio_emm_int_ena_w1c_s cn9; */
+};
+
+static inline u64 MIO_EMM_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_INT_ENA_W1C(void)
+{
+	return 0x20b8;
+}
+
+/**
+ * Register (RSL) mio_emm_int_ena_w1s
+ *
+ * eMMC Interrupt Enable Set Register This register sets interrupt enable
+ * bits.
+ */
+union mio_emm_int_ena_w1s {
+	u64 u;
+	struct mio_emm_int_ena_w1s_s {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 ncb_flt                          : 1;
+		u64 ncb_ras                          : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	struct mio_emm_int_ena_w1s_cn8 {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 reserved_7_63                    : 57;
+	} cn8;
+	/* struct mio_emm_int_ena_w1s_s cn9; */
+};
+
+static inline u64 MIO_EMM_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_INT_ENA_W1S(void)
+{
+	return 0x20b0;
+}
+
+/**
+ * Register (RSL) mio_emm_int_w1s
+ *
+ * eMMC Interrupt Set Register This register sets interrupt bits.
+ */
+union mio_emm_int_w1s {
+	u64 u;
+	struct mio_emm_int_w1s_s {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 ncb_flt                          : 1;
+		u64 ncb_ras                          : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	struct mio_emm_int_w1s_cn8 {
+		u64 buf_done                         : 1;
+		u64 cmd_done                         : 1;
+		u64 dma_done                         : 1;
+		u64 cmd_err                          : 1;
+		u64 dma_err                          : 1;
+		u64 switch_done                      : 1;
+		u64 switch_err                       : 1;
+		u64 reserved_7_63                    : 57;
+	} cn8;
+	/* struct mio_emm_int_w1s_s cn9; */
+};
+
+static inline u64 MIO_EMM_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_INT_W1S(void)
+{
+	return 0x2080;
+}
+
+/**
+ * Register (RSL) mio_emm_io_ctl
+ *
+ * eMMC I/O Control Register
+ */
+union mio_emm_io_ctl {
+	u64 u;
+	struct mio_emm_io_ctl_s {
+		u64 slew                             : 1;
+		u64 reserved_1                       : 1;
+		u64 drive                            : 2;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct mio_emm_io_ctl_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_IO_CTL CSR. */
+static inline u64 MIO_EMM_IO_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_IO_CTL(void)
+{
+	return 0x2040;
+}
+
+/**
+ * Register (RSL) mio_emm_mode#
+ *
+ * eMMC Operating Mode Register
+ */
+/* CN8 and CN96XX pass 1 lack CLK_SWAP; CN8 additionally lacks the
+ * HS200/HS400 timing bits present in the generic layout.
+ */
+union mio_emm_modex {
+	u64 u;
+	struct mio_emm_modex_s {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 clk_swap                         : 1;
+		u64 reserved_37_39                   : 3;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 hs200_timing                     : 1;
+		u64 hs400_timing                     : 1;
+		u64 reserved_51_63                   : 13;
+	} s;
+	struct mio_emm_modex_cn8 {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 reserved_36_39                   : 4;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 reserved_49_63                   : 15;
+	} cn8;
+	struct mio_emm_modex_cn96xxp1 {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 reserved_36_39                   : 4;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 hs200_timing                     : 1;
+		u64 hs400_timing                     : 1;
+		u64 reserved_51_63                   : 13;
+	} cn96xxp1;
+	/* struct mio_emm_modex_s cn96xxp3; */
+	/* struct mio_emm_modex_s cnf95xx; */
+};
+
+/* Indexed accessor: one MODE register per bus, stride 8 bytes. */
+static inline u64 MIO_EMM_MODEX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_MODEX(u64 a)
+{
+	return 0x2008 + 8 * a;
+}
+
+/**
+ * Register (RSL) mio_emm_msix_pba#
+ *
+ * eMMC MSI-X Pending Bit Array Registers This register is the MSI-X PBA
+ * table; the bit number is indexed by the MIO_EMM_INT_VEC_E enumeration.
+ */
+union mio_emm_msix_pbax {
+	u64 u;
+	struct mio_emm_msix_pbax_s {
+		u64 pend                             : 64;
+	} s;
+	/* struct mio_emm_msix_pbax_s cn; */
+};
+
+/* PBA entries start at 0xf0000, stride 8 bytes. */
+static inline u64 MIO_EMM_MSIX_PBAX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_MSIX_PBAX(u64 a)
+{
+	return 0xf0000 + 8 * a;
+}
+
+/**
+ * Register (RSL) mio_emm_msix_vec#_addr
+ *
+ * eMMC MSI-X Vector-Table Address Register This register is the MSI-X
+ * vector table, indexed by the MIO_EMM_INT_VEC_E enumeration.
+ */
+union mio_emm_msix_vecx_addr {
+	u64 u;
+	struct mio_emm_msix_vecx_addr_s {
+		u64 secvec                           : 1;
+		u64 reserved_1                       : 1;
+		u64 addr                             : 51;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* CN8XXX layout: vector address field is only 47 bits wide. */
+	struct mio_emm_msix_vecx_addr_cn8 {
+		u64 secvec                           : 1;
+		u64 reserved_1                       : 1;
+		u64 addr                             : 47;
+		u64 reserved_49_63                   : 15;
+	} cn8;
+	/* struct mio_emm_msix_vecx_addr_s cn9; */
+};
+
+/* Vector table entries are 0x10 bytes; ADDR word at offset 0. */
+static inline u64 MIO_EMM_MSIX_VECX_ADDR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_MSIX_VECX_ADDR(u64 a)
+{
+	return 0 + 0x10 * a;
+}
+
+/**
+ * Register (RSL) mio_emm_msix_vec#_ctl
+ *
+ * eMMC MSI-X Vector-Table Control and Data Register This register is the
+ * MSI-X vector table, indexed by the MIO_EMM_INT_VEC_E enumeration.
+ */
+union mio_emm_msix_vecx_ctl {
+	u64 u;
+	struct mio_emm_msix_vecx_ctl_s {
+		u64 data                             : 32;
+		u64 mask                             : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* CN8XXX layout: message data field is only 20 bits wide. */
+	struct mio_emm_msix_vecx_ctl_cn8 {
+		u64 data                             : 20;
+		u64 reserved_20_31                   : 12;
+		u64 mask                             : 1;
+		u64 reserved_33_63                   : 31;
+	} cn8;
+	/* struct mio_emm_msix_vecx_ctl_s cn9; */
+};
+
+/* Vector table entries are 0x10 bytes; CTL word at offset 8. */
+static inline u64 MIO_EMM_MSIX_VECX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_MSIX_VECX_CTL(u64 a)
+{
+	return 8 + 0x10 * a;
+}
+
+/**
+ * Register (RSL) mio_emm_rca
+ *
+ * eMMC Relative Card Address Register
+ */
+union mio_emm_rca {
+	u64 u;
+	struct mio_emm_rca_s {
+		u64 card_rca                         : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct mio_emm_rca_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_RCA CSR. */
+static inline u64 MIO_EMM_RCA(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_RCA(void)
+{
+	return 0x20a0;
+}
+
+/**
+ * Register (RSL) mio_emm_rsp_hi
+ *
+ * eMMC Response Data High Register
+ */
+union mio_emm_rsp_hi {
+	u64 u;
+	struct mio_emm_rsp_hi_s {
+		u64 dat                              : 64;
+	} s;
+	/* struct mio_emm_rsp_hi_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_RSP_HI CSR. */
+static inline u64 MIO_EMM_RSP_HI(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_RSP_HI(void)
+{
+	return 0x2070;
+}
+
+/**
+ * Register (RSL) mio_emm_rsp_lo
+ *
+ * eMMC Response Data Low Register
+ */
+union mio_emm_rsp_lo {
+	u64 u;
+	struct mio_emm_rsp_lo_s {
+		u64 dat                              : 64;
+	} s;
+	/* struct mio_emm_rsp_lo_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_RSP_LO CSR. */
+static inline u64 MIO_EMM_RSP_LO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_RSP_LO(void)
+{
+	return 0x2068;
+}
+
+/**
+ * Register (RSL) mio_emm_rsp_sts
+ *
+ * eMMC Response Status Register
+ */
+union mio_emm_rsp_sts {
+	u64 u;
+	struct mio_emm_rsp_sts_s {
+		u64 cmd_done                         : 1;
+		u64 cmd_idx                          : 6;
+		u64 cmd_type                         : 2;
+		u64 rsp_type                         : 3;
+		u64 rsp_val                          : 1;
+		u64 rsp_bad_sts                      : 1;
+		u64 rsp_crc_err                      : 1;
+		u64 rsp_timeout                      : 1;
+		u64 stp_val                          : 1;
+		u64 stp_bad_sts                      : 1;
+		u64 stp_crc_err                      : 1;
+		u64 stp_timeout                      : 1;
+		u64 rsp_busybit                      : 1;
+		u64 blk_crc_err                      : 1;
+		u64 blk_timeout                      : 1;
+		u64 dbuf                             : 1;
+		u64 reserved_24_27                   : 4;
+		u64 dbuf_err                         : 1;
+		u64 reserved_29_54                   : 26;
+		u64 acc_timeout                      : 1;
+		u64 dma_pend                         : 1;
+		u64 dma_val                          : 1;
+		u64 switch_val                       : 1;
+		u64 cmd_val                          : 1;
+		u64 bus_id                           : 2;
+		u64 reserved_62_63                   : 2;
+	} s;
+	/* struct mio_emm_rsp_sts_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_RSP_STS CSR. */
+static inline u64 MIO_EMM_RSP_STS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_RSP_STS(void)
+{
+	return 0x2060;
+}
+
+/**
+ * Register (RSL) mio_emm_sample
+ *
+ * eMMC Sampling Register
+ */
+union mio_emm_sample {
+	u64 u;
+	struct mio_emm_sample_s {
+		u64 dat_cnt                          : 10;
+		u64 reserved_10_15                   : 6;
+		u64 cmd_cnt                          : 10;
+		u64 reserved_26_63                   : 38;
+	} s;
+	/* struct mio_emm_sample_s cn; */
+};
+
+/*
+ * NOTE(review): returns 0x2090, the same offset MIO_EMM_DMA_ARG()
+ * returns earlier in this file -- one of the two offsets is likely
+ * wrong; verify against the hardware reference manual.
+ */
+static inline u64 MIO_EMM_SAMPLE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_SAMPLE(void)
+{
+	return 0x2090;
+}
+
+/**
+ * Register (RSL) mio_emm_sts_mask
+ *
+ * eMMC Status Mask Register
+ */
+union mio_emm_sts_mask {
+	u64 u;
+	struct mio_emm_sts_mask_s {
+		u64 sts_msk                          : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct mio_emm_sts_mask_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_STS_MASK CSR. */
+static inline u64 MIO_EMM_STS_MASK(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_STS_MASK(void)
+{
+	return 0x2098;
+}
+
+/**
+ * Register (RSL) mio_emm_switch
+ *
+ * eMMC Operating Mode Switch Register This register allows software to
+ * change eMMC related parameters associated with a specific BUS_ID.  The
+ * MIO_EMM_MODE() registers contain the current setting for each BUS.
+ * This register is also used to switch the [CLK_HI] and [CLK_LO]
+ * settings associated with the common EMMC_CLK.  These settings can only
+ * be changed when [BUS_ID] = 0.
+ */
+/* Variant differences mirror mio_emm_modex: CN8 and CN96XX pass 1 lack
+ * CLK_SWAP, and CN8 also lacks the HS200/HS400 timing bits.
+ */
+union mio_emm_switch {
+	u64 u;
+	struct mio_emm_switch_s {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 clk_swap                         : 1;
+		u64 reserved_37_39                   : 3;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 hs200_timing                     : 1;
+		u64 hs400_timing                     : 1;
+		u64 reserved_51_55                   : 5;
+		u64 switch_err2                      : 1;
+		u64 switch_err1                      : 1;
+		u64 switch_err0                      : 1;
+		u64 switch_exe                       : 1;
+		u64 bus_id                           : 2;
+		u64 reserved_62_63                   : 2;
+	} s;
+	struct mio_emm_switch_cn8 {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 reserved_36_39                   : 4;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 reserved_49_55                   : 7;
+		u64 switch_err2                      : 1;
+		u64 switch_err1                      : 1;
+		u64 switch_err0                      : 1;
+		u64 switch_exe                       : 1;
+		u64 bus_id                           : 2;
+		u64 reserved_62_63                   : 2;
+	} cn8;
+	struct mio_emm_switch_cn96xxp1 {
+		u64 clk_lo                           : 16;
+		u64 clk_hi                           : 16;
+		u64 power_class                      : 4;
+		u64 reserved_36_39                   : 4;
+		u64 bus_width                        : 3;
+		u64 reserved_43_47                   : 5;
+		u64 hs_timing                        : 1;
+		u64 hs200_timing                     : 1;
+		u64 hs400_timing                     : 1;
+		u64 reserved_51_55                   : 5;
+		u64 switch_err2                      : 1;
+		u64 switch_err1                      : 1;
+		u64 switch_err0                      : 1;
+		u64 switch_exe                       : 1;
+		u64 bus_id                           : 2;
+		u64 reserved_62_63                   : 2;
+	} cn96xxp1;
+	/* struct mio_emm_switch_s cn96xxp3; */
+	/* struct mio_emm_switch_s cnf95xx; */
+};
+
+/* Byte offset of the MIO_EMM_SWITCH CSR. */
+static inline u64 MIO_EMM_SWITCH(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_SWITCH(void)
+{
+	return 0x2048;
+}
+
+/**
+ * Register (RSL) mio_emm_tap
+ *
+ * eMMC TAP Delay Register This register indicates the delay line
+ * characteristics.
+ */
+union mio_emm_tap {
+	u64 u;
+	struct mio_emm_tap_s {
+		u64 delay                            : 8;
+		u64 reserved_8_63                    : 56;
+	} s;
+	/* struct mio_emm_tap_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_TAP CSR. */
+static inline u64 MIO_EMM_TAP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_TAP(void)
+{
+	return 0x20c8;
+}
+
+/**
+ * Register (RSL) mio_emm_timing
+ *
+ * eMMC Timing Register This register determines the number of tap delays
+ * the EMM_DAT, EMM_DS, and EMM_CMD lines are transmitted or received in
+ * relation to EMM_CLK. These values should only be changed when the eMMC
+ * bus is idle.
+ */
+union mio_emm_timing {
+	u64 u;
+	struct mio_emm_timing_s {
+		u64 data_out_tap                     : 6;
+		u64 reserved_6_15                    : 10;
+		u64 data_in_tap                      : 6;
+		u64 reserved_22_31                   : 10;
+		u64 cmd_out_tap                      : 6;
+		u64 reserved_38_47                   : 10;
+		u64 cmd_in_tap                       : 6;
+		u64 reserved_54_63                   : 10;
+	} s;
+	/* struct mio_emm_timing_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_TIMING CSR. */
+static inline u64 MIO_EMM_TIMING(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_TIMING(void)
+{
+	return 0x20d0;
+}
+
+/**
+ * Register (RSL) mio_emm_wdog
+ *
+ * eMMC Watchdog Register
+ */
+union mio_emm_wdog {
+	u64 u;
+	struct mio_emm_wdog_s {
+		u64 clk_cnt                          : 26;
+		u64 reserved_26_63                   : 38;
+	} s;
+	/* struct mio_emm_wdog_s cn; */
+};
+
+/* Byte offset of the MIO_EMM_WDOG CSR. */
+static inline u64 MIO_EMM_WDOG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 MIO_EMM_WDOG(void)
+{
+	return 0x2088;
+}
+
+#endif /* __CSRS_MIO_EMM_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/csrs/csrs-nix.h b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-nix.h
new file mode 100644
index 0000000..2908f25
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-nix.h
@@ -0,0 +1,10404 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_NIX_H__
+#define __CSRS_NIX_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * NIX.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Enumeration nix_af_int_vec_e
+ *
+ * NIX Admin Function Interrupt Vector Enumeration Enumerates the NIX AF
+ * MSI-X interrupt vectors.
+ */
+#define NIX_AF_INT_VEC_E_AF_ERR (3)
+#define NIX_AF_INT_VEC_E_AQ_DONE (2)
+#define NIX_AF_INT_VEC_E_GEN (1)
+#define NIX_AF_INT_VEC_E_POISON (4)
+#define NIX_AF_INT_VEC_E_RVU (0)
+
+/**
+ * Enumeration nix_aq_comp_e
+ *
+ * NIX Completion Enumeration Enumerates the values of
+ * NIX_AQ_RES_S[COMPCODE].
+ */
+#define NIX_AQ_COMP_E_CTX_FAULT (4)
+#define NIX_AQ_COMP_E_CTX_POISON (3)
+#define NIX_AQ_COMP_E_GOOD (1)
+#define NIX_AQ_COMP_E_LOCKERR (5)
+#define NIX_AQ_COMP_E_NOTDONE (0)
+#define NIX_AQ_COMP_E_SQB_ALLOC_FAIL (6)
+#define NIX_AQ_COMP_E_SWERR (2)
+
+/**
+ * Enumeration nix_aq_ctype_e
+ *
+ * NIX Context Type Enumeration Enumerates NIX_AQ_INST_S[CTYPE] values.
+ */
+#define NIX_AQ_CTYPE_E_CQ (2)
+#define NIX_AQ_CTYPE_E_DYNO (5)
+#define NIX_AQ_CTYPE_E_MCE (3)
+#define NIX_AQ_CTYPE_E_RQ (0)
+#define NIX_AQ_CTYPE_E_RSS (4)
+#define NIX_AQ_CTYPE_E_SQ (1)
+
+/**
+ * Enumeration nix_aq_instop_e
+ *
+ * NIX Admin Queue Opcode Enumeration Enumerates NIX_AQ_INST_S[OP]
+ * values.
+ */
+#define NIX_AQ_INSTOP_E_INIT (1)
+#define NIX_AQ_INSTOP_E_LOCK (4)
+#define NIX_AQ_INSTOP_E_NOP (0)
+#define NIX_AQ_INSTOP_E_READ (3)
+#define NIX_AQ_INSTOP_E_UNLOCK (5)
+#define NIX_AQ_INSTOP_E_WRITE (2)
+
+/**
+ * Enumeration nix_chan_e
+ *
+ * NIX Channel Number Enumeration Enumerates the receive and transmit
+ * channels, and values of NIX_RX_PARSE_S[CHAN],
+ * NIX_SQ_CTX_S[DEFAULT_CHAN]. CNXXXX implements a subset of these
+ * channels. Specifically, only channels for links enumerated by
+ * NIX_LINK_E are implemented.  Internal: P2X/X2P channel enumeration for
+ * t9x.
+ */
+/* Channel bases: LBK at 0, SDP at 0x700, CGX LMAC channels at 0x800. */
+#define NIX_CHAN_E_CGXX_LMACX_CHX(a, b, c)	\
+	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_E_LBKX_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_E_RX(a) (0 + 0x100 * (a))
+#define NIX_CHAN_E_SDP_CHX(a) (0x700 + (a))
+
+/**
+ * Enumeration nix_colorresult_e
+ *
+ * NIX Color Result Enumeration Enumerates the values of
+ * NIX_MEM_RESULT_S[COLOR], NIX_AF_TL1()_MD_DEBUG1[COLOR] and
+ * NIX_AF_TL1()_MD_DEBUG1[COLOR].
+ */
+#define NIX_COLORRESULT_E_GREEN (0)
+#define NIX_COLORRESULT_E_RED_DROP (3)
+#define NIX_COLORRESULT_E_RED_SEND (2)
+#define NIX_COLORRESULT_E_YELLOW (1)
+
+/**
+ * Enumeration nix_cqerrint_e
+ *
+ * NIX Completion Queue Interrupt Enumeration Enumerates the bit index of
+ * NIX_CQ_CTX_S[CQ_ERR_INT,CQ_ERR_INT_ENA].
+ */
+#define NIX_CQERRINT_E_CQE_FAULT (2)
+#define NIX_CQERRINT_E_DOOR_ERR (0)
+#define NIX_CQERRINT_E_WR_FULL (1)
+
+/**
+ * Enumeration nix_intf_e
+ *
+ * NIX Interface Number Enumeration Enumerates the bit index of
+ * NIX_AF_STATUS[CALIBRATE_STATUS].
+ */
+#define NIX_INTF_E_CGXX(a) (0 + (a))
+#define NIX_INTF_E_LBKX(a) (3 + (a))
+#define NIX_INTF_E_SDP (4)
+
+/**
+ * Enumeration nix_lf_int_vec_e
+ *
+ * NIX Local Function Interrupt Vector Enumeration Enumerates the NIX
+ * MSI-X interrupt vectors per LF.
+ */
+/* QINT vectors start at 0, CINT vectors at 0x40, misc at 0x80-0x82. */
+#define NIX_LF_INT_VEC_E_CINTX(a) (0x40 + (a))
+#define NIX_LF_INT_VEC_E_ERR_INT (0x81)
+#define NIX_LF_INT_VEC_E_GINT (0x80)
+#define NIX_LF_INT_VEC_E_POISON (0x82)
+#define NIX_LF_INT_VEC_E_QINTX(a) (0 + (a))
+
+/**
+ * Enumeration nix_link_e
+ *
+ * NIX Link Number Enumeration Enumerates the receive and transmit links,
+ * and LINK index of NIX_AF_RX_LINK()_CFG, NIX_AF_RX_LINK()_WRR_CFG,
+ * NIX_AF_TX_LINK()_NORM_CREDIT, NIX_AF_TX_LINK()_HW_XOFF and
+ * NIX_AF_TL3_TL2()_LINK()_CFG.
+ */
+#define NIX_LINK_E_CGXX_LMACX(a, b) (0 + 4 * (a) + (b))
+#define NIX_LINK_E_LBKX(a) (0xc + (a))
+#define NIX_LINK_E_MC (0xe)
+#define NIX_LINK_E_SDP (0xd)
+
+/**
+ * Enumeration nix_lsoalg_e
+ *
+ * NIX Large Send Offload Algorithm Enumeration Enumerates
+ * NIX_AF_LSO_FORMAT()_FIELD()[ALG] values. Specifies algorithm for
+ * modifying the associated LSO packet field.
+ */
+#define NIX_LSOALG_E_ADD_OFFSET (3)
+#define NIX_LSOALG_E_ADD_PAYLEN (2)
+#define NIX_LSOALG_E_ADD_SEGNUM (1)
+#define NIX_LSOALG_E_NOP (0)
+#define NIX_LSOALG_E_TCP_FLAGS (4)
+
+/**
+ * Enumeration nix_maxsqesz_e
+ *
+ * NIX Maximum SQE Size Enumeration Enumerates the values of
+ * NIX_SQ_CTX_S[MAX_SQE_SIZE].
+ */
+#define NIX_MAXSQESZ_E_W16 (0)
+#define NIX_MAXSQESZ_E_W8 (1)
+
+/**
+ * Enumeration nix_mdtype_e
+ *
+ * NIX Meta Descriptor Type Enumeration Enumerates values of
+ * NIX_AF_MDQ()_MD_DEBUG[MD_TYPE].
+ */
+#define NIX_MDTYPE_E_FLUSH (1)
+#define NIX_MDTYPE_E_PMD (2)
+#define NIX_MDTYPE_E_RSVD (0)
+
+/**
+ * Enumeration nix_mnqerr_e
+ *
+ * NIX Meta-Descriptor Enqueue Error Enumeration Enumerates
+ * NIX_LF_MNQ_ERR_DBG[ERRCODE] values.
+ */
+#define NIX_MNQERR_E_CQ_QUERY_ERR (6)
+#define NIX_MNQERR_E_LSO_ERR (5)
+#define NIX_MNQERR_E_MAXLEN_ERR (8)
+#define NIX_MNQERR_E_MAX_SQE_SIZE_ERR (7)
+#define NIX_MNQERR_E_SQB_FAULT (2)
+#define NIX_MNQERR_E_SQB_POISON (3)
+#define NIX_MNQERR_E_SQE_SIZEM1_ZERO (9)
+#define NIX_MNQERR_E_SQ_CTX_FAULT (0)
+#define NIX_MNQERR_E_SQ_CTX_POISON (1)
+#define NIX_MNQERR_E_TOTAL_ERR (4)
+
+/**
+ * Enumeration nix_ndc_rx_port_e
+ *
+ * NIX Receive NDC Port Enumeration Enumerates NIX receive NDC
+ * (NDC_IDX_E::NIX()_RX) ports and the PORT index of
+ * NDC_AF_PORT()_RT()_RW()_REQ_PC and NDC_AF_PORT()_RT()_RW()_LAT_PC.
+ */
+#define NIX_NDC_RX_PORT_E_AQ (0)
+#define NIX_NDC_RX_PORT_E_CINT (2)
+#define NIX_NDC_RX_PORT_E_CQ (1)
+#define NIX_NDC_RX_PORT_E_MC (3)
+#define NIX_NDC_RX_PORT_E_PKT (4)
+#define NIX_NDC_RX_PORT_E_RQ (5)
+
+/**
+ * Enumeration nix_ndc_tx_port_e
+ *
+ * NIX Transmit NDC Port Enumeration Enumerates NIX transmit NDC
+ * (NDC_IDX_E::NIX()_TX) ports and the PORT index of
+ * NDC_AF_PORT()_RT()_RW()_REQ_PC and NDC_AF_PORT()_RT()_RW()_LAT_PC.
+ */
+#define NIX_NDC_TX_PORT_E_DEQ (3)
+#define NIX_NDC_TX_PORT_E_DMA (4)
+#define NIX_NDC_TX_PORT_E_ENQ (1)
+#define NIX_NDC_TX_PORT_E_LMT (0)
+#define NIX_NDC_TX_PORT_E_MNQ (2)
+#define NIX_NDC_TX_PORT_E_XQE (5)
+
+/**
+ * Enumeration nix_re_opcode_e
+ *
+ * NIX Receive Error Opcode Enumeration Enumerates
+ * NIX_RX_PARSE_S[ERRCODE] values when NIX_RX_PARSE_S[ERRLEV] =
+ * NPC_ERRLEV_E::RE.
+ */
+#define NIX_RE_OPCODE_E_OL2_LENMISM (0x12)
+#define NIX_RE_OPCODE_E_OVERSIZE (0x11)
+#define NIX_RE_OPCODE_E_RE_DMAPKT (0xf)
+#define NIX_RE_OPCODE_E_RE_FCS (7)
+#define NIX_RE_OPCODE_E_RE_FCS_RCV (8)
+#define NIX_RE_OPCODE_E_RE_JABBER (2)
+#define NIX_RE_OPCODE_E_RE_NONE (0)
+#define NIX_RE_OPCODE_E_RE_PARTIAL (1)
+#define NIX_RE_OPCODE_E_RE_RX_CTL (0xb)
+#define NIX_RE_OPCODE_E_RE_SKIP (0xc)
+#define NIX_RE_OPCODE_E_RE_TERMINATE (9)
+#define NIX_RE_OPCODE_E_UNDERSIZE (0x10)
+
+/**
+ * Enumeration nix_redalg_e
+ *
+ * NIX Red Algorithm Enumeration Enumerates the different algorithms of
+ * NIX_SEND_EXT_S[SHP_RA].
+ */
+#define NIX_REDALG_E_DISCARD (3)
+#define NIX_REDALG_E_SEND (1)
+#define NIX_REDALG_E_STALL (2)
+#define NIX_REDALG_E_STD (0)
+
+/**
+ * Enumeration nix_rqint_e
+ *
+ * NIX Receive Queue Interrupt Enumeration Enumerates the bit index of
+ * NIX_RQ_CTX_S[RQ_INT,RQ_INT_ENA].
+ */
+/*
+ * NOTE(review): RX(a) expands to (0 + (a)), so RX(0)/RX(1) numerically
+ * alias DROP/RED — this matches the generated-header pattern; confirm
+ * intended usage against the HRM.
+ */
+#define NIX_RQINT_E_DROP (0)
+#define NIX_RQINT_E_RX(a) (0 + (a))
+#define NIX_RQINT_E_RED (1)
+
+/**
+ * Enumeration nix_rx_actionop_e
+ *
+ * NIX Receive Action Opcode Enumeration Enumerates the values of
+ * NIX_RX_ACTION_S[OP].
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_RX_ACTIONOP_E_DROP (0)
+#define NIX_RX_ACTIONOP_E_MCAST (3)
+#define NIX_RX_ACTIONOP_E_MIRROR (6)
+#define NIX_RX_ACTIONOP_E_PF_FUNC_DROP (5)
+#define NIX_RX_ACTIONOP_E_RSS (4)
+#define NIX_RX_ACTIONOP_E_UCAST (1)
+#define NIX_RX_ACTIONOP_E_UCAST_IPSEC (2)
+
+/**
+ * Enumeration nix_rx_mcop_e
+ *
+ * NIX Receive Multicast/Mirror Opcode Enumeration Enumerates the values
+ * of NIX_RX_MCE_S[OP].
+ */
+/* Two-valued opcode for NIX_RX_MCE_S[OP] (see comment above). */
+#define NIX_RX_MCOP_E_RQ (0)
+#define NIX_RX_MCOP_E_RSS (1)
+
+/**
+ * Enumeration nix_rx_perrcode_e
+ *
+ * NIX Receive Protocol Error Code Enumeration Enumerates
+ * NIX_RX_PARSE_S[ERRCODE] values when NIX_RX_PARSE_S[ERRLEV] =
+ * NPC_ERRLEV_E::NIX.
+ */
+/* Listed alphabetically; decimal equivalents noted for hex values. */
+#define NIX_RX_PERRCODE_E_BUFS_OFLOW (0xa) /* 10 */
+#define NIX_RX_PERRCODE_E_DATA_FAULT (8)
+#define NIX_RX_PERRCODE_E_IL3_LEN (0x20) /* 32 */
+#define NIX_RX_PERRCODE_E_IL4_CHK (0x22) /* 34 */
+#define NIX_RX_PERRCODE_E_IL4_LEN (0x21) /* 33 */
+#define NIX_RX_PERRCODE_E_IL4_PORT (0x23) /* 35 */
+#define NIX_RX_PERRCODE_E_MCAST_FAULT (4)
+#define NIX_RX_PERRCODE_E_MCAST_POISON (6)
+#define NIX_RX_PERRCODE_E_MEMOUT (9)
+#define NIX_RX_PERRCODE_E_MIRROR_FAULT (5)
+#define NIX_RX_PERRCODE_E_MIRROR_POISON (7)
+#define NIX_RX_PERRCODE_E_NPC_RESULT_ERR (2)
+#define NIX_RX_PERRCODE_E_OL3_LEN (0x10) /* 16 */
+#define NIX_RX_PERRCODE_E_OL4_CHK (0x12) /* 18 */
+#define NIX_RX_PERRCODE_E_OL4_LEN (0x11) /* 17 */
+#define NIX_RX_PERRCODE_E_OL4_PORT (0x13) /* 19 */
+
+/**
+ * Enumeration nix_send_status_e
+ *
+ * NIX Send Completion Status Enumeration Enumerates values of
+ * NIX_SEND_COMP_S[STATUS] and NIX_LF_SEND_ERR_DBG[ERRCODE].
+ */
+/* Listed alphabetically; decimal equivalents noted for hex values. */
+#define NIX_SEND_STATUS_E_DATA_FAULT (0x16) /* 22 */
+#define NIX_SEND_STATUS_E_DATA_POISON (0x17) /* 23 */
+#define NIX_SEND_STATUS_E_GOOD (0)
+#define NIX_SEND_STATUS_E_INVALID_SUBDC (0x14) /* 20 */
+#define NIX_SEND_STATUS_E_JUMP_FAULT (7)
+#define NIX_SEND_STATUS_E_JUMP_POISON (8)
+#define NIX_SEND_STATUS_E_LOCK_VIOL (0x21) /* 33 */
+#define NIX_SEND_STATUS_E_NPC_DROP_ACTION (0x20) /* 32 */
+#define NIX_SEND_STATUS_E_NPC_MCAST_ABORT (0x24) /* 36 */
+#define NIX_SEND_STATUS_E_NPC_MCAST_CHAN_ERR (0x23) /* 35 */
+#define NIX_SEND_STATUS_E_NPC_UCAST_CHAN_ERR (0x22) /* 34 */
+#define NIX_SEND_STATUS_E_NPC_VTAG_PTR_ERR (0x25) /* 37 */
+#define NIX_SEND_STATUS_E_NPC_VTAG_SIZE_ERR (0x26) /* 38 */
+#define NIX_SEND_STATUS_E_SEND_CRC_ERR (0x10) /* 16 */
+#define NIX_SEND_STATUS_E_SEND_EXT_ERR (6)
+#define NIX_SEND_STATUS_E_SEND_HDR_ERR (5)
+#define NIX_SEND_STATUS_E_SEND_IMM_ERR (0x11) /* 17 */
+#define NIX_SEND_STATUS_E_SEND_MEM_ERR (0x13) /* 19 */
+#define NIX_SEND_STATUS_E_SEND_MEM_FAULT (0x27) /* 39 */
+#define NIX_SEND_STATUS_E_SEND_SG_ERR (0x12) /* 18 */
+#define NIX_SEND_STATUS_E_SQB_FAULT (3)
+#define NIX_SEND_STATUS_E_SQB_POISON (4)
+#define NIX_SEND_STATUS_E_SQ_CTX_FAULT (1)
+#define NIX_SEND_STATUS_E_SQ_CTX_POISON (2)
+#define NIX_SEND_STATUS_E_SUBDC_ORDER_ERR (0x15) /* 21 */
+
+/**
+ * Enumeration nix_sendcrcalg_e
+ *
+ * NIX Send CRC Algorithm Enumeration Enumerates the CRC algorithm used,
+ * see NIX_SEND_CRC_S[ALG].
+ */
+/* CRC algorithm selector for NIX_SEND_CRC_S[ALG] (see comment above). */
+#define NIX_SENDCRCALG_E_CRC32 (0)
+#define NIX_SENDCRCALG_E_CRC32C (1)
+#define NIX_SENDCRCALG_E_ONES16 (2)
+
+/**
+ * Enumeration nix_sendl3type_e
+ *
+ * NIX Send Layer 3 Header Type Enumeration Enumerates values of
+ * NIX_SEND_HDR_S[OL3TYPE], NIX_SEND_HDR_S[IL3TYPE]. Internal: Encoding
+ * matches DPDK TX IP types: \<pre\> PKT_TX_IP_CKSUM      (1ULL \<\< 54)
+ * PKT_TX_IPV4          (1ULL \<\< 55) PKT_TX_IPV6          (1ULL \<\<
+ * 56)  PKT_TX_OUTER_IP_CKSUM(1ULL \<\< 58) PKT_TX_OUTER_IPV4    (1ULL
+ * \<\< 59) PKT_TX_OUTER_IPV6    (1ULL \<\< 60) \</pre\>
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_SENDL3TYPE_E_IP4 (2)
+#define NIX_SENDL3TYPE_E_IP4_CKSUM (3)
+#define NIX_SENDL3TYPE_E_IP6 (4)
+#define NIX_SENDL3TYPE_E_NONE (0)
+
+/**
+ * Enumeration nix_sendl4type_e
+ *
+ * NIX Send Layer 4 Header Type Enumeration Enumerates values of
+ * NIX_SEND_HDR_S[OL4TYPE], NIX_SEND_HDR_S[IL4TYPE]. Internal: Encoding
+ * matches DPDK TX L4 types. \<pre\> PKT_TX_L4_NO_CKSUM   (0ULL \<\< 52)
+ * // Disable L4 cksum of TX pkt. PKT_TX_TCP_CKSUM     (1ULL \<\< 52)  //
+ * TCP cksum of TX pkt. computed by nic. PKT_TX_SCTP_CKSUM    (2ULL \<\<
+ * 52)  // SCTP cksum of TX pkt. computed by nic. PKT_TX_UDP_CKSUM
+ * (3ULL \<\< 52)  // UDP cksum of TX pkt. computed by nic. \</pre\>
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_SENDL4TYPE_E_NONE (0)
+#define NIX_SENDL4TYPE_E_SCTP_CKSUM (2)
+#define NIX_SENDL4TYPE_E_TCP_CKSUM (1)
+#define NIX_SENDL4TYPE_E_UDP_CKSUM (3)
+
+/**
+ * Enumeration nix_sendldtype_e
+ *
+ * NIX Send Load Type Enumeration Enumerates the load transaction types
+ * for reading segment bytes specified by NIX_SEND_SG_S[LD_TYPE] and
+ * NIX_SEND_JUMP_S[LD_TYPE].  Internal: The hardware implementation
+ * treats undefined encodings as LDD load type.
+ */
+/* Load transaction types for NIX_SEND_SG_S/NIX_SEND_JUMP_S[LD_TYPE]. */
+#define NIX_SENDLDTYPE_E_LDD (0)
+#define NIX_SENDLDTYPE_E_LDT (1)
+#define NIX_SENDLDTYPE_E_LDWB (2)
+
+/**
+ * Enumeration nix_sendmemalg_e
+ *
+ * NIX Memory Modify Algorithm Enumeration Enumerates the different
+ * algorithms for modifying memory; see NIX_SEND_MEM_S[ALG]. mbufs_freed
+ * is the number of gather buffers freed to NPA for the send descriptor.
+ * See NIX_SEND_HDR_S[DF] and NIX_SEND_SG_S[I*].
+ */
+/* Listed alphabetically; decimal equivalents noted for hex values. */
+#define NIX_SENDMEMALG_E_ADD (8)
+#define NIX_SENDMEMALG_E_ADDLEN (0xa) /* 10 */
+#define NIX_SENDMEMALG_E_ADDMBUF (0xc) /* 12 */
+#define NIX_SENDMEMALG_E_SET (0)
+#define NIX_SENDMEMALG_E_SETRSLT (2)
+#define NIX_SENDMEMALG_E_SETTSTMP (1)
+#define NIX_SENDMEMALG_E_SUB (9)
+#define NIX_SENDMEMALG_E_SUBLEN (0xb) /* 11 */
+#define NIX_SENDMEMALG_E_SUBMBUF (0xd) /* 13 */
+
+/**
+ * Enumeration nix_sendmemdsz_e
+ *
+ * NIX Memory Data Size Enumeration Enumerates the datum size for
+ * modifying memory; see NIX_SEND_MEM_S[DSZ].
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_SENDMEMDSZ_E_B16 (2)
+#define NIX_SENDMEMDSZ_E_B32 (1)
+#define NIX_SENDMEMDSZ_E_B64 (0)
+#define NIX_SENDMEMDSZ_E_B8 (3)
+
+/**
+ * Enumeration nix_sqint_e
+ *
+ * NIX Send Queue Interrupt Enumeration Enumerates the bit index of
+ * NIX_SQ_CTX_S[SQ_INT,SQ_INT_ENA].
+ */
+/* Bit indices of NIX_SQ_CTX_S[SQ_INT,SQ_INT_ENA] (see comment above). */
+#define NIX_SQINT_E_LMT_ERR (0)
+#define NIX_SQINT_E_MNQ_ERR (1)
+#define NIX_SQINT_E_SEND_ERR (2)
+#define NIX_SQINT_E_SQB_ALLOC_FAIL (3)
+
+/**
+ * Enumeration nix_sqoperr_e
+ *
+ * NIX SQ Operation Error Enumeration Enumerates
+ * NIX_LF_SQ_OP_ERR_DBG[ERRCODE] values.
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_SQOPERR_E_MAX_SQE_SIZE_ERR (4)
+#define NIX_SQOPERR_E_SQB_FAULT (7)
+#define NIX_SQOPERR_E_SQB_NULL (6)
+#define NIX_SQOPERR_E_SQE_OFLOW (5)
+#define NIX_SQOPERR_E_SQE_SIZEM1_ZERO (8)
+#define NIX_SQOPERR_E_SQ_CTX_FAULT (1)
+#define NIX_SQOPERR_E_SQ_CTX_POISON (2)
+#define NIX_SQOPERR_E_SQ_DISABLED (3)
+#define NIX_SQOPERR_E_SQ_OOR (0)
+
+/**
+ * Enumeration nix_stat_lf_rx_e
+ *
+ * NIX Local Function Receive Statistics Enumeration Enumerates the last
+ * index of NIX_AF_LF()_RX_STAT() and NIX_LF_RX_STAT().
+ */
+/* Listed alphabetically; decimal equivalents noted for hex values. */
+#define NIX_STAT_LF_RX_E_RX_BCAST (2)
+#define NIX_STAT_LF_RX_E_RX_DROP (4)
+#define NIX_STAT_LF_RX_E_RX_DROP_OCTS (5)
+#define NIX_STAT_LF_RX_E_RX_DRP_BCAST (8)
+#define NIX_STAT_LF_RX_E_RX_DRP_L3BCAST (0xa) /* 10 */
+#define NIX_STAT_LF_RX_E_RX_DRP_L3MCAST (0xb) /* 11 */
+#define NIX_STAT_LF_RX_E_RX_DRP_MCAST (9)
+#define NIX_STAT_LF_RX_E_RX_ERR (7)
+#define NIX_STAT_LF_RX_E_RX_FCS (6)
+#define NIX_STAT_LF_RX_E_RX_MCAST (3)
+#define NIX_STAT_LF_RX_E_RX_OCTS (0)
+#define NIX_STAT_LF_RX_E_RX_UCAST (1)
+
+/**
+ * Enumeration nix_stat_lf_tx_e
+ *
+ * NIX Local Function Transmit Statistics Enumeration Enumerates the
+ * index of NIX_AF_LF()_TX_STAT() and NIX_LF_TX_STAT(). These statistics
+ * do not account for packet replication due to NIX_TX_ACTION_S[OP] =
+ * NIX_TX_ACTIONOP_E::MCAST.
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_STAT_LF_TX_E_TX_BCAST (1)
+#define NIX_STAT_LF_TX_E_TX_DROP (3)
+#define NIX_STAT_LF_TX_E_TX_MCAST (2)
+#define NIX_STAT_LF_TX_E_TX_OCTS (4)
+#define NIX_STAT_LF_TX_E_TX_UCAST (0)
+
+/**
+ * Enumeration nix_stype_e
+ *
+ * NIX SQB Caching Type Enumeration Enumerates the values of
+ * NIX_SQ_CTX_S[SQE_STYPE].
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_STYPE_E_STF (0)
+#define NIX_STYPE_E_STP (2)
+#define NIX_STYPE_E_STT (1)
+
+/**
+ * Enumeration nix_subdc_e
+ *
+ * NIX Subdescriptor Operation Enumeration Enumerates send and receive
+ * subdescriptor codes. The codes differentiate subdescriptors within a
+ * NIX send or receive descriptor, excluding NIX_SEND_HDR_S for send and
+ * NIX_CQE_HDR_S/NIX_WQE_HDR_S for receive, which are determined by their
+ * position as the first subdescriptor, and NIX_RX_PARSE_S, which is
+ * determined by its position as the second subdescriptor.
+ */
+/* Listed alphabetically; decimal equivalent noted for the hex value. */
+#define NIX_SUBDC_E_CRC (2)
+#define NIX_SUBDC_E_EXT (1)
+#define NIX_SUBDC_E_IMM (3)
+#define NIX_SUBDC_E_JUMP (6)
+#define NIX_SUBDC_E_MEM (5)
+#define NIX_SUBDC_E_NOP (0)
+#define NIX_SUBDC_E_SG (4)
+#define NIX_SUBDC_E_SOD (0xf) /* 15 */
+#define NIX_SUBDC_E_WORK (7)
+
+/**
+ * Enumeration nix_tx_actionop_e
+ *
+ * NIX Transmit Action Opcode Enumeration Enumerates the values of
+ * NIX_TX_ACTION_S[OP].
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_TX_ACTIONOP_E_DROP (0)
+#define NIX_TX_ACTIONOP_E_DROP_VIOL (5)
+#define NIX_TX_ACTIONOP_E_MCAST (3)
+#define NIX_TX_ACTIONOP_E_UCAST_CHAN (2)
+#define NIX_TX_ACTIONOP_E_UCAST_DEFAULT (1)
+
+/**
+ * Enumeration nix_tx_vtagop_e
+ *
+ * NIX Transmit Vtag Opcode Enumeration Enumerates the values of
+ * NIX_TX_VTAG_ACTION_S[VTAG0_OP,VTAG1_OP].
+ */
+/* Vtag opcodes for NIX_TX_VTAG_ACTION_S[VTAG0_OP,VTAG1_OP]. */
+#define NIX_TX_VTAGOP_E_INSERT (1)
+#define NIX_TX_VTAGOP_E_NOP (0)
+#define NIX_TX_VTAGOP_E_REPLACE (2)
+
+/**
+ * Enumeration nix_txlayer_e
+ *
+ * NIX Transmit Layer Enumeration Enumerates the values of
+ * NIX_AF_LSO_FORMAT()_FIELD()[LAYER].
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_TXLAYER_E_IL3 (2)
+#define NIX_TXLAYER_E_IL4 (3)
+#define NIX_TXLAYER_E_OL3 (0)
+#define NIX_TXLAYER_E_OL4 (1)
+
+/**
+ * Enumeration nix_vtagsize_e
+ *
+ * NIX Vtag Size Enumeration Enumerates the values of
+ * NIX_AF_TX_VTAG_DEF()_CTL[SIZE] and NIX_AF_LF()_RX_VTAG_TYPE()[SIZE].
+ */
+/* Vtag sizes for NIX_AF_TX_VTAG_DEF()_CTL[SIZE] (see comment above). */
+#define NIX_VTAGSIZE_E_T4 (0)
+#define NIX_VTAGSIZE_E_T8 (1)
+
+/**
+ * Enumeration nix_xqe_type_e
+ *
+ * NIX WQE/CQE Type Enumeration Enumerates the values of
+ * NIX_WQE_HDR_S[WQE_TYPE], NIX_CQE_HDR_S[CQE_TYPE].
+ */
+/* Listed alphabetically by name; values are not sequential. */
+#define NIX_XQE_TYPE_E_INVALID (0)
+#define NIX_XQE_TYPE_E_RX (1)
+#define NIX_XQE_TYPE_E_RX_IPSECD (4)
+#define NIX_XQE_TYPE_E_RX_IPSECH (3)
+#define NIX_XQE_TYPE_E_RX_IPSECS (2)
+#define NIX_XQE_TYPE_E_SEND (8)
+
+/**
+ * Enumeration nix_xqesz_e
+ *
+ * NIX WQE/CQE Size Enumeration Enumerates the values of
+ * NIX_AF_LF()_CFG[XQE_SIZE].
+ */
+/* WQE/CQE sizes for NIX_AF_LF()_CFG[XQE_SIZE] (see comment above). */
+#define NIX_XQESZ_E_W16 (1)
+#define NIX_XQESZ_E_W64 (0)
+
+/**
+ * Structure nix_aq_inst_s
+ *
+ * NIX Admin Queue Instruction Structure This structure specifies the AQ
+ * instruction. Instructions and associated software structures are
+ * stored in memory as little-endian unless NIX_AF_CFG[AF_BE] is set.
+ * Hardware reads of NIX_AQ_INST_S do not allocate into LLC.  Hardware
+ * reads and writes of the context structure selected by [CTYPE], [LF]
+ * and [CINDEX] use the NDC and LLC caching style configured for that
+ * context. For example: * When [CTYPE] = NIX_AQ_CTYPE_E::RQ: use
+ * NIX_AF_LF()_RSS_CFG[CACHING] and NIX_AF_LF()_RSS_CFG[WAY_MASK]. * When
+ * [CTYPE] = NIX_AQ_CTYPE_E::MCE: use NIX_AF_RX_MCAST_CFG[CACHING] and
+ * NIX_AF_RX_MCAST_CFG[WAY_MASK].
+ */
+union nix_aq_inst_s {
+	u64 u[2];
+	struct nix_aq_inst_s_s {
+		u64 op                               : 4;  /* [3:0] */
+		u64 ctype                            : 4;  /* [7:4] */
+		u64 lf                               : 7;  /* [14:8] */
+		u64 reserved_15_23                   : 9;
+		u64 cindex                           : 20; /* [43:24] */
+		u64 reserved_44_62                   : 19;
+		u64 doneint                          : 1;  /* [63] */
+		u64 res_addr                         : 64; /* [127:64] */
+	} s;
+	/* struct nix_aq_inst_s_s cn; */
+};
+
+/**
+ * Structure nix_aq_res_s
+ *
+ * NIX Admin Queue Result Structure NIX writes this structure after it
+ * completes the NIX_AQ_INST_S instruction. The result structure is
+ * exactly 16 bytes, and each instruction completion produces exactly one
+ * result structure.  Results and associated software structures are
+ * stored in memory as little-endian unless NIX_AF_CFG[AF_BE] is set.
+ * When [OP] = NIX_AQ_INSTOP_E::INIT, WRITE or READ, this structure is
+ * immediately followed by context read or write data. See
+ * NIX_AQ_INSTOP_E.  Hardware writes of NIX_AQ_RES_S and context data
+ * always allocate into LLC. Hardware reads of context data do not
+ * allocate into LLC.
+ */
+union nix_aq_res_s {
+	u64 u[2];
+	struct nix_aq_res_s_s {
+		u64 op                               : 4;  /* [3:0] */
+		u64 ctype                            : 4;  /* [7:4] */
+		u64 compcode                         : 8;  /* [15:8] */
+		u64 doneint                          : 1;  /* [16] */
+		u64 reserved_17_63                   : 47;
+		u64 reserved_64_127                  : 64;
+	} s;
+	/* struct nix_aq_res_s_s cn; */
+};
+
+/**
+ * Structure nix_cint_hw_s
+ *
+ * NIX Completion Interrupt Context Hardware Structure This structure
+ * contains context state maintained by hardware for each completion
+ * interrupt (CINT) in NDC/LLC/DRAM. Software accesses this structure
+ * with the NIX_LF_CINT()* registers. Hardware maintains a table of
+ * NIX_AF_CONST2[CINTS] contiguous NIX_CINT_HW_S structures per LF
+ * starting at AF IOVA NIX_AF_LF()_CINTS_BASE. Always stored in byte
+ * invariant little-endian format (LE8).
+ */
+union nix_cint_hw_s {
+	u64 u[2];
+	struct nix_cint_hw_s_s {
+		u64 ecount                           : 32; /* [31:0] */
+		u64 qcount                           : 16; /* [47:32] */
+		u64 intr                             : 1;  /* [48] */
+		u64 ena                              : 1;  /* [49] */
+		u64 timer_idx                        : 8;  /* [57:50] */
+		u64 reserved_58_63                   : 6;
+		u64 ecount_wait                      : 32; /* [95:64] */
+		u64 qcount_wait                      : 16; /* [111:96] */
+		u64 time_wait                        : 8;  /* [119:112] */
+		u64 reserved_120_127                 : 8;
+	} s;
+	/* struct nix_cint_hw_s_s cn; */
+};
+
+/**
+ * Structure nix_cq_ctx_s
+ *
+ * NIX Completion Queue Context Structure This structure contains context
+ * state maintained by hardware for each CQ in NDC/LLC/DRAM. Software
+ * uses the same structure format to read and write an CQ context with
+ * the NIX admin queue.
+ */
+union nix_cq_ctx_s {
+	u64 u[4];
+	struct nix_cq_ctx_s_s {
+		u64 base                             : 64; /* [63:0] */
+		u64 reserved_64_67                   : 4;
+		u64 bp_ena                           : 1;  /* [68] */
+		u64 reserved_69_71                   : 3;
+		u64 bpid                             : 9;  /* [80:72] */
+		u64 reserved_81_83                   : 3;
+		u64 qint_idx                         : 7;  /* [90:84] */
+		u64 cq_err                           : 1;  /* [91] */
+		u64 cint_idx                         : 7;  /* [98:92] */
+		u64 avg_con                          : 9;  /* [107:99] */
+		u64 wrptr                            : 20; /* [127:108] */
+		u64 tail                             : 20; /* [147:128] */
+		u64 head                             : 20; /* [167:148] */
+		u64 avg_level                        : 8;  /* [175:168] */
+		u64 update_time                      : 16; /* [191:176] */
+		u64 bp                               : 8;  /* [199:192] */
+		u64 drop                             : 8;  /* [207:200] */
+		u64 drop_ena                         : 1;  /* [208] */
+		u64 ena                              : 1;  /* [209] */
+		u64 reserved_210_211                 : 2;
+		u64 substream                        : 20; /* [231:212] */
+		u64 caching                          : 1;  /* [232] */
+		u64 reserved_233_235                 : 3;
+		u64 qsize                            : 4;  /* [239:236] */
+		u64 cq_err_int                       : 8;  /* [247:240] */
+		u64 cq_err_int_ena                   : 8;  /* [255:248] */
+	} s;
+	/* struct nix_cq_ctx_s_s cn; */
+};
+
+/**
+ * Structure nix_cqe_hdr_s
+ *
+ * NIX Completion Queue Entry Header Structure This 64-bit structure
+ * defines the first word of every CQE. It is immediately followed by
+ * NIX_RX_PARSE_S in a receive CQE, and by NIX_SEND_COMP_S in a send
+ * completion CQE. Stored in memory as little-endian unless
+ * NIX_AF_LF()_CFG[BE] is set.
+ */
+union nix_cqe_hdr_s {
+	u64 u;
+	struct nix_cqe_hdr_s_s {
+		u64 tag                              : 32; /* [31:0] */
+		u64 q                                : 20; /* [51:32] */
+		u64 reserved_52_57                   : 6;
+		u64 node                             : 2;  /* [59:58] */
+		u64 cqe_type                         : 4;  /* [63:60] */
+	} s;
+	/* struct nix_cqe_hdr_s_s cn; */
+};
+
+/**
+ * Structure nix_inst_hdr_s
+ *
+ * NIX Instruction Header Structure This structure defines the
+ * instruction header that precedes the packet header supplied to NPC for
+ * packets to be transmitted by NIX.
+ */
+union nix_inst_hdr_s {
+	u64 u;
+	struct nix_inst_hdr_s_s {
+		u64 pf_func                          : 16; /* [15:0] */
+		u64 sq                               : 20; /* [35:16] */
+		u64 reserved_36_63                   : 28;
+	} s;
+	/* struct nix_inst_hdr_s_s cn; */
+};
+
+/**
+ * Structure nix_iova_s
+ *
+ * NIX I/O Virtual Address Structure
+ */
+union nix_iova_s {
+	u64 u;
+	struct nix_iova_s_s {
+		u64 addr                             : 64; /* [63:0] */
+	} s;
+	/* struct nix_iova_s_s cn; */
+};
+
+/**
+ * Structure nix_ipsec_dyno_s
+ *
+ * INTERNAL: NIX IPSEC Dynamic Ordering Counter Structure  Internal: Not
+ * used; no IPSEC fast-path.
+ */
+union nix_ipsec_dyno_s {
+	u32 u;
+	struct nix_ipsec_dyno_s_s {
+		u32 count                            : 32; /* [31:0] */
+	} s;
+	/* struct nix_ipsec_dyno_s_s cn; */
+};
+
+/**
+ * Structure nix_mem_result_s
+ *
+ * NIX Memory Value Structure When
+ * NIX_SEND_MEM_S[ALG]=NIX_SENDMEMALG_E::SETRSLT, the value written to
+ * memory is formed with this structure.
+ */
+union nix_mem_result_s {
+	u64 u;
+	struct nix_mem_result_s_s {
+		u64 v                                : 1;  /* [0] */
+		u64 color                            : 2;  /* [2:1] */
+		u64 reserved_3_63                    : 61;
+	} s;
+	/* struct nix_mem_result_s_s cn; */
+};
+
+/**
+ * Structure nix_op_q_wdata_s
+ *
+ * NIX Statistics Operation Write Data Structure This structure specifies
+ * the write data format of an atomic 64-bit load-and-add of some
+ * NIX_LF_RQ_OP_*, NIX_LF_SQ_OP* and NIX_LF_CQ_OP* registers.
+ */
+union nix_op_q_wdata_s {
+	u64 u;
+	struct nix_op_q_wdata_s_s {
+		u64 reserved_0_31                    : 32;
+		u64 q                                : 20; /* [51:32] */
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct nix_op_q_wdata_s_s cn; */
+};
+
+/**
+ * Structure nix_qint_hw_s
+ *
+ * NIX Queue Interrupt Context Hardware Structure This structure contains
+ * context state maintained by hardware for each queue interrupt (QINT)
+ * in NDC/LLC/DRAM. Software accesses this structure with the
+ * NIX_LF_QINT()* registers. Hardware maintains a table of
+ * NIX_AF_CONST2[QINTS] contiguous NIX_QINT_HW_S structures per LF
+ * starting at IOVA NIX_AF_LF()_QINTS_BASE. Always stored in byte
+ * invariant little-endian format (LE8).
+ */
+union nix_qint_hw_s {
+	u32 u;
+	struct nix_qint_hw_s_s {
+		u32 count                            : 22; /* [21:0] */
+		u32 reserved_22_30                   : 9;
+		u32 ena                              : 1;  /* [31] */
+	} s;
+	/* struct nix_qint_hw_s_s cn; */
+};
+
+/**
+ * Structure nix_rq_ctx_hw_s
+ *
+ * NIX Receive Queue Context Structure This structure contains context
+ * state maintained by hardware for each RQ in NDC/LLC/DRAM. Software
+ * uses the equivalent NIX_RQ_CTX_S structure format to read and write an
+ * RQ context with the NIX admin queue. Always stored in byte invariant
+ * little-endian format (LE8).
+ */
+union nix_rq_ctx_hw_s {
+	u64 u[16];
+	struct nix_rq_ctx_hw_s_s {
+		/*
+		 * Bit positions are implied by declaration order (LSB-first
+		 * within each 64-bit word); the reserved_M_N field names give
+		 * absolute bit numbers and act as layout checkpoints.
+		 * NOTE(review): relies on implementation-defined bit-field
+		 * allocation order (LSB-first, little-endian targets) — the
+		 * cumulative widths here are consistent with every
+		 * reserved_M_N annotation.
+		 */
+		u64 ena                              : 1;
+		u64 sso_ena                          : 1;
+		u64 ipsech_ena                       : 1;
+		u64 ena_wqwd                         : 1;
+		u64 cq                               : 20;
+		u64 substream                        : 20;
+		u64 wqe_aura                         : 20;
+		u64 spb_aura                         : 20;
+		u64 lpb_aura                         : 20;
+		u64 sso_grp                          : 10;
+		u64 sso_tt                           : 2;
+		u64 pb_caching                       : 2;
+		u64 wqe_caching                      : 1;
+		u64 xqe_drop_ena                     : 1;
+		u64 spb_drop_ena                     : 1;
+		u64 lpb_drop_ena                     : 1;
+		u64 wqe_skip                         : 2;
+		u64 reserved_124_127                 : 4;
+		u64 reserved_128_139                 : 12;
+		u64 spb_sizem1                       : 6;
+		u64 reserved_146_150                 : 5;
+		u64 spb_ena                          : 1;
+		u64 lpb_sizem1                       : 12;
+		u64 first_skip                       : 7;
+		u64 reserved_171                     : 1;
+		u64 later_skip                       : 6;
+		u64 xqe_imm_size                     : 6;
+		u64 reserved_184_189                 : 6;
+		u64 xqe_imm_copy                     : 1;
+		u64 xqe_hdr_split                    : 1;
+		u64 xqe_drop                         : 8;
+		u64 xqe_pass                         : 8;
+		u64 wqe_pool_drop                    : 8;
+		u64 wqe_pool_pass                    : 8;
+		u64 spb_aura_drop                    : 8;
+		u64 spb_aura_pass                    : 8;
+		u64 spb_pool_drop                    : 8;
+		u64 spb_pool_pass                    : 8;
+		u64 lpb_aura_drop                    : 8;
+		u64 lpb_aura_pass                    : 8;
+		u64 lpb_pool_drop                    : 8;
+		u64 lpb_pool_pass                    : 8;
+		u64 reserved_288_319                 : 32;
+		u64 ltag                             : 24;
+		u64 good_utag                        : 8;
+		u64 bad_utag                         : 8;
+		u64 flow_tagw                        : 6;
+		u64 reserved_366_383                 : 18;
+		u64 octs                             : 48;
+		u64 reserved_432_447                 : 16;
+		u64 pkts                             : 48;
+		u64 reserved_496_511                 : 16;
+		u64 drop_octs                        : 48;
+		u64 reserved_560_575                 : 16;
+		u64 drop_pkts                        : 48;
+		u64 reserved_624_639                 : 16;
+		u64 re_pkts                          : 48;
+		u64 reserved_688_702                 : 15;
+		u64 ena_copy                         : 1;
+		u64 reserved_704_739                 : 36;
+		u64 rq_int                           : 8;
+		u64 rq_int_ena                       : 8;
+		u64 qint_idx                         : 7;
+		u64 reserved_763_767                 : 5;
+		u64 reserved_768_831                 : 64;
+		u64 reserved_832_895                 : 64;
+		u64 reserved_896_959                 : 64;
+		u64 reserved_960_1023                : 64;
+	} s;
+	/* struct nix_rq_ctx_hw_s_s cn; */
+};
+
+/**
+ * Structure nix_rq_ctx_s
+ *
+ * NIX Receive Queue Context Structure This structure specifies the
+ * format used by software to read and write an RQ context with the NIX
+ * admin queue.
+ */
+union nix_rq_ctx_s {
+	u64 u[16];
+	struct nix_rq_ctx_s_s {
+		/*
+		 * Software view of the RQ context. Differs from
+		 * NIX_RQ_CTX_HW_S in field placement (e.g. wqe_skip sits at
+		 * bits [147:146] here vs [123:122] in the HW layout, and
+		 * rq_int/rq_int_ena/qint_idx live at [314:292] here);
+		 * reserved_M_N names give absolute bit positions and are
+		 * consistent with the cumulative field widths.
+		 */
+		u64 ena                              : 1;
+		u64 sso_ena                          : 1;
+		u64 ipsech_ena                       : 1;
+		u64 ena_wqwd                         : 1;
+		u64 cq                               : 20;
+		u64 substream                        : 20;
+		u64 wqe_aura                         : 20;
+		u64 spb_aura                         : 20;
+		u64 lpb_aura                         : 20;
+		u64 sso_grp                          : 10;
+		u64 sso_tt                           : 2;
+		u64 pb_caching                       : 2;
+		u64 wqe_caching                      : 1;
+		u64 xqe_drop_ena                     : 1;
+		u64 spb_drop_ena                     : 1;
+		u64 lpb_drop_ena                     : 1;
+		u64 reserved_122_127                 : 6;
+		u64 reserved_128_139                 : 12;
+		u64 spb_sizem1                       : 6;
+		u64 wqe_skip                         : 2;
+		u64 reserved_148_150                 : 3;
+		u64 spb_ena                          : 1;
+		u64 lpb_sizem1                       : 12;
+		u64 first_skip                       : 7;
+		u64 reserved_171                     : 1;
+		u64 later_skip                       : 6;
+		u64 xqe_imm_size                     : 6;
+		u64 reserved_184_189                 : 6;
+		u64 xqe_imm_copy                     : 1;
+		u64 xqe_hdr_split                    : 1;
+		u64 xqe_drop                         : 8;
+		u64 xqe_pass                         : 8;
+		u64 wqe_pool_drop                    : 8;
+		u64 wqe_pool_pass                    : 8;
+		u64 spb_aura_drop                    : 8;
+		u64 spb_aura_pass                    : 8;
+		u64 spb_pool_drop                    : 8;
+		u64 spb_pool_pass                    : 8;
+		u64 lpb_aura_drop                    : 8;
+		u64 lpb_aura_pass                    : 8;
+		u64 lpb_pool_drop                    : 8;
+		u64 lpb_pool_pass                    : 8;
+		u64 reserved_288_291                 : 4;
+		u64 rq_int                           : 8;
+		u64 rq_int_ena                       : 8;
+		u64 qint_idx                         : 7;
+		u64 reserved_315_319                 : 5;
+		u64 ltag                             : 24;
+		u64 good_utag                        : 8;
+		u64 bad_utag                         : 8;
+		u64 flow_tagw                        : 6;
+		u64 reserved_366_383                 : 18;
+		u64 octs                             : 48;
+		u64 reserved_432_447                 : 16;
+		u64 pkts                             : 48;
+		u64 reserved_496_511                 : 16;
+		u64 drop_octs                        : 48;
+		u64 reserved_560_575                 : 16;
+		u64 drop_pkts                        : 48;
+		u64 reserved_624_639                 : 16;
+		u64 re_pkts                          : 48;
+		u64 reserved_688_703                 : 16;
+		u64 reserved_704_767                 : 64;
+		u64 reserved_768_831                 : 64;
+		u64 reserved_832_895                 : 64;
+		u64 reserved_896_959                 : 64;
+		u64 reserved_960_1023                : 64;
+	} s;
+	/* struct nix_rq_ctx_s_s cn; */
+};
+
+/**
+ * Structure nix_rsse_s
+ *
+ * NIX Receive Side Scaling Entry Structure This structure specifies the
+ * format of each hardware entry in the NIX RSS tables in NDC/LLC/DRAM.
+ * See NIX_AF_LF()_RSS_BASE and NIX_AF_LF()_RSS_GRP(). Software uses the
+ * same structure format to read and write an RSS table entry with the
+ * NIX admin queue.
+ */
+union nix_rsse_s {
+	u32 u;
+	struct nix_rsse_s_s {
+		u32 rq                               : 20; /* [19:0] */
+		u32 reserved_20_31                   : 12;
+	} s;
+	/* struct nix_rsse_s_s cn; */
+};
+
+/**
+ * Structure nix_rx_action_s
+ *
+ * NIX Receive Action Structure This structure defines the format of
+ * NPC_RESULT_S[ACTION] for a receive packet.
+ */
+union nix_rx_action_s {
+	u64 u;
+	struct nix_rx_action_s_s {
+		u64 op                               : 4;  /* [3:0] */
+		u64 pf_func                          : 16; /* [19:4] */
+		u64 index                            : 20; /* [39:20] */
+		u64 match_id                         : 16; /* [55:40] */
+		u64 flow_key_alg                     : 5;  /* [60:56] */
+		u64 reserved_61_63                   : 3;
+	} s;
+	/* struct nix_rx_action_s_s cn; */
+};
+
+/**
+ * Structure nix_rx_imm_s
+ *
+ * NIX Receive Immediate Subdescriptor Structure The receive immediate
+ * subdescriptor indicates that bytes immediately following this
+ * NIX_RX_IMM_S (after skipping [APAD] bytes) were saved from the
+ * received packet. The next subdescriptor following this NIX_RX_IMM_S
+ * (when one exists) will follow the immediate bytes, after rounding up
+ * the address to a multiple of 16 bytes.
+ */
+union nix_rx_imm_s {
+	u64 u;
+	struct nix_rx_imm_s_s {
+		u64 size                             : 16; /* [15:0] */
+		u64 apad                             : 3;  /* [18:16] */
+		u64 reserved_19_59                   : 41;
+		u64 subdc                            : 4;  /* [63:60] */
+	} s;
+	/* struct nix_rx_imm_s_s cn; */
+};
+
+/**
+ * Structure nix_rx_mce_s
+ *
+ * NIX Receive Multicast/Mirror Entry Structure This structure specifies
+ * the format of entries in the NIX receive multicast/mirror table
+ * maintained by hardware in NDC/LLC/DRAM. See NIX_AF_RX_MCAST_BASE and
+ * NIX_AF_RX_MCAST_CFG. Note the table may contain both multicast and
+ * mirror replication lists. Software uses the same structure format to
+ * read and write a multicast/mirror table entry with the NIX admin
+ * queue.
+ */
+union nix_rx_mce_s {
+	u64 u;
+	struct nix_rx_mce_s_s {
+		u64 op                               : 2;  /* [1:0] */
+		u64 reserved_2                       : 1;
+		u64 eol                              : 1;  /* [3] */
+		u64 index                            : 20; /* [23:4] */
+		u64 reserved_24_31                   : 8;
+		u64 pf_func                          : 16; /* [47:32] */
+		u64 next                             : 16; /* [63:48] */
+	} s;
+	/* struct nix_rx_mce_s_s cn; */
+};
+
+/**
+ * Structure nix_rx_parse_s
+ *
+ * NIX Receive Parse Structure This structure contains the receive packet
+ * parse result. It immediately follows NIX_CQE_HDR_S in a receive CQE,
+ * or NIX_WQE_HDR_S in a receive WQE. Stored in memory as little-endian
+ * unless NIX_AF_LF()_CFG[BE] is set.  Header layers are always 2-byte
+ * aligned, so all header pointers in this structure ([EOH_PTR], [LAPTR]
+ * through [LHPTR], [VTAG*_PTR]) are even.
+ */
+union nix_rx_parse_s {
+	u64 u[7];
+	struct nix_rx_parse_s_s {
+		/*
+		 * Bit positions are implied by declaration order (LSB-first
+		 * within each 64-bit word); reserved_M_N field names give
+		 * absolute bit numbers and are consistent with the cumulative
+		 * field widths (e.g. pkind ends at bit 93, flow_key_alg at
+		 * bit 340).
+		 */
+		u64 chan                             : 12;
+		u64 desc_sizem1                      : 5;
+		u64 imm_copy                         : 1;
+		u64 express                          : 1;
+		u64 wqwd                             : 1;
+		u64 errlev                           : 4;
+		u64 errcode                          : 8;
+		u64 latype                           : 4;
+		u64 lbtype                           : 4;
+		u64 lctype                           : 4;
+		u64 ldtype                           : 4;
+		u64 letype                           : 4;
+		u64 lftype                           : 4;
+		u64 lgtype                           : 4;
+		u64 lhtype                           : 4;
+		u64 pkt_lenm1                        : 16;
+		u64 l2m                              : 1;
+		u64 l2b                              : 1;
+		u64 l3m                              : 1;
+		u64 l3b                              : 1;
+		u64 vtag0_valid                      : 1;
+		u64 vtag0_gone                       : 1;
+		u64 vtag1_valid                      : 1;
+		u64 vtag1_gone                       : 1;
+		u64 pkind                            : 6;
+		u64 reserved_94_95                   : 2;
+		u64 vtag0_tci                        : 16;
+		u64 vtag1_tci                        : 16;
+		u64 laflags                          : 8;
+		u64 lbflags                          : 8;
+		u64 lcflags                          : 8;
+		u64 ldflags                          : 8;
+		u64 leflags                          : 8;
+		u64 lfflags                          : 8;
+		u64 lgflags                          : 8;
+		u64 lhflags                          : 8;
+		u64 eoh_ptr                          : 8;
+		u64 wqe_aura                         : 20;
+		u64 pb_aura                          : 20;
+		u64 match_id                         : 16;
+		u64 laptr                            : 8;
+		u64 lbptr                            : 8;
+		u64 lcptr                            : 8;
+		u64 ldptr                            : 8;
+		u64 leptr                            : 8;
+		u64 lfptr                            : 8;
+		u64 lgptr                            : 8;
+		u64 lhptr                            : 8;
+		u64 vtag0_ptr                        : 8;
+		u64 vtag1_ptr                        : 8;
+		u64 flow_key_alg                     : 5;
+		u64 reserved_341_383                 : 43;
+		u64 reserved_384_447                 : 64;
+	} s;
+	/* struct nix_rx_parse_s_s cn; */
+};
+
+/**
+ * Structure nix_rx_sg_s
+ *
+ * NIX Receive Scatter/Gather Subdescriptor Structure The receive
+ * scatter/gather subdescriptor specifies one to three segments of packet
+ * data bytes. There may be multiple NIX_RX_SG_Ss in each NIX receive
+ * descriptor.  NIX_RX_SG_S is immediately followed by one NIX_IOVA_S
+ * word when [SEGS] = 1, three NIX_IOVA_S words when [SEGS] \>= 2. Each
+ * NIX_IOVA_S word specifies the LF IOVA of first packet data byte in the
+ * corresponding segment; first NIX_IOVA_S word for segment 1, second
+ * word for segment 2, third word for segment 3. Note the third word is
+ * present when [SEGS] \>= 2 but only valid when [SEGS] = 3.
+ */
+union nix_rx_sg_s {
+	u64 u;
+	struct nix_rx_sg_s_s {
+		u64 seg1_size                        : 16; /* [15:0] */
+		u64 seg2_size                        : 16; /* [31:16] */
+		u64 seg3_size                        : 16; /* [47:32] */
+		u64 segs                             : 2;  /* [49:48] */
+		u64 reserved_50_59                   : 10;
+		u64 subdc                            : 4;  /* [63:60] */
+	} s;
+	/* struct nix_rx_sg_s_s cn; */
+};
+
+/**
+ * Structure nix_rx_vtag_action_s
+ *
+ * NIX Receive Vtag Action Structure This structure defines the format of
+ * NPC_RESULT_S[VTAG_ACTION] for a receive packet. It specifies up to two
+ * Vtags (e.g. C-VLAN/S-VLAN tags, 802.1BR E-TAG) for optional capture
+ * and/or stripping.
+ */
+union nix_rx_vtag_action_s {
+	u64 u;
+	struct nix_rx_vtag_action_s_s {
+		/* Symmetric halves: Vtag 0 action in bits [31:0],
+		 * Vtag 1 action in bits [63:32].
+		 */
+		u64 vtag0_relptr                     : 8;
+		u64 vtag0_lid                        : 3;
+		u64 reserved_11                      : 1;
+		u64 vtag0_type                       : 3;
+		u64 vtag0_valid                      : 1;
+		u64 reserved_16_31                   : 16;
+		u64 vtag1_relptr                     : 8;
+		u64 vtag1_lid                        : 3;
+		u64 reserved_43                      : 1;
+		u64 vtag1_type                       : 3;
+		u64 vtag1_valid                      : 1;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nix_rx_vtag_action_s_s cn; */
+};
+
+/**
+ * Structure nix_send_comp_s
+ *
+ * NIX Send Completion Structure This structure immediately follows
+ * NIX_CQE_HDR_S in a send completion CQE.
+ */
+union nix_send_comp_s {
+	u64 u;
+	struct nix_send_comp_s_s {
+		/* STATUS bits [7:0], SQE_ID bits [23:8]. */
+		u64 status                           : 8;
+		u64 sqe_id                           : 16;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nix_send_comp_s_s cn; */
+};
+
+/**
+ * Structure nix_send_crc_s
+ *
+ * NIX Send CRC Subdescriptor Structure The send CRC subdescriptor
+ * specifies a CRC calculation be performed during transmission. Ignored
+ * when present in a send descriptor with NIX_SEND_EXT_S[LSO] set. There
+ * may be up to two NIX_SEND_CRC_Ss per send descriptor.  NIX_SEND_CRC_S
+ * constraints: * When present, NIX_SEND_CRC_S subdescriptors must
+ * precede all NIX_SEND_SG_S, NIX_SEND_IMM_S and NIX_SEND_MEM_S
+ * subdescriptors in the send descriptor. * NIX_SEND_CRC_S subdescriptors
+ * must follow the same order as their checksum and insert regions in the
+ * packet, i.e. the checksum and insert regions of a NIX_SEND_CRC_S must
+ * come after the checksum and insert regions of a preceding
+ * NIX_SEND_CRC_S. There must be no overlap between any NIX_SEND_CRC_S
+ * checksum and insert regions. * If either
+ * NIX_SEND_HDR_S[OL4TYPE,IL4TYPE] = NIX_SENDL4TYPE_E::SCTP_CKSUM, the
+ * SCTP checksum region and NIX_SEND_CRC_S insert region must not
+ * overlap, and likewise the NIX_SEND_CRC_S checksum region and SCTP
+ * insert region must not overlap. * If either
+ * NIX_SEND_HDR_S[OL3TYPE,IL3TYPE] = NIX_SENDL3TYPE_E::IP4_CKSUM, the
+ * IPv4 header checksum region and NIX_SEND_CRC_S insert region must not
+ * overlap. * Any checksums inserted by
+ * NIX_SEND_HDR_S[OL3TYPE,OL4TYPE,IL3TYPE,IL4TYPE] must be outside of the
+ * NIX_SEND_CRC_S checksum and insert regions.  Hardware adjusts [START],
+ * [SIZE] and [INSERT] as needed to account for any VLAN inserted by
+ * NIX_SEND_EXT_S[VLAN*] or Vtag inserted by NIX_TX_VTAG_ACTION_S.
+ */
+union nix_send_crc_s {
+	u64 u[2];
+	struct nix_send_crc_s_s {
+		/* Word 0 */
+		u64 size                             : 16;
+		u64 start                            : 16;
+		u64 insert                           : 16;
+		u64 reserved_48_57                   : 10;
+		u64 alg                              : 2;
+		u64 subdc                            : 4;
+		/* Word 1: CRC initial value in bits [95:64] */
+		u64 iv                               : 32;
+		u64 reserved_96_127                  : 32;
+	} s;
+	/* struct nix_send_crc_s_s cn; */
+};
+
+/**
+ * Structure nix_send_ext_s
+ *
+ * NIX Send Extended Header Subdescriptor Structure The send extended
+ * header specifies LSO, VLAN insertion, timestamp and/or scheduling
+ * services on the packet. If present, it must immediately follow
+ * NIX_SEND_HDR_S. All fields are assumed to be zero when this
+ * subdescriptor is not present.
+ */
+union nix_send_ext_s {
+	u64 u[2];
+	struct nix_send_ext_s_s {
+		/* Word 0: LSO / shaping / marking controls, SUBDC [63:60] */
+		u64 lso_mps                          : 14;
+		u64 lso                              : 1;
+		u64 tstmp                            : 1;
+		u64 lso_sb                           : 8;
+		u64 lso_format                       : 5;
+		u64 reserved_29_31                   : 3;
+		u64 shp_chg                          : 9;
+		u64 shp_dis                          : 1;
+		u64 shp_ra                           : 2;
+		u64 markptr                          : 8;
+		u64 markform                         : 7;
+		u64 mark_en                          : 1;
+		u64 subdc                            : 4;
+		/* Word 1: VLAN insertion controls */
+		u64 vlan0_ins_ptr                    : 8;
+		u64 vlan0_ins_tci                    : 16;
+		u64 vlan1_ins_ptr                    : 8;
+		u64 vlan1_ins_tci                    : 16;
+		u64 vlan0_ins_ena                    : 1;
+		u64 vlan1_ins_ena                    : 1;
+		u64 reserved_114_127                 : 14;
+	} s;
+	/* struct nix_send_ext_s_s cn; */
+};
+
+/**
+ * Structure nix_send_hdr_s
+ *
+ * NIX Send Header Subdescriptor Structure The send header is the first
+ * subdescriptor of every send descriptor.
+ */
+union nix_send_hdr_s {
+	u64 u[2];
+	struct nix_send_hdr_s_s {
+		/* Word 0: descriptor size/aura/SQ selection */
+		u64 total                            : 18;
+		u64 reserved_18                      : 1;
+		u64 df                               : 1;
+		u64 aura                             : 20;
+		u64 sizem1                           : 3;
+		u64 pnc                              : 1;
+		u64 sq                               : 20;
+		/* Word 1: checksum offload pointers/types and SQE id */
+		u64 ol3ptr                           : 8;
+		u64 ol4ptr                           : 8;
+		u64 il3ptr                           : 8;
+		u64 il4ptr                           : 8;
+		u64 ol3type                          : 4;
+		u64 ol4type                          : 4;
+		u64 il3type                          : 4;
+		u64 il4type                          : 4;
+		u64 sqe_id                           : 16;
+	} s;
+	/* struct nix_send_hdr_s_s cn; */
+};
+
+/**
+ * Structure nix_send_imm_s
+ *
+ * NIX Send Immediate Subdescriptor Structure The send immediate
+ * subdescriptor requests that bytes immediately following this
+ * NIX_SEND_IMM_S (after skipping [APAD] bytes) are to be included in the
+ * packet data. The next subdescriptor following this NIX_SEND_IMM_S
+ * (when one exists) will follow the immediate bytes, after rounding up
+ * the address to a multiple of 16 bytes.  There may be multiple
+ * NIX_SEND_IMM_S in one NIX send descriptor. A NIX_SEND_IMM_S is ignored
+ * in a NIX send descriptor if the sum of all prior
+ * NIX_SEND_SG_S[SEG*_SIZE]s and NIX_SEND_IMM_S[SIZE]s meets or exceeds
+ * NIX_SEND_HDR_S[TOTAL].  When NIX_SEND_EXT_S[LSO] is set in the
+ * descriptor, all NIX_SEND_IMM_S bytes must be included in the first
+ * NIX_SEND_EXT_S[LSO_SB] bytes of the source packet.
+ */
+union nix_send_imm_s {
+	u64 u;
+	struct nix_send_imm_s_s {
+		/* SIZE [15:0], APAD [18:16], SUBDC [63:60]. */
+		u64 size                             : 16;
+		u64 apad                             : 3;
+		u64 reserved_19_59                   : 41;
+		u64 subdc                            : 4;
+	} s;
+	/* struct nix_send_imm_s_s cn; */
+};
+
+/**
+ * Structure nix_send_jump_s
+ *
+ * NIX Send Jump Subdescriptor Structure The send jump subdescriptor
+ * selects a new address for fetching the remaining subdescriptors of a
+ * send descriptor. This allows software to create a send descriptor
+ * longer than SQE size selected by NIX_SQ_CTX_S[MAX_SQE_SIZE].  There
+ * can be only one NIX_SEND_JUMP_S subdescriptor in a send descriptor. If
+ * present, it must immediately follow NIX_SEND_HDR_S if NIX_SEND_EXT_S
+ * is not present, else it must immediately follow NIX_SEND_EXT_S. In
+ * either case, it must terminate the SQE enqueued by software.
+ */
+union nix_send_jump_s {
+	u64 u[2];
+	struct nix_send_jump_s_s {
+		/* Word 0 */
+		u64 sizem1                           : 7;
+		u64 reserved_7_13                    : 7;
+		u64 ld_type                          : 2;
+		u64 aura                             : 20;
+		u64 reserved_36_58                   : 23;
+		u64 f                                : 1;
+		u64 subdc                            : 4;
+		/* Word 1: jump target address */
+		u64 addr                             : 64;
+	} s;
+	/* struct nix_send_jump_s_s cn; */
+};
+
+/**
+ * Structure nix_send_mem_s
+ *
+ * NIX Send Memory Subdescriptor Structure The send memory subdescriptor
+ * atomically sets, increments or decrements a memory location.
+ * NIX_SEND_MEM_S subdescriptors must follow all NIX_SEND_SG_S and
+ * NIX_SEND_IMM_S subdescriptors in the NIX send descriptor. NIX will not
+ * initiate the memory update for this subdescriptor until after it has
+ * completed all LLC/DRAM fetches that service all prior NIX_SEND_SG_S
+ * subdescriptors. The memory update is executed once, even if the packet
+ * is replicated due to NIX_TX_ACTION_S[OP] = NIX_TX_ACTIONOP_E::MCAST.
+ * Performance is best if a memory decrement by one is used rather than
+ * any other memory set/increment/decrement. (Less internal bus bandwidth
+ * is used with memory decrements by one.)  When NIX_SEND_EXT_S[LSO] is
+ * set in the descriptor, NIX executes the memory update only while
+ * processing the last LSO segment, after processing prior segments.
+ */
+union nix_send_mem_s {
+	u64 u[2];
+	struct nix_send_mem_s_s {
+		/* Word 0 */
+		u64 offset                           : 16;
+		u64 reserved_16_52                   : 37;
+		u64 wmem                             : 1;
+		u64 dsz                              : 2;
+		u64 alg                              : 4;
+		u64 subdc                            : 4;
+		/* Word 1: memory location to update */
+		u64 addr                             : 64;
+	} s;
+	/* struct nix_send_mem_s_s cn; */
+};
+
+/**
+ * Structure nix_send_sg_s
+ *
+ * NIX Send Scatter/Gather Subdescriptor Structure The send
+ * scatter/gather subdescriptor requests one to three segments of packet
+ * data bytes to be transmitted. There may be multiple NIX_SEND_SG_Ss in
+ * each NIX send descriptor.  NIX_SEND_SG_S is immediately followed by
+ * one NIX_IOVA_S word when [SEGS] = 1, three NIX_IOVA_S words when
+ * [SEGS] >= 2. Each NIX_IOVA_S word specifies the LF IOVA of first
+ * packet data byte in the corresponding segment; first NIX_IOVA_S word
+ * for segment 1, second word for segment 2, third word for segment 3.
+ * Note the third word is present when [SEGS] >= 2 but only valid when
+ * [SEGS] = 3.  If the sum of all prior NIX_SEND_SG_S[SEG*_SIZE]s and
+ * NIX_SEND_IMM_S[SIZE]s meets or exceeds NIX_SEND_HDR_S[TOTAL], this
+ * subdescriptor will not contribute any packet data but may free buffers
+ * to NPA (see [I1]).
+ */
+union nix_send_sg_s {
+	u64 u;
+	struct nix_send_sg_s_s {
+		/* Same low-48-bit layout as NIX_RX_SG_S; I1..I3 select
+		 * per-segment buffer free to NPA, SUBDC is bits [63:60].
+		 */
+		u64 seg1_size                        : 16;
+		u64 seg2_size                        : 16;
+		u64 seg3_size                        : 16;
+		u64 segs                             : 2;
+		u64 reserved_50_54                   : 5;
+		u64 i1                               : 1;
+		u64 i2                               : 1;
+		u64 i3                               : 1;
+		u64 ld_type                          : 2;
+		u64 subdc                            : 4;
+	} s;
+	/* struct nix_send_sg_s_s cn; */
+};
+
+/**
+ * Structure nix_send_work_s
+ *
+ * NIX Send Work Subdescriptor Structure This subdescriptor adds work to
+ * the SSO. At most one NIX_SEND_WORK_S subdescriptor can exist in the
+ * NIX send descriptor. If a NIX_SEND_WORK_S exists in the descriptor, it
+ * must be the last subdescriptor. NIX will not initiate the work add for
+ * this subdescriptor until after (1) it has completed all LLC/DRAM
+ * fetches that service all prior NIX_SEND_SG_S subdescriptors, (2) it
+ * has fetched all subdescriptors in the descriptor, and (3) all
+ * NIX_SEND_MEM_S[WMEM]=1 LLC/DRAM updates have completed.  Provided the
+ * path of descriptors from the SQ through NIX to an output FIFO is
+ * unmodified between the descriptors (as should normally be the case,
+ * but it is possible for software to change the path), NIX also (1) will
+ * submit the SSO add works from all descriptors in the SQ in order, and
+ * (2) will not submit an SSO work add until after all prior descriptors
+ * in the SQ have completed their NIX_SEND_SG_S processing, and (3) will
+ * not submit an SSO work add until after it has fetched all
+ * subdescriptors from prior descriptors in the SQ.  When
+ * NIX_SEND_EXT_S[LSO] is set in the descriptor, NIX executes the
+ * NIX_SEND_WORK_S work add only while processing the last LSO segment,
+ * after processing prior segments.  Hardware ignores NIX_SEND_WORK_S
+ * when NIX_SQ_CTX_S[SSO_ENA] is clear.
+ */
+union nix_send_work_s {
+	u64 u[2];
+	struct nix_send_work_s_s {
+		/* Word 0: SSO tag/type/group */
+		u64 tag                              : 32;
+		u64 tt                               : 2;
+		u64 grp                              : 10;
+		u64 reserved_44_59                   : 16;
+		u64 subdc                            : 4;
+		/* Word 1: work-queue entry address */
+		u64 addr                             : 64;
+	} s;
+	/* struct nix_send_work_s_s cn; */
+};
+
+/**
+ * Structure nix_sq_ctx_hw_s
+ *
+ * NIX SQ Context Hardware Structure This structure contains context
+ * state maintained by hardware for each SQ in NDC/LLC/DRAM. Software
+ * uses the equivalent NIX_SQ_CTX_S structure format to read and write an
+ * SQ context with the NIX admin queue. Always stored in byte invariant
+ * little-endian format (LE8).
+ */
+union nix_sq_ctx_hw_s {
+	u64 u[16];
+	struct nix_sq_ctx_hw_s_s {
+		/* Word 0 */
+		u64 ena                              : 1;
+		u64 substream                        : 20;
+		u64 max_sqe_size                     : 2;
+		u64 sqe_way_mask                     : 16;
+		u64 sqb_aura                         : 20;
+		u64 gbl_rsvd1                        : 5;
+		/* Word 1 */
+		u64 cq_id                            : 20;
+		u64 cq_ena                           : 1;
+		u64 qint_idx                         : 6;
+		u64 gbl_rsvd2                        : 1;
+		u64 sq_int                           : 8;
+		u64 sq_int_ena                       : 8;
+		u64 xoff                             : 1;
+		u64 sqe_stype                        : 2;
+		u64 gbl_rsvd                         : 17;
+		/* Word 2 */
+		u64 head_sqb                         : 64;
+		/* Word 3 */
+		u64 head_offset                      : 6;
+		u64 sqb_dequeue_count                : 16;
+		u64 default_chan                     : 12;
+		u64 sdp_mcast                        : 1;
+		u64 sso_ena                          : 1;
+		u64 dse_rsvd1                        : 28;
+		/* Word 4 */
+		u64 sqb_enqueue_count                : 16;
+		u64 tail_offset                      : 6;
+		u64 lmt_dis                          : 1;
+		u64 smq_rr_quantum                   : 24;
+		u64 dnq_rsvd1                        : 17;
+		/* Word 5 */
+		u64 tail_sqb                         : 64;
+		/* Word 6 */
+		u64 next_sqb                         : 64;
+		/* Word 7 */
+		u64 mnq_dis                          : 1;
+		u64 smq                              : 9;
+		u64 smq_pend                         : 1;
+		u64 smq_next_sq                      : 20;
+		u64 smq_next_sq_vld                  : 1;
+		u64 scm1_rsvd2                       : 32;
+		/* Word 8 */
+		u64 smenq_sqb                        : 64;
+		/* Word 9 */
+		u64 smenq_offset                     : 6;
+		u64 cq_limit                         : 8;
+		u64 smq_rr_count                     : 25;
+		u64 scm_lso_rem                      : 18;
+		u64 scm_dq_rsvd0                     : 7;
+		/* Word 10 */
+		u64 smq_lso_segnum                   : 8;
+		u64 vfi_lso_total                    : 18;
+		u64 vfi_lso_sizem1                   : 3;
+		u64 vfi_lso_sb                       : 8;
+		u64 vfi_lso_mps                      : 14;
+		u64 vfi_lso_vlan0_ins_ena            : 1;
+		u64 vfi_lso_vlan1_ins_ena            : 1;
+		u64 vfi_lso_vld                      : 1;
+		u64 smenq_next_sqb_vld               : 1;
+		u64 scm_dq_rsvd1                     : 9;
+		/* Word 11 */
+		u64 smenq_next_sqb                   : 64;
+		/* Word 12 */
+		u64 seb_rsvd1                        : 64;
+		/* Words 13-15: statistics, split across 64-bit words */
+		u64 drop_pkts                        : 48;
+		u64 drop_octs_lsw                    : 16;
+		u64 drop_octs_msw                    : 32;
+		u64 pkts_lsw                         : 32;
+		u64 pkts_msw                         : 16;
+		u64 octs                             : 48;
+	} s;
+	/* struct nix_sq_ctx_hw_s_s cn; */
+};
+
+/**
+ * Structure nix_sq_ctx_s
+ *
+ * NIX Send Queue Context Structure This structure specifies the format
+ * used by software with the NIX admin queue to read and write a send
+ * queue's NIX_SQ_CTX_HW_S structure maintained by hardware in
+ * NDC/LLC/DRAM.  The SQ statistics ([OCTS], [PKTS], [DROP_OCTS],
+ * [DROP_PKTS]) do not account for packet replication due to
+ * NIX_TX_ACTION_S[OP] = NIX_TX_ACTIONOP_E::MCAST.
+ */
+union nix_sq_ctx_s {
+	u64 u[16];
+	struct nix_sq_ctx_s_s {
+		/* Word 0 */
+		u64 ena                              : 1;
+		u64 qint_idx                         : 6;
+		u64 substream                        : 20;
+		u64 sdp_mcast                        : 1;
+		u64 cq                               : 20;
+		u64 sqe_way_mask                     : 16;
+		/* Word 1 */
+		u64 smq                              : 9;
+		u64 cq_ena                           : 1;
+		u64 xoff                             : 1;
+		u64 sso_ena                          : 1;
+		u64 smq_rr_quantum                   : 24;
+		u64 default_chan                     : 12;
+		u64 sqb_count                        : 16;
+		/* Word 2 */
+		u64 smq_rr_count                     : 25;
+		u64 sqb_aura                         : 20;
+		u64 sq_int                           : 8;
+		u64 sq_int_ena                       : 8;
+		u64 sqe_stype                        : 2;
+		u64 reserved_191                     : 1;
+		/* Word 3 */
+		u64 max_sqe_size                     : 2;
+		u64 cq_limit                         : 8;
+		u64 lmt_dis                          : 1;
+		u64 mnq_dis                          : 1;
+		u64 smq_next_sq                      : 20;
+		u64 smq_lso_segnum                   : 8;
+		u64 tail_offset                      : 6;
+		u64 smenq_offset                     : 6;
+		u64 head_offset                      : 6;
+		u64 smenq_next_sqb_vld               : 1;
+		u64 smq_pend                         : 1;
+		u64 smq_next_sq_vld                  : 1;
+		u64 reserved_253_255                 : 3;
+		/* Words 4-8: SQB pointers */
+		u64 next_sqb                         : 64;
+		u64 tail_sqb                         : 64;
+		u64 smenq_sqb                        : 64;
+		u64 smenq_next_sqb                   : 64;
+		u64 head_sqb                         : 64;
+		/* Word 9 */
+		u64 reserved_576_583                 : 8;
+		u64 vfi_lso_total                    : 18;
+		u64 vfi_lso_sizem1                   : 3;
+		u64 vfi_lso_sb                       : 8;
+		u64 vfi_lso_mps                      : 14;
+		u64 vfi_lso_vlan0_ins_ena            : 1;
+		u64 vfi_lso_vlan1_ins_ena            : 1;
+		u64 vfi_lso_vld                      : 1;
+		u64 reserved_630_639                 : 10;
+		/* Word 10 */
+		u64 scm_lso_rem                      : 18;
+		u64 reserved_658_703                 : 46;
+		/* Words 11-15: statistics */
+		u64 octs                             : 48;
+		u64 reserved_752_767                 : 16;
+		u64 pkts                             : 48;
+		u64 reserved_816_831                 : 16;
+		u64 reserved_832_895                 : 64;
+		u64 drop_octs                        : 48;
+		u64 reserved_944_959                 : 16;
+		u64 drop_pkts                        : 48;
+		u64 reserved_1008_1023               : 16;
+	} s;
+	/* struct nix_sq_ctx_s_s cn; */
+};
+
+/**
+ * Structure nix_tx_action_s
+ *
+ * NIX Transmit Action Structure This structure defines the format of
+ * NPC_RESULT_S[ACTION] for a transmit packet.
+ */
+union nix_tx_action_s {
+	u64 u;
+	struct nix_tx_action_s_s {
+		/* OP [3:0], INDEX [31:12], MATCH_ID [47:32]. */
+		u64 op                               : 4;
+		u64 reserved_4_11                    : 8;
+		u64 index                            : 20;
+		u64 match_id                         : 16;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nix_tx_action_s_s cn; */
+};
+
+/**
+ * Structure nix_tx_vtag_action_s
+ *
+ * NIX Transmit Vtag Action Structure This structure defines the format
+ * of NPC_RESULT_S[VTAG_ACTION] for a transmit packet. It specifies the
+ * optional insertion or replacement of up to two Vtags (e.g.
+ * C-VLAN/S-VLAN tags, 802.1BR E-TAG).  If two Vtags are specified: * The
+ * Vtag 0 byte offset from packet start (see [VTAG0_RELPTR]) must be less
+ * than or equal to the Vtag 1 byte offset. * Hardware executes the Vtag
+ * 0 action first, Vtag 1 action second. * If Vtag 0 is inserted,
+ * hardware adjusts the Vtag 1 byte offset accordingly. Thus, if the two
+ * offsets are equal in the structure, hardware inserts Vtag 1
+ * immediately after Vtag 0 in the packet.  A Vtag must not be inserted
+ * or replaced within an outer or inner L3/L4 header, but may be inserted
+ * or replaced within an outer L4 payload.
+ */
+union nix_tx_vtag_action_s {
+	u64 u;
+	struct nix_tx_vtag_action_s_s {
+		/* Symmetric halves: Vtag 0 action in bits [31:0],
+		 * Vtag 1 action in bits [63:32].
+		 */
+		u64 vtag0_relptr                     : 8;
+		u64 vtag0_lid                        : 3;
+		u64 reserved_11                      : 1;
+		u64 vtag0_op                         : 2;
+		u64 reserved_14_15                   : 2;
+		u64 vtag0_def                        : 10;
+		u64 reserved_26_31                   : 6;
+		u64 vtag1_relptr                     : 8;
+		u64 vtag1_lid                        : 3;
+		u64 reserved_43                      : 1;
+		u64 vtag1_op                         : 2;
+		u64 reserved_46_47                   : 2;
+		u64 vtag1_def                        : 10;
+		u64 reserved_58_63                   : 6;
+	} s;
+	/* struct nix_tx_vtag_action_s_s cn; */
+};
+
+/**
+ * Structure nix_wqe_hdr_s
+ *
+ * NIX Work Queue Entry Header Structure This 64-bit structure defines
+ * the first word of every receive WQE generated by NIX. It is
+ * immediately followed by NIX_RX_PARSE_S. Stored in memory as little-
+ * endian unless NIX_AF_LF()_CFG[BE] is set.
+ */
+union nix_wqe_hdr_s {
+	u64 u;
+	struct nix_wqe_hdr_s_s {
+		/* SSO scheduling fields in the low bits; WQE_TYPE [63:60]. */
+		u64 tag                              : 32;
+		u64 tt                               : 2;
+		u64 grp                              : 10;
+		u64 node                             : 2;
+		u64 q                                : 14;
+		u64 wqe_type                         : 4;
+	} s;
+	/* struct nix_wqe_hdr_s_s cn; */
+};
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_base
+ *
+ * NIX AF Admin Queue Base Address Register
+ */
+union nixx_af_aq_base {
+	u64 u;
+	struct nixx_af_aq_base_s {
+		/* BASE_ADDR is IOVA bits [52:7]; low 7 bits reserved,
+		 * i.e. the AQ base is 128-byte aligned.
+		 */
+		u64 reserved_0_6                     : 7;
+		u64 base_addr                        : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_aq_base_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_BASE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_BASE(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x410;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_cfg
+ *
+ * NIX AF Admin Queue Configuration Register
+ */
+union nixx_af_aq_cfg {
+	u64 u;
+	struct nixx_af_aq_cfg_s {
+		/* QSIZE bits [3:0] select the AQ ring size. */
+		u64 qsize                            : 4;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct nixx_af_aq_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_CFG(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x400;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_done
+ *
+ * NIX AF Admin Queue Done Count Register
+ */
+union nixx_af_aq_done {
+	u64 u;
+	struct nixx_af_aq_done_s {
+		/* DONE count in bits [19:0]. */
+		u64 done                             : 20;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct nixx_af_aq_done_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DONE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DONE(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x450;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_done_ack
+ *
+ * NIX AF Admin Queue Done Count Ack Register This register is written by
+ * software to acknowledge interrupts.
+ */
+union nixx_af_aq_done_ack {
+	u64 u;
+	struct nixx_af_aq_done_ack_s {
+		/* Written count acknowledges completed AQ entries. */
+		u64 done_ack                         : 20;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct nixx_af_aq_done_ack_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DONE_ACK(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DONE_ACK(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x460;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_done_ena_w1c
+ *
+ * NIX AF Admin Queue Done Interrupt Enable Clear Register
+ */
+union nixx_af_aq_done_ena_w1c {
+	u64 u;
+	struct nixx_af_aq_done_ena_w1c_s {
+		/* Write 1 to clear the DONE interrupt enable. */
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_aq_done_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DONE_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DONE_ENA_W1C(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x498;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_done_ena_w1s
+ *
+ * NIX AF Admin Queue Done Interrupt Enable Set Register
+ */
+union nixx_af_aq_done_ena_w1s {
+	u64 u;
+	struct nixx_af_aq_done_ena_w1s_s {
+		/* Write 1 to set the DONE interrupt enable. */
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_aq_done_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DONE_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DONE_ENA_W1S(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x490;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_done_int
+ *
+ * INTERNAL: NIX AF Admin Queue Done Interrupt Register
+ */
+union nixx_af_aq_done_int {
+	u64 u;
+	struct nixx_af_aq_done_int_s {
+		/* DONE interrupt status bit. */
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_aq_done_int_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DONE_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DONE_INT(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x480;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_done_int_w1s
+ *
+ * INTERNAL: NIX AF Admin Queue Done Interrupt Set Register
+ */
+union nixx_af_aq_done_int_w1s {
+	u64 u;
+	struct nixx_af_aq_done_int_w1s_s {
+		/* Write 1 to set the DONE interrupt. */
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_aq_done_int_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DONE_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DONE_INT_W1S(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x488;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_done_timer
+ *
+ * NIX AF Admin Queue Done Interrupt Timer Register
+ */
+union nixx_af_aq_done_timer {
+	u64 u;
+	struct nixx_af_aq_done_timer_s {
+		/* Interrupt coalescing timer count in bits [15:0]. */
+		u64 count                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct nixx_af_aq_done_timer_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DONE_TIMER(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DONE_TIMER(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x470;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_done_wait
+ *
+ * NIX AF Admin Queue Done Interrupt Coalescing Wait Register Specifies
+ * the queue interrupt coalescing settings.
+ */
+union nixx_af_aq_done_wait {
+	u64 u;
+	struct nixx_af_aq_done_wait_s {
+		/* NUM_WAIT [19:0] and TIME_WAIT [47:32] coalescing
+		 * thresholds.
+		 */
+		u64 num_wait                         : 20;
+		u64 reserved_20_31                   : 12;
+		u64 time_wait                        : 16;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_aq_done_wait_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DONE_WAIT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DONE_WAIT(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x440;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_door
+ *
+ * NIX AF Admin Queue Doorbell Register Software writes to this register
+ * to enqueue entries to AQ.
+ */
+union nixx_af_aq_door {
+	u64 u;
+	struct nixx_af_aq_door_s {
+		/* Number of AQ entries being enqueued by this write. */
+		u64 count                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct nixx_af_aq_door_s cn; */
+};
+
+static inline u64 NIXX_AF_AQ_DOOR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_DOOR(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x430;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_aq_status
+ *
+ * NIX AF Admin Queue Status Register
+ */
+union nixx_af_aq_status {
+	u64 u;
+	struct nixx_af_aq_status_s {
+		/* HEAD_PTR [23:4], TAIL_PTR [55:36], busy/error flags
+		 * in the top two bits.
+		 */
+		u64 reserved_0_3                     : 4;
+		u64 head_ptr                         : 20;
+		u64 reserved_24_35                   : 12;
+		u64 tail_ptr                         : 20;
+		u64 reserved_56_61                   : 6;
+		u64 aq_busy                          : 1;
+		u64 aq_err                           : 1;
+	} s;
+	/* Chip-specific variant: identical layout, but the [35:24]
+	 * reserved gap is split into two named reserved fields.
+	 */
+	struct nixx_af_aq_status_cn {
+		u64 reserved_0_3                     : 4;
+		u64 head_ptr                         : 20;
+		u64 reserved_24_31                   : 8;
+		u64 reserved_32_35                   : 4;
+		u64 tail_ptr                         : 20;
+		u64 reserved_56_61                   : 6;
+		u64 aq_busy                          : 1;
+		u64 aq_err                           : 1;
+	} cn;
+};
+
+static inline u64 NIXX_AF_AQ_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AQ_STATUS(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x420;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_avg_delay
+ *
+ * NIX AF Queue Average Delay Register
+ */
+union nixx_af_avg_delay {
+	u64 u;
+	struct nixx_af_avg_delay_s {
+		/* AVG_DLY [18:0], AVG_TIMER [39:24]. */
+		u64 avg_dly                          : 19;
+		u64 reserved_19_23                   : 5;
+		u64 avg_timer                        : 16;
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct nixx_af_avg_delay_s cn; */
+};
+
+static inline u64 NIXX_AF_AVG_DELAY(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_AVG_DELAY(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0xe0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_bar2_alias#
+ *
+ * NIX Admin Function BAR2 Alias Registers These registers alias to the
+ * NIX BAR2 registers for the PF and function selected by
+ * NIX_AF_BAR2_SEL[PF_FUNC].  Internal: Not implemented. Placeholder for
+ * bug33464.
+ */
+union nixx_af_bar2_aliasx {
+	u64 u;
+	struct nixx_af_bar2_aliasx_s {
+		/* Full 64-bit pass-through of the aliased BAR2 register. */
+		u64 data                             : 64;
+	} s;
+	/* struct nixx_af_bar2_aliasx_s cn; */
+};
+
+static inline u64 NIXX_AF_BAR2_ALIASX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_BAR2_ALIASX(u64 a)
+{
+	/* Indexed register: base 0x9100000, 8-byte stride per index */
+	return 0x9100000 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_bar2_sel
+ *
+ * NIX Admin Function BAR2 Select Register This register configures BAR2
+ * accesses from the NIX_AF_BAR2_ALIAS() registers in BAR0. Internal: Not
+ * implemented. Placeholder for bug33464.
+ */
+union nixx_af_bar2_sel {
+	u64 u;
+	struct nixx_af_bar2_sel_s {
+		/* ALIAS_PF_FUNC [15:0] selects the target PF/function;
+		 * ALIAS_ENA bit 16 enables the aliasing.
+		 */
+		u64 alias_pf_func                    : 16;
+		u64 alias_ena                        : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct nixx_af_bar2_sel_s cn; */
+};
+
+static inline u64 NIXX_AF_BAR2_SEL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_BAR2_SEL(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0x9000000;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_blk_rst
+ *
+ * NIX AF Block Reset Register
+ */
+union nixx_af_blk_rst {
+	u64 u;
+	struct nixx_af_blk_rst_s {
+		/* RST is bit 0; BUSY (reset in progress) is bit 63. */
+		u64 rst                              : 1;
+		u64 reserved_1_62                    : 62;
+		u64 busy                             : 1;
+	} s;
+	/* struct nixx_af_blk_rst_s cn; */
+};
+
+static inline u64 NIXX_AF_BLK_RST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_BLK_RST(void)
+{
+	/* Fixed offset within the NIX AF block (RVU PF BAR0) */
+	return 0xb0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_cfg
+ *
+ * NIX AF General Configuration Register
+ */
+union nixx_af_cfg {
+	u64 u;
+	struct nixx_af_cfg_s {
+		/* Bits [6:0]: per-unit forced clock enables; AF_BE bit 8,
+		 * CALIBRATE_X2P bit 9, FORCE_INTF_CLK_EN bit 10.
+		 */
+		u64 force_cond_clk_en                : 1;
+		u64 force_rx_gbl_clk_en              : 1;
+		u64 force_rx_strm_clk_en             : 1;
+		u64 force_cqm_clk_en                 : 1;
+		u64 force_seb_clk_en                 : 1;
+		u64 force_sqm_clk_en                 : 1;
+		u64 force_pse_clk_en                 : 1;
+		u64 reserved_7                       : 1;
+		u64 af_be                            : 1;
+		u64 calibrate_x2p                    : 1;
+		u64 force_intf_clk_en                : 1;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CFG(void)
+{
+	/* First register of the NIX AF block (RVU PF BAR0 offset 0) */
+	return 0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_cint_delay
+ *
+ * NIX AF Completion Interrupt Delay Register
+ */
+union nixx_af_cint_delay {
+	u64 u;
+	struct nixx_af_cint_delay_s {
+		u64 cint_dly                         : 10;
+		u64 reserved_10_15                   : 6;
+		u64 cint_timer                       : 16;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct nixx_af_cint_delay_s cn; */
+};
+
+static inline u64 NIXX_AF_CINT_DELAY(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CINT_DELAY(void)
+{
+	return 0xf0; /* byte offset of this CSR within RVU PF BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_cint_timer#
+ *
+ * NIX AF Completion Interrupt Timer Registers
+ */
+union nixx_af_cint_timerx {
+	u64 u;
+	struct nixx_af_cint_timerx_s {
+		u64 expir_time                       : 16;
+		u64 cint                             : 7;
+		u64 reserved_23                      : 1;
+		u64 lf                               : 8;
+		u64 active                           : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct nixx_af_cint_timerx_s cn; */
+};
+
+static inline u64 NIXX_AF_CINT_TIMERX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CINT_TIMERX(u64 a)
+{
+	return 0x1a40 + 0x40000 * a; /* instances spaced 0x40000 apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_const
+ *
+ * NIX AF Constants Register This register contains constants for
+ * software discovery.
+ */
+union nixx_af_const {
+	u64 u;
+	struct nixx_af_const_s {
+		u64 cgx_lmac_channels                : 8;
+		u64 cgx_lmacs                        : 4;
+		u64 num_cgx                          : 4;
+		u64 lbk_channels                     : 8;
+		u64 num_lbk                          : 4;
+		u64 num_sdp                          : 4;
+		u64 reserved_32_47                   : 16;
+		u64 links                            : 8;
+		u64 intfs                            : 4;
+		u64 reserved_60_63                   : 4;
+	} s;
+	/* struct nixx_af_const_s cn; */
+};
+
+static inline u64 NIXX_AF_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CONST(void)
+{
+	return 0x20;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_const1
+ *
+ * NIX AF Constants 1 Register This register contains constants for
+ * software discovery.
+ */
+union nixx_af_const1 {
+	u64 u;
+	struct nixx_af_const1_s {
+		u64 sdp_channels                     : 12;
+		u64 rx_bpids                         : 12;
+		u64 lf_tx_stats                      : 8;
+		u64 lf_rx_stats                      : 8;
+		u64 lso_format_fields                : 8;
+		u64 lso_formats                      : 8;
+		u64 reserved_56_63                   : 8;
+	} s;
+	/* struct nixx_af_const1_s cn; */
+};
+
+static inline u64 NIXX_AF_CONST1(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CONST1(void)
+{
+	return 0x28;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_const2
+ *
+ * NIX AF Constants 2 Register This register contains constants for
+ * software discovery.
+ */
+union nixx_af_const2 {
+	u64 u;
+	struct nixx_af_const2_s {
+		u64 lfs                              : 12;
+		u64 qints                            : 12;
+		u64 cints                            : 12;
+		u64 reserved_36_63                   : 28;
+	} s;
+	/* struct nixx_af_const2_s cn; */
+};
+
+static inline u64 NIXX_AF_CONST2(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CONST2(void)
+{
+	return 0x30;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_const3
+ *
+ * NIX AF Constants 3 Register This register contains constants for
+ * software discovery.
+ */
+union nixx_af_const3 {
+	u64 u;
+	struct nixx_af_const3_s {
+		u64 sq_ctx_log2bytes                 : 4;
+		u64 rq_ctx_log2bytes                 : 4;
+		u64 cq_ctx_log2bytes                 : 4;
+		u64 rsse_log2bytes                   : 4;
+		u64 mce_log2bytes                    : 4;
+		u64 qint_log2bytes                   : 4;
+		u64 cint_log2bytes                   : 4;
+		u64 dyno_log2bytes                   : 4;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct nixx_af_const3_s cn; */
+};
+
+static inline u64 NIXX_AF_CONST3(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CONST3(void)
+{
+	return 0x38;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_cq_const
+ *
+ * NIX AF CQ Constants Register This register contains constants for
+ * software discovery.
+ */
+union nixx_af_cq_const {
+	u64 u;
+	struct nixx_af_cq_const_s {
+		u64 queues_per_lf                    : 24;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_cq_const_s cn; */
+};
+
+static inline u64 NIXX_AF_CQ_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CQ_CONST(void)
+{
+	return 0x48;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_cqm_bp_test
+ *
+ * INTERNAL: NIX AF CQM Backpressure Test Registers
+ */
+union nixx_af_cqm_bp_test {
+	u64 u;
+	struct nixx_af_cqm_bp_test_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 24;
+		u64 enable                           : 12;
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct nixx_af_cqm_bp_test_s cn; */
+};
+
+static inline u64 NIXX_AF_CQM_BP_TEST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CQM_BP_TEST(void)
+{
+	return 0x48c0; /* byte offset of this CSR within RVU PF BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_cqm_eco
+ *
+ * INTERNAL: AF CQM ECO Register
+ */
+union nixx_af_cqm_eco {
+	u64 u;
+	struct nixx_af_cqm_eco_s {
+		u64 eco_rw                           : 64;
+	} s;
+	/* struct nixx_af_cqm_eco_s cn; */
+};
+
+static inline u64 NIXX_AF_CQM_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CQM_ECO(void)
+{
+	return 0x590;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_csi_eco
+ *
+ * INTERNAL: AF CSI ECO Register
+ */
+union nixx_af_csi_eco {
+	u64 u;
+	struct nixx_af_csi_eco_s {
+		u64 eco_rw                           : 64;
+	} s;
+	/* struct nixx_af_csi_eco_s cn; */
+};
+
+static inline u64 NIXX_AF_CSI_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_CSI_ECO(void)
+{
+	return 0x580;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_err_int
+ *
+ * NIX Admin Function Error Interrupt Register
+ */
+union nixx_af_err_int {
+	u64 u;
+	struct nixx_af_err_int_s {
+		u64 rx_mcast_data_fault              : 1;
+		u64 rx_mirror_data_fault             : 1;
+		u64 rx_mcast_wqe_fault               : 1;
+		u64 rx_mirror_wqe_fault              : 1;
+		u64 rx_mce_fault                     : 1;
+		u64 rx_mce_list_err                  : 1;
+		u64 rx_unmapped_pf_func              : 1;
+		u64 reserved_7_11                    : 5;
+		u64 aq_door_err                      : 1;
+		u64 aq_res_fault                     : 1;
+		u64 aq_inst_fault                    : 1;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct nixx_af_err_int_s cn; */
+};
+
+static inline u64 NIXX_AF_ERR_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_ERR_INT(void)
+{
+	return 0x180;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_err_int_ena_w1c
+ *
+ * NIX Admin Function Error Interrupt Enable Clear Register This register
+ * clears interrupt enable bits.
+ */
+union nixx_af_err_int_ena_w1c {
+	u64 u;
+	struct nixx_af_err_int_ena_w1c_s {
+		u64 rx_mcast_data_fault              : 1;
+		u64 rx_mirror_data_fault             : 1;
+		u64 rx_mcast_wqe_fault               : 1;
+		u64 rx_mirror_wqe_fault              : 1;
+		u64 rx_mce_fault                     : 1;
+		u64 rx_mce_list_err                  : 1;
+		u64 rx_unmapped_pf_func              : 1;
+		u64 reserved_7_11                    : 5;
+		u64 aq_door_err                      : 1;
+		u64 aq_res_fault                     : 1;
+		u64 aq_inst_fault                    : 1;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct nixx_af_err_int_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_AF_ERR_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_ERR_INT_ENA_W1C(void)
+{
+	return 0x198; /* byte offset of this CSR within RVU PF BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_err_int_ena_w1s
+ *
+ * NIX Admin Function Error Interrupt Enable Set Register This register
+ * sets interrupt enable bits.
+ */
+union nixx_af_err_int_ena_w1s {
+	u64 u;
+	struct nixx_af_err_int_ena_w1s_s {
+		u64 rx_mcast_data_fault              : 1;
+		u64 rx_mirror_data_fault             : 1;
+		u64 rx_mcast_wqe_fault               : 1;
+		u64 rx_mirror_wqe_fault              : 1;
+		u64 rx_mce_fault                     : 1;
+		u64 rx_mce_list_err                  : 1;
+		u64 rx_unmapped_pf_func              : 1;
+		u64 reserved_7_11                    : 5;
+		u64 aq_door_err                      : 1;
+		u64 aq_res_fault                     : 1;
+		u64 aq_inst_fault                    : 1;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct nixx_af_err_int_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_ERR_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_ERR_INT_ENA_W1S(void)
+{
+	return 0x190;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_err_int_w1s
+ *
+ * NIX Admin Function Error Interrupt Set Register This register sets
+ * interrupt bits.
+ */
+union nixx_af_err_int_w1s {
+	u64 u;
+	struct nixx_af_err_int_w1s_s {
+		u64 rx_mcast_data_fault              : 1;
+		u64 rx_mirror_data_fault             : 1;
+		u64 rx_mcast_wqe_fault               : 1;
+		u64 rx_mirror_wqe_fault              : 1;
+		u64 rx_mce_fault                     : 1;
+		u64 rx_mce_list_err                  : 1;
+		u64 rx_unmapped_pf_func              : 1;
+		u64 reserved_7_11                    : 5;
+		u64 aq_door_err                      : 1;
+		u64 aq_res_fault                     : 1;
+		u64 aq_inst_fault                    : 1;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct nixx_af_err_int_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_ERR_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_ERR_INT_W1S(void)
+{
+	return 0x188;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_expr_tx_fifo_status
+ *
+ * INTERNAL: NIX AF Express Transmit FIFO Status Register  Internal:
+ * 802.3br frame preemption/express path is defeatured. Old definition:
+ * Status of FIFO which transmits express packets to CGX and LBK.
+ */
+union nixx_af_expr_tx_fifo_status {
+	u64 u;
+	struct nixx_af_expr_tx_fifo_status_s {
+		u64 count                            : 12;
+		u64 reserved_12_63                   : 52;
+	} s;
+	/* struct nixx_af_expr_tx_fifo_status_s cn; */
+};
+
+static inline u64 NIXX_AF_EXPR_TX_FIFO_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_EXPR_TX_FIFO_STATUS(void)
+{
+	return 0x640; /* byte offset of this CSR within RVU PF BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_gen_int
+ *
+ * NIX AF General Interrupt Register
+ */
+union nixx_af_gen_int {
+	u64 u;
+	struct nixx_af_gen_int_s {
+		u64 rx_mcast_drop                    : 1;
+		u64 rx_mirror_drop                   : 1;
+		u64 reserved_2                       : 1;
+		u64 tl1_drain                        : 1;
+		u64 smq_flush_done                   : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	/* struct nixx_af_gen_int_s cn; */
+};
+
+static inline u64 NIXX_AF_GEN_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_GEN_INT(void)
+{
+	return 0x160;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_gen_int_ena_w1c
+ *
+ * NIX AF General Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union nixx_af_gen_int_ena_w1c {
+	u64 u;
+	struct nixx_af_gen_int_ena_w1c_s {
+		u64 rx_mcast_drop                    : 1;
+		u64 rx_mirror_drop                   : 1;
+		u64 reserved_2                       : 1;
+		u64 tl1_drain                        : 1;
+		u64 smq_flush_done                   : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	/* struct nixx_af_gen_int_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_AF_GEN_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_GEN_INT_ENA_W1C(void)
+{
+	return 0x178;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_gen_int_ena_w1s
+ *
+ * NIX AF General Interrupt Enable Set Register This register sets
+ * interrupt enable bits.
+ */
+union nixx_af_gen_int_ena_w1s {
+	u64 u;
+	struct nixx_af_gen_int_ena_w1s_s {
+		u64 rx_mcast_drop                    : 1;
+		u64 rx_mirror_drop                   : 1;
+		u64 reserved_2                       : 1;
+		u64 tl1_drain                        : 1;
+		u64 smq_flush_done                   : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	/* struct nixx_af_gen_int_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_GEN_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_GEN_INT_ENA_W1S(void)
+{
+	return 0x170;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_gen_int_w1s
+ *
+ * NIX AF General Interrupt Set Register This register sets interrupt
+ * bits.
+ */
+union nixx_af_gen_int_w1s {
+	u64 u;
+	struct nixx_af_gen_int_w1s_s {
+		u64 rx_mcast_drop                    : 1;
+		u64 rx_mirror_drop                   : 1;
+		u64 reserved_2                       : 1;
+		u64 tl1_drain                        : 1;
+		u64 smq_flush_done                   : 1;
+		u64 reserved_5_63                    : 59;
+	} s;
+	/* struct nixx_af_gen_int_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_GEN_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_GEN_INT_W1S(void)
+{
+	return 0x168;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_cfg
+ *
+ * NIX AF Local Function Configuration Registers
+ */
+union nixx_af_lfx_cfg {
+	u64 u;
+	struct nixx_af_lfx_cfg_s {
+		u64 npa_pf_func                      : 16;
+		u64 sso_pf_func                      : 16;
+		u64 be                               : 1;
+		u64 xqe_size                         : 2;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct nixx_af_lfx_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_CFG(u64 a)
+{
+	return 0x4000 + 0x20000 * a; /* a selects the LF; per-LF CSRs are 0x20000 apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_cints_base
+ *
+ * NIX AF Local Function Completion Interrupts Base Address Registers
+ * This register specifies the base AF IOVA of LF's completion interrupt
+ * context table in NDC/LLC/DRAM. The table consists of
+ * NIX_AF_CONST2[CINTS] contiguous NIX_CINT_HW_S structures.  After
+ * writing to this register, software should read it back to ensure that
+ * the write has completed before accessing any NIX_LF_CINT()_*
+ * registers.
+ */
+union nixx_af_lfx_cints_base {
+	u64 u;
+	struct nixx_af_lfx_cints_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_lfx_cints_base_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_CINTS_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_CINTS_BASE(u64 a)
+{
+	return 0x4130 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_cints_cfg
+ *
+ * NIX AF Local Function Completion Interrupts Configuration Registers
+ * This register controls access to the LF's completion interrupt context
+ * table in NDC/LLC/DRAM. The table consists of NIX_AF_CONST2[CINTS]
+ * contiguous NIX_CINT_HW_S structures. The size of each structure is 1
+ * \<\< NIX_AF_CONST3[CINT_LOG2BYTES].  After writing to this register,
+ * software should read it back to ensure that the write has completed
+ * before accessing any NIX_LF_CINT()_* registers.
+ */
+union nixx_af_lfx_cints_cfg {
+	u64 u;
+	struct nixx_af_lfx_cints_cfg_s {
+		u64 reserved_0_19                    : 20;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_37_63                   : 27;
+	} s;
+	/* struct nixx_af_lfx_cints_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_CINTS_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_CINTS_CFG(u64 a)
+{
+	return 0x4120 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_cqs_base
+ *
+ * NIX AF Local Function Completion Queues Base Address Register This
+ * register specifies the base AF IOVA of the LF's CQ context table. The
+ * table consists of NIX_AF_LF()_CQS_CFG[MAX_QUEUESM1]+1 contiguous
+ * NIX_CQ_CTX_S structures.
+ */
+union nixx_af_lfx_cqs_base {
+	u64 u;
+	struct nixx_af_lfx_cqs_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_lfx_cqs_base_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_CQS_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_CQS_BASE(u64 a)
+{
+	return 0x4070 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_cqs_cfg
+ *
+ * NIX AF Local Function Completion Queues Configuration Register This
+ * register configures completion queues in the LF.
+ */
+union nixx_af_lfx_cqs_cfg {
+	u64 u;
+	struct nixx_af_lfx_cqs_cfg_s {
+		u64 max_queuesm1                     : 20;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_37_63                   : 27;
+	} s;
+	/* struct nixx_af_lfx_cqs_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_CQS_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_CQS_CFG(u64 a)
+{
+	return 0x4060 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_lock#
+ *
+ * NIX AF Local Function Lockdown Registers Internal: The NIX lockdown
+ * depth of 32 bytes is shallow compared to 96 bytes for NIC and meant
+ * for outer MAC and/or VLAN (optionally preceded by a small number of
+ * skip bytes). NPC's MCAM can be used for deeper protocol-aware
+ * lockdown.
+ */
+union nixx_af_lfx_lockx {
+	u64 u;
+	struct nixx_af_lfx_lockx_s {
+		u64 data                             : 32;
+		u64 bit_ena                          : 32;
+	} s;
+	/* struct nixx_af_lfx_lockx_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_LOCKX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_LOCKX(u64 a, u64 b)
+{
+	return 0x4300 + 0x20000 * a + 8 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_qints_base
+ *
+ * NIX AF Local Function Queue Interrupts Base Address Registers This
+ * register specifies the base AF IOVA of LF's queue interrupt context
+ * table in NDC/LLC/DRAM. The table consists of NIX_AF_CONST2[QINTS]
+ * contiguous NIX_QINT_HW_S structures.  After writing to this register,
+ * software should read it back to ensure that the write has completed
+ * before accessing any NIX_LF_QINT()_* registers.
+ */
+union nixx_af_lfx_qints_base {
+	u64 u;
+	struct nixx_af_lfx_qints_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_lfx_qints_base_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_QINTS_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_QINTS_BASE(u64 a)
+{
+	return 0x4110 + 0x20000 * a; /* a selects the LF; per-LF CSRs are 0x20000 apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_qints_cfg
+ *
+ * NIX AF Local Function Queue Interrupts Configuration Registers This
+ * register controls access to the LF's queue interrupt context table in
+ * NDC/LLC/DRAM. The table consists of NIX_AF_CONST2[QINTS] contiguous
+ * NIX_QINT_HW_S structures. The size of each structure is 1 \<\<
+ * NIX_AF_CONST3[QINT_LOG2BYTES].  After writing to this register,
+ * software should read it back to ensure that the write has completed
+ * before accessing any NIX_LF_QINT()_* registers.
+ */
+union nixx_af_lfx_qints_cfg {
+	u64 u;
+	struct nixx_af_lfx_qints_cfg_s {
+		u64 reserved_0_19                    : 20;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_37_63                   : 27;
+	} s;
+	/* struct nixx_af_lfx_qints_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_QINTS_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_QINTS_CFG(u64 a)
+{
+	return 0x4100 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rqs_base
+ *
+ * NIX AF Local Function Receive Queues Base Address Register This
+ * register specifies the base AF IOVA of the LF's RQ context table. The
+ * table consists of NIX_AF_LF()_RQS_CFG[MAX_QUEUESM1]+1 contiguous
+ * NIX_RQ_CTX_S structures.
+ */
+union nixx_af_lfx_rqs_base {
+	u64 u;
+	struct nixx_af_lfx_rqs_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_lfx_rqs_base_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RQS_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RQS_BASE(u64 a)
+{
+	return 0x4050 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rqs_cfg
+ *
+ * NIX AF Local Function Receive Queues Configuration Register This
+ * register configures receive queues in the LF.
+ */
+union nixx_af_lfx_rqs_cfg {
+	u64 u;
+	struct nixx_af_lfx_rqs_cfg_s {
+		u64 max_queuesm1                     : 20;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_37_63                   : 27;
+	} s;
+	/* struct nixx_af_lfx_rqs_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RQS_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RQS_CFG(u64 a)
+{
+	return 0x4040 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rss_base
+ *
+ * NIX AF Local Function Receive Size Scaling Table Base Address Register
+ * This register specifies the base AF IOVA of the RSS table per LF. The
+ * table is present when NIX_AF_LF()_RSS_CFG[ENA] is set and consists of
+ * 1 \<\< (NIX_AF_LF()_RSS_CFG[SIZE] + 8) contiguous NIX_RSSE_S
+ * structures, where the size of each structure is 1 \<\<
+ * NIX_AF_CONST3[RSSE_LOG2BYTES]. See NIX_AF_LF()_RSS_GRP().
+ */
+union nixx_af_lfx_rss_base {
+	u64 u;
+	struct nixx_af_lfx_rss_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_lfx_rss_base_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RSS_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RSS_BASE(u64 a)
+{
+	return 0x40d0 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rss_cfg
+ *
+ * NIX AF Local Function Receive Size Scaling Table Configuration
+ * Register See NIX_AF_LF()_RSS_BASE and NIX_AF_LF()_RSS_GRP().
+ */
+union nixx_af_lfx_rss_cfg {
+	u64 u;
+	struct nixx_af_lfx_rss_cfg_s {
+		u64 size                             : 4;
+		u64 ena                              : 1;
+		u64 adder_is_tag_lsb                 : 1;
+		u64 reserved_6_19                    : 14;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_37_63                   : 27;
+	} s;
+	/* pass-1 silicon lacks ADDER_IS_TAG_LSB (bit 5 reserved) */
+	struct nixx_af_lfx_rss_cfg_cn96xxp1 {
+		u64 size                             : 4;
+		u64 ena                              : 1;
+		u64 reserved_5_19                    : 15;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_37_63                   : 27;
+	} cn96xxp1;
+	/* struct nixx_af_lfx_rss_cfg_s cn96xxp3; */
+	/* struct nixx_af_lfx_rss_cfg_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_LFX_RSS_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RSS_CFG(u64 a)
+{
+	return 0x40c0 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rss_grp#
+ *
+ * NIX AF Local Function Receive Side Scaling Group Registers A receive
+ * packet targets a LF's RSS group when its NIX_RX_ACTION_S[OP] =
+ * NIX_RX_ACTIONOP_E::RSS, or its target multicast list has an entry with
+ * NIX_RX_MCE_S[OP] = NIX_RX_MCOP_E::RSS. The RSS group index (this
+ * register's last index) is NIX_RX_ACTION_S[INDEX] or
+ * NIX_RX_MCE_S[INDEX].  The RSS computation is as follows: * The
+ * packet's flow_tag (see NIX_LF_RX_SECRET()) and RSS group are used to
+ * select a NIX_RSSE_S entry in the LF's RSS table (see [SIZEM1]). *
+ * NIX_RSSE_S selects the packet's destination RQ.
+ */
+union nixx_af_lfx_rss_grpx {
+	u64 u;
+	struct nixx_af_lfx_rss_grpx_s {
+		u64 offset                           : 11;
+		u64 reserved_11_15                   : 5;
+		u64 sizem1                           : 3;
+		u64 reserved_19_63                   : 45;
+	} s;
+	/* struct nixx_af_lfx_rss_grpx_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RSS_GRPX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RSS_GRPX(u64 a, u64 b)
+{
+	return 0x4600 + 0x20000 * a + 8 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rx_cfg
+ *
+ * NIX AF Local Function Receive Configuration Register
+ */
+union nixx_af_lfx_rx_cfg {
+	u64 u;
+	struct nixx_af_lfx_rx_cfg_s {
+		u64 reserved_0_31                    : 32;
+		u64 drop_re                          : 1;
+		u64 lenerr_en                        : 1;
+		u64 ip6_udp_opt                      : 1;
+		u64 dis_apad                         : 1;
+		u64 csum_il4                         : 1;
+		u64 csum_ol4                         : 1;
+		u64 len_il4                          : 1;
+		u64 len_il3                          : 1;
+		u64 len_ol4                          : 1;
+		u64 len_ol3                          : 1;
+		u64 reserved_42_63                   : 22;
+	} s;
+	/* pass-1 silicon lacks DROP_RE (bit 32 reserved) */
+	struct nixx_af_lfx_rx_cfg_cn96xxp1 {
+		u64 reserved_0_31                    : 32;
+		u64 reserved_32                      : 1;
+		u64 lenerr_en                        : 1;
+		u64 ip6_udp_opt                      : 1;
+		u64 dis_apad                         : 1;
+		u64 csum_il4                         : 1;
+		u64 csum_ol4                         : 1;
+		u64 len_il4                          : 1;
+		u64 len_il3                          : 1;
+		u64 len_ol4                          : 1;
+		u64 len_ol3                          : 1;
+		u64 reserved_42_63                   : 22;
+	} cn96xxp1;
+	/* struct nixx_af_lfx_rx_cfg_s cn96xxp3; */
+	/* struct nixx_af_lfx_rx_cfg_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_LFX_RX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RX_CFG(u64 a)
+{
+	return 0x40a0 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rx_ipsec_cfg0
+ *
+ * INTERNAL: NIX AF LF Receive IPSEC Configuration Registers  Internal:
+ * Not used; no IPSEC fast-path.
+ */
+union nixx_af_lfx_rx_ipsec_cfg0 {
+	u64 u;
+	struct nixx_af_lfx_rx_ipsec_cfg0_s {
+		u64 lenm1_max                        : 14;
+		u64 reserved_14_15                   : 2;
+		u64 sa_pow2_size                     : 4;
+		u64 tag_const                        : 24;
+		u64 tt                               : 2;
+		u64 defcpt                           : 1;
+		u64 hshcpt                           : 1;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_lfx_rx_ipsec_cfg0_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RX_IPSEC_CFG0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RX_IPSEC_CFG0(u64 a)
+{
+	return 0x4140 + 0x20000 * a; /* a selects the LF; per-LF CSRs are 0x20000 apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rx_ipsec_cfg1
+ *
+ * INTERNAL: NIX AF LF Receive IPSEC Security Association Configuration
+ * Register  Internal: Not used; no IPSEC fast-path.
+ */
+union nixx_af_lfx_rx_ipsec_cfg1 {
+	u64 u;
+	struct nixx_af_lfx_rx_ipsec_cfg1_s {
+		u64 sa_idx_max                       : 32;
+		u64 sa_idx_w                         : 5;
+		u64 reserved_37_63                   : 27;
+	} s;
+	/* struct nixx_af_lfx_rx_ipsec_cfg1_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RX_IPSEC_CFG1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RX_IPSEC_CFG1(u64 a)
+{
+	return 0x4148 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rx_ipsec_dyno_base
+ *
+ * INTERNAL: NIX AF LF Receive IPSEC Dynamic Ordering Base Address
+ * Registers  Internal: Not used; no IPSEC fast-path.
+ */
+union nixx_af_lfx_rx_ipsec_dyno_base {
+	u64 u;
+	struct nixx_af_lfx_rx_ipsec_dyno_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_lfx_rx_ipsec_dyno_base_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RX_IPSEC_DYNO_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RX_IPSEC_DYNO_BASE(u64 a)
+{
+	return 0x4158 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rx_ipsec_dyno_cfg
+ *
+ * INTERNAL: NIX AF LF Receive IPSEC Dynamic Ordering Configuration
+ * Registers  Internal: Not used; no IPSEC fast-path.
+ */
+union nixx_af_lfx_rx_ipsec_dyno_cfg {
+	u64 u;
+	struct nixx_af_lfx_rx_ipsec_dyno_cfg_s {
+		u64 dyno_idx_w                       : 4;
+		u64 dyno_ena                         : 1;
+		u64 reserved_5_19                    : 15;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_37_63                   : 27;
+	} s;
+	/* struct nixx_af_lfx_rx_ipsec_dyno_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RX_IPSEC_DYNO_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RX_IPSEC_DYNO_CFG(u64 a)
+{
+	return 0x4150 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rx_ipsec_sa_base
+ *
+ * INTERNAL: NIX AF LF Receive IPSEC Security Association Base Address
+ * Register  Internal: Not used; no IPSEC fast-path.
+ */
+union nixx_af_lfx_rx_ipsec_sa_base {
+	u64 u;
+	struct nixx_af_lfx_rx_ipsec_sa_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_lfx_rx_ipsec_sa_base_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RX_IPSEC_SA_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RX_IPSEC_SA_BASE(u64 a)
+{
+	return 0x4170 + 0x20000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rx_stat#
+ *
+ * NIX AF Local Function Receive Statistics Registers The last dimension
+ * indicates which statistic, and is enumerated by NIX_STAT_LF_RX_E.
+ */
+union nixx_af_lfx_rx_statx {
+	u64 u;
+	struct nixx_af_lfx_rx_statx_s {
+		u64 stat                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_lfx_rx_statx_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RX_STATX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RX_STATX(u64 a, u64 b)
+{
+	return 0x4500 + 0x20000 * a + 8 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_rx_vtag_type#
+ *
+ * NIX AF Local Function Receive Vtag Type Registers These registers
+ * specify optional Vtag (e.g. VLAN, E-TAG) actions for received packets.
+ * Indexed by NIX_RX_VTAG_ACTION_S[VTAG*_TYPE].
+ */
+union nixx_af_lfx_rx_vtag_typex {
+	u64 u;
+	struct nixx_af_lfx_rx_vtag_typex_s {
+		u64 size                             : 1;
+		u64 reserved_1_3                     : 3;
+		u64 strip                            : 1;
+		u64 capture                          : 1;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct nixx_af_lfx_rx_vtag_typex_s cn; */
+};
+
+static inline u64 NIXX_AF_LFX_RX_VTAG_TYPEX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_LFX_RX_VTAG_TYPEX(u64 a, u64 b)
+{
+	return 0x4200 + 0x20000 * a + 8 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_sqs_base
+ *
+ * NIX AF Local Function Send Queues Base Address Register This register
+ * specifies the base AF IOVA of the LF's SQ context table. The table
+ * consists of NIX_AF_LF()_SQS_CFG[MAX_QUEUESM1]+1 contiguous
+ * NIX_SQ_CTX_HW_S structures.
+ */
union nixx_af_lfx_sqs_base {
	u64 u;
	struct nixx_af_lfx_sqs_base_s {
		u64 reserved_0_6                     : 7;
		u64 addr                             : 46;
		u64 reserved_53_63                   : 11;
	} s;
	/* struct nixx_af_lfx_sqs_base_s cn; */
};

/* Byte offset: base 0x4030 + 0x20000 * a; a presumably the LF slot — confirm with HRM. */
static inline u64 NIXX_AF_LFX_SQS_BASE(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LFX_SQS_BASE(u64 a)
{
	return 0x4030 + 0x20000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_sqs_cfg
+ *
+ * NIX AF Local Function Send Queues Configuration Register This register
+ * configures send queues in the LF.
+ */
union nixx_af_lfx_sqs_cfg {
	u64 u;
	struct nixx_af_lfx_sqs_cfg_s {
		u64 max_queuesm1                     : 20;
		u64 way_mask                         : 16;
		u64 caching                          : 1;
		u64 reserved_37_63                   : 27;
	} s;
	/* struct nixx_af_lfx_sqs_cfg_s cn; */
};

/* Byte offset: base 0x4020 + 0x20000 * a; a presumably the LF slot — confirm with HRM. */
static inline u64 NIXX_AF_LFX_SQS_CFG(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LFX_SQS_CFG(u64 a)
{
	return 0x4020 + 0x20000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_tx_cfg
+ *
+ * NIX AF Local Function Transmit Configuration Register
+ */
union nixx_af_lfx_tx_cfg {
	u64 u;
	struct nixx_af_lfx_tx_cfg_s {
		u64 vlan0_ins_etype                  : 16;
		u64 vlan1_ins_etype                  : 16;
		u64 send_tstmp_ena                   : 1;
		u64 lock_viol_cqe_ena                : 1;
		u64 lock_ena                         : 1;
		u64 reserved_35_63                   : 29;
	} s;
	/* struct nixx_af_lfx_tx_cfg_s cn; */
};

/* Byte offset: base 0x4080 + 0x20000 * a; a presumably the LF slot — confirm with HRM. */
static inline u64 NIXX_AF_LFX_TX_CFG(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LFX_TX_CFG(u64 a)
{
	return 0x4080 + 0x20000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_tx_cfg2
+ *
+ * NIX AF Local Function Transmit Configuration Register
+ */
union nixx_af_lfx_tx_cfg2 {
	u64 u;
	struct nixx_af_lfx_tx_cfg2_s {
		u64 lmt_ena                          : 1;
		u64 reserved_1_63                    : 63;
	} s;
	/* struct nixx_af_lfx_tx_cfg2_s cn; */
};

/* Byte offset: base 0x4028 + 0x20000 * a; a presumably the LF slot — confirm with HRM. */
static inline u64 NIXX_AF_LFX_TX_CFG2(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LFX_TX_CFG2(u64 a)
{
	return 0x4028 + 0x20000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_tx_parse_cfg
+ *
+ * NIX AF Local Function Transmit Parse Configuration Register
+ */
union nixx_af_lfx_tx_parse_cfg {
	u64 u;
	struct nixx_af_lfx_tx_parse_cfg_s {
		u64 pkind                            : 6;
		u64 reserved_6_63                    : 58;
	} s;
	/* struct nixx_af_lfx_tx_parse_cfg_s cn; */
};

/* Byte offset: base 0x4090 + 0x20000 * a; a presumably the LF slot — confirm with HRM. */
static inline u64 NIXX_AF_LFX_TX_PARSE_CFG(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LFX_TX_PARSE_CFG(u64 a)
{
	return 0x4090 + 0x20000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_tx_stat#
+ *
+ * NIX AF Local Function Transmit Statistics Registers The last dimension
+ * indicates which statistic, and is enumerated by NIX_STAT_LF_TX_E.
+ */
union nixx_af_lfx_tx_statx {
	u64 u;
	struct nixx_af_lfx_tx_statx_s {
		u64 stat                             : 48;
		u64 reserved_48_63                   : 16;
	} s;
	/* struct nixx_af_lfx_tx_statx_s cn; */
};

/* Byte offset: base 0x4400 + 0x20000 * a + 8 * b; a presumably the LF slot, b the statistic index. */
static inline u64 NIXX_AF_LFX_TX_STATX(u64 a, u64 b)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LFX_TX_STATX(u64 a, u64 b)
{
	return 0x4400 + 0x20000 * a + 8 * b;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf#_tx_status
+ *
+ * NIX AF LF Transmit Status Register
+ */
union nixx_af_lfx_tx_status {
	u64 u;
	struct nixx_af_lfx_tx_status_s {
		u64 sq_ctx_err                       : 1;
		u64 reserved_1_63                    : 63;
	} s;
	/* struct nixx_af_lfx_tx_status_s cn; */
};

/* Byte offset: base 0x4180 + 0x20000 * a; a presumably the LF slot — confirm with HRM. */
static inline u64 NIXX_AF_LFX_TX_STATUS(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LFX_TX_STATUS(u64 a)
{
	return 0x4180 + 0x20000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lf_rst
+ *
+ * NIX Admin Function LF Reset Register
+ */
union nixx_af_lf_rst {
	u64 u;
	struct nixx_af_lf_rst_s {
		u64 lf                               : 8;
		u64 reserved_8_11                    : 4;
		u64 exec                             : 1;
		u64 reserved_13_63                   : 51;
	} s;
	/* struct nixx_af_lf_rst_s cn; */
};

/* Fixed byte offset 0x150 within RVU_PF_BAR0. */
static inline u64 NIXX_AF_LF_RST(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LF_RST(void)
{
	return 0x150;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lso_cfg
+ *
+ * NIX AF Large Send Offload Configuration Register
+ */
union nixx_af_lso_cfg {
	u64 u;
	struct nixx_af_lso_cfg_s {
		u64 tcp_lsf                          : 16;
		u64 tcp_msf                          : 16;
		u64 tcp_fsf                          : 16;
		u64 reserved_48_62                   : 15;
		u64 enable                           : 1;
	} s;
	/* struct nixx_af_lso_cfg_s cn; */
};

/* Fixed byte offset 0xa8 within RVU_PF_BAR0. */
static inline u64 NIXX_AF_LSO_CFG(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LSO_CFG(void)
{
	return 0xa8;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_lso_format#_field#
+ *
+ * NIX AF Large Send Offload Format Field Registers These registers
+ * specify LSO packet modification formats. Each format may modify up to
+ * eight packet fields with the following constraints: * If fewer than
+ * eight fields are modified, [ALG] must be NIX_LSOALG_E::NOP in the
+ * unused field registers. * Modified fields must be specified in
+ * contiguous field registers starting with NIX_AF_LSO_FORMAT()_FIELD(0).
+ * * Modified fields cannot overlap. * Multiple fields with the same
+ * [LAYER] value must be specified in ascending [OFFSET] order. * Fields
+ * in different layers must be specified in ascending [LAYER] order.
+ */
union nixx_af_lso_formatx_fieldx {
	u64 u;
	struct nixx_af_lso_formatx_fieldx_s {
		u64 offset                           : 8;
		u64 layer                            : 2;
		u64 reserved_10_11                   : 2;
		u64 sizem1                           : 2;
		u64 reserved_14_15                   : 2;
		u64 alg                              : 3;
		u64 reserved_19_63                   : 45;
	} s;
	/* struct nixx_af_lso_formatx_fieldx_s cn; */
};

/* Byte offset: base 0x1b00 + 0x10000 * a + 8 * b; a presumably the format, b the field index (0-7 per the description). */
static inline u64 NIXX_AF_LSO_FORMATX_FIELDX(u64 a, u64 b)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_LSO_FORMATX_FIELDX(u64 a, u64 b)
{
	return 0x1b00 + 0x10000 * a + 8 * b;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mark_format#_ctl
+ *
+ * NIX AF Packet Marking Format Registers Describes packet marking
+ * calculations for YELLOW and for NIX_COLORRESULT_E::RED_SEND packets.
+ * NIX_SEND_EXT_S[MARKFORM] selects the CSR used for the packet
+ * descriptor.  All the packet marking offset calculations assume big-
+ * endian bits within a byte.  For example, if NIX_SEND_EXT_S[MARKPTR] is
+ * 3 and [OFFSET] is 5 and the packet is YELLOW, the NIX marking hardware
+ * would do this:  _  byte[3]\<2:0\> |=   [Y_VAL]\<3:1\> _
+ * byte[3]\<2:0\> &= ~[Y_MASK]\<3:1\> _  byte[4]\<7\>   |=   [Y_VAL]\<0\>
+ * _  byte[4]\<7\>   &= ~[Y_MASK]\<0\>  where byte[3] is the third byte
+ * in the packet, and byte[4] the fourth.  For another example, if
+ * NIX_SEND_EXT_S[MARKPTR] is 3 and [OFFSET] is 0 and the packet is
+ * NIX_COLORRESULT_E::RED_SEND,  _   byte[3]\<7:4\> |=   [R_VAL]\<3:0\> _
+ * byte[3]\<7:4\> &= ~[R_MASK]\<3:0\>
+ */
union nixx_af_mark_formatx_ctl {
	u64 u;
	struct nixx_af_mark_formatx_ctl_s {
		u64 r_val                            : 4;
		u64 r_mask                           : 4;
		u64 y_val                            : 4;
		u64 y_mask                           : 4;
		u64 offset                           : 3;
		u64 reserved_19_63                   : 45;
	} s;
	/* struct nixx_af_mark_formatx_ctl_s cn; */
};

/* Byte offset: base 0x900 + 0x40000 * a; a presumably the mark-format index — confirm with HRM. */
static inline u64 NIXX_AF_MARK_FORMATX_CTL(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MARK_FORMATX_CTL(u64 a)
{
	return 0x900 + 0x40000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mc_mirror_const
+ *
+ * NIX AF Multicast/Mirror Constants Register This register contains
+ * constants for software discovery.
+ */
union nixx_af_mc_mirror_const {
	u64 u;
	struct nixx_af_mc_mirror_const_s {
		u64 buf_size                         : 16;
		u64 reserved_16_63                   : 48;
	} s;
	/* struct nixx_af_mc_mirror_const_s cn; */
};

/* Fixed byte offset 0x98 within RVU_PF_BAR0. */
static inline u64 NIXX_AF_MC_MIRROR_CONST(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MC_MIRROR_CONST(void)
{
	return 0x98;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_cir
+ *
+ * NIX AF Meta Descriptor Queue Committed Information Rate Registers This
+ * register has the same bit fields as NIX_AF_TL1()_CIR.
+ */
union nixx_af_mdqx_cir {
	u64 u;
	struct nixx_af_mdqx_cir_s {
		u64 enable                           : 1;
		u64 rate_mantissa                    : 8;
		u64 rate_exponent                    : 4;
		u64 rate_divider_exponent            : 4;
		u64 reserved_17_28                   : 12;
		u64 burst_mantissa                   : 8;
		u64 burst_exponent                   : 4;
		u64 reserved_41_63                   : 23;
	} s;
	/* struct nixx_af_mdqx_cir_s cn; */
};

/* Byte offset: base 0x1420 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_CIR(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_CIR(u64 a)
{
	return 0x1420 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_md_debug
+ *
+ * NIX AF Meta Descriptor Queue Meta Descriptor State Debug Registers
+ * This register provides access to the meta descriptor at the front of
+ * the MDQ. An MDQ can hold up to 8 packet meta descriptors (PMD) and one
+ * flush meta descriptor (FMD).
+ */
union nixx_af_mdqx_md_debug {
	u64 u;
	struct nixx_af_mdqx_md_debug_s {
		u64 pkt_len                          : 16;
		u64 red_algo_override                : 2;
		u64 shp_dis                          : 1;
		u64 reserved_19                      : 1;
		u64 shp_chg                          : 9;
		u64 reserved_29_31                   : 3;
		u64 sqm_pkt_id                       : 13;
		u64 reserved_45_60                   : 16;
		u64 md_type                          : 2;
		u64 reserved_63                      : 1;
	} s;
	/* struct nixx_af_mdqx_md_debug_s cn; */
};

/* Byte offset: base 0x14c0 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_MD_DEBUG(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_MD_DEBUG(u64 a)
{
	return 0x14c0 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_parent
+ *
+ * NIX AF Meta Descriptor Queue Topology Registers
+ */
union nixx_af_mdqx_parent {
	u64 u;
	struct nixx_af_mdqx_parent_s {
		u64 reserved_0_15                    : 16;
		u64 parent                           : 9;
		u64 reserved_25_63                   : 39;
	} s;
	/* struct nixx_af_mdqx_parent_s cn; */
};

/* Byte offset: base 0x1480 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_PARENT(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_PARENT(u64 a)
{
	return 0x1480 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_pir
+ *
+ * NIX AF Meta Descriptor Queue Peak Information Rate Registers This
+ * register has the same bit fields as NIX_AF_TL1()_CIR.
+ */
union nixx_af_mdqx_pir {
	u64 u;
	struct nixx_af_mdqx_pir_s {
		u64 enable                           : 1;
		u64 rate_mantissa                    : 8;
		u64 rate_exponent                    : 4;
		u64 rate_divider_exponent            : 4;
		u64 reserved_17_28                   : 12;
		u64 burst_mantissa                   : 8;
		u64 burst_exponent                   : 4;
		u64 reserved_41_63                   : 23;
	} s;
	/* struct nixx_af_mdqx_pir_s cn; */
};

/* Byte offset: base 0x1430 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_PIR(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_PIR(u64 a)
{
	return 0x1430 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_pointers
+ *
+ * INTERNAL: NIX AF Meta Descriptor 4 Linked List Pointers Debug Register
+ * This register has the same bit fields as NIX_AF_TL4()_POINTERS.
+ */
union nixx_af_mdqx_pointers {
	u64 u;
	struct nixx_af_mdqx_pointers_s {
		u64 next                             : 9;
		u64 reserved_9_15                    : 7;
		u64 prev                             : 9;
		u64 reserved_25_63                   : 39;
	} s;
	/* struct nixx_af_mdqx_pointers_s cn; */
};

/* Byte offset: base 0x1460 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_POINTERS(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_POINTERS(u64 a)
{
	return 0x1460 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_ptr_fifo
+ *
+ * INTERNAL: NIX Meta Descriptor Queue Pointer FIFO State Debug Registers
+ */
union nixx_af_mdqx_ptr_fifo {
	u64 u;
	struct nixx_af_mdqx_ptr_fifo_s {
		u64 tail                             : 4;
		u64 head                             : 4;
		u64 p_con                            : 1;
		u64 reserved_9_63                    : 55;
	} s;
	/* struct nixx_af_mdqx_ptr_fifo_s cn; */
};

/* Byte offset: base 0x14d0 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_PTR_FIFO(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_PTR_FIFO(u64 a)
{
	return 0x14d0 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_sched_state
+ *
+ * NIX AF Meta Descriptor Queue Scheduling Control State Registers This
+ * register has the same bit fields as NIX_AF_TL2()_SCHED_STATE.
+ */
union nixx_af_mdqx_sched_state {
	u64 u;
	struct nixx_af_mdqx_sched_state_s {
		u64 rr_count                         : 25;
		u64 reserved_25_63                   : 39;
	} s;
	/* struct nixx_af_mdqx_sched_state_s cn; */
};

/* Byte offset: base 0x1440 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_SCHED_STATE(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_SCHED_STATE(u64 a)
{
	return 0x1440 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_schedule
+ *
+ * NIX AF Meta Descriptor Queue Scheduling Control Registers This
+ * register has the same bit fields as NIX_AF_TL2()_SCHEDULE.
+ */
union nixx_af_mdqx_schedule {
	u64 u;
	struct nixx_af_mdqx_schedule_s {
		u64 rr_quantum                       : 24;
		u64 prio                             : 4;
		u64 reserved_28_63                   : 36;
	} s;
	/* struct nixx_af_mdqx_schedule_s cn; */
};

/* Byte offset: base 0x1400 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_SCHEDULE(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_SCHEDULE(u64 a)
{
	return 0x1400 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_shape
+ *
+ * NIX AF Meta Descriptor Queue Shaping Control Registers This register
+ * has the same bit fields as NIX_AF_TL3()_SHAPE.
+ */
union nixx_af_mdqx_shape {
	u64 u;
	struct nixx_af_mdqx_shape_s {
		u64 adjust                           : 9;
		u64 red_algo                         : 2;
		u64 red_disable                      : 1;
		u64 yellow_disable                   : 1;
		u64 reserved_13_23                   : 11;
		u64 length_disable                   : 1;
		u64 schedule_list                    : 2;
		u64 reserved_27_63                   : 37;
	} s;
	/* struct nixx_af_mdqx_shape_s cn; */
};

/* Byte offset: base 0x1410 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_SHAPE(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_SHAPE(u64 a)
{
	return 0x1410 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_shape_state
+ *
+ * NIX AF Meta Descriptor Queue Shaping State Registers This register has
+ * the same bit fields as NIX_AF_TL2()_SHAPE_STATE. This register must
+ * not be written during normal operation.
+ */
union nixx_af_mdqx_shape_state {
	u64 u;
	struct nixx_af_mdqx_shape_state_s {
		u64 cir_accum                        : 26;
		u64 pir_accum                        : 26;
		u64 color                            : 2;
		u64 reserved_54_63                   : 10;
	} s;
	/* struct nixx_af_mdqx_shape_state_s cn; */
};

/* Byte offset: base 0x1450 + 0x10000 * a; a presumably the MDQ index. Read-only in normal operation per description. */
static inline u64 NIXX_AF_MDQX_SHAPE_STATE(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_SHAPE_STATE(u64 a)
{
	return 0x1450 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq#_sw_xoff
+ *
+ * NIX AF Meta Descriptor Controlled XOFF Registers This register has the
+ * same bit fields as NIX_AF_TL1()_SW_XOFF
+ */
union nixx_af_mdqx_sw_xoff {
	u64 u;
	struct nixx_af_mdqx_sw_xoff_s {
		u64 xoff                             : 1;
		u64 drain                            : 1;
		u64 reserved_2                       : 1;
		u64 drain_irq                        : 1;
		u64 reserved_4_63                    : 60;
	} s;
	/* struct nixx_af_mdqx_sw_xoff_s cn; */
};

/* Byte offset: base 0x1470 + 0x10000 * a; a presumably the MDQ index — confirm with HRM. */
static inline u64 NIXX_AF_MDQX_SW_XOFF(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQX_SW_XOFF(u64 a)
{
	return 0x1470 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_mdq_const
+ *
+ * NIX AF Meta Descriptor Queue Constants Register This register contains
+ * constants for software discovery.
+ */
union nixx_af_mdq_const {
	u64 u;
	struct nixx_af_mdq_const_s {
		u64 count                            : 16;
		u64 reserved_16_63                   : 48;
	} s;
	/* struct nixx_af_mdq_const_s cn; */
};

/* Fixed byte offset 0x90 within RVU_PF_BAR0. */
static inline u64 NIXX_AF_MDQ_CONST(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_MDQ_CONST(void)
{
	return 0x90;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_ndc_cfg
+ *
+ * NIX AF General Configuration Register
+ */
union nixx_af_ndc_cfg {
	u64 u;
	struct nixx_af_ndc_cfg_s {
		u64 ndc_ign_pois                     : 1;
		u64 byp_sq                           : 1;
		u64 byp_sqb                          : 1;
		u64 byp_cqs                          : 1;
		u64 byp_cints                        : 1;
		u64 byp_dyno                         : 1;
		u64 byp_mce                          : 1;
		u64 byp_rqc                          : 1;
		u64 byp_rsse                         : 1;
		u64 byp_mc_data                      : 1;
		u64 byp_mc_wqe                       : 1;
		u64 byp_mr_data                      : 1;
		u64 byp_mr_wqe                       : 1;
		u64 byp_qints                        : 1;
		u64 reserved_14_63                   : 50;
	} s;
	/* struct nixx_af_ndc_cfg_s cn; */
};

/* Fixed byte offset 0x18 within RVU_PF_BAR0. */
static inline u64 NIXX_AF_NDC_CFG(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_NDC_CFG(void)
{
	return 0x18;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_ndc_rx_sync
+ *
+ * NIX AF Receive NDC Sync Register Used to synchronize the NIX receive
+ * NDC (NDC_IDX_E::NIX()_RX).
+ */
union nixx_af_ndc_rx_sync {
	u64 u;
	struct nixx_af_ndc_rx_sync_s {
		u64 lf                               : 8;
		u64 reserved_8_11                    : 4;
		u64 exec                             : 1;
		u64 reserved_13_63                   : 51;
	} s;
	/* struct nixx_af_ndc_rx_sync_s cn; */
};

/* Fixed byte offset 0x3e0 within RVU_PF_BAR0. */
static inline u64 NIXX_AF_NDC_RX_SYNC(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_NDC_RX_SYNC(void)
{
	return 0x3e0;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_ndc_tx_sync
+ *
+ * NIX AF NDC_TX Sync Register Used to synchronize the NIX transmit NDC
+ * (NDC_IDX_E::NIX()_TX).
+ */
union nixx_af_ndc_tx_sync {
	u64 u;
	struct nixx_af_ndc_tx_sync_s {
		u64 lf                               : 8;
		u64 reserved_8_11                    : 4;
		u64 exec                             : 1;
		u64 reserved_13_63                   : 51;
	} s;
	/* struct nixx_af_ndc_tx_sync_s cn; */
};

/* Fixed byte offset 0x3f0 within RVU_PF_BAR0. */
static inline u64 NIXX_AF_NDC_TX_SYNC(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_NDC_TX_SYNC(void)
{
	return 0x3f0;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_norm_tx_fifo_status
+ *
+ * NIX AF Normal Transmit FIFO Status Register Status of FIFO which
+ * transmits normal packets to CGX and LBK.
+ */
union nixx_af_norm_tx_fifo_status {
	u64 u;
	struct nixx_af_norm_tx_fifo_status_s {
		u64 count                            : 12;
		u64 reserved_12_63                   : 52;
	} s;
	/* struct nixx_af_norm_tx_fifo_status_s cn; */
};

/* Fixed byte offset 0x648 within RVU_PF_BAR0. */
static inline u64 NIXX_AF_NORM_TX_FIFO_STATUS(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_NORM_TX_FIFO_STATUS(void)
{
	return 0x648;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq#_dbg_arb_link_exp
+ *
+ * INTERNAL: NIX AF PQ Arb Link EXPRESS Debug Register
+ */
union nixx_af_pqx_dbg_arb_link_exp {
	u64 u;
	struct nixx_af_pqx_dbg_arb_link_exp_s {
		u64 req                              : 1;
		u64 act_c_con                        : 1;
		u64 cnt                              : 2;
		u64 reserved_4_5                     : 2;
		u64 rr_mask                          : 1;
		u64 reserved_7_63                    : 57;
	} s;
	/* struct nixx_af_pqx_dbg_arb_link_exp_s cn; */
};

/* Byte offset: base 0xce8 + 0x10000 * a — internal debug register. */
static inline u64 NIXX_AF_PQX_DBG_ARB_LINK_EXP(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQX_DBG_ARB_LINK_EXP(u64 a)
{
	return 0xce8 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq#_dbg_arb_link_nrm
+ *
+ * INTERNAL: NIX AF PQ Arb Link NORMAL Debug Register
+ */
union nixx_af_pqx_dbg_arb_link_nrm {
	u64 u;
	struct nixx_af_pqx_dbg_arb_link_nrm_s {
		u64 req                              : 1;
		u64 act_c_con                        : 1;
		u64 cnt                              : 2;
		u64 reserved_4_5                     : 2;
		u64 rr_mask                          : 1;
		u64 reserved_7_63                    : 57;
	} s;
	/* struct nixx_af_pqx_dbg_arb_link_nrm_s cn; */
};

/* Byte offset: base 0xce0 + 0x10000 * a — internal debug register. */
static inline u64 NIXX_AF_PQX_DBG_ARB_LINK_NRM(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQX_DBG_ARB_LINK_NRM(u64 a)
{
	return 0xce0 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq#_dbg_arb_link_sdp
+ *
+ * INTERNAL: NIX AF PQ Arb Link SDP Debug Register
+ */
union nixx_af_pqx_dbg_arb_link_sdp {
	u64 u;
	struct nixx_af_pqx_dbg_arb_link_sdp_s {
		u64 req                              : 1;
		u64 act_c_con                        : 1;
		u64 cnt                              : 2;
		u64 reserved_4_5                     : 2;
		u64 rr_mask                          : 1;
		u64 reserved_7_63                    : 57;
	} s;
	/* struct nixx_af_pqx_dbg_arb_link_sdp_s cn; */
};

/* Byte offset: base 0xcf0 + 0x10000 * a — internal debug register. */
static inline u64 NIXX_AF_PQX_DBG_ARB_LINK_SDP(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQX_DBG_ARB_LINK_SDP(u64 a)
{
	return 0xcf0 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq_arb_crd_rdy_debug
+ *
+ * INTERNAL: NIX AF PQ_ARB Node Credit Ready Registers  NIX AF PQ ARB
+ * Credit ready register
+ */
union nixx_af_pq_arb_crd_rdy_debug {
	u64 u;
	struct nixx_af_pq_arb_crd_rdy_debug_s {
		u64 node_crd_rdy                     : 28;
		u64 reserved_28_63                   : 36;
	} s;
	/* struct nixx_af_pq_arb_crd_rdy_debug_s cn; */
};

/* Fixed byte offset 0xf10 within RVU_PF_BAR0 — internal debug register. */
static inline u64 NIXX_AF_PQ_ARB_CRD_RDY_DEBUG(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQ_ARB_CRD_RDY_DEBUG(void)
{
	return 0xf10;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq_arb_dwrr_msk_debug
+ *
+ * INTERNAL: NIX AF PQ_ARB DWRR mask set read only debug Registers
+ */
union nixx_af_pq_arb_dwrr_msk_debug {
	u64 u;
	struct nixx_af_pq_arb_dwrr_msk_debug_s {
		u64 node_dwrr_mask_set               : 28;
		u64 reserved_28_63                   : 36;
	} s;
	/* struct nixx_af_pq_arb_dwrr_msk_debug_s cn; */
};

/* Fixed byte offset 0xf30 within RVU_PF_BAR0 — internal debug register. */
static inline u64 NIXX_AF_PQ_ARB_DWRR_MSK_DEBUG(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQ_ARB_DWRR_MSK_DEBUG(void)
{
	return 0xf30;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq_arb_node_gnt_debug
+ *
+ * INTERNAL: NIX AF PQ_ARB Node Grant vector Registers
+ */
union nixx_af_pq_arb_node_gnt_debug {
	u64 u;
	struct nixx_af_pq_arb_node_gnt_debug_s {
		u64 node_grant_vec                   : 28;
		u64 reserved_28_63                   : 36;
	} s;
	/* struct nixx_af_pq_arb_node_gnt_debug_s cn; */
};

/* Fixed byte offset 0xf20 within RVU_PF_BAR0 — internal debug register. */
static inline u64 NIXX_AF_PQ_ARB_NODE_GNT_DEBUG(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQ_ARB_NODE_GNT_DEBUG(void)
{
	return 0xf20;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq_arb_node_req_debug
+ *
+ * INTERNAL: NIX AF PQ_ARB Node Request Debug Registers  NIX AF PQ ARB
+ * Node Request Debug register
+ */
union nixx_af_pq_arb_node_req_debug {
	u64 u;
	struct nixx_af_pq_arb_node_req_debug_s {
		u64 node_req                         : 28;
		u64 reserved_28_63                   : 36;
	} s;
	/* struct nixx_af_pq_arb_node_req_debug_s cn; */
};

/* Fixed byte offset 0xf00 within RVU_PF_BAR0 — internal debug register. */
static inline u64 NIXX_AF_PQ_ARB_NODE_REQ_DEBUG(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQ_ARB_NODE_REQ_DEBUG(void)
{
	return 0xf00;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq_arb_shape_vld_dbg
+ *
+ * INTERNAL: NIX AF PQ_ARB shape valid set Register
+ */
union nixx_af_pq_arb_shape_vld_dbg {
	u64 u;
	struct nixx_af_pq_arb_shape_vld_dbg_s {
		u64 node_shape_vld_set               : 28;
		u64 reserved_28_63                   : 36;
	} s;
	/* struct nixx_af_pq_arb_shape_vld_dbg_s cn; */
};

/* Fixed byte offset 0xf40 within RVU_PF_BAR0 — internal debug register. */
static inline u64 NIXX_AF_PQ_ARB_SHAPE_VLD_DBG(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQ_ARB_SHAPE_VLD_DBG(void)
{
	return 0xf40;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq_dbg_arb_0
+ *
+ * INTERNAL: NIX AF PQ Arb Debug 0 Register
+ */
union nixx_af_pq_dbg_arb_0 {
	u64 u;
	struct nixx_af_pq_dbg_arb_0_s {
		u64 rr_mask_clr                      : 1;
		u64 reserved_1_63                    : 63;
	} s;
	/* struct nixx_af_pq_dbg_arb_0_s cn; */
};

/* Fixed byte offset 0xcf8 within RVU_PF_BAR0 — internal debug register. */
static inline u64 NIXX_AF_PQ_DBG_ARB_0(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQ_DBG_ARB_0(void)
{
	return 0xcf8;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pq_lnk_#_dwrr_msk_dbg
+ *
+ * INTERNAL: NIX AF PQ_ARB Physical Link DWRR MASK Registers
+ */
union nixx_af_pq_lnk_x_dwrr_msk_dbg {
	u64 u;
	struct nixx_af_pq_lnk_x_dwrr_msk_dbg_s {
		u64 link_dwrr_mask_set               : 28;
		u64 reserved_28_63                   : 36;
	} s;
	/* struct nixx_af_pq_lnk_x_dwrr_msk_dbg_s cn; */
};

/* Byte offset: base 0x1100 + 0x10000 * a; a presumably the physical link — confirm with HRM. */
static inline u64 NIXX_AF_PQ_LNK_X_DWRR_MSK_DBG(u64 a)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PQ_LNK_X_DWRR_MSK_DBG(u64 a)
{
	return 0x1100 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_400_rate_divider
+ *
+ * INTERNAL: NIX AF PSE 400 Rate Divider Register
+ */
union nixx_af_pse_400_rate_divider {
	u64 u;
	struct nixx_af_pse_400_rate_divider_s {
		u64 rate_div_cfg                     : 9;
		u64 reserved_9_63                    : 55;
	} s;
	/* struct nixx_af_pse_400_rate_divider_s cn; */
};

/* Fixed byte offset 0x830 within RVU_PF_BAR0 — internal register. */
static inline u64 NIXX_AF_PSE_400_RATE_DIVIDER(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PSE_400_RATE_DIVIDER(void)
{
	return 0x830;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_active_cycles_pc
+ *
+ * NIX AF Active Cycles Register These registers are indexed by the
+ * conditional clock domain number.
+ */
union nixx_af_pse_active_cycles_pc {
	u64 u;
	struct nixx_af_pse_active_cycles_pc_s {
		u64 act_cyc                          : 64;
	} s;
	/* struct nixx_af_pse_active_cycles_pc_s cn; */
};

/* Fixed byte offset 0x8c0. NOTE(review): the header text says "indexed by the
 * conditional clock domain number" but this accessor takes no index — confirm
 * against the HRM whether an indexed variant exists.
 */
static inline u64 NIXX_AF_PSE_ACTIVE_CYCLES_PC(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PSE_ACTIVE_CYCLES_PC(void)
{
	return 0x8c0;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_bp_test0
+ *
+ * INTERNAL: NIX AF PSE Backpressure Test 0 Register
+ */
/* Layout varies per chip pass: the cn96xxp1/cn96xxp3/cnf95xxp2 variants below
 * differ in [BP_CFG]/[ENABLE] widths and positions; select by detected SoC.
 */
union nixx_af_pse_bp_test0 {
	u64 u;
	struct nixx_af_pse_bp_test0_s {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_63                   : 52;
	} s;
	struct nixx_af_pse_bp_test0_cn96xxp1 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 8;
		u64 reserved_24_59                   : 36;
		u64 enable                           : 4;
	} cn96xxp1;
	struct nixx_af_pse_bp_test0_cn96xxp3 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 reserved_16_19                   : 4;
		u64 bp_cfg                           : 12;
		u64 reserved_32_55                   : 24;
		u64 reserved_56_57                   : 2;
		u64 enable                           : 6;
	} cn96xxp3;
	/* struct nixx_af_pse_bp_test0_cn96xxp1 cnf95xxp1; */
	struct nixx_af_pse_bp_test0_cnf95xxp2 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 8;
		u64 reserved_24_31                   : 8;
		u64 reserved_32_55                   : 24;
		u64 reserved_56_59                   : 4;
		u64 enable                           : 4;
	} cnf95xxp2;
};

/* Fixed byte offset 0x840 within RVU_PF_BAR0 — internal test register. */
static inline u64 NIXX_AF_PSE_BP_TEST0(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PSE_BP_TEST0(void)
{
	return 0x840;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_bp_test1
+ *
+ * INTERNAL: NIX AF PSE Backpressure Test 1 Register
+ */
/* Layout varies per chip pass: the cn96xxp1/cn96xxp3/cnf95xxp2 variants below
 * differ in [BP_CFG]/[ENABLE] widths and positions; select by detected SoC.
 */
union nixx_af_pse_bp_test1 {
	u64 u;
	struct nixx_af_pse_bp_test1_s {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 10;
		u64 reserved_26_63                   : 38;
	} s;
	struct nixx_af_pse_bp_test1_cn96xxp1 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 8;
		u64 reserved_24_59                   : 36;
		u64 enable                           : 4;
	} cn96xxp1;
	struct nixx_af_pse_bp_test1_cn96xxp3 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 10;
		u64 reserved_26_31                   : 6;
		u64 reserved_32_55                   : 24;
		u64 reserved_56_58                   : 3;
		u64 enable                           : 5;
	} cn96xxp3;
	/* struct nixx_af_pse_bp_test1_cn96xxp1 cnf95xxp1; */
	struct nixx_af_pse_bp_test1_cnf95xxp2 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 8;
		u64 reserved_24_31                   : 8;
		u64 reserved_32_55                   : 24;
		u64 reserved_56_59                   : 4;
		u64 enable                           : 4;
	} cnf95xxp2;
};

/* Fixed byte offset 0x850 within RVU_PF_BAR0 — internal test register. */
static inline u64 NIXX_AF_PSE_BP_TEST1(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PSE_BP_TEST1(void)
{
	return 0x850;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_bp_test2
+ *
+ * INTERNAL: NIX AF PSE Backpressure Test 2 Register
+ */
/* Layout varies per chip pass: the cn96xxp1/cn96xxp3/cnf95xxp2 variants below
 * differ in [BP_CFG]/[ENABLE] widths and positions; select by detected SoC.
 */
union nixx_af_pse_bp_test2 {
	u64 u;
	struct nixx_af_pse_bp_test2_s {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 10;
		u64 reserved_26_63                   : 38;
	} s;
	struct nixx_af_pse_bp_test2_cn96xxp1 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 8;
		u64 reserved_24_59                   : 36;
		u64 enable                           : 4;
	} cn96xxp1;
	struct nixx_af_pse_bp_test2_cn96xxp3 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 10;
		u64 reserved_26_31                   : 6;
		u64 reserved_32_55                   : 24;
		u64 reserved_56_58                   : 3;
		u64 enable                           : 5;
	} cn96xxp3;
	/* struct nixx_af_pse_bp_test2_cn96xxp1 cnf95xxp1; */
	struct nixx_af_pse_bp_test2_cnf95xxp2 {
		u64 lfsr_freq                        : 12;
		u64 reserved_12_15                   : 4;
		u64 bp_cfg                           : 8;
		u64 reserved_24_31                   : 8;
		u64 reserved_32_55                   : 24;
		u64 reserved_56_59                   : 4;
		u64 enable                           : 4;
	} cnf95xxp2;
};

/* Fixed byte offset 0x860 within RVU_PF_BAR0 — internal test register. */
static inline u64 NIXX_AF_PSE_BP_TEST2(void)
	__attribute__ ((pure, always_inline));
static inline u64 NIXX_AF_PSE_BP_TEST2(void)
{
	return 0x860;
}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_bp_test3
+ *
+ * INTERNAL: NIX AF PSE Backpressure Test 3 Register
+ */
+union nixx_af_pse_bp_test3 {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_pse_bp_test3_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 10;
+		u64 reserved_26_63                   : 38;
+	} s;
+	struct nixx_af_pse_bp_test3_cn96xxp1 { /* per-silicon field layout variant */
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 8;
+		u64 reserved_24_59                   : 36;
+		u64 enable                           : 4;
+	} cn96xxp1;
+	struct nixx_af_pse_bp_test3_cn96xxp3 {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 10;
+		u64 reserved_26_31                   : 6;
+		u64 reserved_32_55                   : 24;
+		u64 reserved_56_58                   : 3;
+		u64 enable                           : 5;
+	} cn96xxp3;
+	/* struct nixx_af_pse_bp_test3_cn96xxp1 cnf95xxp1; */
+	struct nixx_af_pse_bp_test3_cnf95xxp2 {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 8;
+		u64 reserved_24_31                   : 8;
+		u64 reserved_32_55                   : 24;
+		u64 reserved_56_59                   : 4;
+		u64 enable                           : 4;
+	} cnf95xxp2;
+};
+
+static inline u64 NIXX_AF_PSE_BP_TEST3(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_PSE_BP_TEST3(void)
+{
+	return 0x870; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_channel_level
+ *
+ * NIX AF PSE Channel Level Register
+ */
+union nixx_af_pse_channel_level {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_pse_channel_level_s {
+		u64 bp_level                         : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_pse_channel_level_s cn; */
+};
+
+static inline u64 NIXX_AF_PSE_CHANNEL_LEVEL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_PSE_CHANNEL_LEVEL(void)
+{
+	return 0x800; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_const
+ *
+ * NIX AF PSE Constants Register This register contains constants for
+ * software discovery.
+ */
+union nixx_af_pse_const {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_pse_const_s {
+		u64 levels                           : 4;
+		u64 reserved_4_7                     : 4;
+		u64 mark_formats                     : 8;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct nixx_af_pse_const_s cn; */
+};
+
+static inline u64 NIXX_AF_PSE_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_PSE_CONST(void)
+{
+	return 0x60; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_eco
+ *
+ * INTERNAL: AF PSE ECO Register
+ */
+union nixx_af_pse_eco {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_pse_eco_s {
+		u64 eco_rw                           : 64;
+	} s;
+	/* struct nixx_af_pse_eco_s cn; */
+};
+
+static inline u64 NIXX_AF_PSE_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_PSE_ECO(void)
+{
+	return 0x5d0; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_expr_bp_test
+ *
+ * INTERNAL: NIX AF PSE Express Backpressure Test Register  Internal:
+ * 802.3br frame preemption/express path is defeatured.
+ */
+union nixx_af_pse_expr_bp_test {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_pse_expr_bp_test_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 32;
+		u64 enable                           : 16;
+	} s;
+	/* struct nixx_af_pse_expr_bp_test_s cn; */
+};
+
+static inline u64 NIXX_AF_PSE_EXPR_BP_TEST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_PSE_EXPR_BP_TEST(void)
+{
+	return 0x890; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_norm_bp_test
+ *
+ * INTERNAL: NIX AF PSE Normal Backpressure Test Register
+ */
+union nixx_af_pse_norm_bp_test {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_pse_norm_bp_test_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 32;
+		u64 reserved_48_63                   : 16;
+	} s;
+	struct nixx_af_pse_norm_bp_test_cn96xxp1 { /* per-silicon field layout variant */
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 32;
+		u64 enable                           : 16;
+	} cn96xxp1;
+	struct nixx_af_pse_norm_bp_test_cn96xxp3 {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 12;
+		u64 reserved_28_57                   : 30;
+		u64 enable                           : 6;
+	} cn96xxp3;
+	/* struct nixx_af_pse_norm_bp_test_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_PSE_NORM_BP_TEST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_PSE_NORM_BP_TEST(void)
+{
+	return 0x880; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_pse_shaper_cfg
+ *
+ * NIX AF PSE Shaper Configuration Register
+ */
+union nixx_af_pse_shaper_cfg {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_pse_shaper_cfg_s {
+		u64 red_send_as_yellow               : 1;
+		u64 color_aware                      : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct nixx_af_pse_shaper_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_PSE_SHAPER_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_PSE_SHAPER_CFG(void)
+{
+	return 0x810; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_ras
+ *
+ * NIX AF RAS Interrupt Register This register is intended for delivery
+ * of RAS events to the SCP, so should be ignored by OS drivers.
+ */
+union nixx_af_ras {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_ras_s {
+		u64 rx_mce_poison                    : 1;
+		u64 rx_mcast_wqe_poison              : 1;
+		u64 rx_mirror_wqe_poison             : 1;
+		u64 rx_mcast_data_poison             : 1;
+		u64 rx_mirror_data_poison            : 1;
+		u64 reserved_5_31                    : 27;
+		u64 aq_ctx_poison                    : 1;
+		u64 aq_res_poison                    : 1;
+		u64 aq_inst_poison                   : 1;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct nixx_af_ras_s cn; */
+};
+
+static inline u64 NIXX_AF_RAS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RAS(void)
+{
+	return 0x1a0; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_ras_ena_w1c
+ *
+ * NIX AF RAS Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union nixx_af_ras_ena_w1c {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_ras_ena_w1c_s { /* same bit layout as NIXX_AF_RAS */
+		u64 rx_mce_poison                    : 1;
+		u64 rx_mcast_wqe_poison              : 1;
+		u64 rx_mirror_wqe_poison             : 1;
+		u64 rx_mcast_data_poison             : 1;
+		u64 rx_mirror_data_poison            : 1;
+		u64 reserved_5_31                    : 27;
+		u64 aq_ctx_poison                    : 1;
+		u64 aq_res_poison                    : 1;
+		u64 aq_inst_poison                   : 1;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct nixx_af_ras_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_AF_RAS_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RAS_ENA_W1C(void)
+{
+	return 0x1b8; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_ras_ena_w1s
+ *
+ * NIX AF RAS Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union nixx_af_ras_ena_w1s {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_ras_ena_w1s_s { /* same bit layout as NIXX_AF_RAS */
+		u64 rx_mce_poison                    : 1;
+		u64 rx_mcast_wqe_poison              : 1;
+		u64 rx_mirror_wqe_poison             : 1;
+		u64 rx_mcast_data_poison             : 1;
+		u64 rx_mirror_data_poison            : 1;
+		u64 reserved_5_31                    : 27;
+		u64 aq_ctx_poison                    : 1;
+		u64 aq_res_poison                    : 1;
+		u64 aq_inst_poison                   : 1;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct nixx_af_ras_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_RAS_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RAS_ENA_W1S(void)
+{
+	return 0x1b0; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_ras_w1s
+ *
+ * NIX AF RAS Interrupt Set Register This register sets interrupt bits.
+ */
+union nixx_af_ras_w1s {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_ras_w1s_s { /* same bit layout as NIXX_AF_RAS */
+		u64 rx_mce_poison                    : 1;
+		u64 rx_mcast_wqe_poison              : 1;
+		u64 rx_mirror_wqe_poison             : 1;
+		u64 rx_mcast_data_poison             : 1;
+		u64 rx_mirror_data_poison            : 1;
+		u64 reserved_5_31                    : 27;
+		u64 aq_ctx_poison                    : 1;
+		u64 aq_res_poison                    : 1;
+		u64 aq_inst_poison                   : 1;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct nixx_af_ras_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_RAS_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RAS_W1S(void)
+{
+	return 0x1a8; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_reb_bp_test#
+ *
+ * INTERNAL: NIX AF REB Backpressure Test Registers
+ */
+union nixx_af_reb_bp_testx {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_reb_bp_testx_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 8;
+		u64 reserved_24_47                   : 24;
+		u64 enable                           : 4;
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct nixx_af_reb_bp_testx_s cn; */
+};
+
+static inline u64 NIXX_AF_REB_BP_TESTX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_REB_BP_TESTX(u64 a)
+{
+	return 0x4840 + 0x10000 * a; /* a: register index, stride 0x10000 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rq_const
+ *
+ * NIX AF RQ Constants Register This register contains constants for
+ * software discovery.
+ */
+union nixx_af_rq_const {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rq_const_s {
+		u64 queues_per_lf                    : 24;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_rq_const_s cn; */
+};
+
+static inline u64 NIXX_AF_RQ_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RQ_CONST(void)
+{
+	return 0x50; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rqm_bp_test
+ *
+ * INTERNAL: NIX AF RQM Backpressure Test Register
+ */
+union nixx_af_rqm_bp_test {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rqm_bp_test_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 16;
+		u64 reserved_32_47                   : 16;
+		u64 enable                           : 8;
+		u64 reserved_56_63                   : 8;
+	} s;
+	/* struct nixx_af_rqm_bp_test_s cn; */
+};
+
+static inline u64 NIXX_AF_RQM_BP_TEST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RQM_BP_TEST(void)
+{
+	return 0x4880; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rqm_eco
+ *
+ * INTERNAL: AF RQM ECO Register
+ */
+union nixx_af_rqm_eco {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rqm_eco_s {
+		u64 eco_rw                           : 64;
+	} s;
+	/* struct nixx_af_rqm_eco_s cn; */
+};
+
+static inline u64 NIXX_AF_RQM_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RQM_ECO(void)
+{
+	return 0x5a0; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rvu_int
+ *
+ * NIX AF RVU Interrupt Register This register contains RVU error
+ * interrupt summary bits.
+ */
+union nixx_af_rvu_int {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rvu_int_s {
+		u64 unmapped_slot                    : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_rvu_int_s cn; */
+};
+
+static inline u64 NIXX_AF_RVU_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RVU_INT(void)
+{
+	return 0x1c0; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rvu_int_ena_w1c
+ *
+ * NIX AF RVU Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union nixx_af_rvu_int_ena_w1c {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rvu_int_ena_w1c_s { /* same bit layout as NIXX_AF_RVU_INT */
+		u64 unmapped_slot                    : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_rvu_int_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_AF_RVU_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RVU_INT_ENA_W1C(void)
+{
+	return 0x1d8; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rvu_int_ena_w1s
+ *
+ * NIX AF RVU Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union nixx_af_rvu_int_ena_w1s {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rvu_int_ena_w1s_s { /* same bit layout as NIXX_AF_RVU_INT */
+		u64 unmapped_slot                    : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_rvu_int_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_RVU_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RVU_INT_ENA_W1S(void)
+{
+	return 0x1d0; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rvu_int_w1s
+ *
+ * NIX AF RVU Interrupt Set Register This register sets interrupt bits.
+ */
+union nixx_af_rvu_int_w1s {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rvu_int_w1s_s { /* same bit layout as NIXX_AF_RVU_INT */
+		u64 unmapped_slot                    : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_rvu_int_w1s_s cn; */
+};
+
+static inline u64 NIXX_AF_RVU_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RVU_INT_W1S(void)
+{
+	return 0x1c8; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rvu_lf_cfg_debug
+ *
+ * NIX Privileged LF Configuration Debug Register This debug register
+ * allows software to lookup the reverse mapping from VF/PF slot to LF.
+ * The forward mapping is programmed with NIX_PRIV_LF()_CFG.
+ */
+union nixx_af_rvu_lf_cfg_debug {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rvu_lf_cfg_debug_s {
+		u64 lf                               : 12;
+		u64 lf_valid                         : 1;
+		u64 exec                             : 1;
+		u64 reserved_14_15                   : 2;
+		u64 slot                             : 8;
+		u64 pf_func                          : 16;
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct nixx_af_rvu_lf_cfg_debug_s cn; */
+};
+
+static inline u64 NIXX_AF_RVU_LF_CFG_DEBUG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RVU_LF_CFG_DEBUG(void)
+{
+	return 0x8000030; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_active_cycles_pc#
+ *
+ * NIX AF Active Cycles Register These registers are indexed by the
+ * conditional clock domain number.
+ */
+union nixx_af_rx_active_cycles_pcx {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_active_cycles_pcx_s {
+		u64 act_cyc                          : 64;
+	} s;
+	/* struct nixx_af_rx_active_cycles_pcx_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_ACTIVE_CYCLES_PCX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_ACTIVE_CYCLES_PCX(u64 a)
+{
+	return 0x4800 + 0x10000 * a; /* a: register index, stride 0x10000 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_bpid#_status
+ *
+ * NIX AF Receive Backpressure ID Status Registers
+ */
+union nixx_af_rx_bpidx_status {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_bpidx_status_s {
+		u64 aura_cnt                         : 32;
+		u64 cq_cnt                           : 32;
+	} s;
+	/* struct nixx_af_rx_bpidx_status_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_BPIDX_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_BPIDX_STATUS(u64 a)
+{
+	return 0x1a20 + 0x20000 * a; /* a: BPID index, stride 0x20000 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_cfg
+ *
+ * NIX AF Receive Configuration Register
+ */
+union nixx_af_rx_cfg {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_cfg_s {
+		u64 cbp_ena                          : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_rx_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_CFG(void)
+{
+	return 0xd0; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_chan#_cfg
+ *
+ * NIX AF Receive Channel Configuration Registers
+ */
+union nixx_af_rx_chanx_cfg {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_chanx_cfg_s {
+		u64 bpid                             : 9;
+		u64 reserved_9_15                    : 7;
+		u64 bp_ena                           : 1;
+		u64 sw_xoff                          : 1;
+		u64 imp                              : 1;
+		u64 reserved_19_63                   : 45;
+	} s;
+	/* struct nixx_af_rx_chanx_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_CHANX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_CHANX_CFG(u64 a)
+{
+	return 0x1a30 + 0x8000 * a; /* a: channel index, stride 0x8000 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_cpt#_credit
+ *
+ * INTERNAL: NIX AF Receive CPT Credit Register  Internal: Not used; no
+ * IPSEC fast-path.
+ */
+union nixx_af_rx_cptx_credit {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_cptx_credit_s {
+		u64 inst_cred_cnt                    : 22;
+		u64 reserved_22_63                   : 42;
+	} s;
+	/* struct nixx_af_rx_cptx_credit_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_CPTX_CREDIT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_CPTX_CREDIT(u64 a)
+{
+	return 0x360 + 8 * a; /* a: register index, stride 8 bytes */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_cpt#_inst_qsel
+ *
+ * INTERNAL: NIX AF Receive CPT Instruction Queue Select Register
+ * Internal: Not used; no IPSEC fast-path.
+ */
+union nixx_af_rx_cptx_inst_qsel {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_cptx_inst_qsel_s {
+		u64 slot                             : 8;
+		u64 pf_func                          : 16;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_rx_cptx_inst_qsel_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_CPTX_INST_QSEL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_CPTX_INST_QSEL(u64 a)
+{
+	return 0x320 + 8 * a; /* a: register index, stride 8 bytes */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_iip4
+ *
+ * NIX AF Receive Inner IPv4 Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an inner IPv4 header.
+ * Typically the same as NPC_AF_PCK_DEF_IIP4.
+ */
+union nixx_af_rx_def_iip4 {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_iip4_s {
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_iip4_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_IIP4(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_IIP4(void)
+{
+	return 0x220; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_iip6
+ *
+ * NIX AF Receive Inner IPv6 Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an inner IPv6 header.
+ */
+union nixx_af_rx_def_iip6 {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_iip6_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_iip6_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_IIP6(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_IIP6(void)
+{
+	return 0x240; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_ipsec#
+ *
+ * INTERNAL: NIX AF Receive IPSEC Header Definition Registers  Internal:
+ * Not used; no IPSEC fast-path.
+ */
+union nixx_af_rx_def_ipsecx {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_ipsecx_s {
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11                      : 1;
+		u64 spi_offset                       : 4;
+		u64 spi_nz                           : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct nixx_af_rx_def_ipsecx_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_IPSECX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_IPSECX(u64 a)
+{
+	return 0x2b0 + 8 * a; /* a: register index, stride 8 bytes */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_isctp
+ *
+ * NIX AF Receive Inner SCTP Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an inner SCTP header.
+ */
+union nixx_af_rx_def_isctp {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_isctp_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_isctp_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_ISCTP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_ISCTP(void)
+{
+	return 0x2a0; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_itcp
+ *
+ * NIX AF Receive Inner TCP Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an inner TCP header.
+ */
+union nixx_af_rx_def_itcp {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_itcp_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_itcp_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_ITCP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_ITCP(void)
+{
+	return 0x260; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_iudp
+ *
+ * NIX AF Receive Inner UDP Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an inner UDP header.
+ */
+union nixx_af_rx_def_iudp {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_iudp_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_iudp_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_IUDP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_IUDP(void)
+{
+	return 0x280; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_oip4
+ *
+ * NIX AF Receive Outer IPv4 Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an outer IPv4 L3 header.
+ * Typically the same as NPC_AF_PCK_DEF_OIP4.
+ */
+union nixx_af_rx_def_oip4 {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_oip4_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_oip4_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_OIP4(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_OIP4(void)
+{
+	return 0x210; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_oip6
+ *
+ * NIX AF Receive Outer IPv6 Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an outer IPv6 header.
+ * Typically the same as NPC_AF_PCK_DEF_OIP6.
+ */
+union nixx_af_rx_def_oip6 {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_oip6_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_oip6_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_OIP6(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_OIP6(void)
+{
+	return 0x230; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_ol2
+ *
+ * NIX AF Receive Outer L2 Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an outer L2/Ethernet header.
+ * Typically the same as NPC_AF_PCK_DEF_OL2.
+ */
+union nixx_af_rx_def_ol2 {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_ol2_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_ol2_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_OL2(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_OL2(void)
+{
+	return 0x200; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_osctp
+ *
+ * NIX AF Receive Outer SCTP Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an outer SCTP header.
+ */
+union nixx_af_rx_def_osctp {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_osctp_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_osctp_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_OSCTP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_OSCTP(void)
+{
+	return 0x290; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_otcp
+ *
+ * NIX AF Receive Outer TCP Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an outer TCP header.
+ */
+union nixx_af_rx_def_otcp {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_otcp_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_otcp_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_OTCP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_OTCP(void)
+{
+	return 0x250; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_def_oudp
+ *
+ * NIX AF Receive Outer UDP Header Definition Register Defines layer
+ * information in NPC_RESULT_S to identify an outer UDP header.
+ */
+union nixx_af_rx_def_oudp {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_def_oudp_s { /* same layer-definition layout as _iip4 */
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_af_rx_def_oudp_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_DEF_OUDP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_DEF_OUDP(void)
+{
+	return 0x270; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_flow_key_alg#_field#
+ *
+ * NIX AF Receive Flow Key Algorithm Field Registers A flow key algorithm
+ * defines how the 40-byte FLOW_KEY is formed from the received packet
+ * header. FLOW_KEY is formed using up to five header fields (this
+ * register's last index) with up to 16 bytes per field. Header fields
+ * must not overlap in FLOW_KEY.  The algorithm (index {a} (ALG) of these
+ * registers) is selected by NIX_RX_ACTION_S[FLOW_KEY_ALG] from the
+ * packet's NPC_RESULT_S[ACTION].  Internal: 40-byte FLOW_KEY is wide
+ * enough to support an IPv6 5-tuple that includes a VXLAN/GENEVE/NVGRE
+ * tunnel ID, e.g: _ Source IP: 16B. _ Dest IP: 16B. _ Source port: 2B. _
+ * Dest port: 2B. _ Tunnel VNI/VSI: 3B. _ Total: 39B.
+ */
+union nixx_af_rx_flow_key_algx_fieldx {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_flow_key_algx_fieldx_s {
+		u64 key_offset                       : 6;
+		u64 ln_mask                          : 1;
+		u64 fn_mask                          : 1;
+		u64 hdr_offset                       : 8;
+		u64 bytesm1                          : 5;
+		u64 lid                              : 3;
+		u64 reserved_24                      : 1;
+		u64 ena                              : 1;
+		u64 sel_chan                         : 1;
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct nixx_af_rx_flow_key_algx_fieldx_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_FLOW_KEY_ALGX_FIELDX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_FLOW_KEY_ALGX_FIELDX(u64 a, u64 b)
+{
+	return 0x1800 + 0x40000 * a + 8 * b; /* a: ALG (stride 0x40000), b: field (stride 8) */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_ipsec_gen_cfg
+ *
+ * INTERNAL: NIX AF Receive IPSEC General Configuration Register
+ * Internal: Not used; no IPSEC fast-path.
+ */
+union nixx_af_rx_ipsec_gen_cfg {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_ipsec_gen_cfg_s {
+		u64 param2                           : 16;
+		u64 param1                           : 16;
+		u64 opcode                           : 16;
+		u64 egrp                             : 3;
+		u64 reserved_51_63                   : 13;
+	} s;
+	/* struct nixx_af_rx_ipsec_gen_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_IPSEC_GEN_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_IPSEC_GEN_CFG(void)
+{
+	return 0x300; /* register offset in RVU_PF_BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_link#_cfg
+ *
+ * NIX AF Receive Link Configuration Registers Index enumerated by
+ * NIX_LINK_E.
+ */
+union nixx_af_rx_linkx_cfg {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_linkx_cfg_s {
+		u64 minlen                           : 16;
+		u64 maxlen                           : 16;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct nixx_af_rx_linkx_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_LINKX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_LINKX_CFG(u64 a)
+{
+	return 0x540 + 0x10000 * a; /* a: NIX_LINK_E index, stride 0x10000 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_link#_sl#_spkt_cnt
+ *
+ * INTERNAL: NIX Receive Software Sync Link Packet Count Registers  For
+ * diagnostic use only for debug of NIX_AF_RX_SW_SYNC[ENA] function. LINK
+ * index is enumerated by NIX_LINK_E. For the internal multicast/mirror
+ * link (NIX_LINK_E::MC), SL index is zero for multicast replay, one for
+ * mirror replay. SL index one is reserved for all other links.
+ * Internal: 802.3br frame preemption/express path is defeatured. Old
+ * definition of SL index: SL index is zero for non-express packets, one
+ * for express packets. For the internal NIX_LINK_E::MC, SL index is zero
+ * for multicast replay, one for mirror replay.
+ */
+union nixx_af_rx_linkx_slx_spkt_cnt {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_linkx_slx_spkt_cnt_s {
+		u64 in_cnt                           : 20;
+		u64 reserved_20_31                   : 12;
+		u64 out_cnt                          : 20;
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct nixx_af_rx_linkx_slx_spkt_cnt_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_LINKX_SLX_SPKT_CNT(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_LINKX_SLX_SPKT_CNT(u64 a, u64 b)
+{
+	return 0x500 + 0x10000 * a + 8 * b; /* a: LINK (stride 0x10000), b: SL (stride 8) */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_link#_wrr_cfg
+ *
+ * NIX AF Receive Link Weighted Round Robin Configuration Registers Index
+ * enumerated by NIX_LINK_E.
+ */
+union nixx_af_rx_linkx_wrr_cfg {
+	u64 u; /* entire register as one 64-bit value */
+	struct nixx_af_rx_linkx_wrr_cfg_s {
+		u64 weight                           : 8;
+		u64 reserved_8_63                    : 56;
+	} s;
+	/* struct nixx_af_rx_linkx_wrr_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_LINKX_WRR_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_LINKX_WRR_CFG(u64 a)
+{
+	return 0x560 + 0x10000 * a; /* a: NIX_LINK_E index, stride 0x10000 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_mcast_base
+ *
+ * NIX AF Receive Multicast/Mirror Table Base Address Register This
+ * register specifies the base AF IOVA of the receive multicast/mirror
+ * table in NDC/LLC/DRAM. The table consists of 1 \<\<
+ * (NIX_AF_RX_MCAST_CFG[SIZE] + 8) contiguous NIX_RX_MCE_S structures.
+ * The size of each structure is 1 \<\< NIX_AF_CONST3[MCE_LOG2BYTES].
+ * The table contains multicast/mirror replication lists. Each list
+ * consists of linked entries with NIX_RX_MCE_S[EOL] = 1 in the last
+ * entry. All lists must reside within the table size specified by
+ * NIX_AF_RX_MCAST_CFG[SIZE]. A mirror replication list will typically
+ * consist of two entries, but that is not checked or enforced by
+ * hardware.  A receive packet is multicast when the action returned by
+ * NPC has NIX_RX_ACTION_S[OP] = NIX_RX_ACTIONOP_E::MCAST. A receive
+ * packet is mirrored when the action returned by NPC has
+ * NIX_RX_ACTION_S[OP] = NIX_RX_ACTIONOP_E::MIRROR. In both cases,
+ * NIX_RX_ACTION_S[INDEX] specifies the index of the replication list's
+ * first NIX_RX_MCE_S in the table, and a linked entry with
+ * NIX_RX_MCE_S[EOL] = 1 indicates the end of list.  If a mirrored flow
+ * is part of a multicast replication list, software should include the
+ * two mirror entries in that list.  Internal: A multicast list may have
+ * multiple entries for the same LF (e.g. for future RoCE/IB multicast).
+ */
+union nixx_af_rx_mcast_base {
+	u64 u;
+	struct nixx_af_rx_mcast_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_rx_mcast_base_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_MCAST_BASE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_MCAST_BASE(void)
+{
+	return 0x100;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_mcast_buf_base
+ *
+ * NIX AF Receive Multicast Buffer Base Address Register This register
+ * specifies the base AF IOVA of the receive multicast buffers in
+ * NDC/LLC/DRAM. These buffers are used to temporarily store packets
+ * whose action returned by NPC has NIX_RX_ACTION_S[OP] =
+ * NIX_RX_ACTIONOP_E::MCAST. The number of buffers is configured by
+ * NIX_AF_RX_MCAST_BUF_CFG[SIZE].  If the number of free buffers is
+ * insufficient for a received multicast packet, hardware tail drops the
+ * packet and sets NIX_AF_GEN_INT[RX_MCAST_DROP].  Hardware prioritizes
+ * the processing of RX mirror packets over RX multicast packets.
+ */
+union nixx_af_rx_mcast_buf_base {
+	u64 u;
+	struct nixx_af_rx_mcast_buf_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_rx_mcast_buf_base_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_MCAST_BUF_BASE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_MCAST_BUF_BASE(void)
+{
+	return 0x120;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_mcast_buf_cfg
+ *
+ * NIX AF Receive Multicast Buffer Configuration Register See
+ * NIX_AF_RX_MCAST_BUF_BASE.
+ */
+union nixx_af_rx_mcast_buf_cfg {
+	u64 u;
+	struct nixx_af_rx_mcast_buf_cfg_s {
+		u64 size                             : 4;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_21_23                   : 3;
+		u64 npc_replay_pkind                 : 6;
+		u64 reserved_30_31                   : 2;
+		u64 free_buf_level                   : 11;
+		u64 reserved_43_61                   : 19;
+		u64 busy                             : 1;
+		u64 ena                              : 1;
+	} s;
+	struct nixx_af_rx_mcast_buf_cfg_cn96xxp1 {
+		u64 size                             : 4;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_21_23                   : 3;
+		u64 npc_replay_pkind                 : 6;
+		u64 reserved_30_31                   : 2;
+		u64 free_buf_level                   : 11;
+		u64 reserved_43_61                   : 19;
+		u64 reserved_62                      : 1;
+		u64 ena                              : 1;
+	} cn96xxp1;
+	/* struct nixx_af_rx_mcast_buf_cfg_s cn96xxp3; */
+	struct nixx_af_rx_mcast_buf_cfg_cnf95xxp1 {
+		u64 size                             : 4;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_21_23                   : 3;
+		u64 npc_replay_pkind                 : 6;
+		u64 reserved_30_31                   : 2;
+		u64 free_buf_level                   : 11;
+		u64 reserved_43_62                   : 20;
+		u64 ena                              : 1;
+	} cnf95xxp1;
+	/* struct nixx_af_rx_mcast_buf_cfg_s cnf95xxp2; */
+};
+
+static inline u64 NIXX_AF_RX_MCAST_BUF_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_MCAST_BUF_CFG(void)
+{
+	return 0x130;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_mcast_cfg
+ *
+ * NIX AF Receive Multicast/Mirror Table Configuration Register See
+ * NIX_AF_RX_MCAST_BASE.
+ */
+union nixx_af_rx_mcast_cfg {
+	u64 u;
+	struct nixx_af_rx_mcast_cfg_s {
+		u64 size                             : 4;
+		u64 max_list_lenm1                   : 8;
+		u64 reserved_12_19                   : 8;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_37_63                   : 27;
+	} s;
+	/* struct nixx_af_rx_mcast_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_MCAST_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_MCAST_CFG(void)
+{
+	return 0x110;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_mirror_buf_base
+ *
+ * NIX AF Receive Mirror Buffer Base Address Register This register
+ * specifies the base AF IOVA of the receive mirror buffers in
+ * NDC/LLC/DRAM. These buffers are used to temporarily store packets
+ * whose action returned by NPC has NIX_RX_ACTION_S[OP] =
+ * NIX_RX_ACTIONOP_E::MIRROR. The number of buffers is configured by
+ * NIX_AF_RX_MIRROR_BUF_CFG[SIZE].  If the number of free buffers is
+ * insufficient for a received multicast packet, hardware tail drops the
+ * packet and sets NIX_AF_GEN_INT[RX_MIRROR_DROP].  Hardware prioritizes
+ * the processing of RX mirror packets over RX multicast packets.
+ */
+union nixx_af_rx_mirror_buf_base {
+	u64 u;
+	struct nixx_af_rx_mirror_buf_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_rx_mirror_buf_base_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_MIRROR_BUF_BASE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_MIRROR_BUF_BASE(void)
+{
+	return 0x140;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_mirror_buf_cfg
+ *
+ * NIX AF Receive Mirror Buffer Configuration Register See
+ * NIX_AF_RX_MIRROR_BUF_BASE.
+ */
+union nixx_af_rx_mirror_buf_cfg {
+	u64 u;
+	struct nixx_af_rx_mirror_buf_cfg_s {
+		u64 size                             : 4;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_21_23                   : 3;
+		u64 npc_replay_pkind                 : 6;
+		u64 reserved_30_31                   : 2;
+		u64 free_buf_level                   : 11;
+		u64 reserved_43_61                   : 19;
+		u64 busy                             : 1;
+		u64 ena                              : 1;
+	} s;
+	struct nixx_af_rx_mirror_buf_cfg_cn96xxp1 {
+		u64 size                             : 4;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_21_23                   : 3;
+		u64 npc_replay_pkind                 : 6;
+		u64 reserved_30_31                   : 2;
+		u64 free_buf_level                   : 11;
+		u64 reserved_43_61                   : 19;
+		u64 reserved_62                      : 1;
+		u64 ena                              : 1;
+	} cn96xxp1;
+	/* struct nixx_af_rx_mirror_buf_cfg_s cn96xxp3; */
+	struct nixx_af_rx_mirror_buf_cfg_cnf95xxp1 {
+		u64 size                             : 4;
+		u64 way_mask                         : 16;
+		u64 caching                          : 1;
+		u64 reserved_21_23                   : 3;
+		u64 npc_replay_pkind                 : 6;
+		u64 reserved_30_31                   : 2;
+		u64 free_buf_level                   : 11;
+		u64 reserved_43_62                   : 20;
+		u64 ena                              : 1;
+	} cnf95xxp1;
+	/* struct nixx_af_rx_mirror_buf_cfg_s cnf95xxp2; */
+};
+
+static inline u64 NIXX_AF_RX_MIRROR_BUF_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_MIRROR_BUF_CFG(void)
+{
+	return 0x148;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_npc_mc_drop
+ *
+ * NIX AF Multicast Drop Statistics Register The counter increments for
+ * every dropped MC packet marked by the NPC.
+ */
+union nixx_af_rx_npc_mc_drop {
+	u64 u;
+	struct nixx_af_rx_npc_mc_drop_s {
+		u64 stat                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_rx_npc_mc_drop_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_NPC_MC_DROP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_NPC_MC_DROP(void)
+{
+	return 0x4710;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_npc_mc_rcv
+ *
+ * NIX AF Multicast Receive Statistics Register The counter increments
+ * for every received MC packet marked by the NPC.
+ */
+union nixx_af_rx_npc_mc_rcv {
+	u64 u;
+	struct nixx_af_rx_npc_mc_rcv_s {
+		u64 stat                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_rx_npc_mc_rcv_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_NPC_MC_RCV(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_NPC_MC_RCV(void)
+{
+	return 0x4700;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_npc_mirror_drop
+ *
+ * NIX AF Mirror Drop Statistics Register The counter increments for
+ * every dropped MIRROR packet marked by the NPC.
+ */
+union nixx_af_rx_npc_mirror_drop {
+	u64 u;
+	struct nixx_af_rx_npc_mirror_drop_s {
+		u64 stat                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_rx_npc_mirror_drop_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_NPC_MIRROR_DROP(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_NPC_MIRROR_DROP(void)
+{
+	return 0x4730;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_npc_mirror_rcv
+ *
+ * NIX AF Mirror Receive Statistics Register The counter increments for
+ * every received MIRROR packet marked by the NPC.
+ */
+union nixx_af_rx_npc_mirror_rcv {
+	u64 u;
+	struct nixx_af_rx_npc_mirror_rcv_s {
+		u64 stat                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_rx_npc_mirror_rcv_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_NPC_MIRROR_RCV(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_NPC_MIRROR_RCV(void)
+{
+	return 0x4720;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_rx_sw_sync
+ *
+ * NIX AF Receive Software Sync Register
+ */
+union nixx_af_rx_sw_sync {
+	u64 u;
+	struct nixx_af_rx_sw_sync_s {
+		u64 ena                              : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_rx_sw_sync_s cn; */
+};
+
+static inline u64 NIXX_AF_RX_SW_SYNC(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_RX_SW_SYNC(void)
+{
+	return 0x550;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sdp_hw_xoff#
+ *
+ * NIX AF SDP Transmit Link Hardware Controlled XOFF Registers.
+ */
+union nixx_af_sdp_hw_xoffx {
+	u64 u;
+	struct nixx_af_sdp_hw_xoffx_s {
+		u64 chan_xoff                        : 64;
+	} s;
+	/* struct nixx_af_sdp_hw_xoffx_s cn; */
+};
+
+static inline u64 NIXX_AF_SDP_HW_XOFFX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SDP_HW_XOFFX(u64 a)
+{
+	return 0xac0 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sdp_link_credit
+ *
+ * NIX AF Transmit Link SDP Credit Register This register tracks SDP link
+ * credits.
+ */
+union nixx_af_sdp_link_credit {
+	u64 u;
+	struct nixx_af_sdp_link_credit_s {
+		u64 reserved_0                       : 1;
+		u64 cc_enable                        : 1;
+		u64 cc_packet_cnt                    : 10;
+		u64 cc_unit_cnt                      : 20;
+		u64 reserved_32_62                   : 31;
+		u64 pse_pkt_id_lmt                   : 1;
+	} s;
+	struct nixx_af_sdp_link_credit_cn96xx {
+		u64 reserved_0                       : 1;
+		u64 cc_enable                        : 1;
+		u64 cc_packet_cnt                    : 10;
+		u64 cc_unit_cnt                      : 20;
+		u64 reserved_32_62                   : 31;
+		u64 reserved_63                      : 1;
+	} cn96xx;
+	/* struct nixx_af_sdp_link_credit_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_SDP_LINK_CREDIT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SDP_LINK_CREDIT(void)
+{
+	return 0xa40;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sdp_sw_xoff#
+ *
+ * INTERNAL: NIX AF SDP Transmit Link Software Controlled XOFF Registers
+ * Internal: Defeatured registers. Software should use
+ * NIX_AF_TL4()_SW_XOFF registers instead.
+ */
+union nixx_af_sdp_sw_xoffx {
+	u64 u;
+	struct nixx_af_sdp_sw_xoffx_s {
+		u64 chan_xoff                        : 64;
+	} s;
+	/* struct nixx_af_sdp_sw_xoffx_s cn; */
+};
+
+static inline u64 NIXX_AF_SDP_SW_XOFFX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SDP_SW_XOFFX(u64 a)
+{
+	return 0xa60 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sdp_tx_fifo_status
+ *
+ * NIX AF SDP Transmit FIFO Status Register Status of FIFO which
+ * transmits packets to SDP.
+ */
+union nixx_af_sdp_tx_fifo_status {
+	u64 u;
+	struct nixx_af_sdp_tx_fifo_status_s {
+		u64 count                            : 12;
+		u64 reserved_12_63                   : 52;
+	} s;
+	/* struct nixx_af_sdp_tx_fifo_status_s cn; */
+};
+
+static inline u64 NIXX_AF_SDP_TX_FIFO_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SDP_TX_FIFO_STATUS(void)
+{
+	return 0x650;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_seb_active_cycles_pc#
+ *
+ * NIX AF Active Cycles Register These registers are indexed by the
+ * conditional clock domain number.
+ */
+union nixx_af_seb_active_cycles_pcx {
+	u64 u;
+	struct nixx_af_seb_active_cycles_pcx_s {
+		u64 act_cyc                          : 64;
+	} s;
+	/* struct nixx_af_seb_active_cycles_pcx_s cn; */
+};
+
+static inline u64 NIXX_AF_SEB_ACTIVE_CYCLES_PCX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SEB_ACTIVE_CYCLES_PCX(u64 a)
+{
+	return 0x6c0 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_seb_bp_test
+ *
+ * INTERNAL: NIX AF SEB Backpressure Test Register
+ */
+union nixx_af_seb_bp_test {
+	u64 u;
+	struct nixx_af_seb_bp_test_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 14;
+		u64 reserved_30_47                   : 18;
+		u64 enable                           : 7;
+		u64 reserved_55_63                   : 9;
+	} s;
+	/* struct nixx_af_seb_bp_test_s cn; */
+};
+
+static inline u64 NIXX_AF_SEB_BP_TEST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SEB_BP_TEST(void)
+{
+	return 0x630;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_seb_cfg
+ *
+ * NIX SEB Configuration Register
+ */
+union nixx_af_seb_cfg {
+	u64 u;
+	struct nixx_af_seb_cfg_s {
+		u64 sg_ndc_sel                       : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_seb_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_SEB_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SEB_CFG(void)
+{
+	return 0x5f0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_seb_eco
+ *
+ * INTERNAL: AF SEB ECO Register
+ */
+union nixx_af_seb_eco {
+	u64 u;
+	struct nixx_af_seb_eco_s {
+		u64 eco_rw                           : 64;
+	} s;
+	/* struct nixx_af_seb_eco_s cn; */
+};
+
+static inline u64 NIXX_AF_SEB_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SEB_ECO(void)
+{
+	return 0x5c0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_seb_pipe_bp_test#
+ *
+ * INTERNAL: NIX AF SEB Pipe Backpressure Test Registers
+ */
+union nixx_af_seb_pipe_bp_testx {
+	u64 u;
+	struct nixx_af_seb_pipe_bp_testx_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 24;
+		u64 reserved_40_47                   : 8;
+		u64 enable                           : 12;
+		u64 reserved_60_63                   : 4;
+	} s;
+	/* struct nixx_af_seb_pipe_bp_testx_s cn; */
+};
+
+static inline u64 NIXX_AF_SEB_PIPE_BP_TESTX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SEB_PIPE_BP_TESTX(u64 a)
+{
+	return 0x600 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_seb_pipeb_bp_test#
+ *
+ * INTERNAL: NIX AF SEB Pipe Backpressure Test Registers
+ */
+union nixx_af_seb_pipeb_bp_testx {
+	u64 u;
+	struct nixx_af_seb_pipeb_bp_testx_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 18;
+		u64 reserved_34_47                   : 14;
+		u64 enable                           : 9;
+		u64 reserved_57_63                   : 7;
+	} s;
+	/* struct nixx_af_seb_pipeb_bp_testx_s cn; */
+};
+
+static inline u64 NIXX_AF_SEB_PIPEB_BP_TESTX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SEB_PIPEB_BP_TESTX(u64 a)
+{
+	return 0x608 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_seb_wd_tick_divider
+ *
+ * INTERNAL: NIX AF SEB TSTMP Watchdog Tick Divider Register
+ */
+union nixx_af_seb_wd_tick_divider {
+	u64 u;
+	struct nixx_af_seb_wd_tick_divider_s {
+		u64 tick_div_cfg                     : 7;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct nixx_af_seb_wd_tick_divider_s cn; */
+};
+
+static inline u64 NIXX_AF_SEB_WD_TICK_DIVIDER(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SEB_WD_TICK_DIVIDER(void)
+{
+	return 0x6f0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_smq#_cfg
+ *
+ * NIX AF SQM PSE Queue Configuration Registers
+ */
+union nixx_af_smqx_cfg {
+	u64 u;
+	struct nixx_af_smqx_cfg_s {
+		u64 minlen                           : 7;
+		u64 desc_shp_ctl_dis                 : 1;
+		u64 maxlen                           : 16;
+		u64 lf                               : 7;
+		u64 reserved_31_35                   : 5;
+		u64 max_vtag_ins                     : 3;
+		u64 rr_minlen                        : 9;
+		u64 express                          : 1;
+		u64 flush                            : 1;
+		u64 enq_xoff                         : 1;
+		u64 pri_thr                          : 6;
+		u64 reserved_57_63                   : 7;
+	} s;
+	/* struct nixx_af_smqx_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_SMQX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SMQX_CFG(u64 a)
+{
+	return 0x700 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_smq#_head
+ *
+ * NIX AF SQM SMQ Head Register These registers track the head of the SMQ
+ * linked list.
+ */
+union nixx_af_smqx_head {
+	u64 u;
+	struct nixx_af_smqx_head_s {
+		u64 sq_idx                           : 20;
+		u64 valid                            : 1;
+		u64 reserved_21_63                   : 43;
+	} s;
+	/* struct nixx_af_smqx_head_s cn; */
+};
+
+static inline u64 NIXX_AF_SMQX_HEAD(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SMQX_HEAD(u64 a)
+{
+	return 0x710 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_smq#_nxt_head
+ *
+ * NIX AF SQM SMQ Next Head Register These registers track the next head
+ * of the SMQ linked list.
+ */
+union nixx_af_smqx_nxt_head {
+	u64 u;
+	struct nixx_af_smqx_nxt_head_s {
+		u64 sq_idx                           : 20;
+		u64 valid                            : 1;
+		u64 reserved_21_63                   : 43;
+	} s;
+	/* struct nixx_af_smqx_nxt_head_s cn; */
+};
+
+static inline u64 NIXX_AF_SMQX_NXT_HEAD(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SMQX_NXT_HEAD(u64 a)
+{
+	return 0x740 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_smq#_status
+ *
+ * NIX AF SQM SMQ Status Register These registers track the status of the
+ * SMQ FIFO.
+ */
+union nixx_af_smqx_status {
+	u64 u;
+	struct nixx_af_smqx_status_s {
+		u64 level                            : 7;
+		u64 reserved_7_63                    : 57;
+	} s;
+	/* struct nixx_af_smqx_status_s cn; */
+};
+
+static inline u64 NIXX_AF_SMQX_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SMQX_STATUS(u64 a)
+{
+	return 0x730 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_smq#_tail
+ *
+ * NIX AF SQM SMQ Tail Register These registers track the tail of the
+ * SMQ linked list.
+ */
+union nixx_af_smqx_tail {
+	u64 u;
+	struct nixx_af_smqx_tail_s {
+		u64 sq_idx                           : 20;
+		u64 valid                            : 1;
+		u64 reserved_21_63                   : 43;
+	} s;
+	/* struct nixx_af_smqx_tail_s cn; */
+};
+
+static inline u64 NIXX_AF_SMQX_TAIL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SMQX_TAIL(u64 a)
+{
+	return 0x720 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sq_const
+ *
+ * NIX AF SQ Constants Register This register contains constants for
+ * software discovery.
+ */
+union nixx_af_sq_const {
+	u64 u;
+	struct nixx_af_sq_const_s {
+		u64 queues_per_lf                    : 24;
+		u64 smq_depth                        : 10;
+		u64 sqb_size                         : 16;
+		u64 reserved_50_63                   : 14;
+	} s;
+	/* struct nixx_af_sq_const_s cn; */
+};
+
+static inline u64 NIXX_AF_SQ_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SQ_CONST(void)
+{
+	return 0x40;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sqm_active_cycles_pc
+ *
+ * NIX AF SQM Active Cycles Register These registers are indexed by the
+ * conditional clock domain number.
+ */
+union nixx_af_sqm_active_cycles_pc {
+	u64 u;
+	struct nixx_af_sqm_active_cycles_pc_s {
+		u64 act_cyc                          : 64;
+	} s;
+	/* struct nixx_af_sqm_active_cycles_pc_s cn; */
+};
+
+static inline u64 NIXX_AF_SQM_ACTIVE_CYCLES_PC(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SQM_ACTIVE_CYCLES_PC(void)
+{
+	return 0x770;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sqm_bp_test#
+ *
+ * INTERNAL: NIX AF SQM Backpressure Test Register
+ */
+union nixx_af_sqm_bp_testx {
+	u64 u;
+	struct nixx_af_sqm_bp_testx_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 8;
+		u64 reserved_24_59                   : 36;
+		u64 enable                           : 4;
+	} s;
+	/* struct nixx_af_sqm_bp_testx_s cn; */
+};
+
+static inline u64 NIXX_AF_SQM_BP_TESTX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SQM_BP_TESTX(u64 a)
+{
+	return 0x760 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sqm_dbg_ctl_status
+ *
+ * NIX AF SQM Debug Register This register is for SQM diagnostic use
+ * only.
+ */
+union nixx_af_sqm_dbg_ctl_status {
+	u64 u;
+	struct nixx_af_sqm_dbg_ctl_status_s {
+		u64 tm1                              : 8;
+		u64 tm2                              : 1;
+		u64 tm3                              : 4;
+		u64 tm4                              : 1;
+		u64 tm5                              : 1;
+		u64 tm6                              : 1;
+		u64 tm7                              : 4;
+		u64 tm8                              : 1;
+		u64 tm9                              : 1;
+		u64 tm10                             : 1;
+		u64 tm11                             : 1;
+		u64 tm12                             : 1;
+		u64 tm13                             : 1;
+		u64 reserved_26_63                   : 38;
+	} s;
+	struct nixx_af_sqm_dbg_ctl_status_cn96xxp1 {
+		u64 tm1                              : 8;
+		u64 tm2                              : 1;
+		u64 tm3                              : 4;
+		u64 tm4                              : 1;
+		u64 tm5                              : 1;
+		u64 tm6                              : 1;
+		u64 tm7                              : 4;
+		u64 tm8                              : 1;
+		u64 tm9                              : 1;
+		u64 reserved_22_63                   : 42;
+	} cn96xxp1;
+	/* struct nixx_af_sqm_dbg_ctl_status_s cn96xxp3; */
+	/* struct nixx_af_sqm_dbg_ctl_status_cn96xxp1 cnf95xxp1; */
+	struct nixx_af_sqm_dbg_ctl_status_cnf95xxp2 {
+		u64 tm1                              : 8;
+		u64 tm2                              : 1;
+		u64 tm3                              : 4;
+		u64 tm4                              : 1;
+		u64 tm5                              : 1;
+		u64 tm6                              : 1;
+		u64 tm7                              : 4;
+		u64 tm8                              : 1;
+		u64 tm9                              : 1;
+		u64 reserved_22                      : 1;
+		u64 reserved_23                      : 1;
+		u64 reserved_24                      : 1;
+		u64 reserved_25                      : 1;
+		u64 reserved_26_63                   : 38;
+	} cnf95xxp2;
+};
+
+static inline u64 NIXX_AF_SQM_DBG_CTL_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SQM_DBG_CTL_STATUS(void)
+{
+	return 0x750;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_sqm_eco
+ *
+ * INTERNAL: AF SQM ECO Register
+ */
+union nixx_af_sqm_eco {
+	u64 u;
+	struct nixx_af_sqm_eco_s {
+		u64 eco_rw                           : 64;
+	} s;
+	/* struct nixx_af_sqm_eco_s cn; */
+};
+
+static inline u64 NIXX_AF_SQM_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_SQM_ECO(void)
+{
+	return 0x5b0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_status
+ *
+ * NIX AF General Status Register
+ */
+union nixx_af_status {
+	u64 u;
+	struct nixx_af_status_s {
+		u64 blk_busy                         : 10;
+		u64 calibrate_done                   : 1;
+		u64 reserved_11_15                   : 5;
+		u64 calibrate_status                 : 15;
+		u64 reserved_31_63                   : 33;
+	} s;
+	/* struct nixx_af_status_s cn; */
+};
+
+static inline u64 NIXX_AF_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_STATUS(void)
+{
+	return 0x10;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tcp_timer
+ *
+ * NIX TCP Timer Register
+ */
+union nixx_af_tcp_timer {
+	u64 u;
+	struct nixx_af_tcp_timer_s {
+		u64 dur_counter                      : 16;
+		u64 lf_counter                       : 8;
+		u64 reserved_24_31                   : 8;
+		u64 duration                         : 16;
+		u64 reserved_48_62                   : 15;
+		u64 ena                              : 1;
+	} s;
+	/* struct nixx_af_tcp_timer_s cn; */
+};
+
+static inline u64 NIXX_AF_TCP_TIMER(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TCP_TIMER(void)
+{
+	return 0x1e0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_cir
+ *
+ * NIX AF Transmit Level 1 Committed Information Rate Register
+ */
+union nixx_af_tl1x_cir {
+	u64 u;
+	struct nixx_af_tl1x_cir_s {
+		u64 enable                           : 1;
+		u64 rate_mantissa                    : 8;
+		u64 rate_exponent                    : 4;
+		u64 rate_divider_exponent            : 4;
+		u64 reserved_17_28                   : 12;
+		u64 burst_mantissa                   : 8;
+		u64 burst_exponent                   : 4;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl1x_cir_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_CIR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_CIR(u64 a)
+{
+	return 0xc20 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_dropped_bytes
+ *
+ * NIX AF Transmit Level 1 Dropped Bytes Registers This register has the
+ * same bit fields as NIX_AF_TL1()_GREEN_BYTES.
+ */
+union nixx_af_tl1x_dropped_bytes {
+	u64 u;
+	struct nixx_af_tl1x_dropped_bytes_s {
+		u64 count                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_tl1x_dropped_bytes_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_DROPPED_BYTES(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_DROPPED_BYTES(u64 a)
+{
+	return 0xd30 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_dropped_packets
+ *
+ * NIX AF Transmit Level 1 Dropped Packets Registers This register has
+ * the same bit fields as NIX_AF_TL1()_GREEN_PACKETS.
+ */
+union nixx_af_tl1x_dropped_packets {
+	u64 u;
+	struct nixx_af_tl1x_dropped_packets_s {
+		u64 count                            : 40;
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct nixx_af_tl1x_dropped_packets_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_DROPPED_PACKETS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_DROPPED_PACKETS(u64 a)
+{
+	return 0xd20 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_green
+ *
+ * INTERNAL: NIX Transmit Level 1 Green State Debug Register
+ */
+union nixx_af_tl1x_green {
+	u64 u;
+	struct nixx_af_tl1x_green_s {
+		u64 tail                             : 8;
+		u64 reserved_8_9                     : 2;
+		u64 head                             : 8;
+		u64 reserved_18_19                   : 2;
+		u64 active_vec                       : 20;
+		u64 rr_active                        : 1;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl1x_green_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_GREEN(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_GREEN(u64 a)
+{
+	return 0xc90 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_green_bytes
+ *
+ * NIX AF Transmit Level 1 Green Sent Bytes Registers
+ */
+union nixx_af_tl1x_green_bytes {
+	u64 u;
+	struct nixx_af_tl1x_green_bytes_s {
+		u64 count                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_tl1x_green_bytes_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_GREEN_BYTES(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_GREEN_BYTES(u64 a)
+{
+	return 0xd90 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_green_packets
+ *
+ * NIX AF Transmit Level 1 Green Sent Packets Registers
+ */
+union nixx_af_tl1x_green_packets {
+	u64 u;
+	struct nixx_af_tl1x_green_packets_s {
+		u64 count                            : 40;
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct nixx_af_tl1x_green_packets_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_GREEN_PACKETS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_GREEN_PACKETS(u64 a)
+{
+	return 0xd80 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_md_debug0
+ *
+ * NIX AF Transmit Level 1 Meta Descriptor Debug 0 Registers
+ * NIX_AF_TL1()_MD_DEBUG0, NIX_AF_TL1()_MD_DEBUG1, NIX_AF_TL1()_MD_DEBUG2
+ * and NIX_AF_TL1()_MD_DEBUG3 provide access to the TLn queue meta
+ * descriptor. A TLn queue can hold up to two packet meta descriptors
+ * (PMD) and one flush meta descriptor (FMD): * PMD0 state is accessed
+ * with [PMD0_VLD], [PMD0_LENGTH] and NIX_AF_TL1()_MD_DEBUG1. * PMD1 is
+ * accessed with [PMD1_VLD], [PMD1_LENGTH] and NIX_AF_TL1()_MD_DEBUG2. *
+ * FMD is accessed with NIX_AF_TL1()_MD_DEBUG3.
+ */
+union nixx_af_tl1x_md_debug0 {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_md_debug0_s {
+		u64 pmd0_length                      : 16; /* [15:0] */
+		u64 pmd1_length                      : 16; /* [31:16] */
+		u64 pmd0_vld                         : 1; /* [32] */
+		u64 pmd1_vld                         : 1; /* [33] */
+		u64 reserved_34_45                   : 12;
+		u64 drain_pri                        : 1; /* [46] */
+		u64 drain                            : 1; /* [47] */
+		u64 c_con                            : 1; /* [48] */
+		u64 p_con                            : 1; /* [49] */
+		u64 reserved_50_51                   : 2;
+		u64 child                            : 10; /* [61:52] */
+		u64 reserved_62                      : 1;
+		u64 pmd_count                        : 1; /* [63] */
+	} s;
+	/* struct nixx_af_tl1x_md_debug0_s cn96xxp1; */
+	struct nixx_af_tl1x_md_debug0_cn96xxp3 { /* variant: PMD1/drain fields reserved */
+		u64 pmd0_length                      : 16;
+		u64 reserved_16_31                   : 16;
+		u64 pmd0_vld                         : 1;
+		u64 reserved_33                      : 1;
+		u64 reserved_34_45                   : 12;
+		u64 reserved_46                      : 1;
+		u64 reserved_47                      : 1;
+		u64 c_con                            : 1;
+		u64 p_con                            : 1;
+		u64 reserved_50_51                   : 2;
+		u64 child                            : 10;
+		u64 reserved_62                      : 1;
+		u64 reserved_63                      : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl1x_md_debug0_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL1X_MD_DEBUG0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_MD_DEBUG0(u64 a)
+{
+	return 0xcc0 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_md_debug1
+ *
+ * NIX AF Transmit Level 1 Meta Descriptor Debug 1 Registers Packet meta
+ * descriptor 0 debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl1x_md_debug1 {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_md_debug1_s {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2; /* [7:6] */
+		u64 cir_dis                          : 1; /* [8] */
+		u64 pir_dis                          : 1; /* [9] */
+		u64 adjust                           : 9; /* [18:10] */
+		u64 uid                              : 4; /* [22:19] */
+		u64 reserved_23                      : 1;
+		u64 bubble                           : 1; /* [24] */
+		u64 color                            : 2; /* [26:25] */
+		u64 pse_pkt_id                       : 9; /* [35:27] */
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2; /* [38:37] */
+		u64 sqm_pkt_id                       : 13; /* [51:39] */
+		u64 mdq_idx                          : 10; /* [61:52] */
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1; /* [63] */
+	} s;
+	struct nixx_af_tl1x_md_debug1_cn96xxp1 { /* variant: [drain] instead of reserved_23 */
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 drain                            : 1; /* [23] */
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp1;
+	struct nixx_af_tl1x_md_debug1_cn96xxp3 { /* variant: drops [uid]/[drain], adds [flush] */
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 reserved_19_22                   : 4;
+		u64 flush                            : 1; /* [23] */
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl1x_md_debug1_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL1X_MD_DEBUG1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_MD_DEBUG1(u64 a)
+{
+	return 0xcc8 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_md_debug2
+ *
+ * NIX AF Transmit Level 1 Meta Descriptor Debug 2 Registers Packet meta
+ * descriptor 1 debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl1x_md_debug2 {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_md_debug2_s {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2; /* [7:6] */
+		u64 cir_dis                          : 1; /* [8] */
+		u64 pir_dis                          : 1; /* [9] */
+		u64 adjust                           : 9; /* [18:10] */
+		u64 uid                              : 4; /* [22:19] */
+		u64 reserved_23                      : 1;
+		u64 bubble                           : 1; /* [24] */
+		u64 color                            : 2; /* [26:25] */
+		u64 pse_pkt_id                       : 9; /* [35:27] */
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2; /* [38:37] */
+		u64 sqm_pkt_id                       : 13; /* [51:39] */
+		u64 mdq_idx                          : 10; /* [61:52] */
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1; /* [63] */
+	} s;
+	struct nixx_af_tl1x_md_debug2_cn96xxp1 { /* variant: [drain] instead of reserved_23 */
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 drain                            : 1; /* [23] */
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp1;
+	struct nixx_af_tl1x_md_debug2_cn96xxp3 { /* variant: drops [uid]/[drain], adds [flush] */
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 reserved_19_22                   : 4;
+		u64 flush                            : 1; /* [23] */
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl1x_md_debug2_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL1X_MD_DEBUG2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_MD_DEBUG2(u64 a)
+{
+	return 0xcd0 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_md_debug3
+ *
+ * NIX AF Transmit Level 1 Meta Descriptor Debug 3 Registers Flush meta
+ * descriptor debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl1x_md_debug3 {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_md_debug3_s {
+		u64 reserved_0_36                    : 37;
+		u64 tx_pkt_p2x                       : 2; /* [38:37] */
+		u64 sqm_pkt_id                       : 13; /* [51:39] */
+		u64 mdq_idx                          : 10; /* [61:52] */
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1; /* [63] */
+	} s;
+	/* struct nixx_af_tl1x_md_debug3_s cn96xxp1; */
+	struct nixx_af_tl1x_md_debug3_cn96xxp3 { /* variant: all fields reserved */
+		u64 reserved_0_36                    : 37;
+		u64 reserved_37_38                   : 2;
+		u64 reserved_39_51                   : 13;
+		u64 reserved_52_61                   : 10;
+		u64 reserved_62                      : 1;
+		u64 reserved_63                      : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl1x_md_debug3_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL1X_MD_DEBUG3(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_MD_DEBUG3(u64 a)
+{
+	return 0xcd8 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_red
+ *
+ * INTERNAL: NIX Transmit Level 1 Red State Debug Register  This register
+ * has the same bit fields as NIX_AF_TL1()_YELLOW.
+ */
+union nixx_af_tl1x_red {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_red_s {
+		u64 tail                             : 8; /* [7:0] */
+		u64 reserved_8_9                     : 2;
+		u64 head                             : 8; /* [17:10] */
+		u64 reserved_18_63                   : 46;
+	} s;
+	/* struct nixx_af_tl1x_red_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_RED(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_RED(u64 a)
+{
+	return 0xcb0 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_red_bytes
+ *
+ * NIX AF Transmit Level 1 Red Sent Bytes Registers This register has the
+ * same bit fields as NIX_AF_TL1()_GREEN_BYTES.
+ */
+union nixx_af_tl1x_red_bytes {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_red_bytes_s {
+		u64 count                            : 48; /* [47:0] */
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_tl1x_red_bytes_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_RED_BYTES(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_RED_BYTES(u64 a)
+{
+	return 0xd50 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_red_packets
+ *
+ * NIX AF Transmit Level 1 Red Sent Packets Registers This register has
+ * the same bit fields as NIX_AF_TL1()_GREEN_PACKETS.
+ */
+union nixx_af_tl1x_red_packets {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_red_packets_s {
+		u64 count                            : 40; /* [39:0] */
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct nixx_af_tl1x_red_packets_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_RED_PACKETS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_RED_PACKETS(u64 a)
+{
+	return 0xd40 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_schedule
+ *
+ * NIX AF Transmit Level 1 Scheduling Control Register
+ */
+union nixx_af_tl1x_schedule {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_schedule_s {
+		u64 rr_quantum                       : 24; /* [23:0] */
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_tl1x_schedule_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_SCHEDULE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_SCHEDULE(u64 a)
+{
+	return 0xc00 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_shape
+ *
+ * NIX AF Transmit Level 1 Shaping Control Register
+ */
+union nixx_af_tl1x_shape {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_shape_s {
+		u64 adjust                           : 9; /* [8:0] */
+		u64 reserved_9_23                    : 15;
+		u64 length_disable                   : 1; /* [24] */
+		u64 reserved_25_63                   : 39;
+	} s;
+	struct nixx_af_tl1x_shape_cn { /* same field positions; reserved span split */
+		u64 adjust                           : 9;
+		u64 reserved_9_17                    : 9;
+		u64 reserved_18_23                   : 6;
+		u64 length_disable                   : 1;
+		u64 reserved_25_63                   : 39;
+	} cn;
+};
+
+static inline u64 NIXX_AF_TL1X_SHAPE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_SHAPE(u64 a)
+{
+	return 0xc10 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_shape_state
+ *
+ * NIX AF Transmit Level 1 Shape State Register This register must not be
+ * written during normal operation.
+ */
+union nixx_af_tl1x_shape_state {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_shape_state_s {
+		u64 cir_accum                        : 26; /* [25:0] */
+		u64 reserved_26_51                   : 26;
+		u64 color                            : 1; /* [52] */
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct nixx_af_tl1x_shape_state_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_SHAPE_STATE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_SHAPE_STATE(u64 a)
+{
+	return 0xc50 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_sw_xoff
+ *
+ * NIX AF Transmit Level 1 Software Controlled XOFF Registers
+ */
+union nixx_af_tl1x_sw_xoff {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_sw_xoff_s {
+		u64 xoff                             : 1; /* [0] */
+		u64 drain                            : 1; /* [1] */
+		u64 reserved_2                       : 1;
+		u64 drain_irq                        : 1; /* [3] */
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct nixx_af_tl1x_sw_xoff_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_SW_XOFF(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_SW_XOFF(u64 a)
+{
+	return 0xc70 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_topology
+ *
+ * NIX AF Transmit Level 1 Topology Registers
+ */
+union nixx_af_tl1x_topology {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_topology_s {
+		u64 reserved_0                       : 1;
+		u64 rr_prio                          : 4; /* [4:1] */
+		u64 reserved_5_31                    : 27;
+		u64 prio_anchor                      : 8; /* [39:32] */
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct nixx_af_tl1x_topology_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_TOPOLOGY(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_TOPOLOGY(u64 a)
+{
+	return 0xc80 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_yellow
+ *
+ * INTERNAL: NIX Transmit Level 1 Yellow State Debug Register
+ */
+union nixx_af_tl1x_yellow {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_yellow_s {
+		u64 tail                             : 8; /* [7:0] */
+		u64 reserved_8_9                     : 2;
+		u64 head                             : 8; /* [17:10] */
+		u64 reserved_18_63                   : 46;
+	} s;
+	/* struct nixx_af_tl1x_yellow_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_YELLOW(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_YELLOW(u64 a)
+{
+	return 0xca0 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_yellow_bytes
+ *
+ * NIX AF Transmit Level 1 Yellow Sent Bytes Registers This register has
+ * the same bit fields as NIX_AF_TL1()_GREEN_BYTES.
+ */
+union nixx_af_tl1x_yellow_bytes {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_yellow_bytes_s {
+		u64 count                            : 48; /* [47:0] */
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_af_tl1x_yellow_bytes_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_YELLOW_BYTES(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_YELLOW_BYTES(u64 a)
+{
+	return 0xd70 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1#_yellow_packets
+ *
+ * NIX AF Transmit Level 1 Yellow Sent Packets Registers This register
+ * has the same bit fields as NIX_AF_TL1()_GREEN_PACKETS.
+ */
+union nixx_af_tl1x_yellow_packets {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1x_yellow_packets_s {
+		u64 count                            : 40; /* [39:0] */
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct nixx_af_tl1x_yellow_packets_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1X_YELLOW_PACKETS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1X_YELLOW_PACKETS(u64 a)
+{
+	return 0xd60 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl1_const
+ *
+ * NIX AF Transmit Level 1 Constants Register This register contains
+ * constants for software discovery.
+ */
+union nixx_af_tl1_const {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl1_const_s {
+		u64 count                            : 16; /* [15:0] */
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct nixx_af_tl1_const_s cn; */
+};
+
+static inline u64 NIXX_AF_TL1_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL1_CONST(void)
+{
+	return 0x70; /* single instance; no per-block stride */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_cir
+ *
+ * NIX AF Transmit Level 2 Committed Information Rate Registers This
+ * register has the same bit fields as NIX_AF_TL1()_CIR.
+ */
+union nixx_af_tl2x_cir {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_cir_s {
+		u64 enable                           : 1; /* [0] */
+		u64 rate_mantissa                    : 8; /* [8:1] */
+		u64 rate_exponent                    : 4; /* [12:9] */
+		u64 rate_divider_exponent            : 4; /* [16:13] */
+		u64 reserved_17_28                   : 12;
+		u64 burst_mantissa                   : 8; /* [36:29] */
+		u64 burst_exponent                   : 4; /* [40:37] */
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl2x_cir_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_CIR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_CIR(u64 a)
+{
+	return 0xe20 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_green
+ *
+ * INTERNAL: NIX Transmit Level 2 Green State Debug Register  This
+ * register has the same bit fields as NIX_AF_TL1()_GREEN.
+ */
+union nixx_af_tl2x_green {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_green_s {
+		u64 tail                             : 8; /* [7:0] */
+		u64 reserved_8_9                     : 2;
+		u64 head                             : 8; /* [17:10] */
+		u64 reserved_18_19                   : 2;
+		u64 active_vec                       : 20; /* [39:20] */
+		u64 rr_active                        : 1; /* [40] */
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl2x_green_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_GREEN(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_GREEN(u64 a)
+{
+	return 0xe90 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_md_debug0
+ *
+ * NIX AF Transmit Level 2 Meta Descriptor Debug 0 Registers See
+ * NIX_AF_TL1()_MD_DEBUG0
+ */
+union nixx_af_tl2x_md_debug0 {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_md_debug0_s {
+		u64 pmd0_length                      : 16; /* [15:0] */
+		u64 pmd1_length                      : 16; /* [31:16] */
+		u64 pmd0_vld                         : 1; /* [32] */
+		u64 pmd1_vld                         : 1; /* [33] */
+		u64 reserved_34_45                   : 12;
+		u64 drain_pri                        : 1; /* [46] */
+		u64 drain                            : 1; /* [47] */
+		u64 c_con                            : 1; /* [48] */
+		u64 p_con                            : 1; /* [49] */
+		u64 reserved_50_51                   : 2;
+		u64 child                            : 10; /* [61:52] */
+		u64 reserved_62                      : 1;
+		u64 pmd_count                        : 1; /* [63] */
+	} s;
+	/* struct nixx_af_tl2x_md_debug0_s cn96xxp1; */
+	struct nixx_af_tl2x_md_debug0_cn96xxp3 { /* variant: PMD1/drain fields reserved */
+		u64 pmd0_length                      : 16;
+		u64 reserved_16_31                   : 16;
+		u64 pmd0_vld                         : 1;
+		u64 reserved_33                      : 1;
+		u64 reserved_34_45                   : 12;
+		u64 reserved_46                      : 1;
+		u64 reserved_47                      : 1;
+		u64 c_con                            : 1;
+		u64 p_con                            : 1;
+		u64 reserved_50_51                   : 2;
+		u64 child                            : 10;
+		u64 reserved_62                      : 1;
+		u64 reserved_63                      : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl2x_md_debug0_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL2X_MD_DEBUG0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_MD_DEBUG0(u64 a)
+{
+	return 0xec0 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_md_debug1
+ *
+ * NIX AF Transmit Level 2 Meta Descriptor Debug 1 Registers Packet meta
+ * descriptor 0 debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl2x_md_debug1 {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_md_debug1_s {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2; /* [7:6] */
+		u64 cir_dis                          : 1; /* [8] */
+		u64 pir_dis                          : 1; /* [9] */
+		u64 adjust                           : 9; /* [18:10] */
+		u64 uid                              : 4; /* [22:19] */
+		u64 reserved_23                      : 1;
+		u64 bubble                           : 1; /* [24] */
+		u64 color                            : 2; /* [26:25] */
+		u64 pse_pkt_id                       : 9; /* [35:27] */
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2; /* [38:37] */
+		u64 sqm_pkt_id                       : 13; /* [51:39] */
+		u64 mdq_idx                          : 10; /* [61:52] */
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1; /* [63] */
+	} s;
+	struct nixx_af_tl2x_md_debug1_cn96xxp1 { /* variant: [drain] instead of reserved_23 */
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 drain                            : 1; /* [23] */
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp1;
+	struct nixx_af_tl2x_md_debug1_cn96xxp3 { /* variant: drops [uid]/[drain], adds [flush] */
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 reserved_19_22                   : 4;
+		u64 flush                            : 1; /* [23] */
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl2x_md_debug1_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL2X_MD_DEBUG1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_MD_DEBUG1(u64 a)
+{
+	return 0xec8 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_md_debug2
+ *
+ * NIX AF Transmit Level 2 Meta Descriptor Debug 2 Registers Packet meta
+ * descriptor 1 debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl2x_md_debug2 {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_md_debug2_s {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2; /* [7:6] */
+		u64 cir_dis                          : 1; /* [8] */
+		u64 pir_dis                          : 1; /* [9] */
+		u64 adjust                           : 9; /* [18:10] */
+		u64 uid                              : 4; /* [22:19] */
+		u64 reserved_23                      : 1;
+		u64 bubble                           : 1; /* [24] */
+		u64 color                            : 2; /* [26:25] */
+		u64 pse_pkt_id                       : 9; /* [35:27] */
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2; /* [38:37] */
+		u64 sqm_pkt_id                       : 13; /* [51:39] */
+		u64 mdq_idx                          : 10; /* [61:52] */
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1; /* [63] */
+	} s;
+	struct nixx_af_tl2x_md_debug2_cn96xxp1 { /* variant: [drain] instead of reserved_23 */
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 drain                            : 1; /* [23] */
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp1;
+	struct nixx_af_tl2x_md_debug2_cn96xxp3 { /* variant: drops [uid]/[drain], adds [flush] */
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 reserved_19_22                   : 4;
+		u64 flush                            : 1; /* [23] */
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl2x_md_debug2_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL2X_MD_DEBUG2(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_MD_DEBUG2(u64 a)
+{
+	return 0xed0 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_md_debug3
+ *
+ * NIX AF Transmit Level 2 Meta Descriptor Debug 3 Registers Flush meta
+ * descriptor debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl2x_md_debug3 {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_md_debug3_s {
+		u64 reserved_0_36                    : 37;
+		u64 tx_pkt_p2x                       : 2; /* [38:37] */
+		u64 sqm_pkt_id                       : 13; /* [51:39] */
+		u64 mdq_idx                          : 10; /* [61:52] */
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1; /* [63] */
+	} s;
+	/* struct nixx_af_tl2x_md_debug3_s cn96xxp1; */
+	struct nixx_af_tl2x_md_debug3_cn96xxp3 { /* variant: all fields reserved */
+		u64 reserved_0_36                    : 37;
+		u64 reserved_37_38                   : 2;
+		u64 reserved_39_51                   : 13;
+		u64 reserved_52_61                   : 10;
+		u64 reserved_62                      : 1;
+		u64 reserved_63                      : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl2x_md_debug3_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL2X_MD_DEBUG3(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_MD_DEBUG3(u64 a)
+{
+	return 0xed8 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_parent
+ *
+ * NIX AF Transmit Level 2 Parent Registers
+ */
+union nixx_af_tl2x_parent {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_parent_s {
+		u64 reserved_0_15                    : 16;
+		u64 parent                           : 5; /* [20:16] */
+		u64 reserved_21_63                   : 43;
+	} s;
+	/* struct nixx_af_tl2x_parent_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_PARENT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_PARENT(u64 a)
+{
+	return 0xe88 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_pir
+ *
+ * NIX AF Transmit Level 2 Peak Information Rate Registers This register
+ * has the same bit fields as NIX_AF_TL1()_CIR.
+ */
+union nixx_af_tl2x_pir {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_pir_s {
+		u64 enable                           : 1; /* [0] */
+		u64 rate_mantissa                    : 8; /* [8:1] */
+		u64 rate_exponent                    : 4; /* [12:9] */
+		u64 rate_divider_exponent            : 4; /* [16:13] */
+		u64 reserved_17_28                   : 12;
+		u64 burst_mantissa                   : 8; /* [36:29] */
+		u64 burst_exponent                   : 4; /* [40:37] */
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl2x_pir_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_PIR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_PIR(u64 a)
+{
+	return 0xe30 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_pointers
+ *
+ * INTERNAL: NIX Transmit Level 2 Linked List Pointers Debug Register
+ */
+union nixx_af_tl2x_pointers {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_pointers_s {
+		u64 next                             : 8; /* [7:0] */
+		u64 reserved_8_15                    : 8;
+		u64 prev                             : 8; /* [23:16] */
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_tl2x_pointers_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_POINTERS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_POINTERS(u64 a)
+{
+	return 0xe60 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_red
+ *
+ * INTERNAL: NIX Transmit Level 2 Red State Debug Register  This register
+ * has the same bit fields as NIX_AF_TL1()_RED.
+ */
+union nixx_af_tl2x_red {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_red_s {
+		u64 tail                             : 8; /* [7:0] */
+		u64 reserved_8_9                     : 2;
+		u64 head                             : 8; /* [17:10] */
+		u64 reserved_18_63                   : 46;
+	} s;
+	/* struct nixx_af_tl2x_red_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_RED(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_RED(u64 a)
+{
+	return 0xeb0 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_sched_state
+ *
+ * NIX AF Transmit Level 2 Scheduling Control State Registers
+ */
+union nixx_af_tl2x_sched_state {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_sched_state_s {
+		u64 rr_count                         : 25; /* [24:0] */
+		u64 reserved_25_63                   : 39;
+	} s;
+	/* struct nixx_af_tl2x_sched_state_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_SCHED_STATE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_SCHED_STATE(u64 a)
+{
+	return 0xe40 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_schedule
+ *
+ * NIX AF Transmit Level 2 Scheduling Control Registers
+ */
+union nixx_af_tl2x_schedule {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_schedule_s {
+		u64 rr_quantum                       : 24; /* [23:0] */
+		u64 prio                             : 4; /* [27:24] */
+		u64 reserved_28_63                   : 36;
+	} s;
+	/* struct nixx_af_tl2x_schedule_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_SCHEDULE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_SCHEDULE(u64 a)
+{
+	return 0xe00 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_shape
+ *
+ * NIX AF Transmit Level 2 Shaping Control Registers
+ */
+union nixx_af_tl2x_shape {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_shape_s {
+		u64 adjust                           : 9; /* [8:0] */
+		u64 red_algo                         : 2; /* [10:9] */
+		u64 red_disable                      : 1; /* [11] */
+		u64 yellow_disable                   : 1; /* [12] */
+		u64 reserved_13_23                   : 11;
+		u64 length_disable                   : 1; /* [24] */
+		u64 schedule_list                    : 2; /* [26:25] */
+		u64 reserved_27_63                   : 37;
+	} s;
+	/* struct nixx_af_tl2x_shape_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_SHAPE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_SHAPE(u64 a)
+{
+	return 0xe10 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_shape_state
+ *
+ * NIX AF Transmit Level 2 Shape State Registers This register must not
+ * be written during normal operation.
+ */
+union nixx_af_tl2x_shape_state {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_shape_state_s {
+		u64 cir_accum                        : 26; /* [25:0] */
+		u64 pir_accum                        : 26; /* [51:26] */
+		u64 color                            : 2; /* [53:52] */
+		u64 reserved_54_63                   : 10;
+	} s;
+	/* struct nixx_af_tl2x_shape_state_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_SHAPE_STATE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_SHAPE_STATE(u64 a)
+{
+	return 0xe50 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_sw_xoff
+ *
+ * NIX AF Transmit Level 2 Software Controlled XOFF Registers This
+ * register has the same bit fields as NIX_AF_TL1()_SW_XOFF.
+ */
+union nixx_af_tl2x_sw_xoff {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_sw_xoff_s {
+		u64 xoff                             : 1; /* [0] */
+		u64 drain                            : 1; /* [1] */
+		u64 reserved_2                       : 1;
+		u64 drain_irq                        : 1; /* [3] */
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct nixx_af_tl2x_sw_xoff_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_SW_XOFF(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_SW_XOFF(u64 a)
+{
+	return 0xe70 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_topology
+ *
+ * NIX AF Transmit Level 2 Topology Registers
+ */
+union nixx_af_tl2x_topology {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_topology_s {
+		u64 reserved_0                       : 1;
+		u64 rr_prio                          : 4; /* [4:1] */
+		u64 reserved_5_31                    : 27;
+		u64 prio_anchor                      : 8; /* [39:32] */
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct nixx_af_tl2x_topology_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_TOPOLOGY(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_TOPOLOGY(u64 a)
+{
+	return 0xe80 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2#_yellow
+ *
+ * INTERNAL: NIX Transmit Level 2 Yellow State Debug Register  This
+ * register has the same bit fields as NIX_AF_TL1()_YELLOW.
+ */
+union nixx_af_tl2x_yellow {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2x_yellow_s {
+		u64 tail                             : 8; /* [7:0] */
+		u64 reserved_8_9                     : 2;
+		u64 head                             : 8; /* [17:10] */
+		u64 reserved_18_63                   : 46;
+	} s;
+	/* struct nixx_af_tl2x_yellow_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2X_YELLOW(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2X_YELLOW(u64 a)
+{
+	return 0xea0 + 0x10000 * a; /* nix# blocks are 0x10000 bytes apart */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl2_const
+ *
+ * NIX AF Transmit Level 2 Constants Register This register contains
+ * constants for software discovery.
+ */
+union nixx_af_tl2_const {
+	u64 u; /* Whole 64-bit register value */
+	struct nixx_af_tl2_const_s {
+		u64 count                            : 16; /* [15:0] */
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct nixx_af_tl2_const_s cn; */
+};
+
+static inline u64 NIXX_AF_TL2_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL2_CONST(void)
+{
+	return 0x78; /* single instance; no per-block stride */
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_cir
+ *
+ * NIX AF Transmit Level 3 Committed Information Rate Registers This
+ * register has the same bit fields as NIX_AF_TL1()_CIR.
+ */
+union nixx_af_tl3x_cir {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_cir_s {
+		u64 enable                           : 1;
+		u64 rate_mantissa                    : 8;
+		u64 rate_exponent                    : 4;
+		u64 rate_divider_exponent            : 4;
+		u64 reserved_17_28                   : 12;
+		u64 burst_mantissa                   : 8;
+		u64 burst_exponent                   : 4;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl3x_cir_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_CIR(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_CIR(u64 a)
+{
+	return 0x1020 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_green
+ *
+ * INTERNAL: NIX Transmit Level 3 Green State Debug Register
+ */
+union nixx_af_tl3x_green {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_green_s {
+		u64 tail                             : 9;
+		u64 reserved_9                       : 1;
+		u64 head                             : 9;
+		u64 reserved_19                      : 1;
+		u64 active_vec                       : 20;
+		u64 rr_active                        : 1;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl3x_green_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_GREEN(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_GREEN(u64 a)
+{
+	return 0x1090 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_md_debug0
+ *
+ * NIX AF Transmit Level 3 Meta Descriptor Debug 0 Registers See
+ * NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl3x_md_debug0 {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_md_debug0_s {
+		u64 pmd0_length                      : 16;
+		u64 pmd1_length                      : 16;
+		u64 pmd0_vld                         : 1;
+		u64 pmd1_vld                         : 1;
+		u64 reserved_34_45                   : 12;
+		u64 drain_pri                        : 1;
+		u64 drain                            : 1;
+		u64 c_con                            : 1;
+		u64 p_con                            : 1;
+		u64 reserved_50_51                   : 2;
+		u64 child                            : 10;
+		u64 reserved_62                      : 1;
+		u64 pmd_count                        : 1;
+	} s;
+	/* struct nixx_af_tl3x_md_debug0_s cn96xxp1; */
+	/*
+	 * NOTE(review): the pass-3 (cn96xxp3) layout reserves the PMD1,
+	 * drain and PMD_COUNT fields of the generic layout above.
+	 */
+	struct nixx_af_tl3x_md_debug0_cn96xxp3 {
+		u64 pmd0_length                      : 16;
+		u64 reserved_16_31                   : 16;
+		u64 pmd0_vld                         : 1;
+		u64 reserved_33                      : 1;
+		u64 reserved_34_45                   : 12;
+		u64 reserved_46                      : 1;
+		u64 reserved_47                      : 1;
+		u64 c_con                            : 1;
+		u64 p_con                            : 1;
+		u64 reserved_50_51                   : 2;
+		u64 child                            : 10;
+		u64 reserved_62                      : 1;
+		u64 reserved_63                      : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl3x_md_debug0_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL3X_MD_DEBUG0(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_MD_DEBUG0(u64 a)
+{
+	return 0x10c0 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_md_debug1
+ *
+ * NIX AF Transmit Level 3 Meta Descriptor Debug 1 Registers Packet meta
+ * descriptor 0 debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl3x_md_debug1 {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_md_debug1_s {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 reserved_23                      : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} s;
+	struct nixx_af_tl3x_md_debug1_cn96xxp1 {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 drain                            : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp1;
+	/*
+	 * NOTE(review): on pass 3 (cn96xxp3), [UID] (bits 19..22) is
+	 * reserved and bit 23 holds [FLUSH] (was [DRAIN] on pass 1).
+	 */
+	struct nixx_af_tl3x_md_debug1_cn96xxp3 {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 reserved_19_22                   : 4;
+		u64 flush                            : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl3x_md_debug1_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL3X_MD_DEBUG1(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_MD_DEBUG1(u64 a)
+{
+	return 0x10c8 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_md_debug2
+ *
+ * NIX AF Transmit Level 3 Meta Descriptor Debug 2 Registers Packet meta
+ * descriptor 1 debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl3x_md_debug2 {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_md_debug2_s {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 reserved_23                      : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} s;
+	struct nixx_af_tl3x_md_debug2_cn96xxp1 {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 drain                            : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp1;
+	/*
+	 * NOTE(review): on pass 3 (cn96xxp3), [UID] (bits 19..22) is
+	 * reserved and bit 23 holds [FLUSH] (was [DRAIN] on pass 1).
+	 */
+	struct nixx_af_tl3x_md_debug2_cn96xxp3 {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 reserved_19_22                   : 4;
+		u64 flush                            : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl3x_md_debug2_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL3X_MD_DEBUG2(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_MD_DEBUG2(u64 a)
+{
+	return 0x10d0 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_md_debug3
+ *
+ * NIX AF Transmit Level 3 Meta Descriptor Debug 3 Registers Flush meta
+ * descriptor debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl3x_md_debug3 {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_md_debug3_s {
+		u64 reserved_0_36                    : 37;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} s;
+	/* struct nixx_af_tl3x_md_debug3_s cn96xxp1; */
+	/* NOTE(review): all fields are reserved in the pass-3 layout. */
+	struct nixx_af_tl3x_md_debug3_cn96xxp3 {
+		u64 reserved_0_36                    : 37;
+		u64 reserved_37_38                   : 2;
+		u64 reserved_39_51                   : 13;
+		u64 reserved_52_61                   : 10;
+		u64 reserved_62                      : 1;
+		u64 reserved_63                      : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl3x_md_debug3_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL3X_MD_DEBUG3(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_MD_DEBUG3(u64 a)
+{
+	return 0x10d8 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_parent
+ *
+ * NIX AF Transmit Level 3 Parent Registers
+ */
+union nixx_af_tl3x_parent {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_parent_s {
+		u64 reserved_0_15                    : 16;
+		u64 parent                           : 8;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_tl3x_parent_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_PARENT(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_PARENT(u64 a)
+{
+	return 0x1088 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_pir
+ *
+ * NIX AF Transmit Level 3 Peak Information Rate Registers This register
+ * has the same bit fields as NIX_AF_TL1()_CIR.
+ */
+union nixx_af_tl3x_pir {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_pir_s {
+		u64 enable                           : 1;
+		u64 rate_mantissa                    : 8;
+		u64 rate_exponent                    : 4;
+		u64 rate_divider_exponent            : 4;
+		u64 reserved_17_28                   : 12;
+		u64 burst_mantissa                   : 8;
+		u64 burst_exponent                   : 4;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl3x_pir_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_PIR(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_PIR(u64 a)
+{
+	return 0x1030 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_pointers
+ *
+ * INTERNAL: NIX Transmit Level 3 Linked List Pointers Debug Register
+ * This register has the same bit fields as NIX_AF_TL2()_POINTERS.
+ */
+union nixx_af_tl3x_pointers {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_pointers_s {
+		u64 next                             : 8;
+		u64 reserved_8_15                    : 8;
+		u64 prev                             : 8;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_tl3x_pointers_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_POINTERS(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_POINTERS(u64 a)
+{
+	return 0x1060 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_red
+ *
+ * INTERNAL: NIX Transmit Level 3 Red State Debug Register  This register
+ * has the same bit fields as NIX_AF_TL3()_YELLOW.
+ */
+union nixx_af_tl3x_red {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_red_s {
+		u64 tail                             : 9;
+		u64 reserved_9                       : 1;
+		u64 head                             : 9;
+		u64 reserved_19_63                   : 45;
+	} s;
+	/* struct nixx_af_tl3x_red_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_RED(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_RED(u64 a)
+{
+	return 0x10b0 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_sched_state
+ *
+ * NIX AF Transmit Level 3 Scheduling Control State Registers This
+ * register has the same bit fields as NIX_AF_TL2()_SCHED_STATE.
+ */
+union nixx_af_tl3x_sched_state {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_sched_state_s {
+		u64 rr_count                         : 25;
+		u64 reserved_25_63                   : 39;
+	} s;
+	/* struct nixx_af_tl3x_sched_state_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_SCHED_STATE(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_SCHED_STATE(u64 a)
+{
+	return 0x1040 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_schedule
+ *
+ * NIX AF Transmit Level 3 Scheduling Control Registers This register has
+ * the same bit fields as NIX_AF_TL2()_SCHEDULE.
+ */
+union nixx_af_tl3x_schedule {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_schedule_s {
+		u64 rr_quantum                       : 24;
+		u64 prio                             : 4;
+		u64 reserved_28_63                   : 36;
+	} s;
+	/* struct nixx_af_tl3x_schedule_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_SCHEDULE(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_SCHEDULE(u64 a)
+{
+	return 0x1000 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_shape
+ *
+ * NIX AF Transmit Level 3 Shaping Control Registers
+ */
+union nixx_af_tl3x_shape {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_shape_s {
+		u64 adjust                           : 9;
+		u64 red_algo                         : 2;
+		u64 red_disable                      : 1;
+		u64 yellow_disable                   : 1;
+		u64 reserved_13_23                   : 11;
+		u64 length_disable                   : 1;
+		u64 schedule_list                    : 2;
+		u64 reserved_27_63                   : 37;
+	} s;
+	/* struct nixx_af_tl3x_shape_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_SHAPE(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_SHAPE(u64 a)
+{
+	return 0x1010 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_shape_state
+ *
+ * NIX AF Transmit Level 3 Shaping State Registers This register has the
+ * same bit fields as NIX_AF_TL2()_SHAPE_STATE. This register must not be
+ * written during normal operation.
+ */
+union nixx_af_tl3x_shape_state {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_shape_state_s {
+		u64 cir_accum                        : 26;
+		u64 pir_accum                        : 26;
+		u64 color                            : 2;
+		u64 reserved_54_63                   : 10;
+	} s;
+	/* struct nixx_af_tl3x_shape_state_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_SHAPE_STATE(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_SHAPE_STATE(u64 a)
+{
+	return 0x1050 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_sw_xoff
+ *
+ * NIX AF Transmit Level 3 Software Controlled XOFF Registers This
+ * register has the same bit fields as NIX_AF_TL1()_SW_XOFF
+ */
+union nixx_af_tl3x_sw_xoff {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_sw_xoff_s {
+		u64 xoff                             : 1;
+		u64 drain                            : 1;
+		u64 reserved_2                       : 1;
+		u64 drain_irq                        : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct nixx_af_tl3x_sw_xoff_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_SW_XOFF(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_SW_XOFF(u64 a)
+{
+	return 0x1070 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_topology
+ *
+ * NIX AF Transmit Level 3 Topology Registers
+ */
+union nixx_af_tl3x_topology {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_topology_s {
+		u64 reserved_0                       : 1;
+		u64 rr_prio                          : 4;
+		u64 reserved_5_31                    : 27;
+		u64 prio_anchor                      : 9;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl3x_topology_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_TOPOLOGY(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_TOPOLOGY(u64 a)
+{
+	return 0x1080 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3#_yellow
+ *
+ * INTERNAL: NIX Transmit Level 3 Yellow State Debug Register
+ */
+union nixx_af_tl3x_yellow {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3x_yellow_s {
+		u64 tail                             : 9;
+		u64 reserved_9                       : 1;
+		u64 head                             : 9;
+		u64 reserved_19_63                   : 45;
+	} s;
+	/* struct nixx_af_tl3x_yellow_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3X_YELLOW(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3X_YELLOW(u64 a)
+{
+	return 0x10a0 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3_const
+ *
+ * NIX AF Transmit Level 3 Constants Register This register contains
+ * constants for software discovery.
+ */
+union nixx_af_tl3_const {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3_const_s {
+		u64 count                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct nixx_af_tl3_const_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3_CONST(void)
+	__attribute__ ((pure, always_inline));
+/* Fixed byte offset in RVU_PF_BAR0 (single copy, no index). */
+static inline u64 NIXX_AF_TL3_CONST(void)
+{
+	return 0x80;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3_tl2#_bp_status
+ *
+ * NIX AF Transmit Level 3/2 Backpressure Status Registers
+ */
+union nixx_af_tl3_tl2x_bp_status {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3_tl2x_bp_status_s {
+		u64 hw_xoff                          : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_tl3_tl2x_bp_status_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3_TL2X_BP_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3_TL2X_BP_STATUS(u64 a)
+{
+	return 0x1610 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3_tl2#_cfg
+ *
+ * NIX AF Transmit Level 3/2 Configuration Registers
+ */
+union nixx_af_tl3_tl2x_cfg {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3_tl2x_cfg_s {
+		u64 express                          : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_tl3_tl2x_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3_TL2X_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL3_TL2X_CFG(u64 a)
+{
+	return 0x1600 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl3_tl2#_link#_cfg
+ *
+ * NIX AF Transmit Level 3/2 Link Configuration Registers These registers
+ * specify the links and associated channels that a given TL3 or TL2
+ * queue (depending on NIX_AF_PSE_CHANNEL_LEVEL[BP_LEVEL]) can transmit
+ * on. Each TL3/TL2 queue can be enabled to transmit on and be
+ * backpressured by one or more links and associated channels. The last
+ * index (LINK) is enumerated by NIX_LINK_E.
+ */
+union nixx_af_tl3_tl2x_linkx_cfg {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl3_tl2x_linkx_cfg_s {
+		u64 relchan                          : 8;
+		u64 reserved_8_11                    : 4;
+		u64 ena                              : 1;
+		u64 bp_ena                           : 1;
+		u64 reserved_14_63                   : 50;
+	} s;
+	/* struct nixx_af_tl3_tl2x_linkx_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_TL3_TL2X_LINKX_CFG(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a': queue (0x10000 stride), 'b': link (8). */
+static inline u64 NIXX_AF_TL3_TL2X_LINKX_CFG(u64 a, u64 b)
+{
+	return 0x1700 + 0x10000 * a + 8 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_bp_status
+ *
+ * NIX AF Transmit Level 4 Backpressure Status Registers
+ */
+union nixx_af_tl4x_bp_status {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_bp_status_s {
+		u64 hw_xoff                          : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_tl4x_bp_status_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_BP_STATUS(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_BP_STATUS(u64 a)
+{
+	return 0xb00 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_cir
+ *
+ * NIX AF Transmit Level 4 Committed Information Rate Registers This
+ * register has the same bit fields as NIX_AF_TL1()_CIR.
+ */
+union nixx_af_tl4x_cir {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_cir_s {
+		u64 enable                           : 1;
+		u64 rate_mantissa                    : 8;
+		u64 rate_exponent                    : 4;
+		u64 rate_divider_exponent            : 4;
+		u64 reserved_17_28                   : 12;
+		u64 burst_mantissa                   : 8;
+		u64 burst_exponent                   : 4;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl4x_cir_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_CIR(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_CIR(u64 a)
+{
+	return 0x1220 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_green
+ *
+ * INTERNAL: NIX Transmit Level 4 Green State Debug Register  This
+ * register has the same bit fields as NIX_AF_TL3()_GREEN.
+ */
+union nixx_af_tl4x_green {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_green_s {
+		u64 tail                             : 9;
+		u64 reserved_9                       : 1;
+		u64 head                             : 9;
+		u64 reserved_19                      : 1;
+		u64 active_vec                       : 20;
+		u64 rr_active                        : 1;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl4x_green_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_GREEN(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_GREEN(u64 a)
+{
+	return 0x1290 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_md_debug0
+ *
+ * NIX AF Transmit Level 4 Meta Descriptor Debug 0 Registers See
+ * NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl4x_md_debug0 {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_md_debug0_s {
+		u64 pmd0_length                      : 16;
+		u64 pmd1_length                      : 16;
+		u64 pmd0_vld                         : 1;
+		u64 pmd1_vld                         : 1;
+		u64 reserved_34_45                   : 12;
+		u64 drain_pri                        : 1;
+		u64 drain                            : 1;
+		u64 c_con                            : 1;
+		u64 p_con                            : 1;
+		u64 reserved_50_51                   : 2;
+		u64 child                            : 10;
+		u64 reserved_62                      : 1;
+		u64 pmd_count                        : 1;
+	} s;
+	/* struct nixx_af_tl4x_md_debug0_s cn96xxp1; */
+	/*
+	 * NOTE(review): the pass-3 (cn96xxp3) layout reserves the PMD1,
+	 * drain and PMD_COUNT fields of the generic layout above.
+	 */
+	struct nixx_af_tl4x_md_debug0_cn96xxp3 {
+		u64 pmd0_length                      : 16;
+		u64 reserved_16_31                   : 16;
+		u64 pmd0_vld                         : 1;
+		u64 reserved_33                      : 1;
+		u64 reserved_34_45                   : 12;
+		u64 reserved_46                      : 1;
+		u64 reserved_47                      : 1;
+		u64 c_con                            : 1;
+		u64 p_con                            : 1;
+		u64 reserved_50_51                   : 2;
+		u64 child                            : 10;
+		u64 reserved_62                      : 1;
+		u64 reserved_63                      : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl4x_md_debug0_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL4X_MD_DEBUG0(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_MD_DEBUG0(u64 a)
+{
+	return 0x12c0 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_md_debug1
+ *
+ * NIX AF Transmit Level 4 Meta Descriptor Debug 1 Registers Packet meta
+ * descriptor 0 debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl4x_md_debug1 {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_md_debug1_s {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 reserved_23                      : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} s;
+	struct nixx_af_tl4x_md_debug1_cn96xxp1 {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 drain                            : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp1;
+	/*
+	 * NOTE(review): on pass 3 (cn96xxp3), [UID] (bits 19..22) is
+	 * reserved and bit 23 holds [FLUSH] (was [DRAIN] on pass 1).
+	 */
+	struct nixx_af_tl4x_md_debug1_cn96xxp3 {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 reserved_19_22                   : 4;
+		u64 flush                            : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl4x_md_debug1_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL4X_MD_DEBUG1(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_MD_DEBUG1(u64 a)
+{
+	return 0x12c8 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_md_debug2
+ *
+ * NIX AF Transmit Level 4 Meta Descriptor Debug 2 Registers Packet meta
+ * descriptor 1 debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl4x_md_debug2 {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_md_debug2_s {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 reserved_23                      : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} s;
+	struct nixx_af_tl4x_md_debug2_cn96xxp1 {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 uid                              : 4;
+		u64 drain                            : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp1;
+	/*
+	 * NOTE(review): on pass 3 (cn96xxp3), [UID] (bits 19..22) is
+	 * reserved and bit 23 holds [FLUSH] (was [DRAIN] on pass 1).
+	 */
+	struct nixx_af_tl4x_md_debug2_cn96xxp3 {
+		u64 reserved_0_5                     : 6;
+		u64 red_algo_override                : 2;
+		u64 cir_dis                          : 1;
+		u64 pir_dis                          : 1;
+		u64 adjust                           : 9;
+		u64 reserved_19_22                   : 4;
+		u64 flush                            : 1;
+		u64 bubble                           : 1;
+		u64 color                            : 2;
+		u64 pse_pkt_id                       : 9;
+		u64 reserved_36                      : 1;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl4x_md_debug2_cn96xxp1 cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL4X_MD_DEBUG2(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_MD_DEBUG2(u64 a)
+{
+	return 0x12d0 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_md_debug3
+ *
+ * NIX AF Transmit Level 4 Meta Descriptor Debug 3 Registers Flush meta
+ * descriptor debug. See NIX_AF_TL1()_MD_DEBUG0.
+ */
+union nixx_af_tl4x_md_debug3 {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_md_debug3_s {
+		u64 reserved_0_36                    : 37;
+		u64 tx_pkt_p2x                       : 2;
+		u64 sqm_pkt_id                       : 13;
+		u64 mdq_idx                          : 10;
+		u64 reserved_62                      : 1;
+		u64 vld                              : 1;
+	} s;
+	/* struct nixx_af_tl4x_md_debug3_s cn96xxp1; */
+	/* NOTE(review): all fields are reserved in the pass-3 layout. */
+	struct nixx_af_tl4x_md_debug3_cn96xxp3 {
+		u64 reserved_0_36                    : 37;
+		u64 reserved_37_38                   : 2;
+		u64 reserved_39_51                   : 13;
+		u64 reserved_52_61                   : 10;
+		u64 reserved_62                      : 1;
+		u64 reserved_63                      : 1;
+	} cn96xxp3;
+	/* struct nixx_af_tl4x_md_debug3_s cnf95xx; */
+};
+
+static inline u64 NIXX_AF_TL4X_MD_DEBUG3(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_MD_DEBUG3(u64 a)
+{
+	return 0x12d8 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_parent
+ *
+ * NIX AF Transmit Level 4 Parent Registers
+ */
+union nixx_af_tl4x_parent {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_parent_s {
+		u64 reserved_0_15                    : 16;
+		u64 parent                           : 8;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_tl4x_parent_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_PARENT(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_PARENT(u64 a)
+{
+	return 0x1288 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_pir
+ *
+ * NIX AF Transmit Level 4 Peak Information Rate Registers This register
+ * has the same bit fields as NIX_AF_TL1()_CIR.
+ */
+union nixx_af_tl4x_pir {
+	/* Whole register as one 64-bit value */
+	u64 u;
+	struct nixx_af_tl4x_pir_s {
+		u64 enable                           : 1;
+		u64 rate_mantissa                    : 8;
+		u64 rate_exponent                    : 4;
+		u64 rate_divider_exponent            : 4;
+		u64 reserved_17_28                   : 12;
+		u64 burst_mantissa                   : 8;
+		u64 burst_exponent                   : 4;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl4x_pir_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_PIR(u64 a)
+	__attribute__ ((pure, always_inline));
+/* Byte offset in RVU_PF_BAR0; 'a' selects the copy (0x10000-byte stride). */
+static inline u64 NIXX_AF_TL4X_PIR(u64 a)
+{
+	return 0x1230 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_pointers
+ *
+ * INTERNAL: NIX Transmit Level 4 Linked List Pointers Debug Register
+ * This register has the same bit fields as NIX_AF_TL2()_POINTERS.
+ */
+union nixx_af_tl4x_pointers {
+	u64 u;
+	struct nixx_af_tl4x_pointers_s {
+		u64 next                             : 9;
+		u64 reserved_9_15                    : 7;
+		u64 prev                             : 9;
+		u64 reserved_25_63                   : 39;
+	} s;
+	/* struct nixx_af_tl4x_pointers_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_POINTERS(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_POINTERS(u64 a)
+{
+	return 0x1260 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_red
+ *
+ * INTERNAL: NIX Transmit Level 4 Red State Debug Register  This register
+ * has the same bit fields as NIX_AF_TL3()_YELLOW.
+ */
+union nixx_af_tl4x_red {
+	u64 u;
+	struct nixx_af_tl4x_red_s {
+		u64 tail                             : 9;
+		u64 reserved_9                       : 1;
+		u64 head                             : 9;
+		u64 reserved_19_63                   : 45;
+	} s;
+	/* struct nixx_af_tl4x_red_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_RED(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_RED(u64 a)
+{
+	/* 'a' = TL4 queue index; 0x10000-byte stride per queue. */
+	return 0x12b0 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_sched_state
+ *
+ * NIX AF Transmit Level 4 Scheduling Control State Registers This
+ * register has the same bit fields as NIX_AF_TL2()_SCHED_STATE.
+ */
+union nixx_af_tl4x_sched_state {
+	u64 u;
+	struct nixx_af_tl4x_sched_state_s {
+		u64 rr_count                         : 25;
+		u64 reserved_25_63                   : 39;
+	} s;
+	/* struct nixx_af_tl4x_sched_state_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_SCHED_STATE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_SCHED_STATE(u64 a)
+{
+	/* 'a' = TL4 queue index; 0x10000-byte stride per queue. */
+	return 0x1240 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_schedule
+ *
+ * NIX AF Transmit Level 4 Scheduling Control Registers This register has
+ * the same bit fields as NIX_AF_TL2()_SCHEDULE.
+ */
+union nixx_af_tl4x_schedule {
+	u64 u;
+	struct nixx_af_tl4x_schedule_s {
+		u64 rr_quantum                       : 24;
+		u64 prio                             : 4;
+		u64 reserved_28_63                   : 36;
+	} s;
+	/* struct nixx_af_tl4x_schedule_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_SCHEDULE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_SCHEDULE(u64 a)
+{
+	/* 'a' = TL4 queue index; 0x10000-byte stride per queue. */
+	return 0x1200 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_sdp_link_cfg
+ *
+ * NIX AF Transmit Level 4 Link Configuration Registers These registers
+ * specify which TL4 queues transmit to and are optionally backpressured
+ * by SDP.
+ */
+union nixx_af_tl4x_sdp_link_cfg {
+	u64 u;
+	struct nixx_af_tl4x_sdp_link_cfg_s {
+		u64 relchan                          : 8;
+		u64 reserved_8_11                    : 4;
+		u64 ena                              : 1;
+		u64 bp_ena                           : 1;
+		u64 reserved_14_63                   : 50;
+	} s;
+	/* struct nixx_af_tl4x_sdp_link_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_SDP_LINK_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_SDP_LINK_CFG(u64 a)
+{
+	/* 'a' = TL4 queue index; note the 0xb10 base, outside the 0x12xx
+	 * range used by the other TL4 per-queue registers.
+	 */
+	return 0xb10 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_shape
+ *
+ * NIX AF Transmit Level 4 Shaping Control Registers This register has
+ * the same bit fields as NIX_AF_TL2()_SHAPE.
+ */
+union nixx_af_tl4x_shape {
+	u64 u;
+	struct nixx_af_tl4x_shape_s {
+		u64 adjust                           : 9;
+		u64 red_algo                         : 2;
+		u64 red_disable                      : 1;
+		u64 yellow_disable                   : 1;
+		u64 reserved_13_23                   : 11;
+		u64 length_disable                   : 1;
+		u64 schedule_list                    : 2;
+		u64 reserved_27_63                   : 37;
+	} s;
+	/* struct nixx_af_tl4x_shape_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_SHAPE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_SHAPE(u64 a)
+{
+	/* 'a' = TL4 queue index; 0x10000-byte stride per queue. */
+	return 0x1210 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_shape_state
+ *
+ * NIX AF Transmit Level 4 Shaping State Registers This register has the
+ * same bit fields as NIX_AF_TL2()_SHAPE_STATE. This register must not be
+ * written during normal operation.
+ */
+union nixx_af_tl4x_shape_state {
+	u64 u;
+	struct nixx_af_tl4x_shape_state_s {
+		u64 cir_accum                        : 26;
+		u64 pir_accum                        : 26;
+		u64 color                            : 2;
+		u64 reserved_54_63                   : 10;
+	} s;
+	/* struct nixx_af_tl4x_shape_state_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_SHAPE_STATE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_SHAPE_STATE(u64 a)
+{
+	/* 'a' = TL4 queue index; 0x10000-byte stride per queue. */
+	return 0x1250 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_sw_xoff
+ *
+ * NIX AF Transmit Level 4 Software Controlled XOFF Registers This
+ * register has the same bit fields as NIX_AF_TL1()_SW_XOFF
+ */
+union nixx_af_tl4x_sw_xoff {
+	u64 u;
+	struct nixx_af_tl4x_sw_xoff_s {
+		u64 xoff                             : 1;
+		u64 drain                            : 1;
+		u64 reserved_2                       : 1;
+		u64 drain_irq                        : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct nixx_af_tl4x_sw_xoff_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_SW_XOFF(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_SW_XOFF(u64 a)
+{
+	/* 'a' = TL4 queue index; 0x10000-byte stride per queue. */
+	return 0x1270 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_topology
+ *
+ * NIX AF Transmit Level 4 Topology Registers
+ */
+union nixx_af_tl4x_topology {
+	u64 u;
+	struct nixx_af_tl4x_topology_s {
+		u64 reserved_0                       : 1;
+		u64 rr_prio                          : 4;
+		u64 reserved_5_31                    : 27;
+		u64 prio_anchor                      : 9;
+		u64 reserved_41_63                   : 23;
+	} s;
+	/* struct nixx_af_tl4x_topology_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_TOPOLOGY(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_TOPOLOGY(u64 a)
+{
+	/* 'a' = TL4 queue index; 0x10000-byte stride per queue. */
+	return 0x1280 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4#_yellow
+ *
+ * INTERNAL: NIX Transmit Level 4 Yellow State Debug Register  This
+ * register has the same bit fields as NIX_AF_TL3()_YELLOW
+ */
+union nixx_af_tl4x_yellow {
+	u64 u;
+	struct nixx_af_tl4x_yellow_s {
+		u64 tail                             : 9;
+		u64 reserved_9                       : 1;
+		u64 head                             : 9;
+		u64 reserved_19_63                   : 45;
+	} s;
+	/* struct nixx_af_tl4x_yellow_s cn; */
+};
+
+static inline u64 NIXX_AF_TL4X_YELLOW(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4X_YELLOW(u64 a)
+{
+	/* 'a' = TL4 queue index; 0x10000-byte stride per queue. */
+	return 0x12a0 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tl4_const
+ *
+ * NIX AF Transmit Level 4 Constants Register This register contains
+ * constants for software discovery.
+ */
+union nixx_af_tl4_const {
+	u64 u;
+	struct nixx_af_tl4_const_s {
+		u64 count                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct nixx_af_tl4_const_s cn; */
+};
+
+/* Single (non-indexed) register: fixed offset within BAR0. */
+static inline u64 NIXX_AF_TL4_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TL4_CONST(void)
+{
+	return 0x88;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_link#_expr_credit
+ *
+ * INTERNAL: NIX AF Transmit Link Express Credit Registers  Internal:
+ * 802.3br frame preemption/express path is defeatured. Old definition:
+ * These registers track credits per link for express packets that may
+ * potentially preempt normal packets. Link index enumerated by
+ * NIX_LINK_E.
+ */
+union nixx_af_tx_linkx_expr_credit {
+	u64 u;
+	struct nixx_af_tx_linkx_expr_credit_s {
+		u64 reserved_0                       : 1;
+		u64 cc_enable                        : 1;
+		u64 cc_packet_cnt                    : 10;
+		u64 cc_unit_cnt                      : 20;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct nixx_af_tx_linkx_expr_credit_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_LINKX_EXPR_CREDIT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_LINKX_EXPR_CREDIT(u64 a)
+{
+	/* 'a' = link index (NIX_LINK_E); 0x10000-byte stride per link. */
+	return 0xa10 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_link#_hw_xoff
+ *
+ * NIX AF Transmit Link Hardware Controlled XOFF Registers Link index
+ * enumerated by NIX_LINK_E.
+ */
+union nixx_af_tx_linkx_hw_xoff {
+	u64 u;
+	struct nixx_af_tx_linkx_hw_xoff_s {
+		u64 chan_xoff                        : 64;
+	} s;
+	/* struct nixx_af_tx_linkx_hw_xoff_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_LINKX_HW_XOFF(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_LINKX_HW_XOFF(u64 a)
+{
+	/* 'a' = link index (NIX_LINK_E); 0x10000-byte stride per link. */
+	return 0xa30 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_link#_norm_credit
+ *
+ * NIX AF Transmit Link Normal Credit Registers These registers track
+ * credits per link for normal packets sent to CGX and LBK. Link index
+ * enumerated by NIX_LINK_E.
+ */
+union nixx_af_tx_linkx_norm_credit {
+	u64 u;
+	struct nixx_af_tx_linkx_norm_credit_s {
+		u64 reserved_0                       : 1;
+		u64 cc_enable                        : 1;
+		u64 cc_packet_cnt                    : 10;
+		u64 cc_unit_cnt                      : 20;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct nixx_af_tx_linkx_norm_credit_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_LINKX_NORM_CREDIT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_LINKX_NORM_CREDIT(u64 a)
+{
+	/* 'a' = link index (NIX_LINK_E); 0x10000-byte stride per link. */
+	return 0xa00 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_link#_sw_xoff
+ *
+ * INTERNAL: NIX AF Transmit Link Software Controlled XOFF Registers
+ * Link index enumerated by NIX_LINK_E. Internal: Defeatured registers.
+ * Software should instead use NIX_AF_TL3()_SW_XOFF registers when
+ * NIX_AF_PSE_CHANNEL_LEVEL[BP_LEVEL] is set and NIX_AF_TL2()_SW_XOFF
+ * registers when NIX_AF_PSE_CHANNEL_LEVEL[BP_LEVEL] is clear.
+ */
+union nixx_af_tx_linkx_sw_xoff {
+	u64 u;
+	struct nixx_af_tx_linkx_sw_xoff_s {
+		u64 chan_xoff                        : 64;
+	} s;
+	/* struct nixx_af_tx_linkx_sw_xoff_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_LINKX_SW_XOFF(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_LINKX_SW_XOFF(u64 a)
+{
+	/* 'a' = link index (NIX_LINK_E); 0x10000-byte stride per link. */
+	return 0xa20 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_mcast#
+ *
+ * NIX AF Transmit Multicast Registers These registers access transmit
+ * multicast table entries used to specify multicast replication lists.
+ * Each list consists of linked entries with [EOL] = 1 in the last entry.
+ * A transmit packet is multicast when the action returned by NPC has
+ * NIX_TX_ACTION_S[OP] = NIX_TX_ACTIONOP_E::MCAST. NIX_TX_ACTION_S[INDEX]
+ * points to the start of the multicast replication list, and [EOL] = 1
+ * indicates the end of list.
+ */
+union nixx_af_tx_mcastx {
+	u64 u;
+	struct nixx_af_tx_mcastx_s {
+		u64 channel                          : 12;
+		u64 eol                              : 1;
+		u64 reserved_13_15                   : 3;
+		u64 next                             : 16;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct nixx_af_tx_mcastx_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_MCASTX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_MCASTX(u64 a)
+{
+	/* 'a' = multicast table entry index. NOTE(review): stride here is
+	 * 0x8000, not the 0x10000 used by most indexed NIX AF registers —
+	 * intentional per the generated register map.
+	 */
+	return 0x1900 + 0x8000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_npc_capture_config
+ *
+ * NIX AF Transmit NPC Response Capture Configuration Register Configures
+ * the NPC response capture logic for transmit packets. When enabled,
+ * allows NPC responses for selected packets to be captured in
+ * NIX_AF_TX_NPC_CAPTURE_INFO and NIX_AF_TX_NPC_CAPTURE_RESP().
+ */
+union nixx_af_tx_npc_capture_config {
+	u64 u;
+	struct nixx_af_tx_npc_capture_config_s {
+		u64 en                               : 1;
+		u64 continuous                       : 1;
+		u64 lso_segnum_en                    : 1;
+		u64 sqe_id_en                        : 1;
+		u64 sq_id_en                         : 1;
+		u64 lf_id_en                         : 1;
+		u64 reserved_6_11                    : 6;
+		u64 lso_segnum                       : 8;
+		u64 sqe_id                           : 16;
+		u64 sq_id                            : 20;
+		u64 lf_id                            : 8;
+	} s;
+	/* struct nixx_af_tx_npc_capture_config_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_NPC_CAPTURE_CONFIG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_NPC_CAPTURE_CONFIG(void)
+{
+	return 0x660;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_npc_capture_info
+ *
+ * NIX AF Transmit NPC Response Capture Information Register This
+ * register contains captured NPC response information for a transmit
+ * packet. See NIX_AF_TX_NPC_CAPTURE_CONFIG.
+ */
+union nixx_af_tx_npc_capture_info {
+	u64 u;
+	struct nixx_af_tx_npc_capture_info_s {
+		u64 vld                              : 1;
+		u64 reserved_1_11                    : 11;
+		u64 lso_segnum                       : 8;
+		u64 sqe_id                           : 16;
+		u64 sq_id                            : 20;
+		u64 lf_id                            : 8;
+	} s;
+	/* struct nixx_af_tx_npc_capture_info_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_NPC_CAPTURE_INFO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_NPC_CAPTURE_INFO(void)
+{
+	return 0x668;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_npc_capture_resp#
+ *
+ * NIX AF Transmit NPC Capture Response Registers These registers contain
+ * the captured NPC response for a transmit packet when
+ * NIX_AF_TX_NPC_CAPTURE_INFO[VLD] is set. See also
+ * NIX_AF_TX_NPC_CAPTURE_CONFIG.
+ */
+union nixx_af_tx_npc_capture_respx {
+	u64 u;
+	struct nixx_af_tx_npc_capture_respx_s {
+		u64 data                             : 64;
+	} s;
+	/* struct nixx_af_tx_npc_capture_respx_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_NPC_CAPTURE_RESPX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_NPC_CAPTURE_RESPX(u64 a)
+{
+	/* 'a' = response word index; consecutive 64-bit words (8-byte stride). */
+	return 0x680 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_tstmp_cfg
+ *
+ * NIX AF Transmit Timestamp Configuration Register
+ */
+union nixx_af_tx_tstmp_cfg {
+	u64 u;
+	struct nixx_af_tx_tstmp_cfg_s {
+		u64 tstmp_wd_period                  : 4;
+		u64 reserved_4_7                     : 4;
+		u64 express                          : 16;
+		u64 reserved_24_63                   : 40;
+	} s;
+	/* struct nixx_af_tx_tstmp_cfg_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_TSTMP_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_TSTMP_CFG(void)
+{
+	return 0xc0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_vtag_def#_ctl
+ *
+ * NIX AF Transmit Vtag Definition Control Registers The transmit Vtag
+ * definition table specifies Vtag layers (e.g. VLAN, E-TAG) to
+ * optionally insert or replace in the TX packet header. Indexed by
+ * NIX_TX_VTAG_ACTION_S[VTAG*_DEF].
+ */
+union nixx_af_tx_vtag_defx_ctl {
+	u64 u;
+	struct nixx_af_tx_vtag_defx_ctl_s {
+		u64 size                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_af_tx_vtag_defx_ctl_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_VTAG_DEFX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_VTAG_DEFX_CTL(u64 a)
+{
+	/* 'a' = Vtag definition table index; 0x10000-byte stride. */
+	return 0x1a00 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_af_tx_vtag_def#_data
+ *
+ * NIX AF Transmit Vtag Definition Data Registers See
+ * NIX_AF_TX_VTAG_DEF()_CTL.
+ */
+union nixx_af_tx_vtag_defx_data {
+	u64 u;
+	struct nixx_af_tx_vtag_defx_data_s {
+		u64 data                             : 64;
+	} s;
+	/* struct nixx_af_tx_vtag_defx_data_s cn; */
+};
+
+static inline u64 NIXX_AF_TX_VTAG_DEFX_DATA(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_AF_TX_VTAG_DEFX_DATA(u64 a)
+{
+	/* 'a' = Vtag definition table index; 0x10000-byte stride. */
+	return 0x1a10 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cfg
+ *
+ * NIX LF Configuration Register
+ */
+union nixx_lf_cfg {
+	u64 u;
+	struct nixx_lf_cfg_s {
+		u64 tcp_timer_int_ena                : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_cfg_s cn; */
+};
+
+/* LF-scope register: fixed offset within the PF/VF BAR2 window. */
+static inline u64 NIXX_LF_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CFG(void)
+{
+	return 0x100;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cint#_cnt
+ *
+ * NIX LF Completion Interrupt Count Registers
+ */
+union nixx_lf_cintx_cnt {
+	u64 u;
+	struct nixx_lf_cintx_cnt_s {
+		u64 ecount                           : 32;
+		u64 qcount                           : 16;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_lf_cintx_cnt_s cn; */
+};
+
+static inline u64 NIXX_LF_CINTX_CNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CINTX_CNT(u64 a)
+{
+	/* 'a' = completion interrupt (CINT) index; 0x1000-byte stride. */
+	return 0xd00 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cint#_ena_w1c
+ *
+ * NIX LF Completion Interrupt Enable Clear Registers This register
+ * clears interrupt enable bits.
+ */
+union nixx_lf_cintx_ena_w1c {
+	u64 u;
+	struct nixx_lf_cintx_ena_w1c_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_cintx_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_LF_CINTX_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CINTX_ENA_W1C(u64 a)
+{
+	/* 'a' = completion interrupt (CINT) index; 0x1000-byte stride. */
+	return 0xd50 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cint#_ena_w1s
+ *
+ * NIX LF Completion Interrupt Enable Set Registers This register sets
+ * interrupt enable bits.
+ */
+union nixx_lf_cintx_ena_w1s {
+	u64 u;
+	struct nixx_lf_cintx_ena_w1s_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_cintx_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_CINTX_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CINTX_ENA_W1S(u64 a)
+{
+	/* 'a' = completion interrupt (CINT) index; 0x1000-byte stride. */
+	return 0xd40 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cint#_int
+ *
+ * NIX LF Completion Interrupt Registers
+ */
+union nixx_lf_cintx_int {
+	u64 u;
+	struct nixx_lf_cintx_int_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_cintx_int_s cn; */
+};
+
+static inline u64 NIXX_LF_CINTX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CINTX_INT(u64 a)
+{
+	/* 'a' = completion interrupt (CINT) index; 0x1000-byte stride. */
+	return 0xd20 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cint#_int_w1s
+ *
+ * NIX LF Completion Interrupt Set Registers This register sets interrupt
+ * bits.
+ */
+union nixx_lf_cintx_int_w1s {
+	u64 u;
+	struct nixx_lf_cintx_int_w1s_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_cintx_int_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_CINTX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CINTX_INT_W1S(u64 a)
+{
+	/* 'a' = completion interrupt (CINT) index; 0x1000-byte stride. */
+	return 0xd30 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cint#_wait
+ *
+ * NIX LF Completion Interrupt Count Registers
+ */
+union nixx_lf_cintx_wait {
+	u64 u;
+	struct nixx_lf_cintx_wait_s {
+		u64 ecount_wait                      : 32;
+		u64 qcount_wait                      : 16;
+		u64 time_wait                        : 8;
+		u64 reserved_56_63                   : 8;
+	} s;
+	/* struct nixx_lf_cintx_wait_s cn; */
+};
+
+static inline u64 NIXX_LF_CINTX_WAIT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CINTX_WAIT(u64 a)
+{
+	/* 'a' = completion interrupt (CINT) index; 0x1000-byte stride. */
+	return 0xd10 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cq_op_door
+ *
+ * NIX LF CQ Doorbell Operation Register A write to this register
+ * dequeues CQEs from a CQ ring within the LF. A read is RAZ.  RSL
+ * accesses to this register are RAZ/WI.
+ */
+union nixx_lf_cq_op_door {
+	u64 u;
+	struct nixx_lf_cq_op_door_s {
+		u64 count                            : 16;
+		u64 reserved_16_31                   : 16;
+		u64 cq                               : 20;
+		u64 reserved_52_63                   : 12;
+	} s;
+	/* struct nixx_lf_cq_op_door_s cn; */
+};
+
+static inline u64 NIXX_LF_CQ_OP_DOOR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CQ_OP_DOOR(void)
+{
+	return 0xb30;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cq_op_int
+ *
+ * NIX LF Completion Queue Interrupt Operation Register A 64-bit atomic
+ * load-and-add to this register reads CQ interrupts and interrupt
+ * enables. A write optionally sets or clears interrupts and interrupt
+ * enables. A read is RAZ.  RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_cq_op_int {
+	u64 u;
+	struct nixx_lf_cq_op_int_s {
+		u64 cq_err_int                       : 8;
+		u64 cq_err_int_ena                   : 8;
+		u64 reserved_16_41                   : 26;
+		u64 op_err                           : 1;
+		u64 setop                            : 1;
+		u64 cq                               : 20;
+	} s;
+	/* struct nixx_lf_cq_op_int_s cn; */
+};
+
+static inline u64 NIXX_LF_CQ_OP_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CQ_OP_INT(void)
+{
+	return 0xb00;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_cq_op_status
+ *
+ * NIX LF Completion Queue Status Operation Register A 64-bit atomic
+ * load-and-add to this register reads NIX_CQ_CTX_S[HEAD,TAIL]. The
+ * atomic write data has format NIX_OP_Q_WDATA_S and selects the CQ
+ * within LF.  All other accesses to this register (e.g. reads and
+ * writes) are RAZ/WI.  RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_cq_op_status {
+	u64 u;
+	struct nixx_lf_cq_op_status_s {
+		u64 tail                             : 20;
+		u64 head                             : 20;
+		u64 reserved_40_45                   : 6;
+		u64 cq_err                           : 1;
+		u64 reserved_47_62                   : 16;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_cq_op_status_s cn; */
+};
+
+static inline u64 NIXX_LF_CQ_OP_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_CQ_OP_STATUS(void)
+{
+	return 0xb40;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_err_int
+ *
+ * NIX LF Error Interrupt Register
+ */
+union nixx_lf_err_int {
+	u64 u;
+	/* Same field layout is repeated verbatim for the _w1s/_ena_w1c/
+	 * _ena_w1s aliases of this register below.
+	 */
+	struct nixx_lf_err_int_s {
+		u64 sqb_fault                        : 1;
+		u64 sq_ctx_fault                     : 1;
+		u64 rq_ctx_fault                     : 1;
+		u64 cq_ctx_fault                     : 1;
+		u64 reserved_4                       : 1;
+		u64 rsse_fault                       : 1;
+		u64 ipsec_dyno_fault                 : 1;
+		u64 sq_disabled                      : 1;
+		u64 sq_oor                           : 1;
+		u64 send_jump_fault                  : 1;
+		u64 send_sg_fault                    : 1;
+		u64 rq_disabled                      : 1;
+		u64 rq_oor                           : 1;
+		u64 rx_wqe_fault                     : 1;
+		u64 rss_err                          : 1;
+		u64 reserved_15_19                   : 5;
+		u64 dyno_err                         : 1;
+		u64 reserved_21_23                   : 3;
+		u64 cq_disabled                      : 1;
+		u64 cq_oor                           : 1;
+		u64 reserved_26_27                   : 2;
+		u64 qint_fault                       : 1;
+		u64 cint_fault                       : 1;
+		u64 reserved_30_63                   : 34;
+	} s;
+	/* struct nixx_lf_err_int_s cn; */
+};
+
+static inline u64 NIXX_LF_ERR_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_ERR_INT(void)
+{
+	return 0x220;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_err_int_ena_w1c
+ *
+ * NIX LF Error Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union nixx_lf_err_int_ena_w1c {
+	u64 u;
+	struct nixx_lf_err_int_ena_w1c_s {
+		u64 sqb_fault                        : 1;
+		u64 sq_ctx_fault                     : 1;
+		u64 rq_ctx_fault                     : 1;
+		u64 cq_ctx_fault                     : 1;
+		u64 reserved_4                       : 1;
+		u64 rsse_fault                       : 1;
+		u64 ipsec_dyno_fault                 : 1;
+		u64 sq_disabled                      : 1;
+		u64 sq_oor                           : 1;
+		u64 send_jump_fault                  : 1;
+		u64 send_sg_fault                    : 1;
+		u64 rq_disabled                      : 1;
+		u64 rq_oor                           : 1;
+		u64 rx_wqe_fault                     : 1;
+		u64 rss_err                          : 1;
+		u64 reserved_15_19                   : 5;
+		u64 dyno_err                         : 1;
+		u64 reserved_21_23                   : 3;
+		u64 cq_disabled                      : 1;
+		u64 cq_oor                           : 1;
+		u64 reserved_26_27                   : 2;
+		u64 qint_fault                       : 1;
+		u64 cint_fault                       : 1;
+		u64 reserved_30_63                   : 34;
+	} s;
+	/* struct nixx_lf_err_int_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_LF_ERR_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_ERR_INT_ENA_W1C(void)
+{
+	return 0x230;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_err_int_ena_w1s
+ *
+ * NIX LF Error Interrupt Enable Set Register This register sets
+ * interrupt enable bits.
+ */
+union nixx_lf_err_int_ena_w1s {
+	u64 u;
+	struct nixx_lf_err_int_ena_w1s_s {
+		u64 sqb_fault                        : 1;
+		u64 sq_ctx_fault                     : 1;
+		u64 rq_ctx_fault                     : 1;
+		u64 cq_ctx_fault                     : 1;
+		u64 reserved_4                       : 1;
+		u64 rsse_fault                       : 1;
+		u64 ipsec_dyno_fault                 : 1;
+		u64 sq_disabled                      : 1;
+		u64 sq_oor                           : 1;
+		u64 send_jump_fault                  : 1;
+		u64 send_sg_fault                    : 1;
+		u64 rq_disabled                      : 1;
+		u64 rq_oor                           : 1;
+		u64 rx_wqe_fault                     : 1;
+		u64 rss_err                          : 1;
+		u64 reserved_15_19                   : 5;
+		u64 dyno_err                         : 1;
+		u64 reserved_21_23                   : 3;
+		u64 cq_disabled                      : 1;
+		u64 cq_oor                           : 1;
+		u64 reserved_26_27                   : 2;
+		u64 qint_fault                       : 1;
+		u64 cint_fault                       : 1;
+		u64 reserved_30_63                   : 34;
+	} s;
+	/* struct nixx_lf_err_int_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_ERR_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_ERR_INT_ENA_W1S(void)
+{
+	return 0x238;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_err_int_w1s
+ *
+ * NIX LF Error Interrupt Set Register This register sets interrupt bits.
+ */
+union nixx_lf_err_int_w1s {
+	u64 u;
+	struct nixx_lf_err_int_w1s_s {
+		u64 sqb_fault                        : 1;
+		u64 sq_ctx_fault                     : 1;
+		u64 rq_ctx_fault                     : 1;
+		u64 cq_ctx_fault                     : 1;
+		u64 reserved_4                       : 1;
+		u64 rsse_fault                       : 1;
+		u64 ipsec_dyno_fault                 : 1;
+		u64 sq_disabled                      : 1;
+		u64 sq_oor                           : 1;
+		u64 send_jump_fault                  : 1;
+		u64 send_sg_fault                    : 1;
+		u64 rq_disabled                      : 1;
+		u64 rq_oor                           : 1;
+		u64 rx_wqe_fault                     : 1;
+		u64 rss_err                          : 1;
+		u64 reserved_15_19                   : 5;
+		u64 dyno_err                         : 1;
+		u64 reserved_21_23                   : 3;
+		u64 cq_disabled                      : 1;
+		u64 cq_oor                           : 1;
+		u64 reserved_26_27                   : 2;
+		u64 qint_fault                       : 1;
+		u64 cint_fault                       : 1;
+		u64 reserved_30_63                   : 34;
+	} s;
+	/* struct nixx_lf_err_int_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_ERR_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_ERR_INT_W1S(void)
+{
+	return 0x228;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_gint
+ *
+ * NIX LF General Interrupt Register
+ */
+union nixx_lf_gint {
+	u64 u;
+	/* Same two-bit layout is repeated for the _w1s/_ena_w1c/_ena_w1s
+	 * aliases below.
+	 */
+	struct nixx_lf_gint_s {
+		u64 drop                             : 1;
+		u64 tcp_timer                        : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct nixx_lf_gint_s cn; */
+};
+
+static inline u64 NIXX_LF_GINT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_GINT(void)
+{
+	return 0x200;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_gint_ena_w1c
+ *
+ * NIX LF General Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union nixx_lf_gint_ena_w1c {
+	u64 u;
+	struct nixx_lf_gint_ena_w1c_s {
+		u64 drop                             : 1;
+		u64 tcp_timer                        : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct nixx_lf_gint_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_LF_GINT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_GINT_ENA_W1C(void)
+{
+	return 0x210;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_gint_ena_w1s
+ *
+ * NIX LF General Interrupt Enable Set Register This register sets
+ * interrupt enable bits.
+ */
+union nixx_lf_gint_ena_w1s {
+	u64 u;
+	struct nixx_lf_gint_ena_w1s_s {
+		u64 drop                             : 1;
+		u64 tcp_timer                        : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct nixx_lf_gint_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_GINT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_GINT_ENA_W1S(void)
+{
+	return 0x218;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_gint_w1s
+ *
+ * NIX LF General Interrupt Set Register This register sets interrupt
+ * bits.
+ */
+union nixx_lf_gint_w1s {
+	u64 u;
+	struct nixx_lf_gint_w1s_s {
+		u64 drop                             : 1;
+		u64 tcp_timer                        : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct nixx_lf_gint_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_GINT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_GINT_W1S(void)
+{
+	return 0x208;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_mnq_err_dbg
+ *
+ * NIX LF Meta-descriptor Enqueue Error Debug Register This register
+ * captures debug info for an error detected during send meta-descriptor
+ * enqueue from an SQ to an SMQ. Hardware sets [VALID] when the debug
+ * info is captured, and subsequent errors are not captured until
+ * software clears [VALID] by writing a one to it.
+ */
+union nixx_lf_mnq_err_dbg {
+	u64 u;
+	struct nixx_lf_mnq_err_dbg_s {
+		u64 errcode                          : 8;
+		u64 sq                               : 20;
+		u64 sqe_id                           : 16;
+		u64 valid                            : 1;
+		u64 reserved_45_63                   : 19;
+	} s;
+	/* struct nixx_lf_mnq_err_dbg_s cn; */
+};
+
+static inline u64 NIXX_LF_MNQ_ERR_DBG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_MNQ_ERR_DBG(void)
+{
+	return 0x270;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_op_ipsec_dyno_cnt
+ *
+ * INTERNAL: NIX LF IPSEC Dynamic Ordering Counter Operation Register
+ * Internal: Not used; no IPSEC fast-path. All accesses are RAZ/WI.
+ */
+union nixx_lf_op_ipsec_dyno_cnt {
+	u64 u;
+	struct nixx_lf_op_ipsec_dyno_cnt_s {
+		u64 count                            : 32;
+		u64 reserved_32_46                   : 15;
+		u64 storeop                          : 1;
+		u64 dyno_sel                         : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_op_ipsec_dyno_cnt_s cn; */
+};
+
+static inline u64 NIXX_LF_OP_IPSEC_DYNO_CNT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_OP_IPSEC_DYNO_CNT(void)
+{
+	return 0x980;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_op_send#
+ *
+ * NIX LF Send Operation Registers An LMTST (or large store from CPT) to
+ * this address enqueues one or more SQEs to a send queue.
+ * NIX_SEND_HDR_S[SQ] in the first SQE selects the send queue.  The maximum
+ * size of each SQE is specified by NIX_SQ_CTX_S[MAX_SQE_SIZE].  A read
+ * to this address is RAZ.  An RSL access to this address will fault.
+ * The endianness of the instruction write data is controlled by
+ * NIX_AF_LF()_CFG[BE].  When a NIX_SEND_JUMP_S is not present in the
+ * SQE, the SQE consists of the entire send descriptor.  When a
+ * NIX_SEND_JUMP_S is present in the SQE, the SQE must contain exactly
+ * the portion of the send descriptor up to and including the
+ * NIX_SEND_JUMP_S, and the remainder of the send descriptor must be at
+ * LF IOVA NIX_SEND_JUMP_S[ADDR] in LLC/DRAM.  Software must ensure that
+ * all LLC/DRAM locations that will be referenced by NIX while processing
+ * this descriptor, including all packet data and post-jump
+ * subdescriptors contain the latest updates before issuing the LMTST. A
+ * DMB instruction may be required prior to the LMTST to ensure this. A
+ * DMB following the LMTST may be useful if SQ descriptor ordering
+ * matters and more than one CPU core is simultaneously enqueueing to the
+ * same SQ.
+ */
+union nixx_lf_op_sendx {
+	u64 u;
+	struct nixx_lf_op_sendx_s {
+		u64 data                             : 64;
+	} s;
+	/* struct nixx_lf_op_sendx_s cn; */
+};
+
+static inline u64 NIXX_LF_OP_SENDX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_OP_SENDX(u64 a)
+{
+	return 0x800 + 8 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_qint#_cnt
+ *
+ * NIX LF Queue Interrupt Count Registers
+ */
+union nixx_lf_qintx_cnt {
+	u64 u;
+	struct nixx_lf_qintx_cnt_s {
+		u64 count                            : 22;
+		u64 reserved_22_63                   : 42;
+	} s;
+	/* struct nixx_lf_qintx_cnt_s cn; */
+};
+
+static inline u64 NIXX_LF_QINTX_CNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_QINTX_CNT(u64 a)
+{
+	return 0xc00 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_qint#_ena_w1c
+ *
+ * NIX LF Queue Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union nixx_lf_qintx_ena_w1c {
+	u64 u;
+	struct nixx_lf_qintx_ena_w1c_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_qintx_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_LF_QINTX_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_QINTX_ENA_W1C(u64 a)
+{
+	return 0xc30 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_qint#_ena_w1s
+ *
+ * NIX LF Queue Interrupt Enable Set Registers This register sets
+ * interrupt enable bits.
+ */
+union nixx_lf_qintx_ena_w1s {
+	u64 u;
+	struct nixx_lf_qintx_ena_w1s_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_qintx_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_QINTX_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_QINTX_ENA_W1S(u64 a)
+{
+	return 0xc20 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_qint#_int
+ *
+ * NIX LF Queue Interrupt Registers
+ */
+union nixx_lf_qintx_int {
+	u64 u;
+	struct nixx_lf_qintx_int_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_qintx_int_s cn; */
+};
+
+static inline u64 NIXX_LF_QINTX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_QINTX_INT(u64 a)
+{
+	return 0xc10 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_qint#_int_w1s
+ *
+ * INTERNAL: NIX LF Queue Interrupt Set Registers
+ */
+union nixx_lf_qintx_int_w1s {
+	u64 u;
+	struct nixx_lf_qintx_int_w1s_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct nixx_lf_qintx_int_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_QINTX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_QINTX_INT_W1S(u64 a)
+{
+	return 0xc18 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_ras
+ *
+ * NIX LF RAS Interrupt Register
+ */
+union nixx_lf_ras {
+	u64 u;
+	struct nixx_lf_ras_s {
+		u64 sqb_poison                       : 1;
+		u64 sq_ctx_poison                    : 1;
+		u64 rq_ctx_poison                    : 1;
+		u64 cq_ctx_poison                    : 1;
+		u64 reserved_4                       : 1;
+		u64 rsse_poison                      : 1;
+		u64 ipsec_dyno_poison                : 1;
+		u64 send_jump_poison                 : 1;
+		u64 send_sg_poison                   : 1;
+		u64 qint_poison                      : 1;
+		u64 cint_poison                      : 1;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_lf_ras_s cn; */
+};
+
+static inline u64 NIXX_LF_RAS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RAS(void)
+{
+	return 0x240;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_ras_ena_w1c
+ *
+ * NIX LF RAS Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union nixx_lf_ras_ena_w1c {
+	u64 u;
+	struct nixx_lf_ras_ena_w1c_s {
+		u64 sqb_poison                       : 1;
+		u64 sq_ctx_poison                    : 1;
+		u64 rq_ctx_poison                    : 1;
+		u64 cq_ctx_poison                    : 1;
+		u64 reserved_4                       : 1;
+		u64 rsse_poison                      : 1;
+		u64 ipsec_dyno_poison                : 1;
+		u64 send_jump_poison                 : 1;
+		u64 send_sg_poison                   : 1;
+		u64 qint_poison                      : 1;
+		u64 cint_poison                      : 1;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_lf_ras_ena_w1c_s cn; */
+};
+
+static inline u64 NIXX_LF_RAS_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RAS_ENA_W1C(void)
+{
+	return 0x250;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_ras_ena_w1s
+ *
+ * NIX LF RAS Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union nixx_lf_ras_ena_w1s {
+	u64 u;
+	struct nixx_lf_ras_ena_w1s_s {
+		u64 sqb_poison                       : 1;
+		u64 sq_ctx_poison                    : 1;
+		u64 rq_ctx_poison                    : 1;
+		u64 cq_ctx_poison                    : 1;
+		u64 reserved_4                       : 1;
+		u64 rsse_poison                      : 1;
+		u64 ipsec_dyno_poison                : 1;
+		u64 send_jump_poison                 : 1;
+		u64 send_sg_poison                   : 1;
+		u64 qint_poison                      : 1;
+		u64 cint_poison                      : 1;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_lf_ras_ena_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_RAS_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RAS_ENA_W1S(void)
+{
+	return 0x258;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_ras_w1s
+ *
+ * NIX LF RAS Interrupt Set Register This register sets interrupt bits.
+ */
+union nixx_lf_ras_w1s {
+	u64 u;
+	struct nixx_lf_ras_w1s_s {
+		u64 sqb_poison                       : 1;
+		u64 sq_ctx_poison                    : 1;
+		u64 rq_ctx_poison                    : 1;
+		u64 cq_ctx_poison                    : 1;
+		u64 reserved_4                       : 1;
+		u64 rsse_poison                      : 1;
+		u64 ipsec_dyno_poison                : 1;
+		u64 send_jump_poison                 : 1;
+		u64 send_sg_poison                   : 1;
+		u64 qint_poison                      : 1;
+		u64 cint_poison                      : 1;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct nixx_lf_ras_w1s_s cn; */
+};
+
+static inline u64 NIXX_LF_RAS_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RAS_W1S(void)
+{
+	return 0x248;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_rq_op_drop_octs
+ *
+ * NIX LF Receive Queue Dropped Octets Operation Register A 64-bit atomic
+ * load-and-add to this register reads NIX_RQ_CTX_S[DROP_OCTS]. The
+ * atomic write data has format NIX_OP_Q_WDATA_S and selects the RQ
+ * within LF.  All other accesses to this register (e.g. reads and
+ * writes) are RAZ/WI.  RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_rq_op_drop_octs {
+	u64 u;
+	struct nixx_lf_rq_op_drop_octs_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_rq_op_drop_octs_s cn; */
+};
+
+static inline u64 NIXX_LF_RQ_OP_DROP_OCTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RQ_OP_DROP_OCTS(void)
+{
+	return 0x930;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_rq_op_drop_pkts
+ *
+ * NIX LF Receive Queue Dropped Packets Operation Register A 64-bit
+ * atomic load-and-add to this register reads NIX_RQ_CTX_S[DROP_PKTS].
+ * The atomic write data has format NIX_OP_Q_WDATA_S and selects the RQ
+ * within LF.  All other accesses to this register (e.g. reads and
+ * writes) are RAZ/WI.  RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_rq_op_drop_pkts {
+	u64 u;
+	struct nixx_lf_rq_op_drop_pkts_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_rq_op_drop_pkts_s cn; */
+};
+
+static inline u64 NIXX_LF_RQ_OP_DROP_PKTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RQ_OP_DROP_PKTS(void)
+{
+	return 0x940;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_rq_op_int
+ *
+ * NIX LF Receive Queue Interrupt Operation Register A 64-bit atomic
+ * load-and-add to this register reads RQ interrupts and interrupt
+ * enables. A 64-bit write optionally sets or clears interrupts and
+ * interrupt enables.  All other accesses to this register (e.g. reads,
+ * 128-bit accesses) are RAZ/WI.  RSL accesses to this register are
+ * RAZ/WI.
+ */
+union nixx_lf_rq_op_int {
+	u64 u;
+	struct nixx_lf_rq_op_int_s {
+		u64 rq_int                           : 8;
+		u64 rq_int_ena                       : 8;
+		u64 reserved_16_41                   : 26;
+		u64 op_err                           : 1;
+		u64 setop                            : 1;
+		u64 rq                               : 20;
+	} s;
+	/* struct nixx_lf_rq_op_int_s cn; */
+};
+
+static inline u64 NIXX_LF_RQ_OP_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RQ_OP_INT(void)
+{
+	return 0x900;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_rq_op_octs
+ *
+ * NIX LF Receive Queue Octets Operation Register A 64-bit atomic load-
+ * and-add to this register reads NIX_RQ_CTX_S[OCTS]. The atomic write
+ * data has format NIX_OP_Q_WDATA_S and selects the RQ within LF.  All
+ * other accesses to this register (e.g. reads and writes) are RAZ/WI.
+ * RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_rq_op_octs {
+	u64 u;
+	struct nixx_lf_rq_op_octs_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_rq_op_octs_s cn; */
+};
+
+static inline u64 NIXX_LF_RQ_OP_OCTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RQ_OP_OCTS(void)
+{
+	return 0x910;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_rq_op_pkts
+ *
+ * NIX LF Receive Queue Packets Operation Register A 64-bit atomic load-
+ * and-add to this register reads NIX_RQ_CTX_S[PKTS]. The atomic write
+ * data has format NIX_OP_Q_WDATA_S and selects the RQ within LF.  All
+ * other accesses to this register (e.g. reads and writes) are RAZ/WI.
+ * RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_rq_op_pkts {
+	u64 u;
+	struct nixx_lf_rq_op_pkts_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_rq_op_pkts_s cn; */
+};
+
+static inline u64 NIXX_LF_RQ_OP_PKTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RQ_OP_PKTS(void)
+{
+	return 0x920;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_rq_op_re_pkts
+ *
+ * NIX LF Receive Queue Errored Packets Operation Register A 64-bit
+ * atomic load-and-add to this register reads NIX_RQ_CTX_S[RE_PKTS]. The
+ * atomic write data has format NIX_OP_Q_WDATA_S and selects the RQ
+ * within LF.  All other accesses to this register (e.g. reads and
+ * writes) are RAZ/WI.  RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_rq_op_re_pkts {
+	u64 u;
+	struct nixx_lf_rq_op_re_pkts_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_rq_op_re_pkts_s cn; */
+};
+
+static inline u64 NIXX_LF_RQ_OP_RE_PKTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RQ_OP_RE_PKTS(void)
+{
+	return 0x950;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_rx_secret#
+ *
+ * NIX LF Receive Secret Key Registers
+ */
+union nixx_lf_rx_secretx {
+	u64 u;
+	struct nixx_lf_rx_secretx_s {
+		u64 key                              : 64;
+	} s;
+	/* struct nixx_lf_rx_secretx_s cn; */
+};
+
+static inline u64 NIXX_LF_RX_SECRETX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RX_SECRETX(u64 a)
+{
+	return 0 + 8 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_rx_stat#
+ *
+ * NIX LF Receive Statistics Registers The last dimension indicates which
+ * statistic, and is enumerated by NIX_STAT_LF_RX_E.
+ */
+union nixx_lf_rx_statx {
+	u64 u;
+	struct nixx_lf_rx_statx_s {
+		u64 stat                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_lf_rx_statx_s cn; */
+};
+
+static inline u64 NIXX_LF_RX_STATX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_RX_STATX(u64 a)
+{
+	return 0x400 + 8 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_send_err_dbg
+ *
+ * NIX LF Send Error Debug Register This register captures debug info for an
+ * error detected on packet send after a meta-descriptor is granted by
+ * PSE. Hardware sets [VALID] when the debug info is captured, and
+ * subsequent errors are not captured until software clears [VALID] by
+ * writing a one to it.
+ */
+union nixx_lf_send_err_dbg {
+	u64 u;
+	struct nixx_lf_send_err_dbg_s {
+		u64 errcode                          : 8;
+		u64 sq                               : 20;
+		u64 sqe_id                           : 16;
+		u64 valid                            : 1;
+		u64 reserved_45_63                   : 19;
+	} s;
+	/* struct nixx_lf_send_err_dbg_s cn; */
+};
+
+static inline u64 NIXX_LF_SEND_ERR_DBG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_SEND_ERR_DBG(void)
+{
+	return 0x280;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_sq_op_drop_octs
+ *
+ * NIX LF Send Queue Dropped Octets Operation Register A 64-bit atomic
+ * load-and-add to this register reads NIX_SQ_CTX_S[DROP_OCTS]. The
+ * atomic write data has format NIX_OP_Q_WDATA_S and selects the SQ
+ * within LF.  All other accesses to this register (e.g. reads and
+ * writes) are RAZ/WI.  RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_sq_op_drop_octs {
+	u64 u;
+	struct nixx_lf_sq_op_drop_octs_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_sq_op_drop_octs_s cn; */
+};
+
+static inline u64 NIXX_LF_SQ_OP_DROP_OCTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_SQ_OP_DROP_OCTS(void)
+{
+	return 0xa40;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_sq_op_drop_pkts
+ *
+ * NIX LF Send Queue Dropped Packets Operation Register A 64-bit atomic
+ * load-and-add to this register reads NIX_SQ_CTX_S[DROP_PKTS]. The
+ * atomic write data has format NIX_OP_Q_WDATA_S and selects the SQ
+ * within LF.  All other accesses to this register (e.g. reads and
+ * writes) are RAZ/WI.  RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_sq_op_drop_pkts {
+	u64 u;
+	struct nixx_lf_sq_op_drop_pkts_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_sq_op_drop_pkts_s cn; */
+};
+
+static inline u64 NIXX_LF_SQ_OP_DROP_PKTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_SQ_OP_DROP_PKTS(void)
+{
+	return 0xa50;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_sq_op_err_dbg
+ *
+ * NIX LF SQ Operation Error Debug Register This register captures debug
+ * info for an error detected on LMT store to NIX_LF_OP_SEND() or when a
+ * NIX_LF_SQ_OP_* register is accessed. Hardware sets [VALID] when the
+ * debug info is captured, and subsequent errors are not captured until
+ * software clears [VALID] by writing a one to it.
+ */
+union nixx_lf_sq_op_err_dbg {
+	u64 u;
+	struct nixx_lf_sq_op_err_dbg_s {
+		u64 errcode                          : 8;
+		u64 sq                               : 20;
+		u64 sqe_id                           : 16;
+		u64 valid                            : 1;
+		u64 reserved_45_63                   : 19;
+	} s;
+	/* struct nixx_lf_sq_op_err_dbg_s cn; */
+};
+
+static inline u64 NIXX_LF_SQ_OP_ERR_DBG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_SQ_OP_ERR_DBG(void)
+{
+	return 0x260;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_sq_op_int
+ *
+ * NIX LF Send Queue Interrupt Operation Register A 64-bit atomic load-
+ * and-add to this register reads SQ interrupts, interrupt enables and
+ * XOFF status. A write optionally sets or clears interrupts, interrupt
+ * enables and XOFF status. A read is RAZ.  RSL accesses to this register
+ * are RAZ/WI.
+ */
+union nixx_lf_sq_op_int {
+	u64 u;
+	struct nixx_lf_sq_op_int_s {
+		u64 sq_int                           : 8;
+		u64 sq_int_ena                       : 8;
+		u64 xoff                             : 1;
+		u64 reserved_17_41                   : 25;
+		u64 op_err                           : 1;
+		u64 setop                            : 1;
+		u64 sq                               : 20;
+	} s;
+	/* struct nixx_lf_sq_op_int_s cn; */
+};
+
+static inline u64 NIXX_LF_SQ_OP_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_SQ_OP_INT(void)
+{
+	return 0xa00;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_sq_op_octs
+ *
+ * NIX LF Send Queue Octets Operation Register A 64-bit atomic load-and-
+ * add to this register reads NIX_SQ_CTX_S[OCTS]. The atomic write data
+ * has format NIX_OP_Q_WDATA_S and selects the SQ within LF.  All other
+ * accesses to this register (e.g. reads and writes) are RAZ/WI.  RSL
+ * accesses to this register are RAZ/WI.
+ */
+union nixx_lf_sq_op_octs {
+	u64 u;
+	struct nixx_lf_sq_op_octs_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_sq_op_octs_s cn; */
+};
+
+static inline u64 NIXX_LF_SQ_OP_OCTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_SQ_OP_OCTS(void)
+{
+	return 0xa10;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_sq_op_pkts
+ *
+ * NIX LF Send Queue Packets Operation Register A 64-bit atomic load-and-
+ * add to this register reads NIX_SQ_CTX_S[PKTS]. The atomic write data
+ * has format NIX_OP_Q_WDATA_S and selects the SQ within LF.  All other
+ * accesses to this register (e.g. reads and writes) are RAZ/WI.  RSL
+ * accesses to this register are RAZ/WI.
+ */
+union nixx_lf_sq_op_pkts {
+	u64 u;
+	struct nixx_lf_sq_op_pkts_s {
+		u64 cnt                              : 48;
+		u64 reserved_48_62                   : 15;
+		u64 op_err                           : 1;
+	} s;
+	/* struct nixx_lf_sq_op_pkts_s cn; */
+};
+
+static inline u64 NIXX_LF_SQ_OP_PKTS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_SQ_OP_PKTS(void)
+{
+	return 0xa20;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_sq_op_status
+ *
+ * NIX LF Send Queue Status Operation Register A 64-bit atomic load-and-
+ * add to this register reads status fields in NIX_SQ_CTX_S. The atomic
+ * write data has format NIX_OP_Q_WDATA_S and selects the SQ within LF.
+ * Completion of the load-and-add operation also ensures that all
+ * previously issued LMT stores to NIX_LF_OP_SEND() have completed.  All
+ * other accesses to this register (e.g. reads and writes) are RAZ/WI.
+ * RSL accesses to this register are RAZ/WI.
+ */
+union nixx_lf_sq_op_status {
+	u64 u;
+	struct nixx_lf_sq_op_status_s {
+		u64 sqb_count                        : 16;
+		u64 reserved_16_19                   : 4;
+		u64 head_offset                      : 6;
+		u64 reserved_26_27                   : 2;
+		u64 tail_offset                      : 6;
+		u64 reserved_34_62                   : 29;
+		u64 op_err                           : 1;
+	} s;
+	struct nixx_lf_sq_op_status_cn {
+		u64 sqb_count                        : 16;
+		u64 reserved_16_19                   : 4;
+		u64 head_offset                      : 6;
+		u64 reserved_26_27                   : 2;
+		u64 tail_offset                      : 6;
+		u64 reserved_34_35                   : 2;
+		u64 reserved_36_62                   : 27;
+		u64 op_err                           : 1;
+	} cn;
+};
+
+static inline u64 NIXX_LF_SQ_OP_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_SQ_OP_STATUS(void)
+{
+	return 0xa30;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) nix#_lf_tx_stat#
+ *
+ * NIX LF Transmit Statistics Registers The last dimension indicates
+ * which statistic, and is enumerated by NIX_STAT_LF_TX_E.
+ */
+union nixx_lf_tx_statx {
+	u64 u;
+	struct nixx_lf_tx_statx_s {
+		u64 stat                             : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct nixx_lf_tx_statx_s cn; */
+};
+
+static inline u64 NIXX_LF_TX_STATX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_LF_TX_STATX(u64 a)
+{
+	return 0x300 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_priv_af_int_cfg
+ *
+ * NIX Privileged Admin Function Interrupt Configuration Register
+ */
+union nixx_priv_af_int_cfg {
+	u64 u;
+	struct nixx_priv_af_int_cfg_s {
+		u64 msix_offset                      : 11;
+		u64 reserved_11                      : 1;
+		u64 msix_size                        : 8;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct nixx_priv_af_int_cfg_s cn; */
+};
+
+static inline u64 NIXX_PRIV_AF_INT_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_PRIV_AF_INT_CFG(void)
+{
+	return 0x8000000;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_priv_lf#_cfg
+ *
+ * NIX Privileged Local Function Configuration Registers These registers
+ * allow each NIX local function (LF) to be provisioned to a VF/PF for
+ * RVU. See also NIX_AF_RVU_LF_CFG_DEBUG.  Software should read this
+ * register after write to ensure that the LF is mapped to [PF_FUNC]
+ * before issuing transactions to the mapped PF and function.  [SLOT]
+ * must be zero.  Internal: Hardware ignores [SLOT] and always assumes
+ * 0x0.
+ */
+union nixx_priv_lfx_cfg {
+	u64 u;
+	struct nixx_priv_lfx_cfg_s {
+		u64 slot                             : 8;
+		u64 pf_func                          : 16;
+		u64 reserved_24_62                   : 39;
+		u64 ena                              : 1;
+	} s;
+	/* struct nixx_priv_lfx_cfg_s cn; */
+};
+
+static inline u64 NIXX_PRIV_LFX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_PRIV_LFX_CFG(u64 a)
+{
+	return 0x8000010 + 0x100 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) nix#_priv_lf#_int_cfg
+ *
+ * NIX Privileged LF Interrupt Configuration Registers
+ */
+union nixx_priv_lfx_int_cfg {
+	u64 u;
+	struct nixx_priv_lfx_int_cfg_s {
+		u64 msix_offset                      : 11;
+		u64 reserved_11                      : 1;
+		u64 msix_size                        : 8;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct nixx_priv_lfx_int_cfg_s cn; */
+};
+
+static inline u64 NIXX_PRIV_LFX_INT_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NIXX_PRIV_LFX_INT_CFG(u64 a)
+{
+	return 0x8000020 + 0x100 * a;
+}
+
+#endif /* __CSRS_NIX_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/csrs/csrs-npa.h b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-npa.h
new file mode 100644
index 0000000..b70c91b
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-npa.h
@@ -0,0 +1,2294 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_NPA_H__
+#define __CSRS_NPA_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * NPA.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Enumeration npa_af_int_vec_e
+ *
+ * NPA Admin Function Interrupt Vector Enumeration Enumerates the NPA AF
+ * MSI-X interrupt vectors.
+ */
+#define NPA_AF_INT_VEC_E_AF_ERR (3)
+#define NPA_AF_INT_VEC_E_AQ_DONE (2)
+#define NPA_AF_INT_VEC_E_GEN (1)
+#define NPA_AF_INT_VEC_E_POISON (4)
+#define NPA_AF_INT_VEC_E_RVU (0)
+
+/**
+ * Enumeration npa_aq_comp_e
+ *
+ * NPA Admin Queue Completion Enumeration Enumerates the values of
+ * NPA_AQ_RES_S[COMPCODE].
+ */
+#define NPA_AQ_COMP_E_CTX_FAULT (4)
+#define NPA_AQ_COMP_E_CTX_POISON (3)
+#define NPA_AQ_COMP_E_GOOD (1)
+#define NPA_AQ_COMP_E_LOCKERR (5)
+#define NPA_AQ_COMP_E_NOTDONE (0)
+#define NPA_AQ_COMP_E_SWERR (2)
+
+/**
+ * Enumeration npa_aq_ctype_e
+ *
+ * NPA Admin Queue Context Type Enumeration Enumerates
+ * NPA_AQ_INST_S[CTYPE] values.
+ */
+#define NPA_AQ_CTYPE_E_AURA (0)
+#define NPA_AQ_CTYPE_E_POOL (1)
+
+/**
+ * Enumeration npa_aq_instop_e
+ *
+ * NPA Admin Queue Opcode Enumeration Enumerates NPA_AQ_INST_S[OP]
+ * values.
+ */
+#define NPA_AQ_INSTOP_E_INIT (1)
+#define NPA_AQ_INSTOP_E_LOCK (4)
+#define NPA_AQ_INSTOP_E_NOP (0)
+#define NPA_AQ_INSTOP_E_READ (3)
+#define NPA_AQ_INSTOP_E_UNLOCK (5)
+#define NPA_AQ_INSTOP_E_WRITE (2)
+
+/**
+ * Enumeration npa_aura_err_int_e
+ *
+ * NPA Aura Error Interrupt Enumeration Enumerates the bit index of
+ * NPA_AURA_S[ERR_INT], and NPA_AURA_S[ERR_INT_ENA].
+ */
+#define NPA_AURA_ERR_INT_E_AURA_ADD_OVER (1)
+#define NPA_AURA_ERR_INT_E_AURA_ADD_UNDER (2)
+#define NPA_AURA_ERR_INT_E_AURA_FREE_UNDER (0)
+#define NPA_AURA_ERR_INT_E_POOL_DIS (3)
+#define NPA_AURA_ERR_INT_E_RX(a) (0 + (a))
+
+/**
+ * Enumeration npa_bpintf_e
+ *
+ * NPA Backpressure Interface Enumeration Enumerates index of
+ * NPA_AURA_S[BP_ENA].
+ */
+#define NPA_BPINTF_E_NIXX_RX(a) (0 + (a))
+
+/**
+ * Enumeration npa_inpq_e
+ *
+ * NPA Input Queue Enumeration Enumerates ALLOC/FREE input queues from
+ * coprocessors.
+ */
+#define NPA_INPQ_E_AURA_OP (0xe)
+#define NPA_INPQ_E_BPHY (7)
+#define NPA_INPQ_E_DPI (6)
+#define NPA_INPQ_E_INTERNAL_RSV (0xf)
+#define NPA_INPQ_E_NIXX_RX(a) (0 + 2 * (a))
+#define NPA_INPQ_E_NIXX_TX(a) (1 + 2 * (a))
+#define NPA_INPQ_E_RX(a) (0 + (a))
+#define NPA_INPQ_E_SSO (4)
+#define NPA_INPQ_E_TIM (5)
+
+/**
+ * Enumeration npa_lf_int_vec_e
+ *
+ * NPA Local Function Interrupt Vector Enumeration Enumerates the NPA
+ * MSI-X interrupt vectors per LF.
+ */
+#define NPA_LF_INT_VEC_E_ERR_INT (0x40)
+#define NPA_LF_INT_VEC_E_POISON (0x41)
+#define NPA_LF_INT_VEC_E_QINTX(a) (0 + (a))
+
+/**
+ * Enumeration npa_ndc0_port_e
+ *
+ * NPA NDC0 Port Enumeration Enumerates NPA NDC0 (NDC_IDX_E::NPA_U(0))
+ * ports and the PORT index of NDC_AF_PORT()_RT()_RW()_REQ_PC and
+ * NDC_AF_PORT()_RT()_RW()_LAT_PC.
+ */
+#define NPA_NDC0_PORT_E_AURA0 (0)
+#define NPA_NDC0_PORT_E_AURA1 (1)
+#define NPA_NDC0_PORT_E_POOL0 (2)
+#define NPA_NDC0_PORT_E_POOL1 (3)
+#define NPA_NDC0_PORT_E_STACK0 (4)
+#define NPA_NDC0_PORT_E_STACK1 (5)
+
+/**
+ * Enumeration npa_pool_err_int_e
+ *
+ * NPA Pool Error Interrupt Enumeration Enumerates the bit index of
+ * NPA_POOL_S[ERR_INT] and NPA_POOL_S[ERR_INT_ENA].
+ */
+#define NPA_POOL_ERR_INT_E_OVFLS (0)
+#define NPA_POOL_ERR_INT_E_PERR (2)
+#define NPA_POOL_ERR_INT_E_RX(a) (0 + (a))
+#define NPA_POOL_ERR_INT_E_RANGE (1)
+
+/**
+ * Structure npa_aq_inst_s
+ *
+ * NPA Admin Queue Instruction Structure This structure specifies the AQ
+ * instruction. Instructions and associated software structures are
+ * stored in memory as little-endian unless NPA_AF_GEN_CFG[AF_BE] is set.
+ * Hardware reads of NPA_AQ_INST_S do not allocate into LLC.  Hardware
+ * reads and writes of the context structure selected by [CTYPE], [LF]
+ * and [CINDEX] use the NDC and LLC caching style configured for that
+ * context, i.e.: * NPA_AURA_HW_S reads and writes use
+ * NPA_AF_LF()_AURAS_CFG[CACHING] and NPA_AF_LF()_AURAS_CFG[WAY_MASK]. *
+ * NPA_POOL_HW_S reads and writes use NPA_AURA_HW_S[POOL_CACHING] and
+ * NPA_AURA_HW_S[POOL_WAY_MASK].
+ */
+union npa_aq_inst_s {
+	u64 u[2];
+	struct npa_aq_inst_s_s {
+		u64 op                               : 4;
+		u64 ctype                            : 4;
+		u64 lf                               : 9;
+		u64 reserved_17_23                   : 7;
+		u64 cindex                           : 20;
+		u64 reserved_44_62                   : 19;
+		u64 doneint                          : 1;
+		u64 res_addr                         : 64;
+	} s;
+	/* struct npa_aq_inst_s_s cn; */
+};
+
+/**
+ * Structure npa_aq_res_s
+ *
+ * NPA Admin Queue Result Structure NPA writes this structure after it
+ * completes the NPA_AQ_INST_S instruction. The result structure is
+ * exactly 16 bytes, and each instruction completion produces exactly one
+ * result structure.  Results and associated software structures are
+ * stored in memory as little-endian unless NPA_AF_GEN_CFG[AF_BE] is set.
+ * When [OP] = NPA_AQ_INSTOP_E::INIT, WRITE or READ, this structure is
+ * immediately followed by context read or write data. See
+ * NPA_AQ_INSTOP_E.  Hardware writes of NPA_AQ_RES_S and context data
+ * always allocate into LLC. Hardware reads of context data do not
+ * allocate into LLC.
+ */
+union npa_aq_res_s {
+	u64 u[2];
+	struct npa_aq_res_s_s {
+		u64 op                               : 4;
+		u64 ctype                            : 4;
+		u64 compcode                         : 8;
+		u64 doneint                          : 1;
+		u64 reserved_17_63                   : 47;
+		u64 reserved_64_127                  : 64;
+	} s;
+	/* struct npa_aq_res_s_s cn; */
+};
+
+/**
+ * Structure npa_aura_op_wdata_s
+ *
+ * NPA Aura Operation Write Data Structure This structure specifies the
+ * write data format of a 64-bit atomic load-and-add to
+ * NPA_LF_AURA_OP_ALLOC() and NPA_LF_POOL_OP_PC, and a 128-bit atomic
+ * CASP operation to NPA_LF_AURA_OP_ALLOC().
+ */
+union npa_aura_op_wdata_s {
+	u64 u;
+	struct npa_aura_op_wdata_s_s {
+		u64 aura                             : 20;
+		u64 reserved_20_62                   : 43;
+		u64 drop                             : 1;
+	} s;
+	/* struct npa_aura_op_wdata_s_s cn; */
+};
+
+/**
+ * Structure npa_aura_s
+ *
+ * NPA Aura Context Structure This structure specifies the format used by
+ * software with the NPA admin queue to read and write an aura's
+ * NPA_AURA_HW_S structure maintained by hardware in LLC/DRAM.
+ */
+union npa_aura_s {
+	u64 u[8];
+	/*
+	 * NOTE(review): the reserved_M_N field names encode absolute bit
+	 * positions M..N within the 512-bit context.  The layout relies on
+	 * the compiler allocating bit-fields LSB-first across consecutive
+	 * 64-bit units (generated to match hardware); do not reorder or
+	 * resize fields.
+	 */
+	struct npa_aura_s_s {
+		u64 pool_addr                        : 64;
+		u64 ena                              : 1;
+		u64 reserved_65_66                   : 2;
+		u64 pool_caching                     : 1;
+		u64 pool_way_mask                    : 16;
+		u64 avg_con                          : 9;
+		u64 reserved_93                      : 1;
+		u64 pool_drop_ena                    : 1;
+		u64 aura_drop_ena                    : 1;
+		u64 bp_ena                           : 2;
+		u64 reserved_98_103                  : 6;
+		u64 aura_drop                        : 8;
+		u64 shift                            : 6;
+		u64 reserved_118_119                 : 2;
+		u64 avg_level                        : 8;
+		u64 count                            : 36;
+		u64 reserved_164_167                 : 4;
+		u64 nix0_bpid                        : 9;
+		u64 reserved_177_179                 : 3;
+		u64 nix1_bpid                        : 9;
+		u64 reserved_189_191                 : 3;
+		u64 limit                            : 36;
+		u64 reserved_228_231                 : 4;
+		u64 bp                               : 8;
+		u64 reserved_240_243                 : 4;
+		u64 fc_ena                           : 1;
+		u64 fc_up_crossing                   : 1;
+		u64 fc_stype                         : 2;
+		u64 fc_hyst_bits                     : 4;
+		u64 reserved_252_255                 : 4;
+		u64 fc_addr                          : 64;
+		u64 pool_drop                        : 8;
+		u64 update_time                      : 16;
+		u64 err_int                          : 8;
+		u64 err_int_ena                      : 8;
+		u64 thresh_int                       : 1;
+		u64 thresh_int_ena                   : 1;
+		u64 thresh_up                        : 1;
+		u64 reserved_363                     : 1;
+		u64 thresh_qint_idx                  : 7;
+		u64 reserved_371                     : 1;
+		u64 err_qint_idx                     : 7;
+		u64 reserved_379_383                 : 5;
+		u64 thresh                           : 36;
+		u64 reserved_420_447                 : 28;
+		u64 reserved_448_511                 : 64;
+	} s;
+	/* struct npa_aura_s_s cn; */
+};
+
+/**
+ * Structure npa_pool_s
+ *
+ * NPA Pool Context Structure This structure specifies the format used by
+ * software with the NPA admin queue to read and write a pool's
+ * NPA_POOL_HW_S structure maintained by hardware in LLC/DRAM.
+ */
+union npa_pool_s {
+	u64 u[16];
+	/*
+	 * NOTE(review): reserved_M_N names encode absolute bit positions in
+	 * the 1024-bit context; layout assumes LSB-first bit-field packing
+	 * across consecutive 64-bit units.  Do not reorder or resize fields.
+	 */
+	struct npa_pool_s_s {
+		u64 stack_base                       : 64;
+		u64 ena                              : 1;
+		u64 nat_align                        : 1;
+		u64 reserved_66_67                   : 2;
+		u64 stack_caching                    : 1;
+		u64 reserved_69_71                   : 3;
+		u64 stack_way_mask                   : 16;
+		u64 buf_offset                       : 12;
+		u64 reserved_100_103                 : 4;
+		u64 buf_size                         : 11;
+		u64 reserved_115_127                 : 13;
+		u64 stack_max_pages                  : 32;
+		u64 stack_pages                      : 32;
+		u64 op_pc                            : 48;
+		u64 reserved_240_255                 : 16;
+		u64 stack_offset                     : 4;
+		u64 reserved_260_263                 : 4;
+		u64 shift                            : 6;
+		u64 reserved_270_271                 : 2;
+		u64 avg_level                        : 8;
+		u64 avg_con                          : 9;
+		u64 fc_ena                           : 1;
+		u64 fc_stype                         : 2;
+		u64 fc_hyst_bits                     : 4;
+		u64 fc_up_crossing                   : 1;
+		u64 reserved_297_299                 : 3;
+		u64 update_time                      : 16;
+		u64 reserved_316_319                 : 4;
+		u64 fc_addr                          : 64;
+		u64 ptr_start                        : 64;
+		u64 ptr_end                          : 64;
+		u64 reserved_512_535                 : 24;
+		u64 err_int                          : 8;
+		u64 err_int_ena                      : 8;
+		u64 thresh_int                       : 1;
+		u64 thresh_int_ena                   : 1;
+		u64 thresh_up                        : 1;
+		u64 reserved_555                     : 1;
+		u64 thresh_qint_idx                  : 7;
+		u64 reserved_563                     : 1;
+		u64 err_qint_idx                     : 7;
+		u64 reserved_571_575                 : 5;
+		u64 thresh                           : 36;
+		u64 reserved_612_639                 : 28;
+		u64 reserved_640_703                 : 64;
+		u64 reserved_704_767                 : 64;
+		u64 reserved_768_831                 : 64;
+		u64 reserved_832_895                 : 64;
+		u64 reserved_896_959                 : 64;
+		u64 reserved_960_1023                : 64;
+	} s;
+	/* struct npa_pool_s_s cn; */
+};
+
+/**
+ * Structure npa_qint_hw_s
+ *
+ * NPA Queue Interrupt Context Hardware Structure This structure contains
+ * context state maintained by hardware for each queue interrupt (QINT)
+ * in NDC/LLC/DRAM. Software accesses this structure with the
+ * NPA_LF_QINT()_* registers. Hardware maintains a table of
+ * NPA_AF_CONST[QINTS] contiguous NPA_QINT_HW_S structures per LF
+ * starting at IOVA NPA_AF_LF()_QINTS_BASE. Always stored in byte
+ * invariant little-endian format (LE8).
+ */
+union npa_qint_hw_s {
+	u32 u;	/* raw 32-bit image */
+	struct npa_qint_hw_s_s {
+		u32 count                            : 22;
+		u32 reserved_22_30                   : 9;
+		u32 ena                              : 1;
+	} s;
+	/* struct npa_qint_hw_s_s cn; */
+};
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_active_cycles_pc
+ *
+ * NPA AF Active Cycles Register
+ */
+union npa_af_active_cycles_pc {
+	u64 u;
+	struct npa_af_active_cycles_pc_s {
+		u64 act_cyc                          : 64;
+	} s;
+	/* struct npa_af_active_cycles_pc_s cn; */
+};
+
+static inline u64 NPA_AF_ACTIVE_CYCLES_PC(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_ACTIVE_CYCLES_PC(void)
+{
+	return 0xf0;	/* register byte offset within RVU PF BAR0 */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_base
+ *
+ * NPA AF Admin Queue Base Address Register
+ */
+union npa_af_aq_base {
+	u64 u;
+	struct npa_af_aq_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 base_addr                        : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct npa_af_aq_base_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_BASE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_BASE(void)
+{
+	return 0x610;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_cfg
+ *
+ * NPA AF Admin Queue Configuration Register
+ */
+union npa_af_aq_cfg {
+	u64 u;
+	struct npa_af_aq_cfg_s {
+		u64 qsize                            : 4;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct npa_af_aq_cfg_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_CFG(void)
+{
+	return 0x600;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_done
+ *
+ * NPA AF AQ Done Count Register
+ */
+union npa_af_aq_done {
+	u64 u;
+	struct npa_af_aq_done_s {
+		u64 done                             : 20;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npa_af_aq_done_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DONE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DONE(void)
+{
+	return 0x650;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_done_ack
+ *
+ * NPA AF AQ Done Count Ack Register This register is written by software
+ * to acknowledge interrupts.
+ */
+union npa_af_aq_done_ack {
+	u64 u;
+	struct npa_af_aq_done_ack_s {
+		u64 done_ack                         : 20;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npa_af_aq_done_ack_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DONE_ACK(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DONE_ACK(void)
+{
+	return 0x660;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_done_ena_w1c
+ *
+ * NPA AF AQ Done Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union npa_af_aq_done_ena_w1c {
+	u64 u;
+	/* Single [DONE] bit; same layout as the W1S/INT companions below */
+	struct npa_af_aq_done_ena_w1c_s {
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_af_aq_done_ena_w1c_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DONE_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DONE_ENA_W1C(void)
+{
+	return 0x698;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_done_ena_w1s
+ *
+ * NPA AF AQ Done Interrupt Enable Set Register This register sets
+ * interrupt enable bits.
+ */
+union npa_af_aq_done_ena_w1s {
+	u64 u;
+	struct npa_af_aq_done_ena_w1s_s {
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_af_aq_done_ena_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DONE_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DONE_ENA_W1S(void)
+{
+	return 0x690;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_done_int
+ *
+ * NPA AF AQ Done Interrupt Register
+ */
+union npa_af_aq_done_int {
+	u64 u;
+	struct npa_af_aq_done_int_s {
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_af_aq_done_int_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DONE_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DONE_INT(void)
+{
+	return 0x680;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_done_int_w1s
+ *
+ * INTERNAL: NPA AF AQ Done Interrupt Set Register
+ */
+union npa_af_aq_done_int_w1s {
+	u64 u;
+	struct npa_af_aq_done_int_w1s_s {
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_af_aq_done_int_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DONE_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DONE_INT_W1S(void)
+{
+	return 0x688;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_done_timer
+ *
+ * NPA AF Admin Queue Done Interrupt Timer Register Used to debug the
+ * queue interrupt coalescing timer.
+ */
+union npa_af_aq_done_timer {
+	u64 u;
+	struct npa_af_aq_done_timer_s {
+		u64 count                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct npa_af_aq_done_timer_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DONE_TIMER(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DONE_TIMER(void)
+{
+	return 0x670;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_done_wait
+ *
+ * NPA AF AQ Done Interrupt Coalescing Wait Register Specifies the queue
+ * interrupt coalescing settings.
+ */
+union npa_af_aq_done_wait {
+	u64 u;
+	struct npa_af_aq_done_wait_s {
+		u64 num_wait                         : 20;
+		u64 reserved_20_31                   : 12;
+		u64 time_wait                        : 16;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct npa_af_aq_done_wait_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DONE_WAIT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DONE_WAIT(void)
+{
+	return 0x640;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_door
+ *
+ * NPA AF Admin Queue Doorbell Register Software writes to this register
+ * to enqueue one or more entries to AQ.
+ */
+union npa_af_aq_door {
+	u64 u;
+	struct npa_af_aq_door_s {
+		u64 count                            : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct npa_af_aq_door_s cn; */
+};
+
+static inline u64 NPA_AF_AQ_DOOR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_DOOR(void)
+{
+	return 0x630;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_aq_status
+ *
+ * NPA AF Admin Queue Status Register
+ */
+union npa_af_aq_status {
+	u64 u;
+	struct npa_af_aq_status_s {
+		u64 reserved_0_3                     : 4;
+		u64 head_ptr                         : 20;
+		u64 reserved_24_35                   : 12;
+		u64 tail_ptr                         : 20;
+		u64 reserved_56_61                   : 6;
+		u64 aq_busy                          : 1;
+		u64 aq_err                           : 1;
+	} s;
+	/* cn: identical to .s except reserved_24_35 is split at bit 32 */
+	struct npa_af_aq_status_cn {
+		u64 reserved_0_3                     : 4;
+		u64 head_ptr                         : 20;
+		u64 reserved_24_31                   : 8;
+		u64 reserved_32_35                   : 4;
+		u64 tail_ptr                         : 20;
+		u64 reserved_56_61                   : 6;
+		u64 aq_busy                          : 1;
+		u64 aq_err                           : 1;
+	} cn;
+};
+
+static inline u64 NPA_AF_AQ_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AQ_STATUS(void)
+{
+	return 0x620;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_avg_delay
+ *
+ * NPA AF Queue Average Delay Register
+ */
+union npa_af_avg_delay {
+	u64 u;
+	struct npa_af_avg_delay_s {
+		u64 avg_dly                          : 19;
+		u64 reserved_19_23                   : 5;
+		u64 avg_timer                        : 16;
+		u64 reserved_40_62                   : 23;
+		u64 avg_timer_dis                    : 1;
+	} s;
+	/* struct npa_af_avg_delay_s cn; */
+};
+
+static inline u64 NPA_AF_AVG_DELAY(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_AVG_DELAY(void)
+{
+	return 0x100;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_bar2_alias#
+ *
+ * INTERNAL: NPA Admin Function  BAR2 Alias Registers  These registers
+ * alias to the NPA BAR2 registers for the PF and function selected by
+ * NPA_AF_BAR2_SEL[PF_FUNC].  Internal: Not implemented. Placeholder for
+ * bug33464.
+ */
+union npa_af_bar2_aliasx {
+	u64 u;
+	struct npa_af_bar2_aliasx_s {
+		u64 data                             : 64;
+	} s;
+	/* struct npa_af_bar2_aliasx_s cn; */
+};
+
+static inline u64 NPA_AF_BAR2_ALIASX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_BAR2_ALIASX(u64 a)
+{
+	return 0x9100000 + 8 * a;	/* 8-byte stride per aliased register */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_bar2_sel
+ *
+ * INTERNAL: NPA Admin Function BAR2 Select Register  This register
+ * configures BAR2 accesses from the NPA_AF_BAR2_ALIAS() registers in
+ * BAR0. Internal: Not implemented. Placeholder for bug33464.
+ */
+union npa_af_bar2_sel {
+	u64 u;
+	struct npa_af_bar2_sel_s {
+		u64 alias_pf_func                    : 16;
+		u64 alias_ena                        : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct npa_af_bar2_sel_s cn; */
+};
+
+static inline u64 NPA_AF_BAR2_SEL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_BAR2_SEL(void)
+{
+	return 0x9000000;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_blk_rst
+ *
+ * NPA AF Block Reset Register
+ */
+union npa_af_blk_rst {
+	u64 u;
+	struct npa_af_blk_rst_s {
+		u64 rst                              : 1;
+		u64 reserved_1_62                    : 62;
+		u64 busy                             : 1;
+	} s;
+	/* struct npa_af_blk_rst_s cn; */
+};
+
+static inline u64 NPA_AF_BLK_RST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_BLK_RST(void)
+{
+	return 0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_bp_test
+ *
+ * INTERNAL: NPA AF Backpressure Test Register
+ */
+union npa_af_bp_test {
+	u64 u;
+	struct npa_af_bp_test_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 32;
+		u64 enable                           : 16;
+	} s;
+	/* struct npa_af_bp_test_s cn; */
+};
+
+static inline u64 NPA_AF_BP_TEST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_BP_TEST(void)
+{
+	return 0x200;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_const
+ *
+ * NPA AF Constants Register This register contains constants for
+ * software discovery.
+ */
+union npa_af_const {
+	u64 u;
+	struct npa_af_const_s {
+		u64 stack_page_bytes                 : 8;
+		u64 stack_page_ptrs                  : 8;
+		u64 lfs                              : 12;
+		u64 qints                            : 12;
+		u64 num_ndc                          : 3;
+		u64 reserved_43_63                   : 21;
+	} s;
+	/* struct npa_af_const_s cn; */
+};
+
+static inline u64 NPA_AF_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_CONST(void)
+{
+	return 0x10;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_const1
+ *
+ * NPA AF Constants Register 1 This register contains constants for
+ * software discovery.
+ */
+union npa_af_const1 {
+	u64 u;
+	struct npa_af_const1_s {
+		u64 aura_log2bytes                   : 4;
+		u64 pool_log2bytes                   : 4;
+		u64 qint_log2bytes                   : 4;	/* log2 bytes of an NPA_QINT_HW_S context entry (see npa_af_lf#_qints_cfg) */
+		u64 reserved_12_63                   : 52;
+	} s;
+	/* struct npa_af_const1_s cn; */
+};
+
+static inline u64 NPA_AF_CONST1(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_CONST1(void)
+{
+	return 0x18;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_dtx_filter_ctl
+ *
+ * NPA AF DTX LF Filter Control Register
+ */
+union npa_af_dtx_filter_ctl {
+	u64 u;
+	struct npa_af_dtx_filter_ctl_s {
+		u64 ena                              : 1;
+		u64 reserved_1_3                     : 3;
+		u64 lf                               : 7;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct npa_af_dtx_filter_ctl_s cn; */
+};
+
+static inline u64 NPA_AF_DTX_FILTER_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_DTX_FILTER_CTL(void)
+{
+	return 0x10040;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_eco
+ *
+ * INTERNAL: NPA AF ECO Register
+ */
+union npa_af_eco {
+	u64 u;
+	struct npa_af_eco_s {
+		u64 eco_rw                           : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct npa_af_eco_s cn; */
+};
+
+static inline u64 NPA_AF_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_ECO(void)
+{
+	return 0x300;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_err_int
+ *
+ * NPA Admin Function Error Interrupt Register
+ */
+union npa_af_err_int {
+	u64 u;
+	struct npa_af_err_int_s {
+		u64 reserved_0_11                    : 12;
+		u64 aq_door_err                      : 1;
+		u64 aq_res_fault                     : 1;
+		u64 aq_inst_fault                    : 1;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct npa_af_err_int_s cn; */
+};
+
+static inline u64 NPA_AF_ERR_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_ERR_INT(void)
+{
+	return 0x180;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_err_int_ena_w1c
+ *
+ * NPA Admin Function Error Interrupt Enable Clear Register This register
+ * clears interrupt enable bits.
+ */
+union npa_af_err_int_ena_w1c {
+	u64 u;
+	/* Same bit layout as NPA_AF_ERR_INT */
+	struct npa_af_err_int_ena_w1c_s {
+		u64 reserved_0_11                    : 12;
+		u64 aq_door_err                      : 1;
+		u64 aq_res_fault                     : 1;
+		u64 aq_inst_fault                    : 1;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct npa_af_err_int_ena_w1c_s cn; */
+};
+
+static inline u64 NPA_AF_ERR_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_ERR_INT_ENA_W1C(void)
+{
+	return 0x198;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_err_int_ena_w1s
+ *
+ * NPA Admin Function Error Interrupt Enable Set Register This register
+ * sets interrupt enable bits.
+ */
+union npa_af_err_int_ena_w1s {
+	u64 u;
+	struct npa_af_err_int_ena_w1s_s {
+		u64 reserved_0_11                    : 12;
+		u64 aq_door_err                      : 1;
+		u64 aq_res_fault                     : 1;
+		u64 aq_inst_fault                    : 1;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct npa_af_err_int_ena_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_ERR_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_ERR_INT_ENA_W1S(void)
+{
+	return 0x190;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_err_int_w1s
+ *
+ * NPA Admin Function Error Interrupt Set Register This register sets
+ * interrupt bits.
+ */
+union npa_af_err_int_w1s {
+	u64 u;
+	struct npa_af_err_int_w1s_s {
+		u64 reserved_0_11                    : 12;
+		u64 aq_door_err                      : 1;
+		u64 aq_res_fault                     : 1;
+		u64 aq_inst_fault                    : 1;
+		u64 reserved_15_63                   : 49;
+	} s;
+	/* struct npa_af_err_int_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_ERR_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_ERR_INT_W1S(void)
+{
+	return 0x188;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_gen_cfg
+ *
+ * NPA AF General Configuration Register This register provides NPA
+ * control and status information.
+ */
+union npa_af_gen_cfg {
+	u64 u;
+	struct npa_af_gen_cfg_s {
+		u64 reserved_0                       : 1;
+		u64 af_be                            : 1;
+		u64 reserved_2                       : 1;
+		u64 force_cond_clk_en                : 1;
+		u64 force_intf_clk_en                : 1;
+		u64 reserved_5_9                     : 5;
+		u64 ocla_bp                          : 1;
+		u64 reserved_11                      : 1;
+		u64 ratem1                           : 4;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct npa_af_gen_cfg_s cn; */
+};
+
+static inline u64 NPA_AF_GEN_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_GEN_CFG(void)
+{
+	return 0x30;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_gen_int
+ *
+ * NPA AF General Interrupt Register This register contains general error
+ * interrupt summary bits.
+ */
+union npa_af_gen_int {
+	u64 u;
+	struct npa_af_gen_int_s {
+		u64 free_dis                         : 16;
+		u64 alloc_dis                        : 16;
+		u64 unmapped_pf_func                 : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct npa_af_gen_int_s cn; */
+};
+
+static inline u64 NPA_AF_GEN_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_GEN_INT(void)
+{
+	return 0x140;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_gen_int_ena_w1c
+ *
+ * NPA AF General Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union npa_af_gen_int_ena_w1c {
+	u64 u;
+	/* Same bit layout as NPA_AF_GEN_INT */
+	struct npa_af_gen_int_ena_w1c_s {
+		u64 free_dis                         : 16;
+		u64 alloc_dis                        : 16;
+		u64 unmapped_pf_func                 : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct npa_af_gen_int_ena_w1c_s cn; */
+};
+
+static inline u64 NPA_AF_GEN_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_GEN_INT_ENA_W1C(void)
+{
+	return 0x158;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_gen_int_ena_w1s
+ *
+ * NPA AF General Interrupt Enable Set Register This register sets
+ * interrupt enable bits.
+ */
+union npa_af_gen_int_ena_w1s {
+	u64 u;
+	struct npa_af_gen_int_ena_w1s_s {
+		u64 free_dis                         : 16;
+		u64 alloc_dis                        : 16;
+		u64 unmapped_pf_func                 : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct npa_af_gen_int_ena_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_GEN_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_GEN_INT_ENA_W1S(void)
+{
+	return 0x150;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_gen_int_w1s
+ *
+ * NPA AF General Interrupt Set Register This register sets interrupt
+ * bits.
+ */
+union npa_af_gen_int_w1s {
+	u64 u;
+	struct npa_af_gen_int_w1s_s {
+		u64 free_dis                         : 16;
+		u64 alloc_dis                        : 16;
+		u64 unmapped_pf_func                 : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct npa_af_gen_int_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_GEN_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_GEN_INT_W1S(void)
+{
+	return 0x148;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_inp_ctl
+ *
+ * NPA AF Input Control Register
+ */
+union npa_af_inp_ctl {
+	u64 u;
+	struct npa_af_inp_ctl_s {
+		u64 free_dis                         : 16;
+		u64 alloc_dis                        : 16;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct npa_af_inp_ctl_s cn; */
+};
+
+static inline u64 NPA_AF_INP_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_INP_CTL(void)
+{
+	return 0xd0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_lf#_auras_cfg
+ *
+ * NPA AF Local Function Auras Configuration Registers
+ */
+union npa_af_lfx_auras_cfg {
+	u64 u;
+	struct npa_af_lfx_auras_cfg_s {
+		u64 way_mask                         : 16;
+		u64 loc_aura_size                    : 4;
+		u64 loc_aura_offset                  : 14;
+		u64 caching                          : 1;
+		u64 be                               : 1;
+		u64 rmt_aura_size                    : 4;
+		u64 rmt_aura_offset                  : 14;
+		u64 rmt_lf                           : 7;
+		u64 reserved_61_63                   : 3;
+	} s;
+	/* cn96xx pass 1: bit 35 ([BE] in .s) is reserved */
+	struct npa_af_lfx_auras_cfg_cn96xxp1 {
+		u64 way_mask                         : 16;
+		u64 loc_aura_size                    : 4;
+		u64 loc_aura_offset                  : 14;
+		u64 caching                          : 1;
+		u64 reserved_35                      : 1;
+		u64 rmt_aura_size                    : 4;
+		u64 rmt_aura_offset                  : 14;
+		u64 rmt_lf                           : 7;
+		u64 reserved_61_63                   : 3;
+	} cn96xxp1;
+	/* struct npa_af_lfx_auras_cfg_s cn96xxp3; */
+	/* struct npa_af_lfx_auras_cfg_s cnf95xx; */
+};
+
+static inline u64 NPA_AF_LFX_AURAS_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_LFX_AURAS_CFG(u64 a)
+{
+	return 0x4000 + 0x40000 * a;	/* 0x40000-byte stride per LF */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_lf#_loc_auras_base
+ *
+ * NPA AF Local Function Auras Base Registers
+ */
+union npa_af_lfx_loc_auras_base {
+	u64 u;
+	struct npa_af_lfx_loc_auras_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct npa_af_lfx_loc_auras_base_s cn; */
+};
+
+static inline u64 NPA_AF_LFX_LOC_AURAS_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_LFX_LOC_AURAS_BASE(u64 a)
+{
+	return 0x4010 + 0x40000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_lf#_qints_base
+ *
+ * NPA AF Local Function Queue Interrupts Base Registers
+ */
+union npa_af_lfx_qints_base {
+	u64 u;
+	struct npa_af_lfx_qints_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct npa_af_lfx_qints_base_s cn; */
+};
+
+static inline u64 NPA_AF_LFX_QINTS_BASE(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_LFX_QINTS_BASE(u64 a)
+{
+	return 0x4110 + 0x40000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_lf#_qints_cfg
+ *
+ * NPA AF Local Function Queue Interrupts Configuration Registers This
+ * register controls access to the LF's queue interrupt context table in
+ * LLC/DRAM. The table consists of NPA_AF_CONST[QINTS] contiguous
+ * NPA_QINT_HW_S structures. The size of each structure is 1 \<\<
+ * NPA_AF_CONST1[QINT_LOG2BYTES] bytes.
+ */
+union npa_af_lfx_qints_cfg {
+	u64 u;
+	struct npa_af_lfx_qints_cfg_s {
+		u64 reserved_0_19                    : 20;
+		u64 way_mask                         : 16;
+		u64 caching                          : 2;
+		u64 reserved_38_63                   : 26;
+	} s;
+	/* struct npa_af_lfx_qints_cfg_s cn; */
+};
+
+static inline u64 NPA_AF_LFX_QINTS_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_LFX_QINTS_CFG(u64 a)
+{
+	return 0x4100 + 0x40000 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_lf_rst
+ *
+ * NPA Admin Function LF Reset Register
+ */
+union npa_af_lf_rst {
+	u64 u;
+	struct npa_af_lf_rst_s {
+		u64 lf                               : 8;
+		u64 reserved_8_11                    : 4;
+		u64 exec                             : 1;
+		u64 reserved_13_63                   : 51;
+	} s;
+	/* struct npa_af_lf_rst_s cn; */
+};
+
+static inline u64 NPA_AF_LF_RST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_LF_RST(void)
+{
+	return 0x20;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_ndc_cfg
+ *
+ * NDC AF General Configuration Register This register provides NDC
+ * control.
+ */
+union npa_af_ndc_cfg {
+	u64 u;
+	struct npa_af_ndc_cfg_s {
+		u64 ndc_bypass                       : 1;
+		u64 ndc_ign_pois                     : 1;
+		u64 byp_aura                         : 1;
+		u64 byp_pool                         : 1;
+		u64 byp_stack                        : 1;
+		u64 byp_qint                         : 1;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct npa_af_ndc_cfg_s cn; */
+};
+
+static inline u64 NPA_AF_NDC_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_NDC_CFG(void)
+{
+	return 0x40;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_ndc_sync
+ *
+ * NPA AF NDC Sync Register Used to synchronize the NPA NDC.
+ */
+union npa_af_ndc_sync {
+	u64 u;
+	/* Same lf/exec field layout as NPA_AF_LF_RST */
+	struct npa_af_ndc_sync_s {
+		u64 lf                               : 8;
+		u64 reserved_8_11                    : 4;
+		u64 exec                             : 1;
+		u64 reserved_13_63                   : 51;
+	} s;
+	/* struct npa_af_ndc_sync_s cn; */
+};
+
+static inline u64 NPA_AF_NDC_SYNC(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_NDC_SYNC(void)
+{
+	return 0x50;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_ras
+ *
+ * NPA AF RAS Interrupt Register This register is intended for delivery
+ * of RAS events to the SCP, so should be ignored by OS drivers.
+ */
+union npa_af_ras {
+	u64 u;
+	struct npa_af_ras_s {
+		u64 reserved_0_31                    : 32;
+		u64 aq_ctx_poison                    : 1;
+		u64 aq_res_poison                    : 1;
+		u64 aq_inst_poison                   : 1;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct npa_af_ras_s cn; */
+};
+
+static inline u64 NPA_AF_RAS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RAS(void)
+{
+	return 0x1a0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_ras_ena_w1c
+ *
+ * NPA AF RAS Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union npa_af_ras_ena_w1c {
+	u64 u;
+	/* Same bit layout as NPA_AF_RAS */
+	struct npa_af_ras_ena_w1c_s {
+		u64 reserved_0_31                    : 32;
+		u64 aq_ctx_poison                    : 1;
+		u64 aq_res_poison                    : 1;
+		u64 aq_inst_poison                   : 1;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct npa_af_ras_ena_w1c_s cn; */
+};
+
+static inline u64 NPA_AF_RAS_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RAS_ENA_W1C(void)
+{
+	return 0x1b8;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_ras_ena_w1s
+ *
+ * NPA AF RAS Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union npa_af_ras_ena_w1s {
+	u64 u;
+	struct npa_af_ras_ena_w1s_s {
+		u64 reserved_0_31                    : 32;
+		u64 aq_ctx_poison                    : 1;
+		u64 aq_res_poison                    : 1;
+		u64 aq_inst_poison                   : 1;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct npa_af_ras_ena_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_RAS_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RAS_ENA_W1S(void)
+{
+	return 0x1b0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_ras_w1s
+ *
+ * NPA AF RAS Interrupt Set Register This register sets interrupt bits.
+ */
+union npa_af_ras_w1s {
+	u64 u;
+	struct npa_af_ras_w1s_s {
+		u64 reserved_0_31                    : 32;
+		u64 aq_ctx_poison                    : 1;
+		u64 aq_res_poison                    : 1;
+		u64 aq_inst_poison                   : 1;
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct npa_af_ras_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_RAS_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RAS_W1S(void)
+{
+	return 0x1a8;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_rvu_int
+ *
+ * NPA AF RVU Interrupt Register This register contains RVU error
+ * interrupt summary bits.
+ */
+union npa_af_rvu_int {
+	u64 u;
+	struct npa_af_rvu_int_s {
+		u64 unmapped_slot                    : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_af_rvu_int_s cn; */
+};
+
+static inline u64 NPA_AF_RVU_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RVU_INT(void)
+{
+	return 0x160;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_rvu_int_ena_w1c
+ *
+ * NPA AF RVU Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union npa_af_rvu_int_ena_w1c {
+	u64 u;
+	struct npa_af_rvu_int_ena_w1c_s {
+		u64 unmapped_slot                    : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_af_rvu_int_ena_w1c_s cn; */
+};
+
+static inline u64 NPA_AF_RVU_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RVU_INT_ENA_W1C(void)
+{
+	return 0x178;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_rvu_int_ena_w1s
+ *
+ * NPA AF RVU Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union npa_af_rvu_int_ena_w1s {
+	u64 u;
+	struct npa_af_rvu_int_ena_w1s_s {
+		u64 unmapped_slot                    : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_af_rvu_int_ena_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_RVU_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RVU_INT_ENA_W1S(void)
+{
+	return 0x170;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_rvu_int_w1s
+ *
+ * NPA AF RVU Interrupt Set Register This register sets interrupt bits.
+ */
+union npa_af_rvu_int_w1s {
+	u64 u;
+	struct npa_af_rvu_int_w1s_s {
+		u64 unmapped_slot                    : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_af_rvu_int_w1s_s cn; */
+};
+
+static inline u64 NPA_AF_RVU_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RVU_INT_W1S(void)
+{
+	return 0x168;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_af_rvu_lf_cfg_debug
+ *
+ * NPA Privileged LF Configuration Debug Register This debug register
+ * allows software to lookup the reverse mapping from VF/PF slot to LF.
+ * The forward mapping is programmed with NPA_PRIV_LF()_CFG.
+ */
+union npa_af_rvu_lf_cfg_debug {
+	u64 u;
+	struct npa_af_rvu_lf_cfg_debug_s {
+		u64 lf                               : 12;
+		u64 lf_valid                         : 1;
+		u64 exec                             : 1;
+		u64 reserved_14_15                   : 2;
+		u64 slot                             : 8;
+		u64 pf_func                          : 16;
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct npa_af_rvu_lf_cfg_debug_s cn; */
+};
+
+static inline u64 NPA_AF_RVU_LF_CFG_DEBUG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_AF_RVU_LF_CFG_DEBUG(void)
+{
+	return 0x10030;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_aura_op_alloc#
+ *
+ * NPA Aura Allocate Operation Registers These registers are used to
+ * allocate one or two pointers from a given aura's pool. A 64-bit atomic
+ * load-and-add to NPA_LF_AURA_OP_ALLOC(0) allocates a single pointer. A
+ * 128-bit atomic CASP operation to NPA_LF_AURA_OP_ALLOC(0..1) allocates
+ * two pointers. The atomic write data format is NPA_AURA_OP_WDATA_S. For
+ * CASP, the first SWAP word in the write data contains
+ * NPA_AURA_OP_WDATA_S and the remaining write data words are ignored.
+ * All other accesses to this register (e.g. reads and writes) are
+ * RAZ/WI.  RSL accesses to this register are RAZ/WI.
+ */
+union npa_lf_aura_op_allocx {
+	u64 u;
+	struct npa_lf_aura_op_allocx_s {
+		u64 addr                             : 64;
+	} s;
+	/* struct npa_lf_aura_op_allocx_s cn; */
+};
+
+static inline u64 NPA_LF_AURA_OP_ALLOCX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_AURA_OP_ALLOCX(u64 a)
+{
+	return 0x10 + 8 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_aura_op_cnt
+ *
+ * NPA LF Aura Count Register A 64-bit atomic load-and-add to this
+ * register returns a given aura's count. A write sets or adds the aura's
+ * count. A read is RAZ.  RSL accesses to this register are RAZ/WI.
+ */
+union npa_lf_aura_op_cnt {
+	u64 u;
+	struct npa_lf_aura_op_cnt_s {
+		u64 count                            : 36;
+		u64 reserved_36_41                   : 6;
+		u64 op_err                           : 1;
+		u64 cnt_add                          : 1;
+		u64 aura                             : 20;
+	} s;
+	/* struct npa_lf_aura_op_cnt_s cn; */
+};
+
+static inline u64 NPA_LF_AURA_OP_CNT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_AURA_OP_CNT(void)
+{
+	return 0x30;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_aura_op_free0
+ *
+ * NPA LF Aura Free Operation Register 0 A 128-bit write (STP) to
+ * NPA_LF_AURA_OP_FREE0 and NPA_LF_AURA_OP_FREE1 frees a pointer into a
+ * given aura's pool. All other accesses to these registers (e.g. reads
+ * and 64-bit writes) are RAZ/WI.  RSL accesses to this register are
+ * RAZ/WI.
+ */
+union npa_lf_aura_op_free0 {
+	u64 u;
+	struct npa_lf_aura_op_free0_s {
+		u64 addr                             : 64;
+	} s;
+	/* struct npa_lf_aura_op_free0_s cn; */
+};
+
+static inline u64 NPA_LF_AURA_OP_FREE0(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_AURA_OP_FREE0(void)
+{
+	return 0x20;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_aura_op_free1
+ *
+ * NPA LF Aura Free Operation Register 1 See NPA_LF_AURA_OP_FREE0.  RSL
+ * accesses to this register are RAZ/WI.
+ */
+union npa_lf_aura_op_free1 {
+	u64 u;
+	struct npa_lf_aura_op_free1_s {
+		u64 aura                             : 20;
+		u64 reserved_20_62                   : 43;
+		u64 fabs                             : 1;
+	} s;
+	/* struct npa_lf_aura_op_free1_s cn; */
+};
+
+static inline u64 NPA_LF_AURA_OP_FREE1(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_AURA_OP_FREE1(void)
+{
+	return 0x28;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_aura_op_int
+ *
+ * NPA LF Aura Interrupt Operation Register A 64-bit atomic load-and-add
+ * to this register reads
+ * NPA_AURA_HW_S[ERR_INT,ERR_INT_ENA,THRESH_INT,THRESH_INT_ENA]. A write
+ * optionally sets or clears these fields. A read is RAZ.  RSL accesses
+ * to this register are RAZ/WI.
+ */
+union npa_lf_aura_op_int {
+	u64 u;
+	struct npa_lf_aura_op_int_s {
+		u64 err_int                          : 8;
+		u64 err_int_ena                      : 8;
+		u64 thresh_int                       : 1;
+		u64 thresh_int_ena                   : 1;
+		u64 reserved_18_41                   : 24;
+		u64 op_err                           : 1;
+		u64 setop                            : 1;
+		u64 aura                             : 20;
+	} s;
+	/* struct npa_lf_aura_op_int_s cn; */
+};
+
+static inline u64 NPA_LF_AURA_OP_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_AURA_OP_INT(void)
+{
+	return 0x60;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_aura_op_limit
+ *
+ * NPA LF Aura Allocation Limit Register A 64-bit atomic load-and-add to
+ * this register returns a given aura's limit. A write sets the aura's
+ * limit. A read is RAZ.  RSL accesses to this register are RAZ/WI.
+ */
+union npa_lf_aura_op_limit {
+	u64 u;
+	struct npa_lf_aura_op_limit_s {
+		u64 limit                            : 36;
+		u64 reserved_36_41                   : 6;
+		u64 op_err                           : 1;
+		u64 reserved_43                      : 1;
+		u64 aura                             : 20;
+	} s;
+	/* struct npa_lf_aura_op_limit_s cn; */
+};
+
+static inline u64 NPA_LF_AURA_OP_LIMIT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_AURA_OP_LIMIT(void)
+{
+	return 0x50;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_aura_op_thresh
+ *
+ * NPA LF Aura Threshold Operation Register A 64-bit atomic load-and-add
+ * to this register reads NPA_AURA_HW_S[THRESH_UP,THRESH]. A write to the
+ * register writes NPA_AURA_HW_S[THRESH_UP,THRESH] and recomputes
+ * NPA_AURA_HW_S[THRESH_INT]. A read is RAZ.  RSL accesses to this
+ * register are RAZ/WI.
+ */
+union npa_lf_aura_op_thresh {
+	u64 u;
+	struct npa_lf_aura_op_thresh_s {
+		u64 thresh                           : 36;
+		u64 reserved_36_41                   : 6;
+		u64 op_err                           : 1;
+		u64 thresh_up                        : 1;
+		u64 aura                             : 20;
+	} s;
+	/* struct npa_lf_aura_op_thresh_s cn; */
+};
+
+static inline u64 NPA_LF_AURA_OP_THRESH(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_AURA_OP_THRESH(void)
+{
+	return 0x70;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_err_int
+ *
+ * NPA LF Error Interrupt Register
+ */
+union npa_lf_err_int {
+	u64 u;
+	struct npa_lf_err_int_s {
+		u64 aura_dis                         : 1;
+		u64 aura_oor                         : 1;
+		u64 reserved_2                       : 1;
+		u64 rmt_req_oor                      : 1;
+		u64 reserved_4_11                    : 8;
+		u64 aura_fault                       : 1;
+		u64 pool_fault                       : 1;
+		u64 stack_fault                      : 1;
+		u64 qint_fault                       : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct npa_lf_err_int_s cn; */
+};
+
+static inline u64 NPA_LF_ERR_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_ERR_INT(void)
+{
+	return 0x200;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_err_int_ena_w1c
+ *
+ * NPA LF Error Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union npa_lf_err_int_ena_w1c {
+	u64 u;
+	struct npa_lf_err_int_ena_w1c_s {
+		u64 aura_dis                         : 1;
+		u64 aura_oor                         : 1;
+		u64 reserved_2                       : 1;
+		u64 rmt_req_oor                      : 1;
+		u64 reserved_4_11                    : 8;
+		u64 aura_fault                       : 1;
+		u64 pool_fault                       : 1;
+		u64 stack_fault                      : 1;
+		u64 qint_fault                       : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct npa_lf_err_int_ena_w1c_s cn; */
+};
+
+static inline u64 NPA_LF_ERR_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_ERR_INT_ENA_W1C(void)
+{
+	return 0x210;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_err_int_ena_w1s
+ *
+ * NPA LF Error Interrupt Enable Set Register This register sets
+ * interrupt enable bits.
+ */
+union npa_lf_err_int_ena_w1s {
+	u64 u;
+	struct npa_lf_err_int_ena_w1s_s {
+		u64 aura_dis                         : 1;
+		u64 aura_oor                         : 1;
+		u64 reserved_2                       : 1;
+		u64 rmt_req_oor                      : 1;
+		u64 reserved_4_11                    : 8;
+		u64 aura_fault                       : 1;
+		u64 pool_fault                       : 1;
+		u64 stack_fault                      : 1;
+		u64 qint_fault                       : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct npa_lf_err_int_ena_w1s_s cn; */
+};
+
+static inline u64 NPA_LF_ERR_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_ERR_INT_ENA_W1S(void)
+{
+	return 0x218;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_err_int_w1s
+ *
+ * NPA LF Error Interrupt Set Register This register sets interrupt bits.
+ */
+union npa_lf_err_int_w1s {
+	u64 u;
+	struct npa_lf_err_int_w1s_s {
+		u64 aura_dis                         : 1;
+		u64 aura_oor                         : 1;
+		u64 reserved_2                       : 1;
+		u64 rmt_req_oor                      : 1;
+		u64 reserved_4_11                    : 8;
+		u64 aura_fault                       : 1;
+		u64 pool_fault                       : 1;
+		u64 stack_fault                      : 1;
+		u64 qint_fault                       : 1;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct npa_lf_err_int_w1s_s cn; */
+};
+
+static inline u64 NPA_LF_ERR_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_ERR_INT_W1S(void)
+{
+	return 0x208;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_pool_op_available
+ *
+ * NPA LF Pool Available Count Operation Register A 64-bit atomic load-
+ * and-add to this register returns a given pool's free pointer count.
+ * Reads and writes are RAZ/WI.  RSL accesses to this register are
+ * RAZ/WI.
+ */
+union npa_lf_pool_op_available {
+	u64 u;
+	struct npa_lf_pool_op_available_s {
+		u64 count                            : 36;
+		u64 reserved_36_41                   : 6;
+		u64 op_err                           : 1;
+		u64 reserved_43                      : 1;
+		u64 aura                             : 20;
+	} s;
+	/* struct npa_lf_pool_op_available_s cn; */
+};
+
+static inline u64 NPA_LF_POOL_OP_AVAILABLE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_POOL_OP_AVAILABLE(void)
+{
+	return 0x110;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_pool_op_int
+ *
+ * NPA LF Pool Interrupt Operation Register A 64-bit atomic load-and-add
+ * to this register reads
+ * NPA_POOL_S[ERR_INT,ERR_INT_ENA,THRESH_INT,THRESH_INT_ENA]. A write
+ * optionally sets or clears these fields. A read is RAZ.  RSL accesses
+ * to this register are RAZ/WI.
+ */
+union npa_lf_pool_op_int {
+	u64 u;
+	struct npa_lf_pool_op_int_s {
+		u64 err_int                          : 8;
+		u64 err_int_ena                      : 8;
+		u64 thresh_int                       : 1;
+		u64 thresh_int_ena                   : 1;
+		u64 reserved_18_41                   : 24;
+		u64 op_err                           : 1;
+		u64 setop                            : 1;
+		u64 aura                             : 20;
+	} s;
+	/* struct npa_lf_pool_op_int_s cn; */
+};
+
+static inline u64 NPA_LF_POOL_OP_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_POOL_OP_INT(void)
+{
+	return 0x160;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_pool_op_pc
+ *
+ * NPA LF Pool Performance Count Register A 64-bit atomic load-and-add to
+ * this register reads NPA_POOL_S[OP_PC] from a given aura's pool. The
+ * aura is selected by the atomic write data, whose format is
+ * NPA_AURA_OP_WDATA_S. Reads and writes are RAZ/WI.  RSL accesses to
+ * this register are RAZ/WI.
+ */
+union npa_lf_pool_op_pc {
+	u64 u;
+	struct npa_lf_pool_op_pc_s {
+		u64 op_pc                            : 48;
+		u64 op_err                           : 1;
+		u64 reserved_49_63                   : 15;
+	} s;
+	/* struct npa_lf_pool_op_pc_s cn; */
+};
+
+static inline u64 NPA_LF_POOL_OP_PC(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_POOL_OP_PC(void)
+{
+	return 0x100;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_pool_op_ptr_end0
+ *
+ * NPA LF Pool Pointer End Operation Register 0 A 128-bit write (STP) to
+ * the NPA_LF_POOL_OP_PTR_END0 and NPA_LF_POOL_OP_PTR_END1 registers
+ * writes to a given pool's pointer end value. All other accesses to
+ * these registers (e.g. reads and 64-bit writes) are RAZ/WI.  RSL
+ * accesses to this register are RAZ/WI.
+ */
+union npa_lf_pool_op_ptr_end0 {
+	u64 u;
+	struct npa_lf_pool_op_ptr_end0_s {
+		u64 ptr_end                          : 64;
+	} s;
+	/* struct npa_lf_pool_op_ptr_end0_s cn; */
+};
+
+static inline u64 NPA_LF_POOL_OP_PTR_END0(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_POOL_OP_PTR_END0(void)
+{
+	return 0x130;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_pool_op_ptr_end1
+ *
+ * NPA LF Pool Pointer End Operation Register 1 See
+ * NPA_LF_POOL_OP_PTR_END0.  RSL accesses to this register are RAZ/WI.
+ */
+union npa_lf_pool_op_ptr_end1 {
+	u64 u;
+	struct npa_lf_pool_op_ptr_end1_s {
+		u64 aura                             : 20;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npa_lf_pool_op_ptr_end1_s cn; */
+};
+
+static inline u64 NPA_LF_POOL_OP_PTR_END1(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_POOL_OP_PTR_END1(void)
+{
+	return 0x138;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_pool_op_ptr_start0
+ *
+ * NPA LF Pool Pointer Start Operation Register 0 A 128-bit write (STP)
+ * to the NPA_LF_POOL_OP_PTR_START0 and NPA_LF_POOL_OP_PTR_START1
+ * registers writes to a given pool's pointer start value. All other
+ * accesses to these registers (e.g. reads and 64-bit writes) are RAZ/WI.
+ * RSL accesses to this register are RAZ/WI.
+ */
+union npa_lf_pool_op_ptr_start0 {
+	u64 u;
+	struct npa_lf_pool_op_ptr_start0_s {
+		u64 ptr_start                        : 64;
+	} s;
+	/* struct npa_lf_pool_op_ptr_start0_s cn; */
+};
+
+static inline u64 NPA_LF_POOL_OP_PTR_START0(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_POOL_OP_PTR_START0(void)
+{
+	return 0x120;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_pool_op_ptr_start1
+ *
+ * NPA LF Pool Pointer Start Operation Register 1 See
+ * NPA_LF_POOL_OP_PTR_START0.  RSL accesses to this register are RAZ/WI.
+ */
+union npa_lf_pool_op_ptr_start1 {
+	u64 u;
+	struct npa_lf_pool_op_ptr_start1_s {
+		u64 aura                             : 20;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npa_lf_pool_op_ptr_start1_s cn; */
+};
+
+static inline u64 NPA_LF_POOL_OP_PTR_START1(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_POOL_OP_PTR_START1(void)
+{
+	return 0x128;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_pool_op_thresh
+ *
+ * NPA LF Pool Threshold Operation Register A 64-bit atomic load-and-add
+ * to this register reads NPA_POOL_S[THRESH_UP,THRESH]. A write to the
+ * register writes NPA_POOL_S[THRESH_UP,THRESH]. A read is RAZ.  RSL
+ * accesses to this register are RAZ/WI.
+ */
+union npa_lf_pool_op_thresh {
+	u64 u;
+	struct npa_lf_pool_op_thresh_s {
+		u64 thresh                           : 36;
+		u64 reserved_36_41                   : 6;
+		u64 op_err                           : 1;
+		u64 thresh_up                        : 1;
+		u64 aura                             : 20;
+	} s;
+	/* struct npa_lf_pool_op_thresh_s cn; */
+};
+
+static inline u64 NPA_LF_POOL_OP_THRESH(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_POOL_OP_THRESH(void)
+{
+	return 0x170;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_qint#_cnt
+ *
+ * NPA LF Queue Interrupt Count Registers
+ */
+union npa_lf_qintx_cnt {
+	u64 u;
+	struct npa_lf_qintx_cnt_s {
+		u64 count                            : 22;
+		u64 reserved_22_63                   : 42;
+	} s;
+	/* struct npa_lf_qintx_cnt_s cn; */
+};
+
+static inline u64 NPA_LF_QINTX_CNT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_QINTX_CNT(u64 a)
+{
+	return 0x300 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_qint#_ena_w1c
+ *
+ * NPA LF Queue Interrupt Enable Clear Registers This register clears
+ * interrupt enable bits.
+ */
+union npa_lf_qintx_ena_w1c {
+	u64 u;
+	struct npa_lf_qintx_ena_w1c_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_lf_qintx_ena_w1c_s cn; */
+};
+
+static inline u64 NPA_LF_QINTX_ENA_W1C(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_QINTX_ENA_W1C(u64 a)
+{
+	return 0x330 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_qint#_ena_w1s
+ *
+ * NPA LF Queue Interrupt Enable Set Registers This register sets
+ * interrupt enable bits.
+ */
+union npa_lf_qintx_ena_w1s {
+	u64 u;
+	struct npa_lf_qintx_ena_w1s_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_lf_qintx_ena_w1s_s cn; */
+};
+
+static inline u64 NPA_LF_QINTX_ENA_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_QINTX_ENA_W1S(u64 a)
+{
+	return 0x320 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_qint#_int
+ *
+ * NPA LF Queue Interrupt Registers
+ */
+union npa_lf_qintx_int {
+	u64 u;
+	struct npa_lf_qintx_int_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_lf_qintx_int_s cn; */
+};
+
+static inline u64 NPA_LF_QINTX_INT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_QINTX_INT(u64 a)
+{
+	return 0x310 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_qint#_int_w1s
+ *
+ * INTERNAL: NPA LF Queue Interrupt Set Registers
+ */
+union npa_lf_qintx_int_w1s {
+	u64 u;
+	struct npa_lf_qintx_int_w1s_s {
+		u64 intr                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npa_lf_qintx_int_w1s_s cn; */
+};
+
+static inline u64 NPA_LF_QINTX_INT_W1S(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_QINTX_INT_W1S(u64 a)
+{
+	return 0x318 + 0x1000 * a;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_ras
+ *
+ * NPA LF RAS Interrupt Register
+ */
+union npa_lf_ras {
+	u64 u;
+	struct npa_lf_ras_s {
+		u64 aura_poison                      : 1;
+		u64 pool_poison                      : 1;
+		u64 stack_poison                     : 1;
+		u64 qint_poison                      : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct npa_lf_ras_s cn; */
+};
+
+static inline u64 NPA_LF_RAS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_RAS(void)
+{
+	return 0x220;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_ras_ena_w1c
+ *
+ * NPA LF RAS Interrupt Enable Clear Register This register clears
+ * interrupt enable bits.
+ */
+union npa_lf_ras_ena_w1c {
+	u64 u;
+	struct npa_lf_ras_ena_w1c_s {
+		u64 aura_poison                      : 1;
+		u64 pool_poison                      : 1;
+		u64 stack_poison                     : 1;
+		u64 qint_poison                      : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct npa_lf_ras_ena_w1c_s cn; */
+};
+
+static inline u64 NPA_LF_RAS_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_RAS_ENA_W1C(void)
+{
+	return 0x230;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_ras_ena_w1s
+ *
+ * NPA LF RAS Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union npa_lf_ras_ena_w1s {
+	u64 u;
+	struct npa_lf_ras_ena_w1s_s {
+		u64 aura_poison                      : 1;
+		u64 pool_poison                      : 1;
+		u64 stack_poison                     : 1;
+		u64 qint_poison                      : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct npa_lf_ras_ena_w1s_s cn; */
+};
+
+static inline u64 NPA_LF_RAS_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_RAS_ENA_W1S(void)
+{
+	return 0x238;
+}
+
+/**
+ * Register (RVU_PFVF_BAR2) npa_lf_ras_w1s
+ *
+ * NPA LF RAS Interrupt Set Register This register sets interrupt bits.
+ */
+union npa_lf_ras_w1s {
+	u64 u;
+	struct npa_lf_ras_w1s_s {
+		u64 aura_poison                      : 1;
+		u64 pool_poison                      : 1;
+		u64 stack_poison                     : 1;
+		u64 qint_poison                      : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct npa_lf_ras_w1s_s cn; */
+};
+
+static inline u64 NPA_LF_RAS_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_LF_RAS_W1S(void)
+{
+	return 0x228;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_priv_af_int_cfg
+ *
+ * NPA Privileged AF Interrupt Configuration Register
+ */
+union npa_priv_af_int_cfg {
+	u64 u;
+	struct npa_priv_af_int_cfg_s {
+		u64 msix_offset                      : 11;
+		u64 reserved_11                      : 1;
+		u64 msix_size                        : 8;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npa_priv_af_int_cfg_s cn; */
+};
+
+static inline u64 NPA_PRIV_AF_INT_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_PRIV_AF_INT_CFG(void)
+{
+	return 0x10000;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_priv_lf#_cfg
+ *
+ * NPA Privileged Local Function Configuration Registers These registers
+ * allow each NPA local function (LF) to be provisioned to a VF/PF slot
+ * for RVU. See also NPA_AF_RVU_LF_CFG_DEBUG.  Software should read this
+ * register after write to ensure that the LF is mapped to [PF_FUNC]
+ * before issuing transactions to the mapped PF and function.  [SLOT]
+ * must be zero.  Internal: Hardware ignores [SLOT] and always assumes
+ * 0x0.
+ */
+union npa_priv_lfx_cfg {
+	u64 u;
+	struct npa_priv_lfx_cfg_s {
+		u64 slot                             : 8;
+		u64 pf_func                          : 16;
+		u64 reserved_24_62                   : 39;
+		u64 ena                              : 1;
+	} s;
+	/* struct npa_priv_lfx_cfg_s cn; */
+};
+
+static inline u64 NPA_PRIV_LFX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_PRIV_LFX_CFG(u64 a)
+{
+	return 0x10010 + 0x100 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npa_priv_lf#_int_cfg
+ *
+ * NPA Privileged LF Interrupt Configuration Registers
+ */
+union npa_priv_lfx_int_cfg {
+	u64 u;
+	struct npa_priv_lfx_int_cfg_s {
+		u64 msix_offset                      : 11;
+		u64 reserved_11                      : 1;
+		u64 msix_size                        : 8;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npa_priv_lfx_int_cfg_s cn; */
+};
+
+static inline u64 NPA_PRIV_LFX_INT_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPA_PRIV_LFX_INT_CFG(u64 a)
+{
+	return 0x10020 + 0x100 * a;
+}
+
+#endif /* __CSRS_NPA_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/csrs/csrs-npc.h b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-npc.h
new file mode 100644
index 0000000..c1c4baa
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-npc.h
@@ -0,0 +1,1629 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_NPC_H__
+#define __CSRS_NPC_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * NPC.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Enumeration npc_errlev_e
+ *
+ * NPC Error Level Enumeration Enumerates the lowest protocol layer
+ * containing an error.
+ */
+#define NPC_ERRLEV_E_LA (1)
+#define NPC_ERRLEV_E_LB (2)
+#define NPC_ERRLEV_E_LC (3)
+#define NPC_ERRLEV_E_LD (4)
+#define NPC_ERRLEV_E_LE (5)
+#define NPC_ERRLEV_E_LF (6)
+#define NPC_ERRLEV_E_LG (7)
+#define NPC_ERRLEV_E_LH (8)
+#define NPC_ERRLEV_E_NIX (0xf)
+/* RX(a) parameterizes the receive error levels: by construction
+ * RX(0) == NPC_ERRLEV_E_RE and RX(1..8) == NPC_ERRLEV_E_LA..LH above.
+ */
+#define NPC_ERRLEV_E_RX(a) (0 + (a))
+#define NPC_ERRLEV_E_RE (0)
+
+/**
+ * Enumeration npc_intf_e
+ *
+ * NPC Interface Enumeration Enumerates the NPC interfaces.  Even
+ * values encode the NIX(a) receive interface, odd values the NIX(a)
+ * transmit interface (interface number = 2*a + direction).
+ */
+#define NPC_INTF_E_NIXX_RX(a) (0 + 2 * (a))
+#define NPC_INTF_E_NIXX_TX(a) (1 + 2 * (a))
+
+/**
+ * Enumeration npc_lid_e
+ *
+ * NPC Layer ID Enumeration Enumerates layers parsed by NPC.
+ */
+#define NPC_LID_E_LA (0)
+#define NPC_LID_E_LB (1)
+#define NPC_LID_E_LC (2)
+#define NPC_LID_E_LD (3)
+#define NPC_LID_E_LE (4)
+#define NPC_LID_E_LF (5)
+#define NPC_LID_E_LG (6)
+#define NPC_LID_E_LH (7)
+
+/**
+ * Enumeration npc_lkupop_e
+ *
+ * NPC Lookup Operation Enumeration Enumerates the lookup operation for
+ * NPC_AF_LKUP_CTL[OP].
+ */
+#define NPC_LKUPOP_E_KEY (1)
+#define NPC_LKUPOP_E_PKT (0)
+
+/**
+ * Enumeration npc_mcamkeyw_e
+ *
+ * NPC MCAM Search Key Width Enumeration
+ */
+#define NPC_MCAMKEYW_E_X1 (0)
+#define NPC_MCAMKEYW_E_X2 (1)
+#define NPC_MCAMKEYW_E_X4 (2)
+
+/**
+ * Structure npc_layer_info_s
+ *
+ * NPC Layer Parse Information Structure This structure specifies the
+ * format of NPC_RESULT_S[LA,LB,...,LH].
+ */
+union npc_layer_info_s {
+	u32 u;
+	struct npc_layer_info_s_s {
+		u32 lptr                             : 8;
+		u32 flags                            : 8;
+		u32 ltype                            : 4;
+		u32 reserved_20_31                   : 12;
+	} s;
+	/* struct npc_layer_info_s_s cn; */
+};
+
+/**
+ * Structure npc_layer_kex_s
+ *
+ * NPC Layer MCAM Search Key Extract Structure This structure specifies
+ * the format of each of the NPC_PARSE_KEX_S[LA,LB,...,LH] fields. It
+ * contains the subset of NPC_LAYER_INFO_S fields that can be included in
+ * the MCAM search key. See NPC_PARSE_KEX_S and NPC_AF_INTF()_KEX_CFG.
+ */
+union npc_layer_kex_s {
+	u32 u;
+	struct npc_layer_kex_s_s {
+		u32 flags                            : 8;
+		u32 ltype                            : 4;
+		u32 reserved_12_31                   : 20;
+	} s;
+	/* struct npc_layer_kex_s_s cn; */
+};
+
+/**
+ * Structure npc_mcam_key_x1_s
+ *
+ * NPC MCAM Search Key X1 Structure This structure specifies the MCAM
+ * search key format used by an interface when
+ * NPC_AF_INTF()_KEX_CFG[KEYW] = NPC_MCAMKEYW_E::X1.
+ */
+union npc_mcam_key_x1_s {
+	u64 u[3];
+	struct npc_mcam_key_x1_s_s {
+		u64 intf                             : 2;
+		u64 reserved_2_63                    : 62;
+		u64 kw0                              : 64;
+		u64 kw1                              : 48;
+		u64 reserved_176_191                 : 16;
+	} s;
+	/* struct npc_mcam_key_x1_s_s cn; */
+};
+
+/**
+ * Structure npc_mcam_key_x2_s
+ *
+ * NPC MCAM Search Key X2 Structure This structure specifies the MCAM
+ * search key format used by an interface when
+ * NPC_AF_INTF()_KEX_CFG[KEYW] = NPC_MCAMKEYW_E::X2.
+ */
+union npc_mcam_key_x2_s {
+	u64 u[5];
+	struct npc_mcam_key_x2_s_s {
+		u64 intf                             : 2;
+		u64 reserved_2_63                    : 62;
+		u64 kw0                              : 64;
+		u64 kw1                              : 64;
+		u64 kw2                              : 64;
+		u64 kw3                              : 32;
+		u64 reserved_288_319                 : 32;
+	} s;
+	/* struct npc_mcam_key_x2_s_s cn; */
+};
+
+/**
+ * Structure npc_mcam_key_x4_s
+ *
+ * NPC MCAM Search Key X4 Structure This structure specifies the MCAM
+ * search key format used by an interface when
+ * NPC_AF_INTF()_KEX_CFG[KEYW] = NPC_MCAMKEYW_E::X4.
+ */
+union npc_mcam_key_x4_s {
+	u64 u[8];
+	struct npc_mcam_key_x4_s_s {
+		u64 intf                             : 2;
+		u64 reserved_2_63                    : 62;
+		u64 kw0                              : 64;
+		u64 kw1                              : 64;
+		u64 kw2                              : 64;
+		u64 kw3                              : 64;
+		u64 kw4                              : 64;
+		u64 kw5                              : 64;
+		u64 kw6                              : 64;
+	} s;
+	/* struct npc_mcam_key_x4_s_s cn; */
+};
+
+/**
+ * Structure npc_parse_kex_s
+ *
+ * NPC Parse Key Extract Structure This structure contains the subset of
+ * NPC_RESULT_S fields that can be included in the MCAM search key. See
+ * NPC_AF_INTF()_KEX_CFG.
+ *
+ * NOTE(review): field order assumes LSB-first bitfield packing across
+ * u[0..1] (128 bits), as on the little-endian arm64 target -- confirm
+ * if this header is ever built for another ABI.
+ */
+union npc_parse_kex_s {
+	u64 u[2];
+	struct npc_parse_kex_s_s {
+		u64 chan                             : 12;
+		u64 errlev                           : 4;
+		u64 errcode                          : 8;
+		u64 l2m                              : 1;
+		u64 l2b                              : 1;
+		u64 l3m                              : 1;
+		u64 l3b                              : 1;
+		u64 la                               : 12;
+		u64 lb                               : 12;
+		u64 lc                               : 12;
+		u64 ld                               : 12;
+		u64 le                               : 12;
+		u64 lf                               : 12;
+		u64 lg                               : 12;
+		u64 lh                               : 12;
+		u64 reserved_124_127                 : 4;
+	} s;
+	/* struct npc_parse_kex_s_s cn; */
+};
+
+/**
+ * Structure npc_result_s
+ *
+ * NPC Result Structure This structure contains a packet's parse and flow
+ * identification information.
+ *
+ * NOTE(review): unlike NPC_PARSE_KEX_S, the per-layer LA..LH fields here
+ * are 20 bits each (384 bits total across u[0..5]).
+ */
+union npc_result_s {
+	u64 u[6];
+	struct npc_result_s_s {
+		u64 intf                             : 2;
+		u64 pkind                            : 6;
+		u64 chan                             : 12;
+		u64 errlev                           : 4;
+		u64 errcode                          : 8;
+		u64 l2m                              : 1;
+		u64 l2b                              : 1;
+		u64 l3m                              : 1;
+		u64 l3b                              : 1;
+		u64 eoh_ptr                          : 8;
+		u64 reserved_44_63                   : 20;
+		u64 action                           : 64;
+		u64 vtag_action                      : 64;
+		u64 la                               : 20;
+		u64 lb                               : 20;
+		u64 lc                               : 20;
+		u64 reserved_252_255                 : 4;
+		u64 ld                               : 20;
+		u64 le                               : 20;
+		u64 lf                               : 20;
+		u64 reserved_316_319                 : 4;
+		u64 lg                               : 20;
+		u64 lh                               : 20;
+		u64 reserved_360_383                 : 24;
+	} s;
+	/* struct npc_result_s_s cn; */
+};
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_active_pc
+ *
+ * NPC AF Active PC Register
+ * NOTE(review): the generated heading read "NPC Interrupt-Timer
+ * Configuration Register", which looks like a copy/paste slip -- the
+ * only field is ACTIVE_PC. Confirm the title against the HRM.
+ */
+union npc_af_active_pc {
+	u64 u;
+	struct npc_af_active_pc_s {
+		u64 active_pc                        : 64;
+	} s;
+	/* struct npc_af_active_pc_s cn; */
+};
+
+/* Byte offset of NPC_AF_ACTIVE_PC within the NPC AF BAR0 region. */
+static inline u64 NPC_AF_ACTIVE_PC(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_ACTIVE_PC(void)
+{
+	return 0x10;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_blk_rst
+ *
+ * NPC AF Block Reset Register
+ */
+union npc_af_blk_rst {
+	u64 u;
+	struct npc_af_blk_rst_s {
+		u64 rst                              : 1;
+		u64 reserved_1_62                    : 62;
+		u64 busy                             : 1;
+	} s;
+	/* struct npc_af_blk_rst_s cn; */
+};
+
+static inline u64 NPC_AF_BLK_RST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_BLK_RST(void)
+{
+	return 0x40;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_cfg
+ *
+ * NPC AF General Configuration Register
+ */
+union npc_af_cfg {
+	u64 u;
+	struct npc_af_cfg_s {
+		u64 reserved_0_1                     : 2;
+		u64 cclk_force                       : 1;
+		u64 force_intf_clk_en                : 1;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct npc_af_cfg_s cn; */
+};
+
+static inline u64 NPC_AF_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_CFG(void)
+{
+	return 0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_const
+ *
+ * NPC AF Constants Register This register contains constants for
+ * software discovery.
+ */
+union npc_af_const {
+	u64 u;
+	struct npc_af_const_s {
+		u64 intfs                            : 4;
+		u64 lids                             : 4;
+		u64 kpus                             : 5;
+		u64 reserved_13_15                   : 3;
+		u64 mcam_bank_width                  : 10;
+		u64 reserved_26_27                   : 2;
+		u64 mcam_bank_depth                  : 16;
+		u64 mcam_banks                       : 4;
+		u64 match_stats                      : 16;
+	} s;
+	/* struct npc_af_const_s cn; */
+};
+
+static inline u64 NPC_AF_CONST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_CONST(void)
+{
+	return 0x20;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_const1
+ *
+ * NPC AF Constants 1 Register This register contains constants for
+ * software discovery.
+ */
+union npc_af_const1 {
+	u64 u;
+	struct npc_af_const1_s {
+		u64 kpu_entries                      : 12;
+		u64 pkinds                           : 8;
+		u64 cpi_size                         : 16;
+		u64 reserved_36_63                   : 28;
+	} s;
+	/* struct npc_af_const1_s cn; */
+};
+
+static inline u64 NPC_AF_CONST1(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_CONST1(void)
+{
+	return 0x30;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_cpi#_cfg
+ *
+ * NPC AF Channel Parse Index Table Registers
+ */
+union npc_af_cpix_cfg {
+	u64 u;
+	struct npc_af_cpix_cfg_s {
+		u64 padd                             : 4;
+		u64 reserved_4_63                    : 60;
+	} s;
+	/* struct npc_af_cpix_cfg_s cn; */
+};
+
+/* @a: CPI table index (the '#' in npc_af_cpi#_cfg); 8-byte stride. */
+static inline u64 NPC_AF_CPIX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_CPIX_CFG(u64 a)
+{
+	return 0x200000 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_dbg_ctl
+ *
+ * NPC AF Debug Control Register This register controls the capture of
+ * debug information in NPC_AF_KPU()_DBG, NPC_AF_MCAM_DBG,
+ * NPC_AF_DBG_DATA() and NPC_AF_DBG_RESULT().
+ */
+union npc_af_dbg_ctl {
+	u64 u;
+	struct npc_af_dbg_ctl_s {
+		u64 continuous                       : 1;
+		u64 lkup_dbg                         : 1;
+		u64 intf_dbg                         : 4;
+		u64 reserved_6_63                    : 58;
+	} s;
+	/* struct npc_af_dbg_ctl_s cn; */
+};
+
+static inline u64 NPC_AF_DBG_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_DBG_CTL(void)
+{
+	return 0x3000000;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_dbg_data#
+ *
+ * NPC AF Debug Data Registers These registers contain the packet header
+ * data of the last packet/lookup whose debug information is captured by
+ * NPC_AF_DBG_CTL[INTF_DBG,LKUP_DBG].
+ */
+union npc_af_dbg_datax {
+	u64 u;
+	struct npc_af_dbg_datax_s {
+		u64 data                             : 64;
+	} s;
+	/* struct npc_af_dbg_datax_s cn; */
+};
+
+/* @a: debug data word index; note 0x10 stride (one u64 per 16 bytes). */
+static inline u64 NPC_AF_DBG_DATAX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_DBG_DATAX(u64 a)
+{
+	return 0x3001400 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_dbg_result#
+ *
+ * NPC AF Debug Result Registers These registers contain the result data
+ * of the last packet/lookup whose debug information is captured by
+ * NPC_AF_DBG_CTL[INTF_DBG,LKUP_DBG].  Internal: FIXME - add note about
+ * coherency of data in continuous packet capture mode.
+ */
+union npc_af_dbg_resultx {
+	u64 u;
+	struct npc_af_dbg_resultx_s {
+		u64 data                             : 64;
+	} s;
+	/* struct npc_af_dbg_resultx_s cn; */
+};
+
+static inline u64 NPC_AF_DBG_RESULTX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_DBG_RESULTX(u64 a)
+{
+	return 0x3001800 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_dbg_status
+ *
+ * NPC AF Debug Status Register
+ */
+union npc_af_dbg_status {
+	u64 u;
+	struct npc_af_dbg_status_s {
+		u64 done                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npc_af_dbg_status_s cn; */
+};
+
+static inline u64 NPC_AF_DBG_STATUS(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_DBG_STATUS(void)
+{
+	return 0x3000010;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_dv_fc_scratch
+ *
+ * INTERNAL: NPC AF Scratch Register  Internal: This register is for
+ * internal DV purpose.
+ */
+union npc_af_dv_fc_scratch {
+	u64 u;
+	struct npc_af_dv_fc_scratch_s {
+		u64 it                               : 64;	/* single 64-bit scratch field */
+	} s;
+	/* struct npc_af_dv_fc_scratch_s cn; */
+};
+
+static inline u64 NPC_AF_DV_FC_SCRATCH(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_DV_FC_SCRATCH(void)
+{
+	return 0x60;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_eco0
+ *
+ * INTERNAL: ECO 0 Register
+ */
+union npc_af_eco0 {
+	u64 u;
+	struct npc_af_eco0_s {
+		u64 eco_rw                           : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct npc_af_eco0_s cn; */
+};
+
+static inline u64 NPC_AF_ECO0(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_ECO0(void)
+{
+	return 0x200;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_ikpu_err_ctl
+ *
+ * NPC AF Initial KPU Error Control Registers Similar to
+ * NPC_AF_KPU()_ERR_CTL, but specifies values captured in
+ * NPC_RESULT_S[ERRLEV,ERRCODE] for errors detected by the PKIND-based
+ * initial actions from NPC_AF_PKIND()_ACTION0 and
+ * NPC_AF_PKIND()_ACTION1. [DP_OFFSET_ERRCODE] from this register is
+ * never used.
+ */
+union npc_af_ikpu_err_ctl {
+	u64 u;
+	struct npc_af_ikpu_err_ctl_s {
+		u64 errlev                           : 4;
+		u64 dp_offset_errcode                : 8;
+		u64 ptr_advance_errcode              : 8;
+		u64 var_len_offset_errcode           : 8;
+		u64 reserved_28_63                   : 36;
+	} s;
+	/* struct npc_af_ikpu_err_ctl_s cn; */
+};
+
+static inline u64 NPC_AF_IKPU_ERR_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_IKPU_ERR_CTL(void)
+{
+	return 0x3000080;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_intf#_kex_cfg
+ *
+ * NPC AF Interface Key Extract Configuration Registers
+ */
+union npc_af_intfx_kex_cfg {
+	u64 u;
+	struct npc_af_intfx_kex_cfg_s {
+		u64 parse_nibble_ena                 : 31;
+		u64 reserved_31                      : 1;
+		u64 keyw                             : 3;	/* search key width; see NPC_MCAMKEYW_E */
+		u64 reserved_35_63                   : 29;
+	} s;
+	/* struct npc_af_intfx_kex_cfg_s cn; */
+};
+
+/* @a: interface index (the '#' in npc_af_intf#_kex_cfg). */
+static inline u64 NPC_AF_INTFX_KEX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_INTFX_KEX_CFG(u64 a)
+{
+	return 0x1010 + 0x100 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_intf#_ldata#_flags#_cfg
+ *
+ * NPC AF Interface Layer Data Flags Configuration Registers These
+ * registers control the extraction of layer data (LDATA) into the MCAM
+ * search key for each interface based on the FLAGS\<3:0\> bits of two
+ * layers selected by NPC_AF_KEX_LDATA()_FLAGS_CFG.
+ */
+union npc_af_intfx_ldatax_flagsx_cfg {
+	u64 u;
+	struct npc_af_intfx_ldatax_flagsx_cfg_s {
+		u64 key_offset                       : 6;
+		u64 reserved_6                       : 1;
+		u64 ena                              : 1;
+		u64 hdr_offset                       : 8;
+		u64 bytesm1                          : 4;	/* bytes to extract, minus one */
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npc_af_intfx_ldatax_flagsx_cfg_s cn; */
+};
+
+/* @a: interface, @b: LDATA index, @c: FLAGS value. */
+static inline u64 NPC_AF_INTFX_LDATAX_FLAGSX_CFG(u64 a, u64 b, u64 c)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_INTFX_LDATAX_FLAGSX_CFG(u64 a, u64 b, u64 c)
+{
+	return 0x980000 + 0x10000 * a + 0x1000 * b + 8 * c;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_intf#_lid#_lt#_ld#_cfg
+ *
+ * NPC AF Interface Layer Data Extract Configuration Registers These
+ * registers control the extraction of layer data (LDATA) into the MCAM
+ * search key for each interface. Up to two LDATA fields can be extracted
+ * per layer (LID(0..7) indexed by NPC_LID_E), with up to 16 bytes per
+ * LDATA field. For each layer, the corresponding NPC_LAYER_INFO_S[LTYPE]
+ * value in NPC_RESULT_S is used as the LTYPE(0..15) index and select the
+ * associated LDATA(0..1) registers.  NPC_LAYER_INFO_S[LTYPE]=0x0 means
+ * the corresponding layer not parsed (invalid), so software should keep
+ * NPC_AF_INTF()_LID()_LT(0)_LD()_CFG[ENA] clear to disable extraction
+ * when LTYPE is zero.
+ */
+union npc_af_intfx_lidx_ltx_ldx_cfg {
+	u64 u;
+	struct npc_af_intfx_lidx_ltx_ldx_cfg_s {
+		u64 key_offset                       : 6;
+		u64 flags_ena                        : 1;
+		u64 ena                              : 1;
+		u64 hdr_offset                       : 8;
+		u64 bytesm1                          : 4;	/* bytes to extract, minus one */
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npc_af_intfx_lidx_ltx_ldx_cfg_s cn; */
+};
+
+/* @a: interface, @b: LID, @c: LTYPE, @d: LDATA index. */
+static inline u64 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(u64 a, u64 b, u64 c, u64 d)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(u64 a, u64 b, u64 c, u64 d)
+{
+	return 0x900000 + 0x10000 * a + 0x1000 * b + 0x20 * c + 8 * d;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_intf#_miss_act
+ *
+ * NPC AF Interface MCAM Miss Action Data Registers When a combination of
+ * NPC_AF_MCAME()_BANK()_CAM()_* and NPC_AF_MCAME()_BANK()_CFG[ENA]
+ * yields an MCAM miss for a packet, this register specifies the packet's
+ * match action captured in NPC_RESULT_S[ACTION].
+ */
+union npc_af_intfx_miss_act {
+	u64 u;
+	struct npc_af_intfx_miss_act_s {
+		u64 action                           : 64;
+	} s;
+	/* struct npc_af_intfx_miss_act_s cn; */
+};
+
+/* @a: interface index for all NPC_AF_INTFX_MISS_* accessors below. */
+static inline u64 NPC_AF_INTFX_MISS_ACT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_INTFX_MISS_ACT(u64 a)
+{
+	return 0x1a00000 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_intf#_miss_stat_act
+ *
+ * NPC AF Interface MCAM Miss Stat Action Data Registers Used to
+ * optionally increment a NPC_AF_MATCH_STAT() counter when a packet
+ * misses an MCAM entry.
+ */
+union npc_af_intfx_miss_stat_act {
+	u64 u;
+	struct npc_af_intfx_miss_stat_act_s {
+		u64 stat_sel                         : 9;
+		u64 ena                              : 1;
+		u64 reserved_10_63                   : 54;
+	} s;
+	/* struct npc_af_intfx_miss_stat_act_s cn; */
+};
+
+static inline u64 NPC_AF_INTFX_MISS_STAT_ACT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_INTFX_MISS_STAT_ACT(u64 a)
+{
+	return 0x1880040 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_intf#_miss_tag_act
+ *
+ * NPC AF Interface MCAM Miss VTag Action Data Registers When a
+ * combination of NPC_AF_MCAME()_BANK()_CAM()_* and
+ * NPC_AF_MCAME()_BANK()_CFG[ENA] yields an MCAM miss for a packet, this
+ * register specifies the packet's match Vtag action captured in
+ * NPC_RESULT_S[VTAG_ACTION].
+ */
+union npc_af_intfx_miss_tag_act {
+	u64 u;
+	struct npc_af_intfx_miss_tag_act_s {
+		u64 vtag_action                      : 64;
+	} s;
+	/* struct npc_af_intfx_miss_tag_act_s cn; */
+};
+
+static inline u64 NPC_AF_INTFX_MISS_TAG_ACT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_INTFX_MISS_TAG_ACT(u64 a)
+{
+	return 0x1b00008 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_intf#_stat
+ *
+ * NPC AF Interface Statistics Registers Statistics per interface. Index
+ * enumerated by NPC_INTF_E.
+ */
+union npc_af_intfx_stat {
+	u64 u;
+	struct npc_af_intfx_stat_s {
+		u64 count                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct npc_af_intfx_stat_s cn; */
+};
+
+static inline u64 NPC_AF_INTFX_STAT(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_INTFX_STAT(u64 a)
+{
+	return 0x2000800 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kcam_scrub_ctl
+ *
+ * NPC AF KCAM Scrub Control Register
+ * NOTE(review): field layout is identical to NPC_AF_MCAM_SCRUB_CTL
+ * (offset 0xa0); this one sits at 0xb0.
+ */
+union npc_af_kcam_scrub_ctl {
+	u64 u;
+	struct npc_af_kcam_scrub_ctl_s {
+		u64 ena                              : 1;
+		u64 reserved_1_7                     : 7;
+		u64 lp_dis                           : 1;
+		u64 reserved_9_15                    : 7;
+		u64 toth                             : 4;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npc_af_kcam_scrub_ctl_s cn; */
+};
+
+static inline u64 NPC_AF_KCAM_SCRUB_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KCAM_SCRUB_CTL(void)
+{
+	return 0xb0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kex_ldata#_flags_cfg
+ *
+ * NPC AF Key Extract Layer Data Flags Configuration Register
+ */
+union npc_af_kex_ldatax_flags_cfg {
+	u64 u;
+	struct npc_af_kex_ldatax_flags_cfg_s {
+		u64 lid                              : 3;
+		u64 reserved_3_63                    : 61;
+	} s;
+	/* struct npc_af_kex_ldatax_flags_cfg_s cn; */
+};
+
+static inline u64 NPC_AF_KEX_LDATAX_FLAGS_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KEX_LDATAX_FLAGS_CFG(u64 a)
+{
+	return 0x800 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kpu#_cfg
+ *
+ * NPC AF KPU Configuration Registers
+ */
+union npc_af_kpux_cfg {
+	u64 u;
+	struct npc_af_kpux_cfg_s {
+		u64 ena                              : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npc_af_kpux_cfg_s cn; */
+};
+
+/* @a: KPU index (the '#' in npc_af_kpu#_cfg). */
+static inline u64 NPC_AF_KPUX_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KPUX_CFG(u64 a)
+{
+	return 0x500 + 8 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kpu#_dbg
+ *
+ * NPC AF KPU Debug Registers This register contains information for the
+ * last packet/lookup for which debug is enabled by
+ * NPC_AF_DBG_CTL[INTF_DBG,LKUP_DBG]. The register contents are undefined
+ * when debug information is captured for a software key lookup
+ * (NPC_AF_LKUP_CTL[OP] = NPC_LKUPOP_E::KEY).
+ */
+union npc_af_kpux_dbg {
+	u64 u;
+	struct npc_af_kpux_dbg_s {
+		u64 hit_entry                        : 8;
+		u64 byp                              : 1;
+		u64 reserved_9_63                    : 55;
+	} s;
+	/* struct npc_af_kpux_dbg_s cn; */
+};
+
+static inline u64 NPC_AF_KPUX_DBG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KPUX_DBG(u64 a)
+{
+	return 0x3000020 + 0x100 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kpu#_entry#_action0
+ *
+ * NPC AF KPU Entry Action Data 0 Registers When a KPU's search data
+ * matches a KPU CAM entry in NPC_AF_KPU()_ENTRY()_CAM(), the
+ * corresponding entry action in NPC_AF_KPU()_ENTRY()_ACTION0 and
+ * NPC_AF_KPU()_ENTRY()_ACTION1 specifies the next state and operations
+ * to perform before exiting the KPU.
+ */
+union npc_af_kpux_entryx_action0 {
+	u64 u;
+	struct npc_af_kpux_entryx_action0_s {
+		u64 var_len_shift                    : 3;
+		u64 var_len_right                    : 1;
+		u64 var_len_mask                     : 8;
+		u64 var_len_offset                   : 8;
+		u64 ptr_advance                      : 8;
+		u64 capture_flags                    : 8;
+		u64 capture_ltype                    : 4;
+		u64 capture_lid                      : 3;
+		u64 reserved_43                      : 1;
+		u64 next_state                       : 8;
+		u64 parse_done                       : 1;
+		u64 capture_ena                      : 1;
+		u64 byp_count                        : 3;
+		u64 reserved_57_63                   : 7;
+	} s;
+	/* struct npc_af_kpux_entryx_action0_s cn; */
+};
+
+/* @a: KPU index, @b: entry index. */
+static inline u64 NPC_AF_KPUX_ENTRYX_ACTION0(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KPUX_ENTRYX_ACTION0(u64 a, u64 b)
+{
+	return 0x100020 + 0x4000 * a + 0x40 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kpu#_entry#_action1
+ *
+ * NPC AF KPU Entry Action Data 1 Registers See
+ * NPC_AF_KPU()_ENTRY()_ACTION0.
+ * NOTE(review): the generated heading said "Action Data 0", an apparent
+ * copy/paste from the ACTION0 stanza -- confirm against the HRM.
+ */
+union npc_af_kpux_entryx_action1 {
+	u64 u;
+	struct npc_af_kpux_entryx_action1_s {
+		u64 dp0_offset                       : 8;
+		u64 dp1_offset                       : 8;
+		u64 dp2_offset                       : 8;
+		u64 errcode                          : 8;
+		u64 errlev                           : 4;
+		u64 reserved_36_63                   : 28;
+	} s;
+	/* struct npc_af_kpux_entryx_action1_s cn; */
+};
+
+static inline u64 NPC_AF_KPUX_ENTRYX_ACTION1(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KPUX_ENTRYX_ACTION1(u64 a, u64 b)
+{
+	return 0x100028 + 0x4000 * a + 0x40 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kpu#_entry#_cam#
+ *
+ * NPC AF KPU Entry CAM Registers KPU comparison ternary data. The field
+ * values in NPC_AF_KPU()_ENTRY()_CAM() are ternary, where  each data bit
+ * of the search key matches as follows: _ [CAM(1)]\<n\>=0,
+ * [CAM(0)]\<n\>=0: Always match; search key data\<n\> don't care. _
+ * [CAM(1)]\<n\>=0, [CAM(0)]\<n\>=1: Match when search key data\<n\> ==
+ * 0. _ [CAM(1)]\<n\>=1, [CAM(0)]\<n\>=0: Match when search key data\<n\>
+ * == 1. _ [CAM(1)]\<n\>=1, [CAM(0)]\<n\>=1: Reserved.  The reserved
+ * combination is not allowed. Hardware suppresses any write to CAM(0) or
+ * CAM(1) that would result in the reserved combination for any CAM bit.
+ * The reset value for all non-reserved fields is all zeros for CAM(1)
+ * and all ones for CAM(0), matching a search key of all zeros.  Software
+ * must program a default entry for each KPU, e.g. by programming each
+ * KPU's last entry {b} (NPC_AF_KPU()_ENTRY({b})_CAM()) to always match
+ * all bits.
+ */
+union npc_af_kpux_entryx_camx {
+	u64 u;
+	struct npc_af_kpux_entryx_camx_s {
+		u64 dp0_data                         : 16;
+		u64 dp1_data                         : 16;
+		u64 dp2_data                         : 16;
+		u64 state                            : 8;
+		u64 reserved_56_63                   : 8;
+	} s;
+	/* struct npc_af_kpux_entryx_camx_s cn; */
+};
+
+/* @a: KPU index, @b: entry index, @c: CAM word (0 or 1). */
+static inline u64 NPC_AF_KPUX_ENTRYX_CAMX(u64 a, u64 b, u64 c)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KPUX_ENTRYX_CAMX(u64 a, u64 b, u64 c)
+{
+	return 0x100000 + 0x4000 * a + 0x40 * b + 8 * c;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kpu#_entry_dis#
+ *
+ * NPC AF KPU Entry Disable Registers See NPC_AF_KPU()_ENTRY()_ACTION0.
+ */
+union npc_af_kpux_entry_disx {
+	u64 u;
+	struct npc_af_kpux_entry_disx_s {
+		u64 dis                              : 64;	/* one disable bit per KPU entry */
+	} s;
+	/* struct npc_af_kpux_entry_disx_s cn; */
+};
+
+/* @a: KPU index, @b: 64-entry disable word index. */
+static inline u64 NPC_AF_KPUX_ENTRY_DISX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KPUX_ENTRY_DISX(u64 a, u64 b)
+{
+	return 0x180000 + 0x40 * a + 8 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kpu#_err_ctl
+ *
+ * NPC AF KPU Error Control Registers This register specifies values
+ * captured in NPC_RESULT_S[ERRLEV,ERRCODE] when errors are detected by a
+ * KPU.
+ */
+union npc_af_kpux_err_ctl {
+	u64 u;
+	struct npc_af_kpux_err_ctl_s {
+		u64 errlev                           : 4;
+		u64 dp_offset_errcode                : 8;
+		u64 ptr_advance_errcode              : 8;
+		u64 var_len_offset_errcode           : 8;
+		u64 reserved_28_63                   : 36;
+	} s;
+	/* struct npc_af_kpux_err_ctl_s cn; */
+};
+
+static inline u64 NPC_AF_KPUX_ERR_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KPUX_ERR_CTL(u64 a)
+{
+	return 0x30000a0 + 0x100 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_kpu_diag
+ *
+ * INTERNAL : NPC AF Debug Result Registers
+ */
+union npc_af_kpu_diag {
+	u64 u;
+	struct npc_af_kpu_diag_s {
+		u64 skip_dis                         : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npc_af_kpu_diag_s cn; */
+};
+
+static inline u64 NPC_AF_KPU_DIAG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_KPU_DIAG(void)
+{
+	return 0x3002000;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_lkup_ctl
+ *
+ * NPC AF Software Lookup Control Registers
+ */
+union npc_af_lkup_ctl {
+	u64 u;
+	struct npc_af_lkup_ctl_s {
+		u64 intf                             : 2;
+		u64 pkind                            : 6;
+		u64 chan                             : 12;
+		u64 hdr_sizem1                       : 8;	/* header size in bytes, minus one */
+		u64 op                               : 3;	/* lookup op; see NPC_LKUPOP_E */
+		u64 exec                             : 1;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct npc_af_lkup_ctl_s cn; */
+};
+
+static inline u64 NPC_AF_LKUP_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_LKUP_CTL(void)
+{
+	return 0x2000000;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_lkup_data#
+ *
+ * NPC AF Software Lookup Data Registers
+ */
+union npc_af_lkup_datax {
+	u64 u;
+	struct npc_af_lkup_datax_s {
+		u64 data                             : 64;
+	} s;
+	/* struct npc_af_lkup_datax_s cn; */
+};
+
+/* @a: data word index; 0x10 stride (one u64 per 16 bytes). */
+static inline u64 NPC_AF_LKUP_DATAX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_LKUP_DATAX(u64 a)
+{
+	return 0x2000200 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_lkup_result#
+ *
+ * NPC AF Software Lookup Result Registers
+ */
+union npc_af_lkup_resultx {
+	u64 u;
+	struct npc_af_lkup_resultx_s {
+		u64 data                             : 64;
+	} s;
+	/* struct npc_af_lkup_resultx_s cn; */
+};
+
+static inline u64 NPC_AF_LKUP_RESULTX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_LKUP_RESULTX(u64 a)
+{
+	return 0x2000400 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_match_stat#
+ *
+ * NPC AF Match Statistics Registers
+ */
+union npc_af_match_statx {
+	u64 u;
+	struct npc_af_match_statx_s {
+		u64 count                            : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct npc_af_match_statx_s cn; */
+};
+
+static inline u64 NPC_AF_MATCH_STATX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MATCH_STATX(u64 a)
+{
+	return 0x1880008 + 0x100 * a;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcam_bank#_hit#
+ *
+ * NPC AF MCAM Bank Hit Registers
+ */
+union npc_af_mcam_bankx_hitx {
+	u64 u;
+	struct npc_af_mcam_bankx_hitx_s {
+		u64 hit                              : 64;	/* one hit bit per MCAM entry */
+	} s;
+	/* struct npc_af_mcam_bankx_hitx_s cn; */
+};
+
+/* @a: MCAM bank, @b: 64-entry hit word index. */
+static inline u64 NPC_AF_MCAM_BANKX_HITX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAM_BANKX_HITX(u64 a, u64 b)
+{
+	return 0x1c80000 + 0x100 * a + 0x10 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcam_dbg
+ *
+ * NPC AF MCAM Debug Register This register contains information for the
+ * last packet/lookup for which debug is enabled by
+ * NPC_AF_DBG_CTL[INTF_DBG,LKUP_DBG].
+ */
+union npc_af_mcam_dbg {
+	u64 u;
+	struct npc_af_mcam_dbg_s {
+		u64 hit_entry                        : 10;
+		u64 reserved_10_11                   : 2;
+		u64 hit_bank                         : 2;
+		u64 reserved_14_15                   : 2;
+		u64 miss                             : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct npc_af_mcam_dbg_s cn; */
+};
+
+static inline u64 NPC_AF_MCAM_DBG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAM_DBG(void)
+{
+	return 0x3001000;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcam_scrub_ctl
+ *
+ * NPC AF MCAM Scrub Control Register
+ */
+union npc_af_mcam_scrub_ctl {
+	u64 u;
+	struct npc_af_mcam_scrub_ctl_s {
+		u64 ena                              : 1;
+		u64 reserved_1_7                     : 7;
+		u64 lp_dis                           : 1;
+		u64 reserved_9_15                    : 7;
+		u64 toth                             : 4;
+		u64 reserved_20_63                   : 44;
+	} s;
+	/* struct npc_af_mcam_scrub_ctl_s cn; */
+};
+
+static inline u64 NPC_AF_MCAM_SCRUB_CTL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAM_SCRUB_CTL(void)
+{
+	return 0xa0;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcame#_bank#_action
+ *
+ * NPC AF MCAM Entry Bank Action Data Registers Specifies a packet's
+ * match action captured in NPC_RESULT_S[ACTION].  When an interface is
+ * configured to use the NPC_MCAM_KEY_X2_S search key format
+ * (NPC_AF_INTF()_KEX_CFG[KEYW] = NPC_MCAMKEYW_E::X2), *
+ * NPC_AF_MCAME()_BANK(0)_ACTION/_TAG_ACT/_STAT_ACT are used if the
+ * search key matches NPC_AF_MCAME()_BANK(0..1)_CAM()_W*. *
+ * NPC_AF_MCAME()_BANK(2)_ACTION/_TAG_ACT/_STAT_ACT are used if the
+ * search key matches NPC_AF_MCAME()_BANK(2..3)_CAM()_W*. *
+ * NPC_AF_MCAME()_BANK(1,3)_ACTION/_TAG_ACT/_STAT_ACT are not used.  When
+ * an interface is configured to use the NPC_MCAM_KEY_X4_S search key
+ * format (NPC_AF_INTF()_KEX_CFG[KEYW] = NPC_MCAMKEYW_E::X4): *
+ * NPC_AF_MCAME()_BANK(0)_ACTION/_TAG_ACT/_STAT_ACT are used if the
+ * search key matches NPC_AF_MCAME()_BANK(0..3)_CAM()_W*. *
+ * NPC_AF_MCAME()_BANK(1..3)_ACTION/_TAG_ACT/_STAT_ACT are not used.
+ */
+union npc_af_mcamex_bankx_action {
+	u64 u;
+	struct npc_af_mcamex_bankx_action_s {
+		u64 action                           : 64;
+	} s;
+	/* struct npc_af_mcamex_bankx_action_s cn; */
+};
+
+/* @a: MCAM entry index, @b: bank index. */
+static inline u64 NPC_AF_MCAMEX_BANKX_ACTION(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAMEX_BANKX_ACTION(u64 a, u64 b)
+{
+	return 0x1900000 + 0x100 * a + 0x10 * b;
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcame#_bank#_cam#_intf
+ *
+ * NPC AF MCAM Entry Bank CAM Data Interface Registers MCAM comparison
+ * ternary data interface word. The field values in
+ * NPC_AF_MCAME()_BANK()_CAM()_INTF, NPC_AF_MCAME()_BANK()_CAM()_W0 and
+ * NPC_AF_MCAME()_BANK()_CAM()_W1 are ternary, where  each data bit of
+ * the search key matches as follows: _ [CAM(1)]\<n\>=0, [CAM(0)]\<n\>=0:
+ * Always match; search key data\<n\> don't care. _ [CAM(1)]\<n\>=0,
+ * [CAM(0)]\<n\>=1: Match when search key data\<n\> == 0. _
+ * [CAM(1)]\<n\>=1, [CAM(0)]\<n\>=0: Match when search key data\<n\> ==
+ * 1. _ [CAM(1)]\<n\>=1, [CAM(0)]\<n\>=1: Reserved.  The reserved
+ * combination is not allowed. Hardware suppresses any write to CAM(0) or
+ * CAM(1) that would result in the reserved combination for any CAM bit.
+ * The reset value for all non-reserved fields in
+ * NPC_AF_MCAME()_BANK()_CAM()_INTF, NPC_AF_MCAME()_BANK()_CAM()_W0 and
+ * NPC_AF_MCAME()_BANK()_CAM()_W1 is all zeros for CAM(1) and all ones
+ * for CAM(0), matching a search key of all zeros.  When an interface is
+ * configured to use the NPC_MCAM_KEY_X1_S search key format
+ * (NPC_AF_INTF()_KEX_CFG[KEYW] = NPC_MCAMKEYW_E::X1), the four banks of
+ * every MCAM entry are used as individual entries, each of which is
+ * independently compared with the search key as follows: _
+ * NPC_AF_MCAME()_BANK()_CAM()_INTF[INTF] corresponds to
+ * NPC_MCAM_KEY_X1_S[INTF]. _ NPC_AF_MCAME()_BANK()_CAM()_W0[MD]
+ * corresponds to NPC_MCAM_KEY_X1_S[KW0]. _
+ * NPC_AF_MCAME()_BANK()_CAM()_W1[MD] corresponds to
+ * NPC_MCAM_KEY_X1_S[KW1].  When an interface is configured to use the
+ * NPC_MCAM_KEY_X2_S search key format (NPC_AF_INTF()_KEX_CFG[KEYW] =
+ * NPC_MCAMKEYW_E::X2), banks 0-1 of every MCAM entry are used as one
+ * double-wide entry, banks 2-3 as a second double-wide entry, and each
+ * double-wide entry is independently compared with the search key as
+ * follows: _ NPC_AF_MCAME()_BANK(0,2)_CAM()_INTF[INTF] corresponds to
+ * NPC_MCAM_KEY_X2_S[INTF]. _ NPC_AF_MCAME()_BANK(0,2)_CAM()_W0[MD]
+ * corresponds to NPC_MCAM_KEY_X2_S[KW0]. _
+ * NPC_AF_MCAME()_BANK(0,2)_CAM()_W1[MD] corresponds to
+ * NPC_MCAM_KEY_X2_S[KW1]\<47:0\>. _
+ * NPC_AF_MCAME()_BANK(1,3)_CAM()_INTF[INTF] corresponds to
+ * NPC_MCAM_KEY_X2_S[INTF]. _
+ * NPC_AF_MCAME()_BANK(1,3)_CAM()_W0[MD]\<15:0\> corresponds to
+ * NPC_MCAM_KEY_X2_S[KW1]\<63:48\>. _
+ * NPC_AF_MCAME()_BANK(1,3)_CAM()_W0[MD]\<63:16\> corresponds to
+ * NPC_MCAM_KEY_X2_S[KW2]\<47:0\>. _
+ * NPC_AF_MCAME()_BANK(1,3)_CAM()_W1[MD]\<15:0\> corresponds to
+ * NPC_MCAM_KEY_X2_S[KW2]\<63:48\>. _
+ * NPC_AF_MCAME()_BANK(1,3)_CAM()_W1[MD]\<47:16\> corresponds to
+ * NPC_MCAM_KEY_X2_S[KW3]\<31:0\>.  When an interface is configured to
+ * use the NPC_MCAM_KEY_X4_S search key format
+ * (NPC_AF_INTF()_KEX_CFG[KEYW] = NPC_MCAMKEYW_E::X4), the four banks of
+ * every MCAM entry are used as a single quad-wide entry that is compared
+ * with the search key as follows: _
+ * NPC_AF_MCAME()_BANK(0)_CAM()_INTF[INTF] corresponds to
+ * NPC_MCAM_KEY_X4_S[INTF]. _ NPC_AF_MCAME()_BANK(0)_CAM()_W0[MD]
+ * corresponds to NPC_MCAM_KEY_X4_S[KW0]. _
+ * NPC_AF_MCAME()_BANK(0)_CAM()_W1[MD] corresponds to
+ * NPC_MCAM_KEY_X4_S[KW1]\<47:0\>. _
+ * NPC_AF_MCAME()_BANK(1)_CAM()_INTF[INTF] corresponds to
+ * NPC_MCAM_KEY_X4_S[INTF]. _ NPC_AF_MCAME()_BANK(1)_CAM()_W0[MD]\<15:0\>
+ * corresponds to NPC_MCAM_KEY_X4_S[KW1]\<63:48\>. _
+ * NPC_AF_MCAME()_BANK(1)_CAM()_W0[MD]\<63:16\> corresponds to
+ * NPC_MCAM_KEY_X4_S[KW2]\<47:0\>. _
+ * NPC_AF_MCAME()_BANK(1)_CAM()_W1[MD]\<15:0\> corresponds to
+ * NPC_MCAM_KEY_X4_S[KW2]\<63:48\>. _
+ * NPC_AF_MCAME()_BANK(1)_CAM()_W1[MD]\<47:16\> corresponds to
+ * NPC_MCAM_KEY_X4_S[KW3]\<31:0\>. _
+ * NPC_AF_MCAME()_BANK(2)_CAM()_INTF[INTF] corresponds to
+ * NPC_MCAM_KEY_X4_S[INTF]. _ NPC_AF_MCAME()_BANK(2)_CAM()_W0[MD]\<31:0\>
+ * corresponds to NPC_MCAM_KEY_X4_S[KW3]\<63:32\>. _
+ * NPC_AF_MCAME()_BANK(2)_CAM()_W0[MD]\<63:32\> corresponds to
+ * NPC_MCAM_KEY_X4_S[KW4]\<31:0\>. _
+ * NPC_AF_MCAME()_BANK(2)_CAM()_W1[MD]\<31:0\> corresponds to
+ * NPC_MCAM_KEY_X4_S[KW4]\<63:32\>. _
+ * NPC_AF_MCAME()_BANK(2)_CAM()_W1[MD]\<47:32\> corresponds to
+ * NPC_MCAM_KEY_X4_S[KW5]\<15:0\>. _
+ * NPC_AF_MCAME()_BANK(3)_CAM()_INTF[INTF] corresponds to
+ * NPC_MCAM_KEY_X4_S[INTF]. _ NPC_AF_MCAME()_BANK(3)_CAM()_W0[MD]\<47:0\>
+ * corresponds to NPC_MCAM_KEY_X4_S[KW5]\<63:16\>. _
+ * NPC_AF_MCAME()_BANK(3)_CAM()_W0[MD]\<63:48\> corresponds to
+ * NPC_MCAM_KEY_X4_S[KW6]\<15:0\>. _ NPC_AF_MCAME()_BANK(3)_CAM()_W1[MD]
+ * corresponds to NPC_MCAM_KEY_X4_S[KW6]\<63:16\>.  Note that for the X2
+ * and X4 formats, a wide entry will not match unless the INTF fields
+ * from the associated two or four banks match the INTF value from the
+ * search key.  For the X1 and X2 formats, a match in a lower-numbered
+ * bank takes priority over a match in any higher numbered banks. Within
+ * each bank, the lowest numbered matching entry takes priority over any
+ * higher numbered entry.
+ */
+union npc_af_mcamex_bankx_camx_intf {
+	u64 u;
+	struct npc_af_mcamex_bankx_camx_intf_s {
+		u64 intf                             : 2;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct npc_af_mcamex_bankx_camx_intf_s cn; */
+};
+
+static inline u64 NPC_AF_MCAMEX_BANKX_CAMX_INTF(u64 a, u64 b, u64 c)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAMEX_BANKX_CAMX_INTF(u64 a, u64 b, u64 c)
+{
+	return 0x1000000 + 0x400 * a + 0x40 * b + 8 * c; /* a=entry, b=bank, c=word */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcame#_bank#_cam#_w0
+ *
+ * NPC AF MCAM Entry Bank CAM Data Word 0 Registers MCAM comparison
+ * ternary data word 0. See NPC_AF_MCAME()_BANK()_CAM()_INTF.
+ */
+union npc_af_mcamex_bankx_camx_w0 {
+	u64 u;
+	struct npc_af_mcamex_bankx_camx_w0_s {
+		u64 md                               : 64;
+	} s;
+	/* struct npc_af_mcamex_bankx_camx_w0_s cn; */
+};
+
+static inline u64 NPC_AF_MCAMEX_BANKX_CAMX_W0(u64 a, u64 b, u64 c)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAMEX_BANKX_CAMX_W0(u64 a, u64 b, u64 c)
+{
+	return 0x1000010 + 0x400 * a + 0x40 * b + 8 * c; /* a=entry, b=bank, c=word */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcame#_bank#_cam#_w1
+ *
+ * NPC AF MCAM Entry Bank Data Word 1 Registers MCAM comparison ternary
+ * data word 1. See NPC_AF_MCAME()_BANK()_CAM()_INTF.
+ */
+union npc_af_mcamex_bankx_camx_w1 {
+	u64 u;
+	struct npc_af_mcamex_bankx_camx_w1_s {
+		u64 md                               : 48;
+		u64 reserved_48_63                   : 16;
+	} s;
+	/* struct npc_af_mcamex_bankx_camx_w1_s cn; */
+};
+
+static inline u64 NPC_AF_MCAMEX_BANKX_CAMX_W1(u64 a, u64 b, u64 c)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAMEX_BANKX_CAMX_W1(u64 a, u64 b, u64 c)
+{
+	return 0x1000020 + 0x400 * a + 0x40 * b + 8 * c; /* a=entry, b=bank, c=word */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcame#_bank#_cfg
+ *
+ * NPC AF MCAM Entry Bank Configuration Registers
+ */
+union npc_af_mcamex_bankx_cfg {
+	u64 u;
+	struct npc_af_mcamex_bankx_cfg_s {
+		u64 ena                              : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct npc_af_mcamex_bankx_cfg_s cn; */
+};
+
+static inline u64 NPC_AF_MCAMEX_BANKX_CFG(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAMEX_BANKX_CFG(u64 a, u64 b)
+{
+	return 0x1800000 + 0x100 * a + 0x10 * b; /* a=entry, b=bank */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcame#_bank#_stat_act
+ *
+ * NPC AF MCAM Entry Bank Statistics Action Registers Used to optionally
+ * increment a NPC_AF_MATCH_STAT() counter when a packet matches an MCAM
+ * entry. See also NPC_AF_MCAME()_BANK()_ACTION.
+ */
+union npc_af_mcamex_bankx_stat_act {
+	u64 u;
+	struct npc_af_mcamex_bankx_stat_act_s {
+		u64 stat_sel                         : 9;
+		u64 ena                              : 1;
+		u64 reserved_10_63                   : 54;
+	} s;
+	/* struct npc_af_mcamex_bankx_stat_act_s cn; */
+};
+
+static inline u64 NPC_AF_MCAMEX_BANKX_STAT_ACT(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAMEX_BANKX_STAT_ACT(u64 a, u64 b)
+{
+	return 0x1880000 + 0x100 * a + 0x10 * b; /* a=entry, b=bank */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_mcame#_bank#_tag_act
+ *
+ * NPC AF MCAM Entry Bank VTag Action Data Registers Specifies a packet's
+ * match Vtag action captured in NPC_RESULT_S[VTAG_ACTION]. See also
+ * NPC_AF_MCAME()_BANK()_ACTION.
+ */
+union npc_af_mcamex_bankx_tag_act {
+	u64 u;
+	struct npc_af_mcamex_bankx_tag_act_s {
+		u64 vtag_action                      : 64;
+	} s;
+	/* struct npc_af_mcamex_bankx_tag_act_s cn; */
+};
+
+static inline u64 NPC_AF_MCAMEX_BANKX_TAG_ACT(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_MCAMEX_BANKX_TAG_ACT(u64 a, u64 b)
+{
+	return 0x1900008 + 0x100 * a + 0x10 * b; /* a=entry, b=bank */
+}
+/**
+ * Register (RVU_PF_BAR0) npc_af_pck_cfg
+ *
+ * NPC AF Protocol Check Configuration Register
+ */
+union npc_af_pck_cfg {
+	u64 u;
+	struct npc_af_pck_cfg_s {
+		u64 reserved_0                       : 1;
+		u64 iip4_cksum                       : 1;
+		u64 oip4_cksum                       : 1;
+		u64 reserved_3                       : 1;
+		u64 l3b                              : 1;
+		u64 l3m                              : 1;
+		u64 l2b                              : 1;
+		u64 l2m                              : 1;
+		u64 reserved_8_23                    : 16;
+		u64 iip4_cksum_errcode               : 8;
+		u64 oip4_cksum_errcode               : 8;
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct npc_af_pck_cfg_s cn; */
+};
+
+static inline u64 NPC_AF_PCK_CFG(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_PCK_CFG(void)
+{
+	return 0x600; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_pck_def_iip4
+ *
+ * NPC AF Protocol Check Inner IPv4 Definition Register Provides layer
+ * information used by the protocol checker to identify an inner IPv4
+ * header.
+ */
+union npc_af_pck_def_iip4 {
+	u64 u;
+	struct npc_af_pck_def_iip4_s {
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct npc_af_pck_def_iip4_s cn; */
+};
+
+static inline u64 NPC_AF_PCK_DEF_IIP4(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_PCK_DEF_IIP4(void)
+{
+	return 0x640; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_pck_def_oip4
+ *
+ * NPC AF Protocol Check Outer IPv4 Definition Register Provides layer
+ * information used by the protocol checker to identify an outer IPv4
+ * header.
+ */
+union npc_af_pck_def_oip4 {
+	u64 u;
+	struct npc_af_pck_def_oip4_s {
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct npc_af_pck_def_oip4_s cn; */
+};
+
+static inline u64 NPC_AF_PCK_DEF_OIP4(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_PCK_DEF_OIP4(void)
+{
+	return 0x620; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_pck_def_oip6
+ *
+ * NPC AF Protocol Check Outer IPv6 Definition Register Provides layer
+ * information used by the protocol checker to identify an outer IPv6
+ * header. [LID] must have the same value as NPC_AF_PCK_DEF_OIP4[LID].
+ */
+union npc_af_pck_def_oip6 {
+	u64 u;
+	struct npc_af_pck_def_oip6_s {
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct npc_af_pck_def_oip6_s cn; */
+};
+
+static inline u64 NPC_AF_PCK_DEF_OIP6(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_PCK_DEF_OIP6(void)
+{
+	return 0x630; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_pck_def_ol2
+ *
+ * NPC AF Protocol Check Outer L2 Definition Register Provides layer
+ * information used by the protocol checker to identify an outer L2
+ * header.
+ */
+union npc_af_pck_def_ol2 {
+	u64 u;
+	struct npc_af_pck_def_ol2_s {
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_11_63                   : 53;
+	} s;
+	/* struct npc_af_pck_def_ol2_s cn; */
+};
+
+static inline u64 NPC_AF_PCK_DEF_OL2(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_PCK_DEF_OL2(void)
+{
+	return 0x610; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_pkind#_action0
+ *
+ * NPC AF Port Kind Action Data 0 Registers NPC_AF_PKIND()_ACTION0 and
+ * NPC_AF_PKIND()_ACTION1 specify the initial parse state and operations
+ * to perform before entering KPU 0.
+ */
+union npc_af_pkindx_action0 {
+	u64 u;
+	struct npc_af_pkindx_action0_s {
+		u64 var_len_shift                    : 3;
+		u64 var_len_right                    : 1;
+		u64 var_len_mask                     : 8;
+		u64 var_len_offset                   : 8;
+		u64 ptr_advance                      : 8;
+		u64 capture_flags                    : 8;
+		u64 capture_ltype                    : 4;
+		u64 capture_lid                      : 3;
+		u64 reserved_43                      : 1;
+		u64 next_state                       : 8;
+		u64 parse_done                       : 1;
+		u64 capture_ena                      : 1;
+		u64 byp_count                        : 3;
+		u64 reserved_57_63                   : 7;
+	} s;
+	/* struct npc_af_pkindx_action0_s cn; */
+};
+
+static inline u64 NPC_AF_PKINDX_ACTION0(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_PKINDX_ACTION0(u64 a)
+{
+	return 0x80000 + 0x40 * a; /* a=port kind */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_pkind#_action1
+ *
+ * NPC AF Port Kind Action Data 1 Registers NPC_AF_PKIND()_ACTION0 and
+ * NPC_AF_PKIND()_ACTION1 specify the initial parse state and operations
+ * to perform before entering KPU 0.
+ */
+union npc_af_pkindx_action1 {
+	u64 u;
+	struct npc_af_pkindx_action1_s {
+		u64 dp0_offset                       : 8;
+		u64 dp1_offset                       : 8;
+		u64 dp2_offset                       : 8;
+		u64 errcode                          : 8;
+		u64 errlev                           : 4;
+		u64 reserved_36_63                   : 28;
+	} s;
+	/* struct npc_af_pkindx_action1_s cn; */
+};
+
+static inline u64 NPC_AF_PKINDX_ACTION1(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_PKINDX_ACTION1(u64 a)
+{
+	return 0x80008 + 0x40 * a; /* a=port kind */
+}
+
+/**
+ * Register (RVU_PF_BAR0) npc_af_pkind#_cpi_def#
+ *
+ * NPC AF Port Kind Channel Parse Index Definition Registers These
+ * registers specify the layer information and algorithm to compute a
+ * packet's channel parse index (CPI), which provides a port to channel
+ * adder for calculating NPC_RESULT_S[CHAN].  There are two CPI
+ * definitions per port kind, allowing the CPI computation to use two
+ * possible layer definitions in the parsed packet, e.g. DiffServ DSCP
+ * from either IPv4 or IPv6 header.  CPI pseudocode: <pre> for (i = 0;
+ * i < 2; i++) {    cpi_def = NPC_AF_PKIND()_CPI_DEF(i);    LX = LA, LB,
+ * ..., or LH as selected by cpi_def[LID];     if (cpi_def[ENA]        &&
+ * ((cpi_def[LTYPE_MATCH] & cpi_def[LTYPE_MASK])             ==
+ * (NPC_RESULT_S[LX[LTYPE]] & cpi_def[LTYPE_MASK]))        &&
+ * ((cpi_def[FLAGS_MATCH] & cpi_def[FLAGS_MASK])             ==
+ * (NPC_RESULT_S[LX[FLAGS]] & cpi_def[FLAGS_MASK])))    {       // Found
+ * matching layer       nibble_offset = (2*NPC_RESULT_S[LX[LPTR]]) +
+ * cpi_def[ADD_OFFSET];       add_byte = byte at nibble_offset from start
+ * of packet;       cpi_add = (add_byte & cpi_def[ADD_MASK]) >>
+ * cpi_def[ADD_SHIFT];       cpi = cpi_def[CPI_BASE] + cpi_add;
+ * NPC_RESULT_S[CHAN] += NPC_AF_CPI(cpi)_CFG[PADD];       break;    } }
+ * </pre>
+ */
+union npc_af_pkindx_cpi_defx {
+	u64 u;
+	struct npc_af_pkindx_cpi_defx_s {
+		u64 cpi_base                         : 10;
+		u64 reserved_10_11                   : 2;
+		u64 add_shift                        : 3;
+		u64 reserved_15                      : 1;
+		u64 add_mask                         : 8;
+		u64 add_offset                       : 8;
+		u64 flags_mask                       : 8;
+		u64 flags_match                      : 8;
+		u64 ltype_mask                       : 4;
+		u64 ltype_match                      : 4;
+		u64 lid                              : 3;
+		u64 reserved_59_62                   : 4;
+		u64 ena                              : 1;
+	} s;
+	/* struct npc_af_pkindx_cpi_defx_s cn; */
+};
+
+static inline u64 NPC_AF_PKINDX_CPI_DEFX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 NPC_AF_PKINDX_CPI_DEFX(u64 a, u64 b)
+{
+	return 0x80020 + 0x40 * a + 8 * b; /* a=port kind, b=CPI def */
+}
+
+#endif /* __CSRS_NPC_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/csrs/csrs-rvu.h b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-rvu.h
new file mode 100644
index 0000000..f4e0de6
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/csrs/csrs-rvu.h
@@ -0,0 +1,2276 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __CSRS_RVU_H__
+#define __CSRS_RVU_H__
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * RVU.
+ *
+ * This file is auto generated.  Do not edit.
+ *
+ */
+
+/**
+ * Enumeration rvu_af_int_vec_e
+ *
+ * RVU Admin Function Interrupt Vector Enumeration Enumerates the MSI-X
+ * interrupt vectors. Internal: RVU maintains the state of these vectors
+ * internally, and generates GIB messages for it without accessing the
+ * MSI-X table region in LLC/DRAM.
+ */
+#define RVU_AF_INT_VEC_E_GEN (3)
+#define RVU_AF_INT_VEC_E_MBOX (4)
+#define RVU_AF_INT_VEC_E_PFFLR (1)
+#define RVU_AF_INT_VEC_E_PFME (2)
+#define RVU_AF_INT_VEC_E_POISON (0)
+
+/**
+ * Enumeration rvu_bar_e
+ *
+ * RVU Base Address Register Enumeration Enumerates the base address
+ * registers. Internal: For documentation only.
+ */
+#define RVU_BAR_E_RVU_PFX_BAR0(a) (0x840000000000ll + 0x1000000000ll * (a))
+#define RVU_BAR_E_RVU_PFX_BAR0_SIZE 0x10000000ull
+#define RVU_BAR_E_RVU_PFX_FUNCX_BAR2(a, b)	\
+	(0x840200000000ll + 0x1000000000ll * (a) + 0x2000000ll * (b))
+#define RVU_BAR_E_RVU_PFX_FUNCX_BAR2_SIZE 0x100000ull
+#define RVU_BAR_E_RVU_PFX_FUNCX_BAR4(a, b)	\
+	(0x840400000000ll + 0x1000000000ll * (a) + 0x2000000ll * (b))
+#define RVU_BAR_E_RVU_PFX_FUNCX_BAR4_SIZE 0x10000ull
+
+/**
+ * Enumeration rvu_block_addr_e
+ *
+ * RVU Block Address Enumeration Enumerates addressing of RVU resource
+ * blocks within each RVU BAR, i.e. values of RVU_FUNC_ADDR_S[BLOCK] and
+ * RVU_AF_ADDR_S[BLOCK].  CNXXXX may not implement all enumerated blocks.
+ * Software can read RVU_PF/RVU_VF_BLOCK_ADDR()_DISC[IMP] to discover
+ * which blocks are implemented and enabled.
+ */
+#define RVU_BLOCK_ADDR_E_CPTX(a) (0xa + (a))
+#define RVU_BLOCK_ADDR_E_LMT (1)
+#define RVU_BLOCK_ADDR_E_NDCX(a) (0xc + (a))
+#define RVU_BLOCK_ADDR_E_NIXX(a) (4 + (a))
+#define RVU_BLOCK_ADDR_E_NPA (3)
+#define RVU_BLOCK_ADDR_E_NPC (6)
+#define RVU_BLOCK_ADDR_E_RX(a) (0 + (a))
+#define RVU_BLOCK_ADDR_E_REEX(a) (0x14 + (a))
+#define RVU_BLOCK_ADDR_E_RVUM (0)
+#define RVU_BLOCK_ADDR_E_SSO (7)
+#define RVU_BLOCK_ADDR_E_SSOW (8)
+#define RVU_BLOCK_ADDR_E_TIM (9)
+
+/**
+ * Enumeration rvu_block_type_e
+ *
+ * RVU Block Type Enumeration Enumerates values of
+ * RVU_PF/RVU_VF_BLOCK_ADDR()_DISC[BTYPE].
+ */
+#define RVU_BLOCK_TYPE_E_CPT (9)
+#define RVU_BLOCK_TYPE_E_DDF (0xb)
+#define RVU_BLOCK_TYPE_E_LMT (2)
+#define RVU_BLOCK_TYPE_E_NDC (0xa)
+#define RVU_BLOCK_TYPE_E_NIX (3)
+#define RVU_BLOCK_TYPE_E_NPA (4)
+#define RVU_BLOCK_TYPE_E_NPC (5)
+#define RVU_BLOCK_TYPE_E_RAD (0xd)
+#define RVU_BLOCK_TYPE_E_REE (0xe)
+#define RVU_BLOCK_TYPE_E_RVUM (0)
+#define RVU_BLOCK_TYPE_E_SSO (6)
+#define RVU_BLOCK_TYPE_E_SSOW (7)
+#define RVU_BLOCK_TYPE_E_TIM (8)
+#define RVU_BLOCK_TYPE_E_ZIP (0xc)
+
+/**
+ * Enumeration rvu_bus_lf_e
+ *
+ * INTERNAL: RVU Bus LF Range Enumeration Enumerates the LF range for
+ * the RVU bus. Internal: This is an enum used in csr3 virtual equations.
+ */
+#define RVU_BUS_LF_E_RVU_BUS_LFX(a) (0 + 0x2000000 * (a))
+
+/**
+ * Enumeration rvu_bus_lf_slot_e
+ *
+ * INTERNAL: RVU Bus LF Slot Range Enumeration Enumerates the LF and
+ * Slot range for the RVU bus. Internal: This is an enum used in csr3
+ * virtual equations.
+ */
+#define RVU_BUS_LF_SLOT_E_RVU_BUS_LFX_SLOTX(a, b)	\
+	(0 + 0x2000000 * (a) + 0x1000 * (b))
+
+/**
+ * Enumeration rvu_bus_pf_e
+ *
+ * INTERNAL: RVU Bus PF Range Enumeration Enumerates the PF range for
+ * the RVU bus. Internal: This is an enum used in csr3 virtual equations.
+ */
+#define RVU_BUS_PF_E_RVU_BUS_PFX(a) (0ll + 0x1000000000ll * (a))
+
+/**
+ * Enumeration rvu_bus_pfvf_e
+ *
+ * INTERNAL: RVU Bus PFVF Range Enumeration Enumerates the PF and VF
+ * ranges for the RVU bus. Internal: This is an enum used in csr3 virtual
+ * equations.
+ */
+#define RVU_BUS_PFVF_E_RVU_BUS_PFX(a) (0 + 0x2000000 * (a))
+#define RVU_BUS_PFVF_E_RVU_BUS_VFX(a) (0 + 0x2000000 * (a))
+
+/**
+ * Enumeration rvu_busbar_e
+ *
+ * INTERNAL: RVU Bus Base Address Region Enumeration Enumerates the base
+ * address region for the RVU bus. Internal: This is an enum used in csr3
+ * virtual equations.
+ */
+#define RVU_BUSBAR_E_RVU_BUSBAR0 (0)
+#define RVU_BUSBAR_E_RVU_BUSBAR2 (0x200000000ll)
+
+/**
+ * Enumeration rvu_busdid_e
+ *
+ * INTERNAL: RVU Bus DID Enumeration Enumerates the DID offset for the
+ * RVU bus. Internal: This is an enum used in csr3 virtual equations.
+ */
+#define RVU_BUSDID_E_RVU_BUSDID (0x840000000000ll)
+
+/**
+ * Enumeration rvu_pf_int_vec_e
+ *
+ * RVU PF Interrupt Vector Enumeration Enumerates the MSI-X interrupt
+ * vectors.
+ */
+#define RVU_PF_INT_VEC_E_AFPF_MBOX (6)
+#define RVU_PF_INT_VEC_E_VFFLRX(a) (0 + (a))
+#define RVU_PF_INT_VEC_E_VFMEX(a) (2 + (a))
+#define RVU_PF_INT_VEC_E_VFPF_MBOXX(a) (4 + (a))
+
+/**
+ * Enumeration rvu_vf_int_vec_e
+ *
+ * RVU VF Interrupt Vector Enumeration Enumerates the MSI-X interrupt
+ * vectors.
+ */
+#define RVU_VF_INT_VEC_E_MBOX (0)
+
+/**
+ * Structure rvu_af_addr_s
+ *
+ * RVU Admin Function Register Address Structure Address format for
+ * accessing shared Admin Function (AF) registers in RVU PF BAR0. These
+ * registers may be accessed by all RVU PFs whose
+ * RVU_PRIV_PF()_CFG[AF_ENA] bit is set.
+ */
+union rvu_af_addr_s {
+	u64 u;
+	struct rvu_af_addr_s_s {
+		u64 addr                             : 28;
+		u64 block                            : 5;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct rvu_af_addr_s_s cn; */
+};
+
+/**
+ * Structure rvu_func_addr_s
+ *
+ * RVU Function-unique Address Structure Address format for accessing
+ * function-unique registers in RVU PF/FUNC BAR2.
+ */
+union rvu_func_addr_s {
+	u32 u;
+	struct rvu_func_addr_s_s {
+		u32 addr                             : 12;
+		u32 lf_slot                          : 8;
+		u32 block                            : 5;
+		u32 reserved_25_31                   : 7;
+	} s;
+	/* struct rvu_func_addr_s_s cn; */
+};
+
+/**
+ * Structure rvu_msix_vec_s
+ *
+ * RVU MSI-X Vector Structure Format of entries in the RVU MSI-X table
+ * region in LLC/DRAM. See RVU_PRIV_PF()_MSIX_CFG.
+ */
+union rvu_msix_vec_s {
+	u64 u[2]; /* 128-bit table entry image */
+	struct rvu_msix_vec_s_s {
+		u64 addr                             : 64;
+		u64 data                             : 32;
+		u64 mask                             : 1;
+		u64 pend                             : 1;
+		u64 reserved_98_127                  : 30;
+	} s;
+	/* struct rvu_msix_vec_s_s cn; */
+};
+
+/**
+ * Structure rvu_pf_func_s
+ *
+ * RVU PF Function Identification Structure Identifies an RVU PF/VF, and
+ * format of *_PRIV_LF()_CFG[PF_FUNC] in RVU resource blocks, e.g.
+ * NPA_PRIV_LF()_CFG[PF_FUNC].  Internal: Also used for PF/VF
+ * identification on inter-coprocessor hardware interfaces (NPA, SSO,
+ * CPT, ...).
+ */
+union rvu_pf_func_s {
+	u32 u;
+	struct rvu_pf_func_s_s {
+		u32 func                             : 10;
+		u32 pf                               : 6;
+		u32 reserved_16_31                   : 16;
+	} s;
+	/* struct rvu_pf_func_s_s cn; */
+};
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_afpf#_mbox#
+ *
+ * RVU Admin Function AF/PF Mailbox Registers
+ */
+union rvu_af_afpfx_mboxx {
+	u64 u;
+	struct rvu_af_afpfx_mboxx_s {
+		u64 data                             : 64;
+	} s;
+	/* struct rvu_af_afpfx_mboxx_s cn; */
+};
+
+static inline u64 RVU_AF_AFPFX_MBOXX(u64 a, u64 b)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_AFPFX_MBOXX(u64 a, u64 b)
+{
+	return 0x2000 + 0x10 * a + 8 * b; /* a=PF, b=mbox word */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_bar2_alias#
+ *
+ * INTERNAL: RVU Admin Function BAR2 Alias Registers These registers
+ * alias to the RVU BAR2 registers for the PF and function selected by
+ * RVU_AF_BAR2_SEL[PF_FUNC].  Internal: Not implemented. Placeholder for
+ * bug33464.
+ */
+union rvu_af_bar2_aliasx {
+	u64 u;
+	struct rvu_af_bar2_aliasx_s {
+		u64 data                             : 64;
+	} s;
+	/* struct rvu_af_bar2_aliasx_s cn; */
+};
+
+static inline u64 RVU_AF_BAR2_ALIASX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_BAR2_ALIASX(u64 a)
+{
+	return 0x9100000 + 8 * a; /* a=BAR2 register index */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_bar2_sel
+ *
+ * INTERNAL: RVU Admin Function BAR2 Select Register This register
+ * configures BAR2 accesses from the RVU_AF_BAR2_ALIAS() registers in
+ * BAR0. Internal: Not implemented. Placeholder for bug33464.
+ */
+union rvu_af_bar2_sel {
+	u64 u;
+	struct rvu_af_bar2_sel_s {
+		u64 alias_pf_func                    : 16;
+		u64 alias_ena                        : 1;
+		u64 reserved_17_63                   : 47;
+	} s;
+	/* struct rvu_af_bar2_sel_s cn; */
+};
+
+static inline u64 RVU_AF_BAR2_SEL(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_BAR2_SEL(void)
+{
+	return 0x9000000; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_blk_rst
+ *
+ * RVU Master Admin Function Block Reset Register
+ */
+union rvu_af_blk_rst {
+	u64 u;
+	struct rvu_af_blk_rst_s {
+		u64 rst                              : 1;
+		u64 reserved_1_62                    : 62;
+		u64 busy                             : 1;
+	} s;
+	/* struct rvu_af_blk_rst_s cn; */
+};
+
+static inline u64 RVU_AF_BLK_RST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_BLK_RST(void)
+{
+	return 0x30; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_bp_test
+ *
+ * INTERNAL: RVUM Backpressure Test Registers
+ */
+union rvu_af_bp_test {
+	u64 u;
+	struct rvu_af_bp_test_s {
+		u64 lfsr_freq                        : 12;
+		u64 reserved_12_15                   : 4;
+		u64 bp_cfg                           : 16;
+		u64 enable                           : 8;
+		u64 reserved_40_63                   : 24;
+	} s;
+	/* struct rvu_af_bp_test_s cn; */
+};
+
+static inline u64 RVU_AF_BP_TEST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_BP_TEST(void)
+{
+	return 0x4000; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_eco
+ *
+ * INTERNAL: RVU Admin Function ECO Register
+ */
+union rvu_af_eco {
+	u64 u;
+	struct rvu_af_eco_s {
+		u64 eco_rw                           : 32;
+		u64 reserved_32_63                   : 32;
+	} s;
+	/* struct rvu_af_eco_s cn; */
+};
+
+static inline u64 RVU_AF_ECO(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_ECO(void)
+{
+	return 0x20; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_gen_int
+ *
+ * RVU Admin Function General Interrupt Register This register contains
+ * General interrupt summary bits.
+ */
+union rvu_af_gen_int {
+	u64 u;
+	struct rvu_af_gen_int_s {
+		u64 unmapped                         : 1;
+		u64 msix_fault                       : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct rvu_af_gen_int_s cn; */
+};
+
+static inline u64 RVU_AF_GEN_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_GEN_INT(void)
+{
+	return 0x120; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_gen_int_ena_w1c
+ *
+ * RVU Admin Function General Interrupt Enable Clear Register This
+ * register clears interrupt enable bits.
+ */
+union rvu_af_gen_int_ena_w1c {
+	u64 u;
+	struct rvu_af_gen_int_ena_w1c_s {
+		u64 unmapped                         : 1;
+		u64 msix_fault                       : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct rvu_af_gen_int_ena_w1c_s cn; */
+};
+
+static inline u64 RVU_AF_GEN_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_GEN_INT_ENA_W1C(void)
+{
+	return 0x138; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_gen_int_ena_w1s
+ *
+ * RVU Admin Function General Interrupt Enable Set Register This register
+ * sets interrupt enable bits.
+ */
+union rvu_af_gen_int_ena_w1s {
+	u64 u;
+	struct rvu_af_gen_int_ena_w1s_s {
+		u64 unmapped                         : 1;
+		u64 msix_fault                       : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct rvu_af_gen_int_ena_w1s_s cn; */
+};
+
+static inline u64 RVU_AF_GEN_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_GEN_INT_ENA_W1S(void)
+{
+	return 0x130; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_gen_int_w1s
+ *
+ * RVU Admin Function General Interrupt Set Register This register sets
+ * interrupt bits.
+ */
+union rvu_af_gen_int_w1s {
+	u64 u;
+	struct rvu_af_gen_int_w1s_s {
+		u64 unmapped                         : 1;
+		u64 msix_fault                       : 1;
+		u64 reserved_2_63                    : 62;
+	} s;
+	/* struct rvu_af_gen_int_w1s_s cn; */
+};
+
+static inline u64 RVU_AF_GEN_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_GEN_INT_W1S(void)
+{
+	return 0x128; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_hwvf_rst
+ *
+ * RVU Admin Function Hardware VF Reset Register
+ */
+union rvu_af_hwvf_rst {
+	u64 u;
+	struct rvu_af_hwvf_rst_s {
+		u64 hwvf                             : 8;
+		u64 reserved_8_11                    : 4;
+		u64 exec                             : 1;
+		u64 reserved_13_63                   : 51;
+	} s;
+	/* struct rvu_af_hwvf_rst_s cn; */
+};
+
+static inline u64 RVU_AF_HWVF_RST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_HWVF_RST(void)
+{
+	return 0x2850; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_msixtr_base
+ *
+ * RVU Admin Function MSI-X Table Region Base-Address Register
+ */
+union rvu_af_msixtr_base {
+	u64 u;
+	struct rvu_af_msixtr_base_s {
+		u64 reserved_0_6                     : 7;
+		u64 addr                             : 46;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct rvu_af_msixtr_base_s cn; */
+};
+
+static inline u64 RVU_AF_MSIXTR_BASE(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_MSIXTR_BASE(void)
+{
+	return 0x10; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pf#_vf_bar4_addr
+ *
+ * RVU Admin Function PF/VF BAR4 Address Registers
+ */
+union rvu_af_pfx_vf_bar4_addr {
+	u64 u;
+	struct rvu_af_pfx_vf_bar4_addr_s {
+		u64 reserved_0_15                    : 16;
+		u64 addr                             : 48;
+	} s;
+	/* struct rvu_af_pfx_vf_bar4_addr_s cn; */
+};
+
+static inline u64 RVU_AF_PFX_VF_BAR4_ADDR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFX_VF_BAR4_ADDR(u64 a)
+{
+	return 0x1000 + 0x10 * a; /* a=PF */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pf_bar4_addr
+ *
+ * RVU Admin Function PF BAR4 Address Registers
+ */
+union rvu_af_pf_bar4_addr {
+	u64 u;
+	struct rvu_af_pf_bar4_addr_s {
+		u64 reserved_0_15                    : 16;
+		u64 addr                             : 48;
+	} s;
+	/* struct rvu_af_pf_bar4_addr_s cn; */
+};
+
+static inline u64 RVU_AF_PF_BAR4_ADDR(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PF_BAR4_ADDR(void)
+{
+	return 0x40; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pf_rst
+ *
+ * RVU Admin Function PF Reset Register
+ */
+union rvu_af_pf_rst {
+	u64 u;
+	struct rvu_af_pf_rst_s {
+		u64 pf                               : 4;
+		u64 reserved_4_11                    : 8;
+		u64 exec                             : 1;
+		u64 reserved_13_63                   : 51;
+	} s;
+	/* struct rvu_af_pf_rst_s cn; */
+};
+
+static inline u64 RVU_AF_PF_RST(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PF_RST(void)
+{
+	return 0x2840; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfaf_mbox_int
+ *
+ * RVU Admin Function PF to AF Mailbox Interrupt Registers
+ */
+union rvu_af_pfaf_mbox_int {
+	u64 u;
+	struct rvu_af_pfaf_mbox_int_s {
+		u64 mbox                             : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct rvu_af_pfaf_mbox_int_s cn; */
+};
+
+static inline u64 RVU_AF_PFAF_MBOX_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFAF_MBOX_INT(void)
+{
+	return 0x2880; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfaf_mbox_int_ena_w1c
+ *
+ * RVU Admin Function PF to AF Mailbox Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union rvu_af_pfaf_mbox_int_ena_w1c {
+	u64 u;
+	struct rvu_af_pfaf_mbox_int_ena_w1c_s {
+		u64 mbox                             : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct rvu_af_pfaf_mbox_int_ena_w1c_s cn; */
+};
+
+static inline u64 RVU_AF_PFAF_MBOX_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFAF_MBOX_INT_ENA_W1C(void)
+{
+	return 0x2898; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfaf_mbox_int_ena_w1s
+ *
+ * RVU Admin Function PF to AF Mailbox Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union rvu_af_pfaf_mbox_int_ena_w1s {
+	u64 u;
+	struct rvu_af_pfaf_mbox_int_ena_w1s_s {
+		u64 mbox                             : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct rvu_af_pfaf_mbox_int_ena_w1s_s cn; */
+};
+
+static inline u64 RVU_AF_PFAF_MBOX_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFAF_MBOX_INT_ENA_W1S(void)
+{
+	return 0x2890; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfaf_mbox_int_w1s
+ *
+ * RVU Admin Function PF to AF Mailbox Interrupt Set Registers This
+ * register sets interrupt bits.
+ */
+union rvu_af_pfaf_mbox_int_w1s {
+	u64 u;
+	struct rvu_af_pfaf_mbox_int_w1s_s {
+		u64 mbox                             : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct rvu_af_pfaf_mbox_int_w1s_s cn; */
+};
+
+static inline u64 RVU_AF_PFAF_MBOX_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFAF_MBOX_INT_W1S(void)
+{
+	return 0x2888; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfflr_int
+ *
+ * RVU Admin Function PF Function Level Reset Interrupt Registers
+ */
+union rvu_af_pfflr_int {
+	u64 u;
+	struct rvu_af_pfflr_int_s {
+		u64 flr                              : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct rvu_af_pfflr_int_s cn; */
+};
+
+static inline u64 RVU_AF_PFFLR_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFFLR_INT(void)
+{
+	return 0x28a0; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfflr_int_ena_w1c
+ *
+ * RVU Admin Function PF Function Level Reset Interrupt Enable Clear
+ * Registers This register clears interrupt enable bits.
+ */
+union rvu_af_pfflr_int_ena_w1c {
+	u64 u;
+	struct rvu_af_pfflr_int_ena_w1c_s {
+		u64 flr                              : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct rvu_af_pfflr_int_ena_w1c_s cn; */
+};
+
+static inline u64 RVU_AF_PFFLR_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFFLR_INT_ENA_W1C(void)
+{
+	return 0x28b8; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfflr_int_ena_w1s
+ *
+ * RVU Admin Function PF Function Level Reset Interrupt Enable Set
+ * Registers This register sets interrupt enable bits.
+ */
+union rvu_af_pfflr_int_ena_w1s {
+	u64 u;
+	struct rvu_af_pfflr_int_ena_w1s_s {
+		u64 flr                              : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct rvu_af_pfflr_int_ena_w1s_s cn; */
+};
+
+static inline u64 RVU_AF_PFFLR_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFFLR_INT_ENA_W1S(void)
+{
+	return 0x28b0; /* BAR0 offset */
+}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfflr_int_w1s
+ *
+ * RVU Admin Function PF Function Level Reset Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union rvu_af_pfflr_int_w1s {
+	u64 u;
+	struct rvu_af_pfflr_int_w1s_s {
+		u64 flr                              : 16;
+		u64 reserved_16_63                   : 48;
+	} s;
+	/* struct rvu_af_pfflr_int_w1s_s cn; */
+};
+
+static inline u64 RVU_AF_PFFLR_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_AF_PFFLR_INT_W1S(void)
+{
+	return 0x28a8; /* BAR0 offset */
+}
+
/* RVU_PF_BAR0: AF PF bus-master-enable (ME) interrupt. */
union rvu_af_pfme_int {
	u64 u;
	struct rvu_af_pfme_int_s {
		u64 me : 16;
		u64 reserved_16_63 : 48;
	} s;
	/* struct rvu_af_pfme_int_s cn; */
};

/* Byte offset of RVU_AF_PFME_INT within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_PFME_INT(void)
{
	return 0x28c0;
}

/* RVU_PF_BAR0: AF PF ME interrupt enable clear (W1C). */
union rvu_af_pfme_int_ena_w1c {
	u64 u;
	struct rvu_af_pfme_int_ena_w1c_s {
		u64 me : 16;
		u64 reserved_16_63 : 48;
	} s;
	/* struct rvu_af_pfme_int_ena_w1c_s cn; */
};

/* Byte offset of RVU_AF_PFME_INT_ENA_W1C within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_PFME_INT_ENA_W1C(void)
{
	return 0x28d8;
}

/* RVU_PF_BAR0: AF PF ME interrupt enable set (W1S). */
union rvu_af_pfme_int_ena_w1s {
	u64 u;
	struct rvu_af_pfme_int_ena_w1s_s {
		u64 me : 16;
		u64 reserved_16_63 : 48;
	} s;
	/* struct rvu_af_pfme_int_ena_w1s_s cn; */
};

/* Byte offset of RVU_AF_PFME_INT_ENA_W1S within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_PFME_INT_ENA_W1S(void)
{
	return 0x28d0;
}

/* RVU_PF_BAR0: AF PF ME interrupt set (W1S). */
union rvu_af_pfme_int_w1s {
	u64 u;
	struct rvu_af_pfme_int_w1s_s {
		u64 me : 16;
		u64 reserved_16_63 : 48;
	} s;
	/* struct rvu_af_pfme_int_w1s_s cn; */
};

/* Byte offset of RVU_AF_PFME_INT_W1S within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_PFME_INT_W1S(void)
{
	return 0x28c8;
}

/* RVU_PF_BAR0: AF PF bus-master-enable status, one bit per PF. */
union rvu_af_pfme_status {
	u64 u;
	struct rvu_af_pfme_status_s {
		u64 me : 16;
		u64 reserved_16_63 : 48;
	} s;
	/* struct rvu_af_pfme_status_s cn; */
};

/* Byte offset of RVU_AF_PFME_STATUS within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_PFME_STATUS(void)
{
	return 0x2800;
}
+
/* RVU_PF_BAR0: AF PF transaction-pending status, one bit per PF. */
union rvu_af_pftrpend {
	u64 u;
	struct rvu_af_pftrpend_s {
		u64 trpend : 16;
		u64 reserved_16_63 : 48;
	} s;
	/* struct rvu_af_pftrpend_s cn; */
};

/* Byte offset of RVU_AF_PFTRPEND within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_PFTRPEND(void)
{
	return 0x2810;
}

/* RVU_PF_BAR0: AF PF transaction-pending read-or-set alias. */
union rvu_af_pftrpend_w1s {
	u64 u;
	struct rvu_af_pftrpend_w1s_s {
		u64 trpend : 16;
		u64 reserved_16_63 : 48;
	} s;
	/* struct rvu_af_pftrpend_w1s_s cn; */
};

/* Byte offset of RVU_AF_PFTRPEND_W1S within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_PFTRPEND_W1S(void)
{
	return 0x2820;
}

/*
 * RVU_PF_BAR0: AF RAS interrupt. Intended for delivery of RAS events to
 * the SCP; OS drivers should ignore it.
 */
union rvu_af_ras {
	u64 u;
	struct rvu_af_ras_s {
		u64 msix_poison : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_af_ras_s cn; */
};

/* Byte offset of RVU_AF_RAS within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_RAS(void)
{
	return 0x100;
}

/* RVU_PF_BAR0: AF RAS interrupt enable clear (W1C). */
union rvu_af_ras_ena_w1c {
	u64 u;
	struct rvu_af_ras_ena_w1c_s {
		u64 msix_poison : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_af_ras_ena_w1c_s cn; */
};

/* Byte offset of RVU_AF_RAS_ENA_W1C within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_RAS_ENA_W1C(void)
{
	return 0x118;
}

/* RVU_PF_BAR0: AF RAS interrupt enable set (W1S). */
union rvu_af_ras_ena_w1s {
	u64 u;
	struct rvu_af_ras_ena_w1s_s {
		u64 msix_poison : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_af_ras_ena_w1s_s cn; */
};

/* Byte offset of RVU_AF_RAS_ENA_W1S within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_RAS_ENA_W1S(void)
{
	return 0x110;
}

/* RVU_PF_BAR0: AF RAS interrupt set (W1S). */
union rvu_af_ras_w1s {
	u64 u;
	struct rvu_af_ras_w1s_s {
		u64 msix_poison : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_af_ras_w1s_s cn; */
};

/* Byte offset of RVU_AF_RAS_W1S within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_AF_RAS_W1S(void)
{
	return 0x108;
}
+
/*
 * RVU_PF_BAR2: per-PF block address discovery. Lets a PF driver discover
 * the block resources provisioned to it; index enumerated by
 * RVU_BLOCK_ADDR_E.
 */
union rvu_pf_block_addrx_disc {
	u64 u;
	struct rvu_pf_block_addrx_disc_s {
		u64 num_lfs : 9;
		u64 reserved_9_10 : 2;
		u64 imp : 1;
		u64 rid : 8;
		u64 btype : 8;
		u64 reserved_28_63 : 36;
	} s;
	/* struct rvu_pf_block_addrx_disc_s cn; */
};

/* Byte offset of RVU_PF_BLOCK_ADDR(a)_DISC within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_BLOCK_ADDRX_DISC(u64 a)
{
	return 0x200 + 8 * a;
}

/* RVU_PF_BAR2: PF interrupt (AF-to-PF mailbox). */
union rvu_pf_int {
	u64 u;
	struct rvu_pf_int_s {
		u64 mbox : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_pf_int_s cn; */
};

/* Byte offset of RVU_PF_INT within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_INT(void)
{
	return 0xc20;
}

/* RVU_PF_BAR2: PF interrupt enable clear (W1C). */
union rvu_pf_int_ena_w1c {
	u64 u;
	struct rvu_pf_int_ena_w1c_s {
		u64 mbox : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_pf_int_ena_w1c_s cn; */
};

/* Byte offset of RVU_PF_INT_ENA_W1C within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_INT_ENA_W1C(void)
{
	return 0xc38;
}

/* RVU_PF_BAR2: PF interrupt enable set (W1S). */
union rvu_pf_int_ena_w1s {
	u64 u;
	struct rvu_pf_int_ena_w1s_s {
		u64 mbox : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_pf_int_ena_w1s_s cn; */
};

/* Byte offset of RVU_PF_INT_ENA_W1S within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_INT_ENA_W1S(void)
{
	return 0xc30;
}

/* RVU_PF_BAR2: PF interrupt set (W1S). */
union rvu_pf_int_w1s {
	u64 u;
	struct rvu_pf_int_w1s_s {
		u64 mbox : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_pf_int_w1s_s cn; */
};

/* Byte offset of RVU_PF_INT_W1S within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_INT_W1S(void)
{
	return 0xc28;
}
+
/* RVU_PF_BAR2: PF MSI-X pending-bit-array table entry. */
union rvu_pf_msix_pbax {
	u64 u;
	struct rvu_pf_msix_pbax_s {
		u64 pend : 64;
	} s;
	/* struct rvu_pf_msix_pbax_s cn; */
};

/* Byte offset of RVU_PF_MSIX_PBA(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_MSIX_PBAX(u64 a)
{
	return 0xf0000 + 8 * a;
}

/*
 * RVU_PF_BAR2: PF MSI-X vector-table address entry. Pairs with
 * RVU_PF_MSIX_VEC()_CTL; table size is
 * RVU_PRIV_PF()_MSIX_CFG[PF_MSIXT_SIZEM1]+1. Software must read back
 * after writing the table so writes complete before interrupts fire.
 */
union rvu_pf_msix_vecx_addr {
	u64 u;
	struct rvu_pf_msix_vecx_addr_s {
		u64 secvec : 1;
		u64 reserved_1 : 1;
		u64 addr : 51;
		u64 reserved_53_63 : 11;
	} s;
	/* struct rvu_pf_msix_vecx_addr_s cn; */
};

/* Byte offset of RVU_PF_MSIX_VEC(a)_ADDR within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_MSIX_VECX_ADDR(u64 a)
{
	return 0x80000 + 0x10 * a;
}

/* RVU_PF_BAR2: PF MSI-X vector-table control/data entry. */
union rvu_pf_msix_vecx_ctl {
	u64 u;
	struct rvu_pf_msix_vecx_ctl_s {
		u64 data : 32;
		u64 mask : 1;
		u64 reserved_33_63 : 31;
	} s;
	/* struct rvu_pf_msix_vecx_ctl_s cn; */
};

/* Byte offset of RVU_PF_MSIX_VEC(a)_CTL within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_MSIX_VECX_CTL(u64 a)
{
	return 0x80008 + 0x10 * a;
}

/* RVU_PF_BAR2: PF-to-AF mailbox word. */
union rvu_pf_pfaf_mboxx {
	u64 u;
	struct rvu_pf_pfaf_mboxx_s {
		u64 data : 64;
	} s;
	/* struct rvu_pf_pfaf_mboxx_s cn; */
};

/* Byte offset of RVU_PF_PFAF_MBOX(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_PFAF_MBOXX(u64 a)
{
	return 0xc00 + 8 * a;
}

/* RVU_PF_BAR2: per-VF PF/VF mailbox word. */
union rvu_pf_vfx_pfvf_mboxx {
	u64 u;
	struct rvu_pf_vfx_pfvf_mboxx_s {
		u64 data : 64;
	} s;
	/* struct rvu_pf_vfx_pfvf_mboxx_s cn; */
};

/* Byte offset of RVU_PF_VF(a)_PFVF_MBOX(b) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFX_PFVF_MBOXX(u64 a, u64 b)
{
	return 0x1000 * a + 8 * b;
}

/* RVU_PF_BAR2: PF VF BAR4 base address. */
union rvu_pf_vf_bar4_addr {
	u64 u;
	struct rvu_pf_vf_bar4_addr_s {
		u64 reserved_0_15 : 16;
		u64 addr : 48;
	} s;
	/* struct rvu_pf_vf_bar4_addr_s cn; */
};

/* Byte offset of RVU_PF_VF_BAR4_ADDR within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VF_BAR4_ADDR(void)
{
	return 0x10;
}
+
/* RVU_PF_BAR2: VF FLR interrupt, one bit per VF. */
union rvu_pf_vfflr_intx {
	u64 u;
	struct rvu_pf_vfflr_intx_s {
		u64 flr : 64;
	} s;
	/* struct rvu_pf_vfflr_intx_s cn; */
};

/* Byte offset of RVU_PF_VFFLR_INT(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFFLR_INTX(u64 a)
{
	return 0x900 + 8 * a;
}

/* RVU_PF_BAR2: VF FLR interrupt enable clear (W1C). */
union rvu_pf_vfflr_int_ena_w1cx {
	u64 u;
	struct rvu_pf_vfflr_int_ena_w1cx_s {
		u64 flr : 64;
	} s;
	/* struct rvu_pf_vfflr_int_ena_w1cx_s cn; */
};

/* Byte offset of RVU_PF_VFFLR_INT_ENA_W1C(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFFLR_INT_ENA_W1CX(u64 a)
{
	return 0x960 + 8 * a;
}

/* RVU_PF_BAR2: VF FLR interrupt enable set (W1S). */
union rvu_pf_vfflr_int_ena_w1sx {
	u64 u;
	struct rvu_pf_vfflr_int_ena_w1sx_s {
		u64 flr : 64;
	} s;
	/* struct rvu_pf_vfflr_int_ena_w1sx_s cn; */
};

/* Byte offset of RVU_PF_VFFLR_INT_ENA_W1S(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFFLR_INT_ENA_W1SX(u64 a)
{
	return 0x940 + 8 * a;
}

/* RVU_PF_BAR2: VF FLR interrupt set (W1S). */
union rvu_pf_vfflr_int_w1sx {
	u64 u;
	struct rvu_pf_vfflr_int_w1sx_s {
		u64 flr : 64;
	} s;
	/* struct rvu_pf_vfflr_int_w1sx_s cn; */
};

/* Byte offset of RVU_PF_VFFLR_INT_W1S(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFFLR_INT_W1SX(u64 a)
{
	return 0x920 + 8 * a;
}

/* RVU_PF_BAR2: VF bus-master-enable (ME) interrupt, one bit per VF. */
union rvu_pf_vfme_intx {
	u64 u;
	struct rvu_pf_vfme_intx_s {
		u64 me : 64;
	} s;
	/* struct rvu_pf_vfme_intx_s cn; */
};

/* Byte offset of RVU_PF_VFME_INT(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFME_INTX(u64 a)
{
	return 0x980 + 8 * a;
}

/* RVU_PF_BAR2: VF ME interrupt enable clear (W1C). */
union rvu_pf_vfme_int_ena_w1cx {
	u64 u;
	struct rvu_pf_vfme_int_ena_w1cx_s {
		u64 me : 64;
	} s;
	/* struct rvu_pf_vfme_int_ena_w1cx_s cn; */
};

/* Byte offset of RVU_PF_VFME_INT_ENA_W1C(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFME_INT_ENA_W1CX(u64 a)
{
	return 0x9e0 + 8 * a;
}

/* RVU_PF_BAR2: VF ME interrupt enable set (W1S). */
union rvu_pf_vfme_int_ena_w1sx {
	u64 u;
	struct rvu_pf_vfme_int_ena_w1sx_s {
		u64 me : 64;
	} s;
	/* struct rvu_pf_vfme_int_ena_w1sx_s cn; */
};

/* Byte offset of RVU_PF_VFME_INT_ENA_W1S(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFME_INT_ENA_W1SX(u64 a)
{
	return 0x9c0 + 8 * a;
}

/* RVU_PF_BAR2: VF ME interrupt set (W1S). */
union rvu_pf_vfme_int_w1sx {
	u64 u;
	struct rvu_pf_vfme_int_w1sx_s {
		u64 me : 64;
	} s;
	/* struct rvu_pf_vfme_int_w1sx_s cn; */
};

/* Byte offset of RVU_PF_VFME_INT_W1S(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFME_INT_W1SX(u64 a)
{
	return 0x9a0 + 8 * a;
}

/* RVU_PF_BAR2: VF bus-master-enable status, one bit per VF. */
union rvu_pf_vfme_statusx {
	u64 u;
	struct rvu_pf_vfme_statusx_s {
		u64 me : 64;
	} s;
	/* struct rvu_pf_vfme_statusx_s cn; */
};

/* Byte offset of RVU_PF_VFME_STATUS(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFME_STATUSX(u64 a)
{
	return 0x800 + 8 * a;
}
+
/* RVU_PF_BAR2: VF-to-PF mailbox interrupt, one bit per VF. */
union rvu_pf_vfpf_mbox_intx {
	u64 u;
	struct rvu_pf_vfpf_mbox_intx_s {
		u64 mbox : 64;
	} s;
	/* struct rvu_pf_vfpf_mbox_intx_s cn; */
};

/* Byte offset of RVU_PF_VFPF_MBOX_INT(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFPF_MBOX_INTX(u64 a)
{
	return 0x880 + 8 * a;
}

/* RVU_PF_BAR2: VF-to-PF mailbox interrupt enable clear (W1C). */
union rvu_pf_vfpf_mbox_int_ena_w1cx {
	u64 u;
	struct rvu_pf_vfpf_mbox_int_ena_w1cx_s {
		u64 mbox : 64;
	} s;
	/* struct rvu_pf_vfpf_mbox_int_ena_w1cx_s cn; */
};

/* Byte offset of RVU_PF_VFPF_MBOX_INT_ENA_W1C(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFPF_MBOX_INT_ENA_W1CX(u64 a)
{
	return 0x8e0 + 8 * a;
}

/* RVU_PF_BAR2: VF-to-PF mailbox interrupt enable set (W1S). */
union rvu_pf_vfpf_mbox_int_ena_w1sx {
	u64 u;
	struct rvu_pf_vfpf_mbox_int_ena_w1sx_s {
		u64 mbox : 64;
	} s;
	/* struct rvu_pf_vfpf_mbox_int_ena_w1sx_s cn; */
};

/* Byte offset of RVU_PF_VFPF_MBOX_INT_ENA_W1S(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFPF_MBOX_INT_ENA_W1SX(u64 a)
{
	return 0x8c0 + 8 * a;
}

/* RVU_PF_BAR2: VF-to-PF mailbox interrupt set (W1S). */
union rvu_pf_vfpf_mbox_int_w1sx {
	u64 u;
	struct rvu_pf_vfpf_mbox_int_w1sx_s {
		u64 mbox : 64;
	} s;
	/* struct rvu_pf_vfpf_mbox_int_w1sx_s cn; */
};

/* Byte offset of RVU_PF_VFPF_MBOX_INT_W1S(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFPF_MBOX_INT_W1SX(u64 a)
{
	return 0x8a0 + 8 * a;
}

/* RVU_PF_BAR2: VF transaction-pending status, one bit per VF. */
union rvu_pf_vftrpendx {
	u64 u;
	struct rvu_pf_vftrpendx_s {
		u64 trpend : 64;
	} s;
	/* struct rvu_pf_vftrpendx_s cn; */
};

/* Byte offset of RVU_PF_VFTRPEND(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFTRPENDX(u64 a)
{
	return 0x820 + 8 * a;
}

/* RVU_PF_BAR2: VF transaction-pending read-or-set alias. */
union rvu_pf_vftrpend_w1sx {
	u64 u;
	struct rvu_pf_vftrpend_w1sx_s {
		u64 trpend : 64;
	} s;
	/* struct rvu_pf_vftrpend_w1sx_s cn; */
};

/* Byte offset of RVU_PF_VFTRPEND_W1S(a) within RVU_PF_BAR2. */
static inline u64 __attribute__((pure, always_inline)) RVU_PF_VFTRPEND_W1SX(u64 a)
{
	return 0x840 + 8 * a;
}
+
/* RVU_PF_BAR0: RVU active program counter. */
union rvu_priv_active_pc {
	u64 u;
	struct rvu_priv_active_pc_s {
		u64 active_pc : 64;
	} s;
	/* struct rvu_priv_active_pc_s cn; */
};

/* Byte offset of RVU_PRIV_ACTIVE_PC within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_ACTIVE_PC(void)
{
	return 0x8000030;
}

/*
 * RVU_PF_BAR0: per-block-type revision ID, written by configuration
 * software to aid VF/PF discovery; index enumerated by RVU_BLOCK_TYPE_E.
 */
union rvu_priv_block_typex_rev {
	u64 u;
	struct rvu_priv_block_typex_rev_s {
		u64 rid : 8;
		u64 reserved_8_63 : 56;
	} s;
	/* struct rvu_priv_block_typex_rev_s cn; */
};

/* Byte offset of RVU_PRIV_BLOCK_TYPE(a)_REV within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_BLOCK_TYPEX_REV(u64 a)
{
	return 0x8000400 + 8 * a;
}

/* RVU_PF_BAR0: privileged clock-gating configuration. */
union rvu_priv_clk_cfg {
	u64 u;
	struct rvu_priv_clk_cfg_s {
		u64 blk_clken : 1;
		u64 ncbi_clken : 1;
		u64 reserved_2_63 : 62;
	} s;
	/* struct rvu_priv_clk_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_CLK_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_CLK_CFG(void)
{
	return 0x8000020;
}

/* RVU_PF_BAR0: privileged constants for software discovery. */
union rvu_priv_const {
	u64 u;
	struct rvu_priv_const_s {
		u64 max_msix : 20;
		u64 hwvfs : 12;
		u64 pfs : 8;
		u64 max_vfs_per_pf : 8;
		u64 reserved_48_63 : 16;
	} s;
	/* struct rvu_priv_const_s cn; */
};

/* Byte offset of RVU_PRIV_CONST within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_CONST(void)
{
	return 0x8000000;
}

/* RVU_PF_BAR0: privileged general configuration (lock bit). */
union rvu_priv_gen_cfg {
	u64 u;
	struct rvu_priv_gen_cfg_s {
		u64 lock : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_priv_gen_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_GEN_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_GEN_CFG(void)
{
	return 0x8000010;
}
+
/*
 * RVU_PF_BAR0: privileged HWVF CPT configuration. Similar to
 * RVU_PRIV_HWVF()_NIX()_CFG, but for the CPT({a}) block.
 */
union rvu_priv_hwvfx_cptx_cfg {
	u64 u;
	struct rvu_priv_hwvfx_cptx_cfg_s {
		u64 num_lfs : 9;
		u64 reserved_9_63 : 55;
	} s;
	/* struct rvu_priv_hwvfx_cptx_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_HWVF(a)_CPT(b)_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_HWVFX_CPTX_CFG(u64 a, u64 b)
{
	return 0x8001350 + 0x10000 * a + 8 * b;
}

/* RVU_PF_BAR0: privileged HWVF interrupt (MSI-X) configuration. */
union rvu_priv_hwvfx_int_cfg {
	u64 u;
	struct rvu_priv_hwvfx_int_cfg_s {
		u64 msix_offset : 11;
		u64 reserved_11 : 1;
		u64 msix_size : 8;
		u64 reserved_20_63 : 44;
	} s;
	/* struct rvu_priv_hwvfx_int_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_HWVF(a)_INT_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_HWVFX_INT_CFG(u64 a)
{
	return 0x8001280 + 0x10000 * a;
}

/*
 * RVU_PF_BAR0: privileged HWVF NIX configuration, used for VF software
 * discovery. For each HWVF mapped to a VF via
 * RVU_PRIV_PF()_CFG[FIRST_HWVF,NVF], software writes the NIX resource
 * configuration here; the VF driver reads RVU_VF_BLOCK_ADDR()_DISC.
 */
union rvu_priv_hwvfx_nixx_cfg {
	u64 u;
	struct rvu_priv_hwvfx_nixx_cfg_s {
		u64 has_lf : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_priv_hwvfx_nixx_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_HWVF(a)_NIX(b)_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_HWVFX_NIXX_CFG(u64 a, u64 b)
{
	return 0x8001300 + 0x10000 * a + 8 * b;
}

/* RVU_PF_BAR0: privileged HWVF NPA configuration (see NIX variant). */
union rvu_priv_hwvfx_npa_cfg {
	u64 u;
	struct rvu_priv_hwvfx_npa_cfg_s {
		u64 has_lf : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_priv_hwvfx_npa_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_HWVF(a)_NPA_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_HWVFX_NPA_CFG(u64 a)
{
	return 0x8001310 + 0x10000 * a;
}

/* RVU_PF_BAR0: privileged HWVF SSO configuration (see NIX variant). */
union rvu_priv_hwvfx_sso_cfg {
	u64 u;
	struct rvu_priv_hwvfx_sso_cfg_s {
		u64 num_lfs : 9;
		u64 reserved_9_63 : 55;
	} s;
	/* struct rvu_priv_hwvfx_sso_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_HWVF(a)_SSO_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_HWVFX_SSO_CFG(u64 a)
{
	return 0x8001320 + 0x10000 * a;
}

/* RVU_PF_BAR0: privileged HWVF SSO work-slot configuration. */
union rvu_priv_hwvfx_ssow_cfg {
	u64 u;
	struct rvu_priv_hwvfx_ssow_cfg_s {
		u64 num_lfs : 9;
		u64 reserved_9_63 : 55;
	} s;
	/* struct rvu_priv_hwvfx_ssow_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_HWVF(a)_SSOW_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_HWVFX_SSOW_CFG(u64 a)
{
	return 0x8001330 + 0x10000 * a;
}

/* RVU_PF_BAR0: privileged HWVF TIM configuration (see NIX variant). */
union rvu_priv_hwvfx_tim_cfg {
	u64 u;
	struct rvu_priv_hwvfx_tim_cfg_s {
		u64 num_lfs : 9;
		u64 reserved_9_63 : 55;
	} s;
	/* struct rvu_priv_hwvfx_tim_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_HWVF(a)_TIM_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_HWVFX_TIM_CFG(u64 a)
{
	return 0x8001340 + 0x10000 * a;
}
+
/* RVU_PF_BAR0: privileged per-PF configuration. */
union rvu_priv_pfx_cfg {
	u64 u;
	struct rvu_priv_pfx_cfg_s {
		u64 first_hwvf : 12;
		u64 nvf : 8;
		u64 ena : 1;
		u64 af_ena : 1;
		u64 me_flr_ena : 1;
		u64 pf_vf_io_bar4 : 1;
		u64 reserved_24_63 : 40;
	} s;
	/* CN96XX pass 1 lacks the PF_VF_IO_BAR4 bit. */
	struct rvu_priv_pfx_cfg_cn96xxp1 {
		u64 first_hwvf : 12;
		u64 nvf : 8;
		u64 ena : 1;
		u64 af_ena : 1;
		u64 me_flr_ena : 1;
		u64 reserved_23_63 : 41;
	} cn96xxp1;
	/* struct rvu_priv_pfx_cfg_s cn96xxp3; */
	/* struct rvu_priv_pfx_cfg_cn96xxp1 cnf95xx; */
};

/* Byte offset of RVU_PRIV_PF(a)_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_CFG(u64 a)
{
	return 0x8000100 + 0x10000 * a;
}

/*
 * RVU_PF_BAR0: privileged PF CPT configuration. Similar to
 * RVU_PRIV_PF()_NIX()_CFG, but for the CPT({a}) block.
 */
union rvu_priv_pfx_cptx_cfg {
	u64 u;
	struct rvu_priv_pfx_cptx_cfg_s {
		u64 num_lfs : 9;
		u64 reserved_9_63 : 55;
	} s;
	/* struct rvu_priv_pfx_cptx_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_PF(a)_CPT(b)_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_CPTX_CFG(u64 a, u64 b)
{
	return 0x8000350 + 0x10000 * a + 8 * b;
}

/* RVU_PF_BAR0: privileged PF PCI ID configuration. */
union rvu_priv_pfx_id_cfg {
	u64 u;
	struct rvu_priv_pfx_id_cfg_s {
		u64 pf_devid : 8;
		u64 vf_devid : 8;
		u64 class_code : 24;
		u64 reserved_40_63 : 24;
	} s;
	/* struct rvu_priv_pfx_id_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_PF(a)_ID_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_ID_CFG(u64 a)
{
	return 0x8000120 + 0x10000 * a;
}

/* RVU_PF_BAR0: privileged PF interrupt (MSI-X) configuration. */
union rvu_priv_pfx_int_cfg {
	u64 u;
	struct rvu_priv_pfx_int_cfg_s {
		u64 msix_offset : 11;
		u64 reserved_11 : 1;
		u64 msix_size : 8;
		u64 reserved_20_63 : 44;
	} s;
	/* struct rvu_priv_pfx_int_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_PF(a)_INT_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_INT_CFG(u64 a)
{
	return 0x8000200 + 0x10000 * a;
}

/*
 * RVU_PF_BAR0: privileged PF MSI-X configuration. Specifies the MSI-X
 * table size/location (as RVU_MSIX_VEC_S entries in the table region
 * based at RVU_AF_MSIXTR_BASE) for this PF and each of its VFs: PF table
 * spans [PF_MSIXT_OFFSET .. PF_MSIXT_OFFSET+PF_MSIXT_SIZEM1]; VF N spans
 * [VF_MSIXT_OFFSET + N*(VF_MSIXT_SIZEM1+1) .. +VF_MSIXT_SIZEM1]. Ranges
 * must not overlap and must stay below RVU_PRIV_CONST[MAX_MSIX].
 */
union rvu_priv_pfx_msix_cfg {
	u64 u;
	struct rvu_priv_pfx_msix_cfg_s {
		u64 vf_msixt_sizem1 : 12;
		u64 vf_msixt_offset : 20;
		u64 pf_msixt_sizem1 : 12;
		u64 pf_msixt_offset : 20;
	} s;
	/* struct rvu_priv_pfx_msix_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_PF(a)_MSIX_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_MSIX_CFG(u64 a)
{
	return 0x8000110 + 0x10000 * a;
}
+
/*
 * RVU_PF_BAR0: privileged PF NIX configuration, used for PF software
 * discovery. For each enabled PF, software writes the block's resource
 * configuration here; the PF driver reads RVU_PF_BLOCK_ADDR()_DISC.
 */
union rvu_priv_pfx_nixx_cfg {
	u64 u;
	struct rvu_priv_pfx_nixx_cfg_s {
		u64 has_lf : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_priv_pfx_nixx_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_PF(a)_NIX(b)_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_NIXX_CFG(u64 a, u64 b)
{
	return 0x8000300 + 0x10000 * a + 8 * b;
}

/* RVU_PF_BAR0: privileged PF NPA configuration (see NIX variant). */
union rvu_priv_pfx_npa_cfg {
	u64 u;
	struct rvu_priv_pfx_npa_cfg_s {
		u64 has_lf : 1;
		u64 reserved_1_63 : 63;
	} s;
	/* struct rvu_priv_pfx_npa_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_PF(a)_NPA_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_NPA_CFG(u64 a)
{
	return 0x8000310 + 0x10000 * a;
}

/* RVU_PF_BAR0: privileged PF SSO configuration (see NIX variant). */
union rvu_priv_pfx_sso_cfg {
	u64 u;
	struct rvu_priv_pfx_sso_cfg_s {
		u64 num_lfs : 9;
		u64 reserved_9_63 : 55;
	} s;
	/* struct rvu_priv_pfx_sso_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_PF(a)_SSO_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_SSO_CFG(u64 a)
{
	return 0x8000320 + 0x10000 * a;
}

/* RVU_PF_BAR0: privileged PF SSO work-slot configuration. */
union rvu_priv_pfx_ssow_cfg {
	u64 u;
	struct rvu_priv_pfx_ssow_cfg_s {
		u64 num_lfs : 9;
		u64 reserved_9_63 : 55;
	} s;
	/* struct rvu_priv_pfx_ssow_cfg_s cn; */
};

/* Byte offset of RVU_PRIV_PF(a)_SSOW_CFG within RVU_PF_BAR0. */
static inline u64 __attribute__((pure, always_inline)) RVU_PRIV_PFX_SSOW_CFG(u64 a)
{
	return 0x8000330 + 0x10000 * a;
}
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_tim_cfg
+ *
+ * RVU Privileged PF TIM Configuration Registers Similar to
+ * RVU_PRIV_PF()_NIX()_CFG, but for TIM block.
+ */
+union rvu_priv_pfx_tim_cfg {
+	u64 u;
+	struct rvu_priv_pfx_tim_cfg_s {
+		u64 num_lfs                          : 9;
+		u64 reserved_9_63                    : 55;
+	} s;
+	/* struct rvu_priv_pfx_tim_cfg_s cn; */
+};
+
+static inline u64 RVU_PRIV_PFX_TIM_CFG(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_PRIV_PFX_TIM_CFG(u64 a)
+{
+	return 0x8000340 + 0x10000 * a;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_block_addr#_disc
+ *
+ * RVU VF Block Address Discovery Registers These registers allow each VF
+ * driver to discover block resources that are provisioned to its VF. The
+ * register's BLOCK_ADDR index is enumerated by RVU_BLOCK_ADDR_E.
+ */
+union rvu_vf_block_addrx_disc {
+	u64 u;
+	struct rvu_vf_block_addrx_disc_s {
+		u64 num_lfs                          : 9;
+		u64 reserved_9_10                    : 2;
+		u64 imp                              : 1;
+		u64 rid                              : 8;
+		u64 btype                            : 8;
+		u64 reserved_28_63                   : 36;
+	} s;
+	/* struct rvu_vf_block_addrx_disc_s cn; */
+};
+
+static inline u64 RVU_VF_BLOCK_ADDRX_DISC(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_BLOCK_ADDRX_DISC(u64 a)
+{
+	return 0x200 + 8 * a;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_int
+ *
+ * RVU VF Interrupt Registers
+ */
+union rvu_vf_int {
+	u64 u;
+	struct rvu_vf_int_s {
+		u64 mbox                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct rvu_vf_int_s cn; */
+};
+
+static inline u64 RVU_VF_INT(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_INT(void)
+{
+	return 0x20;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_int_ena_w1c
+ *
+ * RVU VF Interrupt Enable Clear Register This register clears interrupt
+ * enable bits.
+ */
+union rvu_vf_int_ena_w1c {
+	u64 u;
+	struct rvu_vf_int_ena_w1c_s {
+		u64 mbox                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct rvu_vf_int_ena_w1c_s cn; */
+};
+
+static inline u64 RVU_VF_INT_ENA_W1C(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_INT_ENA_W1C(void)
+{
+	return 0x38;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_int_ena_w1s
+ *
+ * RVU VF Interrupt Enable Set Register This register sets interrupt
+ * enable bits.
+ */
+union rvu_vf_int_ena_w1s {
+	u64 u;
+	struct rvu_vf_int_ena_w1s_s {
+		u64 mbox                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct rvu_vf_int_ena_w1s_s cn; */
+};
+
+static inline u64 RVU_VF_INT_ENA_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_INT_ENA_W1S(void)
+{
+	return 0x30;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_int_w1s
+ *
+ * RVU VF Interrupt Set Register This register sets interrupt bits.
+ */
+union rvu_vf_int_w1s {
+	u64 u;
+	struct rvu_vf_int_w1s_s {
+		u64 mbox                             : 1;
+		u64 reserved_1_63                    : 63;
+	} s;
+	/* struct rvu_vf_int_w1s_s cn; */
+};
+
+static inline u64 RVU_VF_INT_W1S(void)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_INT_W1S(void)
+{
+	return 0x28;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_msix_pba#
+ *
+ * RVU VF MSI-X Pending-Bit-Array Registers This register is the MSI-X VF
+ * PBA table.
+ */
+union rvu_vf_msix_pbax {
+	u64 u;
+	struct rvu_vf_msix_pbax_s {
+		u64 pend                             : 64;
+	} s;
+	/* struct rvu_vf_msix_pbax_s cn; */
+};
+
+static inline u64 RVU_VF_MSIX_PBAX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_MSIX_PBAX(u64 a)
+{
+	return 0xf0000 + 8 * a;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_msix_vec#_addr
+ *
+ * RVU VF MSI-X Vector-Table Address Registers These registers and
+ * RVU_VF_MSIX_VEC()_CTL form the VF MSI-X vector table. The number of
+ * MSI-X vectors for a given VF is specified by
+ * RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_SIZEM1] (plus 1).  Software must do a
+ * read after any writes to the MSI-X vector table to ensure that the
+ * writes have completed before interrupts are generated to the modified
+ * vectors.
+ */
+union rvu_vf_msix_vecx_addr {
+	u64 u;
+	struct rvu_vf_msix_vecx_addr_s {
+		u64 secvec                           : 1;
+		u64 reserved_1                       : 1;
+		u64 addr                             : 51;
+		u64 reserved_53_63                   : 11;
+	} s;
+	/* struct rvu_vf_msix_vecx_addr_s cn; */
+};
+
+static inline u64 RVU_VF_MSIX_VECX_ADDR(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_MSIX_VECX_ADDR(u64 a)
+{
+	return 0x80000 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_msix_vec#_ctl
+ *
+ * RVU VF MSI-X Vector-Table Control and Data Registers These registers
+ * and RVU_VF_MSIX_VEC()_ADDR form the VF MSI-X vector table.
+ */
+union rvu_vf_msix_vecx_ctl {
+	u64 u;
+	struct rvu_vf_msix_vecx_ctl_s {
+		u64 data                             : 32;
+		u64 mask                             : 1;
+		u64 reserved_33_63                   : 31;
+	} s;
+	/* struct rvu_vf_msix_vecx_ctl_s cn; */
+};
+
+static inline u64 RVU_VF_MSIX_VECX_CTL(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_MSIX_VECX_CTL(u64 a)
+{
+	return 0x80008 + 0x10 * a;
+}
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_vfpf_mbox#
+ *
+ * RVU VF/PF Mailbox Registers
+ */
+union rvu_vf_vfpf_mboxx {
+	u64 u;
+	struct rvu_vf_vfpf_mboxx_s {
+		u64 data                             : 64;
+	} s;
+	/* struct rvu_vf_vfpf_mboxx_s cn; */
+};
+
+static inline u64 RVU_VF_VFPF_MBOXX(u64 a)
+	__attribute__ ((pure, always_inline));
+static inline u64 RVU_VF_VFPF_MBOXX(u64 a)
+{
+	return 0 + 8 * a;
+}
+
+#endif /* __CSRS_RVU_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/gpio.h b/arch/arm/include/asm/arch-octeontx2/gpio.h
new file mode 100644
index 0000000..3943ffd
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/gpio.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
diff --git a/arch/arm/include/asm/arch-octeontx2/smc-id.h b/arch/arm/include/asm/arch-octeontx2/smc-id.h
new file mode 100644
index 0000000..93a81b2
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/smc-id.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __SMC_ID_H__
+#define __SMC_ID_H__
+
+/* SMC function IDs for general purpose queries */
+
+#define OCTEONTX2_SVC_CALL_COUNT	0xc200ff00
+#define OCTEONTX2_SVC_UID		0xc200ff01
+
+#define OCTEONTX2_SVC_VERSION		0xc200ff03
+
+/* OcteonTX2 Service Calls version numbers */
+#define OCTEONTX2_VERSION_MAJOR	0x1
+#define OCTEONTX2_VERSION_MINOR	0x0
+
+/* x1 - node number */
+#define OCTEONTX2_DRAM_SIZE		0xc2000301
+#define OCTEONTX2_NODE_COUNT		0xc2000601
+#define OCTEONTX2_DISABLE_RVU_LFS	0xc2000b01
+
+#define OCTEONTX2_CONFIG_OOO		0xc2000b04
+
+/* fail safe */
+#define OCTEONTX2_FSAFE_PR_BOOT_SUCCESS	0xc2000b02
+
+#endif /* __SMC_ID_H__ */
diff --git a/arch/arm/include/asm/arch-octeontx2/smc.h b/arch/arm/include/asm/arch-octeontx2/smc.h
new file mode 100644
index 0000000..8e719a2
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/smc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __SMC_H__
+#define __SMC_H__
+
+#include <asm/arch/smc-id.h>
+
+ssize_t smc_configure_ooo(unsigned int val);
+ssize_t smc_dram_size(unsigned int node);
+ssize_t smc_disable_rvu_lfs(unsigned int node);
+ssize_t smc_flsf_fw_booted(void);
+
+#endif
diff --git a/arch/arm/include/asm/arch-octeontx2/soc.h b/arch/arm/include/asm/arch-octeontx2/soc.h
new file mode 100644
index 0000000..9cf6628
--- /dev/null
+++ b/arch/arm/include/asm/arch-octeontx2/soc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __SOC_H__
+#define __SOC_H__
+
+/* Product PARTNUM */
+#define CN81XX	0xA2
+#define CN83XX	0xA3
+#define CN96XX	0xB2
+#define CN95XX	0xB3
+
+/* Register defines */
+
+#define otx_is_soc(soc)	(read_partnum() == (soc))
+#define otx_is_board(model) (!strcmp(read_board_name(), model))
+#define otx_is_platform(platform) (read_platform() == (platform))
+
+enum platform_t {
+	PLATFORM_HW = 0,
+	PLATFORM_EMULATOR = 1,
+	PLATFORM_ASIM = 3,
+};
+
+int read_platform(void);
+u8 read_partnum(void);
+const char *read_board_name(void);
+
+#endif /* __SOC_H__ */
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index ade1401..df264a1 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -180,16 +180,20 @@
 #define in_le32(a)	in_arch(l,le32,a)
 #define in_le16(a)	in_arch(w,le16,a)
 
+#define out_be64(a,v)	out_arch(l,be64,a,v)
 #define out_be32(a,v)	out_arch(l,be32,a,v)
 #define out_be16(a,v)	out_arch(w,be16,a,v)
 
+#define in_be64(a)	in_arch(l,be64,a)
 #define in_be32(a)	in_arch(l,be32,a)
 #define in_be16(a)	in_arch(w,be16,a)
 
+#define out_64(a,v)	__raw_writeq(v,a)
 #define out_32(a,v)	__raw_writel(v,a)
 #define out_16(a,v)	__raw_writew(v,a)
 #define out_8(a,v)	__raw_writeb(v,a)
 
+#define in_64(a)	__raw_readq(a)
 #define in_32(a)	__raw_readl(a)
 #define in_16(a)	__raw_readw(a)
 #define in_8(a)		__raw_readb(a)
@@ -231,6 +235,18 @@
 #define setbits_8(addr, set) setbits(8, addr, set)
 #define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
 
+#define clrbits_be64(addr, clear) clrbits(be64, addr, clear)
+#define setbits_be64(addr, set) setbits(be64, addr, set)
+#define clrsetbits_be64(addr, clear, set) clrsetbits(be64, addr, clear, set)
+
+#define clrbits_le64(addr, clear) clrbits(le64, addr, clear)
+#define setbits_le64(addr, set) setbits(le64, addr, set)
+#define clrsetbits_le64(addr, clear, set) clrsetbits(le64, addr, clear, set)
+
+#define clrbits_64(addr, clear) clrbits(64, addr, clear)
+#define setbits_64(addr, set) setbits(64, addr, set)
+#define clrsetbits_64(addr, clear, set) clrsetbits(64, addr, clear, set)
+
 /*
  * Now, pick up the machine-defined IO definitions
  */
diff --git a/arch/arm/mach-octeontx/Kconfig b/arch/arm/mach-octeontx/Kconfig
new file mode 100644
index 0000000..28ecf98
--- /dev/null
+++ b/arch/arm/mach-octeontx/Kconfig
@@ -0,0 +1,23 @@
+if ARCH_OCTEONTX
+
+choice
+	prompt "OcteonTX board select"
+	optional
+
+config TARGET_OCTEONTX_81XX
+	bool "Marvell OcteonTX CN81XX"
+
+config TARGET_OCTEONTX_83XX
+	bool "Marvell OcteonTX CN83XX"
+
+endchoice
+
+config SYS_SOC
+	string
+	default "octeontx"
+
+config SYS_PCI_64BIT
+	bool
+	default y
+
+endif
diff --git a/arch/arm/mach-octeontx/Makefile b/arch/arm/mach-octeontx/Makefile
new file mode 100644
index 0000000..20cb48a
--- /dev/null
+++ b/arch/arm/mach-octeontx/Makefile
@@ -0,0 +1,9 @@
+#/* SPDX-License-Identifier:    GPL-2.0
+# *
+# * Copyright (C) 2018 Marvell International Ltd.
+# *
+# * https://spdx.org/licenses
+# */
+
+obj-y += lowlevel_init.o clock.o cpu.o
+
diff --git a/arch/arm/mach-octeontx/clock.c b/arch/arm/mach-octeontx/clock.c
new file mode 100644
index 0000000..9da2107
--- /dev/null
+++ b/arch/arm/mach-octeontx/clock.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/board.h>
+#include <asm/arch/clock.h>
+
+/**
+ * Returns the I/O clock speed in Hz
+ */
+u64 octeontx_get_io_clock(void)
+{
+	union rst_boot rst_boot;
+
+	rst_boot.u = readq(RST_BOOT);
+
+	return rst_boot.s.pnr_mul * PLL_REF_CLK;
+}
+
+/**
+ * Returns the core clock speed in Hz
+ */
+u64 octeontx_get_core_clock(void)
+{
+	union rst_boot rst_boot;
+
+	rst_boot.u = readq(RST_BOOT);
+
+	return rst_boot.s.c_mul * PLL_REF_CLK;
+}
diff --git a/arch/arm/mach-octeontx/cpu.c b/arch/arm/mach-octeontx/cpu.c
new file mode 100644
index 0000000..9c29c31
--- /dev/null
+++ b/arch/arm/mach-octeontx/cpu.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <common.h>
+#include <asm/armv8/mmu.h>
+#include <asm/io.h>
+#include <asm/arch/board.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define OTX_MEM_MAP_USED 3
+
+/* 1 for 83xx, +1 is end of list which needs to be empty */
+#define OTX_MEM_MAP_MAX (OTX_MEM_MAP_USED + 1 + CONFIG_NR_DRAM_BANKS + 1)
+
+static struct mm_region otx_mem_map[OTX_MEM_MAP_MAX] = {
+	{
+		.virt = 0x800000000000UL,
+		.phys = 0x800000000000UL,
+		.size = 0x40000000000UL,
+		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+			 PTE_BLOCK_NON_SHARE
+	}, {
+		.virt = 0x840000000000UL,
+		.phys = 0x840000000000UL,
+		.size = 0x40000000000UL,
+		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+			 PTE_BLOCK_NON_SHARE
+	}, {
+		.virt = 0x880000000000UL,
+		.phys = 0x880000000000UL,
+		.size = 0x40000000000UL,
+		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+			 PTE_BLOCK_NON_SHARE
+	}
+
+};
+
+struct mm_region *mem_map = otx_mem_map;
+
+void mem_map_fill(void)
+{
+	int banks = OTX_MEM_MAP_USED;
+	u32 dram_start = CONFIG_SYS_TEXT_BASE;
+
+	if (otx_is_soc(CN83XX)) {
+		otx_mem_map[banks].virt = 0x8c0000000000UL;
+		otx_mem_map[banks].phys = 0x8c0000000000UL;
+		otx_mem_map[banks].size = 0x40000000000UL;
+		otx_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+					   PTE_BLOCK_NON_SHARE;
+		banks = banks + 1;
+	}
+
+	for (int i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+		otx_mem_map[banks].virt = dram_start;
+		otx_mem_map[banks].phys = dram_start;
+		otx_mem_map[banks].size = gd->ram_size;
+		otx_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+					   PTE_BLOCK_NON_SHARE;
+		banks = banks + 1;
+	}
+}
+
+u64 get_page_table_size(void)
+{
+	return 0x80000;
+}
+
+void reset_cpu(ulong addr)
+{
+}
diff --git a/arch/arm/mach-octeontx/lowlevel_init.S b/arch/arm/mach-octeontx/lowlevel_init.S
new file mode 100644
index 0000000..41a9f08
--- /dev/null
+++ b/arch/arm/mach-octeontx/lowlevel_init.S
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <config.h>
+#include <linux/linkage.h>
+#include <asm/macro.h>
+
+.align 8
+.global fdt_base_addr
+fdt_base_addr:
+	.dword 0x0
+
+.global save_boot_params
+save_boot_params:
+	/* Read FDT base from x1 register passed by ATF */
+	adr	x21, fdt_base_addr
+	str	x1, [x21]
+
+	/* Returns */
+	b	save_boot_params_ret
+
+ENTRY(lowlevel_init)
+	mov	x29, lr			/* Save LR */
+
+	/* any lowlevel init should go here */
+
+	mov	lr, x29			/* Restore LR */
+	ret
+ENDPROC(lowlevel_init)
diff --git a/arch/arm/mach-octeontx2/Kconfig b/arch/arm/mach-octeontx2/Kconfig
new file mode 100644
index 0000000..8e5cb0f
--- /dev/null
+++ b/arch/arm/mach-octeontx2/Kconfig
@@ -0,0 +1,23 @@
+if ARCH_OCTEONTX2
+
+choice
+	prompt "OcteonTX2 board select"
+	optional
+
+config TARGET_OCTEONTX2_95XX
+	bool "Marvell OcteonTX2 CN95XX"
+
+config TARGET_OCTEONTX2_96XX
+	bool "Marvell OcteonTX2 CN96XX"
+
+endchoice
+
+config SYS_SOC
+	string
+	default "octeontx2"
+
+config SYS_PCI_64BIT
+	bool
+	default y
+
+endif
diff --git a/arch/arm/mach-octeontx2/Makefile b/arch/arm/mach-octeontx2/Makefile
new file mode 100644
index 0000000..c319234
--- /dev/null
+++ b/arch/arm/mach-octeontx2/Makefile
@@ -0,0 +1,9 @@
+#/*
+# * Copyright (C) 2018 Marvell International Ltd.
+# *
+# * SPDX-License-Identifier:    GPL-2.0
+# * https://spdx.org/licenses
+# */
+
+obj-y += lowlevel_init.o clock.o cpu.o
+
diff --git a/arch/arm/mach-octeontx2/clock.c b/arch/arm/mach-octeontx2/clock.c
new file mode 100644
index 0000000..9da2107
--- /dev/null
+++ b/arch/arm/mach-octeontx2/clock.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/board.h>
+#include <asm/arch/clock.h>
+
+/**
+ * Returns the I/O clock speed in Hz
+ */
+u64 octeontx_get_io_clock(void)
+{
+	union rst_boot rst_boot;
+
+	rst_boot.u = readq(RST_BOOT);
+
+	return rst_boot.s.pnr_mul * PLL_REF_CLK;
+}
+
+/**
+ * Returns the core clock speed in Hz
+ */
+u64 octeontx_get_core_clock(void)
+{
+	union rst_boot rst_boot;
+
+	rst_boot.u = readq(RST_BOOT);
+
+	return rst_boot.s.c_mul * PLL_REF_CLK;
+}
diff --git a/arch/arm/mach-octeontx2/config.mk b/arch/arm/mach-octeontx2/config.mk
new file mode 100644
index 0000000..9214f6b
--- /dev/null
+++ b/arch/arm/mach-octeontx2/config.mk
@@ -0,0 +1,4 @@
+ifeq ($(CONFIG_ARCH_OCTEONTX2),y)
+PLATFORM_CPPFLAGS += $(call cc-option,-march=armv8.2-a,)
+PLATFORM_CPPFLAGS += $(call cc-option,-mtune=octeontx2,)
+endif
diff --git a/arch/arm/mach-octeontx2/cpu.c b/arch/arm/mach-octeontx2/cpu.c
new file mode 100644
index 0000000..2a6d5e8
--- /dev/null
+++ b/arch/arm/mach-octeontx2/cpu.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <common.h>
+#include <asm/armv8/mmu.h>
+#include <asm/io.h>
+#include <asm/arch/board.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define OTX2_MEM_MAP_USED 4
+
+/* +1 is end of list which needs to be empty */
+#define OTX2_MEM_MAP_MAX (OTX2_MEM_MAP_USED + CONFIG_NR_DRAM_BANKS + 1)
+
+static struct mm_region otx2_mem_map[OTX2_MEM_MAP_MAX] = {
+	{
+		.virt = 0x800000000000UL,
+		.phys = 0x800000000000UL,
+		.size = 0x40000000000UL,
+		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+			 PTE_BLOCK_NON_SHARE
+	}, {
+		.virt = 0x840000000000UL,
+		.phys = 0x840000000000UL,
+		.size = 0x40000000000UL,
+		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+			 PTE_BLOCK_NON_SHARE
+	}, {
+		.virt = 0x880000000000UL,
+		.phys = 0x880000000000UL,
+		.size = 0x40000000000UL,
+		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+			 PTE_BLOCK_NON_SHARE
+	}, {
+		.virt = 0x8c0000000000UL,
+		.phys = 0x8c0000000000UL,
+		.size = 0x40000000000UL,
+		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+			 PTE_BLOCK_NON_SHARE
+	}
+};
+
+struct mm_region *mem_map = otx2_mem_map;
+
+void mem_map_fill(void)
+{
+	int banks = OTX2_MEM_MAP_USED;
+	u32 dram_start = CONFIG_SYS_TEXT_BASE;
+
+	for (int i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+		otx2_mem_map[banks].virt = dram_start;
+		otx2_mem_map[banks].phys = dram_start;
+		otx2_mem_map[banks].size = gd->ram_size;
+		otx2_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+					    PTE_BLOCK_NON_SHARE;
+		banks = banks + 1;
+	}
+}
+
+u64 get_page_table_size(void)
+{
+	return 0x80000;
+}
+
+void reset_cpu(ulong addr)
+{
+}
diff --git a/arch/arm/mach-octeontx2/lowlevel_init.S b/arch/arm/mach-octeontx2/lowlevel_init.S
new file mode 100644
index 0000000..41a9f08
--- /dev/null
+++ b/arch/arm/mach-octeontx2/lowlevel_init.S
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <config.h>
+#include <linux/linkage.h>
+#include <asm/macro.h>
+
+.align 8
+.global fdt_base_addr
+fdt_base_addr:
+	.dword 0x0
+
+.global save_boot_params
+save_boot_params:
+	/* Read FDT base from x1 register passed by ATF */
+	adr	x21, fdt_base_addr
+	str	x1, [x21]
+
+	/* Returns */
+	b	save_boot_params_ret
+
+ENTRY(lowlevel_init)
+	mov	x29, lr			/* Save LR */
+
+	/* any lowlevel init should go here */
+
+	mov	lr, x29			/* Restore LR */
+	ret
+ENDPROC(lowlevel_init)
diff --git a/arch/sandbox/dts/test.dts b/arch/sandbox/dts/test.dts
index 1d8956a..9f45c48 100644
--- a/arch/sandbox/dts/test.dts
+++ b/arch/sandbox/dts/test.dts
@@ -666,8 +666,9 @@
 		bus-range = <0x00 0xff>;
 		#address-cells = <3>;
 		#size-cells = <2>;
-		ranges = <0x02000000 0 0x30000000 0x30000000 0 0x2000
-				0x01000000 0 0x40000000 0x40000000 0 0x2000>;
+		ranges = <0x02000000 0 0x30000000 0x30000000 0 0x2000 // MEM0
+			  0x02000000 0 0x31000000 0x31000000 0 0x2000 // MEM1
+			  0x01000000 0 0x40000000 0x40000000 0 0x2000>;
 		sandbox,dev-info = <0x08 0x00 0x1234 0x5678
 				    0x0c 0x00 0x1234 0x5678
 				    0x10 0x00 0x1234 0x5678>;
diff --git a/board/Marvell/octeontx/Kconfig b/board/Marvell/octeontx/Kconfig
new file mode 100644
index 0000000..45d1159
--- /dev/null
+++ b/board/Marvell/octeontx/Kconfig
@@ -0,0 +1,14 @@
+if TARGET_OCTEONTX_81XX || TARGET_OCTEONTX_83XX
+
+config SYS_VENDOR
+	string
+	default	"Marvell"
+
+config SYS_BOARD
+	string
+	default "octeontx"
+
+config SYS_CONFIG_NAME
+	default "octeontx_common"
+
+endif
diff --git a/board/Marvell/octeontx/MAINTAINERS b/board/Marvell/octeontx/MAINTAINERS
new file mode 100644
index 0000000..1f3b12b
--- /dev/null
+++ b/board/Marvell/octeontx/MAINTAINERS
@@ -0,0 +1,8 @@
+OCTEONTX BOARD
+M:	Aaron Williams <awilliams@marvell.com>
+S:	Maintained
+F:	board/Marvell/octeontx/
+F:	include/configs/octeontx_81xx.h
+F:	include/configs/octeontx_83xx.h
+F:	configs/octeontx_81xx_defconfig
+F:	configs/octeontx_83xx_defconfig
diff --git a/board/Marvell/octeontx/Makefile b/board/Marvell/octeontx/Makefile
new file mode 100644
index 0000000..fbe32ae
--- /dev/null
+++ b/board/Marvell/octeontx/Makefile
@@ -0,0 +1,9 @@
+#/*
+# * Copyright (C) 2018 Marvell International Ltd.
+# *
+# * SPDX-License-Identifier:    GPL-2.0
+# * https://spdx.org/licenses
+# */
+
+obj-y	:= board.o smc.o soc-utils.o
+obj-$(CONFIG_OF_LIBFDT) += board-fdt.o
diff --git a/board/Marvell/octeontx/board-fdt.c b/board/Marvell/octeontx/board-fdt.c
new file mode 100644
index 0000000..0b05ef1
--- /dev/null
+++ b/board/Marvell/octeontx/board-fdt.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <errno.h>
+#include <env.h>
+#include <log.h>
+#include <net.h>
+#include <asm/io.h>
+#include <linux/compiler.h>
+#include <linux/libfdt.h>
+#include <fdtdec.h>
+#include <fdt_support.h>
+#include <asm/arch/board.h>
+#include <asm/global_data.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static int fdt_get_mdio_bus(const void *fdt, int phy_offset)
+{
+	int node, bus = -1;
+	const u64 *reg;
+	u64 addr;
+
+	if (phy_offset < 0)
+		return -1;
+	/* obtain mdio node and get the reg prop */
+	node = fdt_parent_offset(fdt, phy_offset);
+	if (node < 0)
+		return -1;
+
+	reg = fdt_getprop(fdt, node, "reg", NULL);
+	addr = fdt64_to_cpu(*reg);
+	bus = (addr & (1 << 7)) ? 1 : 0;
+	return bus;
+}
+
+static int fdt_get_phy_addr(const void *fdt, int phy_offset)
+{
+	const u32 *reg;
+	int addr = -1;
+
+	if (phy_offset < 0)
+		return -1;
+	reg = fdt_getprop(fdt, phy_offset, "reg", NULL);
+	addr = fdt32_to_cpu(*reg);
+	return addr;
+}
+
+void fdt_parse_phy_info(void)
+{
+	const void *fdt = gd->fdt_blob;
+	int offset = 0, node, bgx_id = 0, lmacid = 0;
+	const u32 *val;
+	char bgxname[24];
+	int len, rgx_id = 0, eth_id = 0;
+	int phandle, phy_offset;
+	int subnode, i;
+	int bdknode;
+
+	bdknode = fdt_path_offset(fdt, "/cavium,bdk");
+	if (bdknode < 0) {
+		printf("%s: bdk node is missing from device tree: %s\n",
+		       __func__, fdt_strerror(bdknode));
+	}
+
+	offset = fdt_node_offset_by_compatible(fdt, -1, "pci-bridge");
+	if (offset < 1)
+		return;
+
+	for (bgx_id = 0; bgx_id < MAX_BGX_PER_NODE; bgx_id++) {
+		int phy_addr[LMAC_CNT] = {[0 ... LMAC_CNT - 1] = -1};
+		bool autoneg_dis[LMAC_CNT] = {[0 ... LMAC_CNT - 1] = 0};
+		int mdio_bus[LMAC_CNT] = {[0 ... LMAC_CNT - 1] = -1};
+		bool lmac_reg[LMAC_CNT] = {[0 ... LMAC_CNT - 1] = 0};
+		bool lmac_enable[LMAC_CNT] = {[0 ... LMAC_CNT - 1] = 0};
+
+		snprintf(bgxname, sizeof(bgxname), "bgx%d", bgx_id);
+		node = fdt_subnode_offset(fdt, offset, bgxname);
+		if (node < 0) {
+			/* check if it is rgx node */
+			snprintf(bgxname, sizeof(bgxname), "rgx%d", rgx_id);
+			node = fdt_subnode_offset(fdt, offset, bgxname);
+			if (node < 0) {
+				debug("bgx%d/rgx0 node not found\n", bgx_id);
+				return;
+			}
+		}
+		debug("bgx%d node found\n", bgx_id);
+
+		/*
+		 * loop through each of the bgx/rgx nodes
+		 * to find PHY nodes
+		 */
+		fdt_for_each_subnode(subnode, fdt, node) {
+			/* Check for reg property */
+			val = fdt_getprop(fdt, subnode, "reg", &len);
+			if (val) {
+				debug("lmacid = %d\n", lmacid);
+				lmac_reg[lmacid] = 1;
+			}
+			/* check for phy-handle property */
+			val = fdt_getprop(fdt, subnode, "phy-handle", &len);
+			if (val) {
+				phandle = fdt32_to_cpu(*val);
+				if (!phandle) {
+					debug("phandle not valid %d\n", lmacid);
+				} else {
+					phy_offset = fdt_node_offset_by_phandle
+							(fdt, phandle);
+					phy_addr[lmacid] = fdt_get_phy_addr
+							(fdt, phy_offset);
+					mdio_bus[lmacid] = fdt_get_mdio_bus
+							(fdt, phy_offset);
+					}
+				} else {
+					debug("phy-handle prop not found %d\n",
+					      lmacid);
+				}
+				/* check for autonegotiation property */
+				val = fdt_getprop(fdt, subnode,
+						  "cavium,disable-autonegotiation",
+						  &len);
+				if (val)
+					autoneg_dis[lmacid] = 1;
+
+				eth_id++;
+				lmacid++;
+			}
+
+			for (i = 0; i < MAX_LMAC_PER_BGX; i++) {
+				const char *str;
+
+				snprintf(bgxname, sizeof(bgxname),
+					 "BGX-ENABLE.N0.BGX%d.P%d", bgx_id, i);
+				if (bdknode >= 0) {
+					str = fdt_getprop(fdt, bdknode,
+							  bgxname, &len);
+					if (str)
+						lmac_enable[i] =
+							simple_strtol(str, NULL,
+								      10);
+				}
+			}
+
+			lmacid = 0;
+			bgx_set_board_info(bgx_id, mdio_bus, phy_addr,
+					   autoneg_dis, lmac_reg, lmac_enable);
+		}
+}
+
+static int fdt_get_bdk_node(void)
+{
+	int node, ret;
+	const void *fdt = gd->fdt_blob;
+
+	if (!fdt) {
+		printf("ERROR: %s: no valid device tree found\n", __func__);
+		return 0;
+	}
+
+	ret = fdt_check_header(fdt);
+	if (ret < 0) {
+		printf("fdt: %s\n", fdt_strerror(ret));
+		return 0;
+	}
+
+	node = fdt_path_offset(fdt, "/cavium,bdk");
+	if (node < 0) {
+		printf("%s: /cavium,bdk is missing from device tree: %s\n",
+		       __func__, fdt_strerror(node));
+		return 0;
+	}
+	return node;
+}
+
+const char *fdt_get_board_serial(void)
+{
+	const void *fdt = gd->fdt_blob;
+	int node, len = 64;
+	const char *str = NULL;
+
+	node = fdt_get_bdk_node();
+	if (!node)
+		return NULL;
+
+	str = fdt_getprop(fdt, node, "BOARD-SERIAL", &len);
+	if (!str)
+		printf("Error: cannot retrieve board serial from fdt\n");
+	return str;
+}
+
+const char *fdt_get_board_revision(void)
+{
+	const void *fdt = gd->fdt_blob;
+	int node, len = 64;
+	const char *str = NULL;
+
+	node = fdt_get_bdk_node();
+	if (!node)
+		return NULL;
+
+	str = fdt_getprop(fdt, node, "BOARD-REVISION", &len);
+	if (!str)
+		printf("Error: cannot retrieve board revision from fdt\n");
+	return str;
+}
+
+const char *fdt_get_board_model(void)
+{
+	const void *fdt = gd->fdt_blob;
+	int node, len = 16;
+	const char *str = NULL;
+
+	node = fdt_get_bdk_node();
+	if (!node)
+		return NULL;
+
+	str = fdt_getprop(fdt, node, "BOARD-MODEL", &len);
+	if (!str)
+		printf("Error: cannot retrieve board model from fdt\n");
+	return str;
+}
+
+void fdt_board_get_ethaddr(int bgx, int lmac, unsigned char *eth)
+{
+	const void *fdt = gd->fdt_blob;
+	const char *mac = NULL;
+	int offset = 0, node, len;
+	int subnode, i = 0;
+	char bgxname[24];
+
+	offset = fdt_node_offset_by_compatible(fdt, -1, "pci-bridge");
+	if (offset < 0) {
+		printf("%s couldn't find mrml bridge node in fdt\n",
+		       __func__);
+		return;
+	}
+	if (bgx == 2 && otx_is_soc(CN81XX)) {
+		snprintf(bgxname, sizeof(bgxname), "rgx%d", 0);
+		lmac = 0;
+	} else {
+		snprintf(bgxname, sizeof(bgxname), "bgx%d", bgx);
+	}
+
+	node = fdt_subnode_offset(fdt, offset, bgxname);
+
+	fdt_for_each_subnode(subnode, fdt, node) {
+		if (i++ != lmac)
+			continue;
+		/* check for local-mac-address */
+		mac = fdt_getprop(fdt, subnode, "local-mac-address", &len);
+		if (mac) {
+			debug("%s mac %pM\n", __func__, mac);
+			memcpy(eth, mac, ARP_HLEN);
+		} else {
+			memset(eth, 0, ARP_HLEN);
+		}
+		debug("%s eth %pM\n", __func__, eth);
+		return;
+	}
+}
+
+int arch_fixup_memory_node(void *blob)
+{
+	return 0;
+}
+
+int ft_board_setup(void *blob, struct bd_info *bd)
+{
+	/* remove "cavium, bdk" node from DT */
+	int ret = 0, offset;
+
+	ret = fdt_check_header(blob);
+	if (ret < 0) {
+		printf("ERROR: %s\n", fdt_strerror(ret));
+		return ret;
+	}
+
+	if (blob) {
+		offset = fdt_path_offset(blob, "/cavium,bdk");
+		if (offset < 0) {
+			printf("ERROR: FDT BDK node not found\n");
+			return offset;
+		}
+
+		/* delete node */
+		ret = fdt_del_node(blob, offset);
+		if (ret < 0) {
+			printf("WARNING : could not remove bdk node\n");
+			return ret;
+		}
+
+		debug("%s deleted bdk node\n", __func__);
+	}
+
+	return 0;
+}
+
+/**
+ * Return the FDT base address that was passed by ATF
+ *
+ * @return	FDT base address received from ATF in x1 register
+ */
+void *board_fdt_blob_setup(void)
+{
+	return (void *)fdt_base_addr;
+}
diff --git a/board/Marvell/octeontx/board.c b/board/Marvell/octeontx/board.c
new file mode 100644
index 0000000..940faac
--- /dev/null
+++ b/board/Marvell/octeontx/board.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <dm.h>
+#include <malloc.h>
+#include <errno.h>
+#include <env.h>
+#include <init.h>
+#include <log.h>
+#include <netdev.h>
+#include <pci_ids.h>
+#include <asm/io.h>
+#include <linux/compiler.h>
+#include <linux/libfdt.h>
+#include <fdt_support.h>
+#include <asm/arch/smc.h>
+#include <asm/arch/soc.h>
+#include <asm/arch/board.h>
+#include <dm/util.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Delete ethaddr/ethNaddr environment variables (N = 1..19).
+ *
+ * Network interface configuration can change between boots, so stale
+ * MAC assignments must not carry over.
+ */
+void octeontx_cleanup_ethaddr(void)
+{
+	char ename[32];
+
+	for (int i = 0; i < 20; i++) {
+		/* bounded write instead of the former sprintf() */
+		snprintf(ename, sizeof(ename), i ? "eth%daddr" : "ethaddr", i);
+		if (env_get(ename))
+			env_set(ename, NULL);
+	}
+}
+
+/* True (1) on board variants that carry a PMP: sff8104 or nas8104. */
+int octeontx_board_has_pmp(void)
+{
+	int has_pmp = 0;
+
+	if (otx_is_board("sff8104") || otx_is_board("nas8104"))
+		has_pmp = 1;
+
+	return has_pmp;
+}
+
+/* Enumerate PCI early — OcteonTX peripherals sit behind PCI. */
+int board_early_init_r(void)
+{
+	pci_init();
+	return 0;
+}
+
+int board_init(void)
+{
+	/* Cache PHY information from the DT before network drivers probe */
+	if (IS_ENABLED(CONFIG_NET_OCTEONTX))
+		fdt_parse_phy_info();
+
+	return 0;
+}
+
+/* Nothing to do: the timer is presumably set up before U-Boot runs. */
+int timer_init(void)
+{
+	return 0;
+}
+
+int dram_init(void)
+{
+	/* Firmware owns DRAM info; query node 0 over SMC */
+	gd->ram_size = smc_dram_size(0);
+	/* exclude the region below the SDRAM base */
+	gd->ram_size -= CONFIG_SYS_SDRAM_BASE;
+	mem_map_fill();
+
+	return 0;
+}
+
+/*
+ * Force-probe the MAC (BGX/RGX) and NIC PF PCI devices so they are
+ * bound before the network stack initializes.
+ */
+void board_late_probe_devices(void)
+{
+	struct udevice *dev;
+	int ret, num_bgx, idx;
+
+	/* CN81XX carries two BGX blocks, the larger parts four */
+	num_bgx = otx_is_soc(CN81XX) ? 2 : 4;
+	for (idx = 0; idx < num_bgx; idx++) {
+		ret = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+					 PCI_DEVICE_ID_CAVIUM_BGX, idx, &dev);
+		if (ret)
+			debug("%s BGX%d device not found\n", __func__, idx);
+	}
+
+	if (otx_is_soc(CN81XX)) {
+		ret = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+					 PCI_DEVICE_ID_CAVIUM_RGX, 0, &dev);
+		if (ret)
+			debug("%s RGX device not found\n", __func__);
+	}
+
+	ret = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+				 PCI_DEVICE_ID_CAVIUM_NIC, 0, &dev);
+	if (ret)
+		debug("NIC PF device not found\n");
+}
+
+/**
+ * Board late initialization routine.
+ *
+ * Runs after relocation: refreshes ethaddr variables, derives the shell
+ * prompt from the board model, records board revision/serial from the
+ * DT, and saves the environment only when those values changed.
+ */
+int board_late_init(void)
+{
+	char boardname[32];
+	char boardserial[150], boardrev[150];
+	bool save_env = false;
+	const char *str;
+
+	/*
+	 * Try to cleanup ethaddr env variables, this is needed
+	 * as with each boot, configuration of network interfaces can change.
+	 */
+	octeontx_cleanup_ethaddr();
+
+	snprintf(boardname, sizeof(boardname), "%s> ", fdt_get_board_model());
+	env_set("prompt", boardname);
+
+	/* Prefer a user-set fdtcontroladdr, else the ATF-passed blob */
+	set_working_fdt_addr(env_get_hex("fdtcontroladdr", fdt_base_addr));
+
+	str = fdt_get_board_revision();
+	if (str) {
+		snprintf(boardrev, sizeof(boardrev), "%s", str);
+		/* persist only when the stored value actually changed */
+		if (env_get("boardrev") &&
+		    strcmp(boardrev, env_get("boardrev")))
+			save_env = true;
+		env_set("boardrev", boardrev);
+	}
+
+	str = fdt_get_board_serial();
+	if (str) {
+		snprintf(boardserial, sizeof(boardserial), "%s", str);
+		if (env_get("serial#") &&
+		    strcmp(boardserial, env_get("serial#")))
+			save_env = true;
+		env_set("serial#", boardserial);
+	}
+
+	if (IS_ENABLED(CONFIG_NET_OCTEONTX))
+		board_late_probe_devices();
+
+	if (save_env)
+		env_save();
+
+	return 0;
+}
+
+/*
+ * Invoked before relocation, so limit to stack variables.
+ */
+int checkboard(void)
+{
+	/* fdt_get_board_model() returns NULL when the BDK node is absent;
+	 * printing NULL via %s is undefined behavior, so guard it.
+	 */
+	const char *model = fdt_get_board_model();
+
+	printf("Board: %s\n", model ? model : "unknown");
+
+	return 0;
+}
diff --git a/board/Marvell/octeontx/smc.c b/board/Marvell/octeontx/smc.c
new file mode 100644
index 0000000..5eeba23
--- /dev/null
+++ b/board/Marvell/octeontx/smc.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <asm/global_data.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/arch/smc.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Query secure firmware (SMC OCTEONTX_DRAM_SIZE) for DRAM size.
+ *
+ * @node:	node to query
+ * @return	DRAM size as reported by the firmware in x0
+ */
+ssize_t smc_dram_size(unsigned int node)
+{
+	struct pt_regs regs;
+
+	regs.regs[0] = OCTEONTX_DRAM_SIZE;
+	regs.regs[1] = node;
+	smc_call(&regs);
+
+	return regs.regs[0];
+}
+
diff --git a/board/Marvell/octeontx/soc-utils.c b/board/Marvell/octeontx/soc-utils.c
new file mode 100644
index 0000000..5fd5afd
--- /dev/null
+++ b/board/Marvell/octeontx/soc-utils.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <dm.h>
+#include <dm/util.h>
+#include <errno.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <asm/arch/soc.h>
+#include <asm/arch/board.h>
+
+/*
+ * Classify the run platform from the board model string:
+ * "ASIM-*" = simulator, "EMUL-*" = emulator, otherwise real hardware.
+ */
+int read_platform(void)
+{
+	const char *model = fdt_get_board_model();
+	int platform = PLATFORM_HW;
+
+	if (model) {
+		if (!strncmp(model, "ASIM-", 5))
+			platform = PLATFORM_ASIM;
+		else if (!strncmp(model, "EMUL-", 5))
+			platform = PLATFORM_EMULATOR;
+	}
+
+	return platform;
+}
+
+/* Read the Main ID register (MIDR_EL1) of the executing core. */
+static inline u64 read_midr(void)
+{
+	u64 result;
+
+	asm ("mrs %[rd],MIDR_EL1" : [rd] "=r" (result));
+	return result;
+}
+
+/*
+ * Part-number field of MIDR_EL1.
+ *
+ * NOTE(review): the architectural PartNum field is bits [15:4]; only the
+ * low 8 bits are kept here — presumably enough to distinguish the
+ * supported parts. Confirm if new SoCs are added.
+ */
+u8 read_partnum(void)
+{
+	return ((read_midr() >> 4) & 0xFF);
+}
+
+/* Board name as carried in the DT board-model property (may be NULL). */
+const char *read_board_name(void)
+{
+	return fdt_get_board_model();
+}
+
+/* No alternate-package variants are reported on these boards. */
+bool read_alt_pkg(void)
+{
+	return false;
+}
diff --git a/board/Marvell/octeontx2/Kconfig b/board/Marvell/octeontx2/Kconfig
new file mode 100644
index 0000000..99291d7
--- /dev/null
+++ b/board/Marvell/octeontx2/Kconfig
@@ -0,0 +1,14 @@
+if TARGET_OCTEONTX2_95XX || TARGET_OCTEONTX2_96XX
+
+config SYS_VENDOR
+	string
+	default	"Marvell"
+
+config SYS_BOARD
+	string
+	default "octeontx2"
+
+config SYS_CONFIG_NAME
+	default "octeontx2_common"
+
+endif
diff --git a/board/Marvell/octeontx2/MAINTAINERS b/board/Marvell/octeontx2/MAINTAINERS
new file mode 100644
index 0000000..eec1d77
--- /dev/null
+++ b/board/Marvell/octeontx2/MAINTAINERS
@@ -0,0 +1,8 @@
+OCTEONTX2 BOARD
+M:	Aaron Williams <awilliams@marvell.com>
+S:	Maintained
+F:	board/Marvell/octeontx2/
+F:	include/configs/octeontx2_96xx.h
+F:	include/configs/octeontx2_95xx.h
+F:	configs/octeontx2_96xx_defconfig
+F:	configs/octeontx2_95xx_defconfig
diff --git a/board/Marvell/octeontx2/Makefile b/board/Marvell/octeontx2/Makefile
new file mode 100644
index 0000000..1f763b1
--- /dev/null
+++ b/board/Marvell/octeontx2/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2018 Marvell International Ltd.
+#
+# https://spdx.org/licenses
+#
+
+obj-y	:= board.o smc.o soc-utils.o
+obj-$(CONFIG_OF_LIBFDT) += board-fdt.o
diff --git a/board/Marvell/octeontx2/board-fdt.c b/board/Marvell/octeontx2/board-fdt.c
new file mode 100644
index 0000000..a4771af
--- /dev/null
+++ b/board/Marvell/octeontx2/board-fdt.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <errno.h>
+#include <fdtdec.h>
+#include <fdt_support.h>
+#include <log.h>
+
+#include <linux/compiler.h>
+#include <linux/libfdt.h>
+
+#include <asm/arch/board.h>
+#include <asm/arch/smc.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Locate the firmware-provided "/cavium,bdk" node in the control DT.
+ *
+ * Returns the node offset on success, 0 on any failure — 0 is the root
+ * node offset and never a valid BDK node, so callers test for !node.
+ */
+static int fdt_get_bdk_node(void)
+{
+	int node, ret;
+	const void *fdt = gd->fdt_blob;
+
+	if (!fdt) {
+		printf("ERROR: %s: no valid device tree found\n", __func__);
+		return 0;
+	}
+
+	ret = fdt_check_header(fdt);
+	if (ret < 0) {
+		printf("fdt: %s\n", fdt_strerror(ret));
+		return 0;
+	}
+
+	node = fdt_path_offset(fdt, "/cavium,bdk");
+	if (node < 0) {
+		printf("%s: /cavium,bdk is missing from device tree: %s\n",
+		       __func__, fdt_strerror(node));
+		return 0;
+	}
+	return node;
+}
+
+/*
+ * Base MAC address stored by the BDK in the control DT.
+ *
+ * @return the value parsed from "BOARD-MAC-ADDRESS" (hex string),
+ *	   or 0 when the node or property is missing.
+ */
+u64 fdt_get_board_mac_addr(void)
+{
+	const void *fdt = gd->fdt_blob;
+	const char *prop;
+	int node, len = 16;
+
+	node = fdt_get_bdk_node();
+	if (!node)
+		return 0;
+
+	prop = fdt_getprop(fdt, node, "BOARD-MAC-ADDRESS", &len);
+	if (!prop)
+		return 0;
+
+	return simple_strtol(prop, NULL, 16);
+}
+
+/*
+ * Number of MAC addresses allotted to this board.
+ *
+ * Reads "BOARD-MAC-ADDRESS-NUM" (decimal, hex fallback), then lets a
+ * non-negative "BOARD-MAC-ADDRESS-NUM-OVERRIDE" replace it.
+ *
+ * @return MAC count, 0 when the BDK node or both properties are missing
+ */
+int fdt_get_board_mac_cnt(void)
+{
+	const void *fdt = gd->fdt_blob;
+	const char *str;
+	int node, len = 16;
+	int mac_count = 0;
+	long override;
+
+	node = fdt_get_bdk_node();
+	if (!node)
+		return mac_count;
+
+	str = fdt_getprop(fdt, node, "BOARD-MAC-ADDRESS-NUM", &len);
+	if (str) {
+		mac_count = simple_strtol(str, NULL, 10);
+		/* some BDK versions store the count in hex */
+		if (!mac_count)
+			mac_count = simple_strtol(str, NULL, 16);
+		debug("fdt: MAC_NUM %d\n", mac_count);
+	} else {
+		printf("Error: cannot retrieve mac count prop from fdt\n");
+	}
+
+	str = fdt_getprop(fdt, node, "BOARD-MAC-ADDRESS-NUM-OVERRIDE", &len);
+	if (str) {
+		/* parse once instead of twice; negative means no override */
+		override = simple_strtol(str, NULL, 10);
+		if (override >= 0)
+			mac_count = override;
+		debug("fdt: MAC_NUM %d\n", mac_count);
+	} else {
+		printf("Error: cannot retrieve mac num override prop\n");
+	}
+
+	return mac_count;
+}
+
+/* Board serial string from the BDK node, NULL when unavailable. */
+const char *fdt_get_board_serial(void)
+{
+	int node, len = 64;
+	const char *serial;
+
+	node = fdt_get_bdk_node();
+	if (!node)
+		return NULL;
+
+	serial = fdt_getprop(gd->fdt_blob, node, "BOARD-SERIAL", &len);
+	if (!serial)
+		printf("Error: cannot retrieve board serial from fdt\n");
+
+	return serial;
+}
+
+/* Board revision string from the BDK node, NULL when unavailable. */
+const char *fdt_get_board_revision(void)
+{
+	int node, len = 64;
+	const char *rev;
+
+	node = fdt_get_bdk_node();
+	if (!node)
+		return NULL;
+
+	rev = fdt_getprop(gd->fdt_blob, node, "BOARD-REVISION", &len);
+	if (!rev)
+		printf("Error: cannot retrieve board revision from fdt\n");
+
+	return rev;
+}
+
+/* Board model string from the BDK node, NULL when unavailable. */
+const char *fdt_get_board_model(void)
+{
+	int node, len = 16;
+	const char *model;
+
+	node = fdt_get_bdk_node();
+	if (!node)
+		return NULL;
+
+	model = fdt_getprop(gd->fdt_blob, node, "BOARD-MODEL", &len);
+	if (!model)
+		printf("Error: cannot retrieve board model from fdt\n");
+
+	return model;
+}
+
+/* No board-specific /memory fixups needed; the DT memory node is used as-is. */
+int arch_fixup_memory_node(void *blob)
+{
+	return 0;
+}
+
+/*
+ * OS device-tree fixup: move the board-identification properties from
+ * the firmware-only "/cavium,bdk" node into a new "/octeontx_brd" node
+ * and delete the BDK node.
+ *
+ * @blob: DT to fix up (may be NULL, treated as nothing to do)
+ * @bd:   board info (unused)
+ * @return 0 on success, negative libfdt/errno code otherwise
+ */
+int ft_board_setup(void *blob, struct bd_info *bd)
+{
+	int nodeoff, node, ret, i;
+	const char *temp;
+
+	static const char * const
+		octeontx_brd_nodes[] = {"BOARD-MODEL",
+					"BOARD-SERIAL",
+					"BOARD-MAC-ADDRESS",
+					"BOARD-REVISION",
+					"BOARD-MAC-ADDRESS-NUM"
+					};
+	char nodes[ARRAY_SIZE(octeontx_brd_nodes)][32];
+
+	/* NULL check must precede fdt_check_header(), which dereferences */
+	if (!blob)
+		return 0;
+
+	ret = fdt_check_header(blob);
+	if (ret < 0) {
+		printf("ERROR: %s\n", fdt_strerror(ret));
+		return ret;
+	}
+
+	nodeoff = fdt_path_offset(blob, "/cavium,bdk");
+	if (nodeoff < 0) {
+		printf("ERROR: FDT BDK node not found\n");
+		return nodeoff;
+	}
+
+	/* Stash the properties: fdt_del_node() invalidates the offset */
+	for (i = 0; i < ARRAY_SIZE(octeontx_brd_nodes); i++) {
+		temp = fdt_getprop(blob, nodeoff,
+				   octeontx_brd_nodes[i], NULL);
+		/*
+		 * Guard against a missing property (fdt_getprop() returns
+		 * NULL) and guarantee NUL termination — strncpy() from
+		 * NULL crashed and could leave the buffer unterminated.
+		 */
+		snprintf(nodes[i], sizeof(nodes[i]), "%s", temp ? temp : "");
+	}
+
+	/* Delete cavium,bdk node */
+	ret = fdt_del_node(blob, nodeoff);
+	if (ret < 0) {
+		printf("WARNING : could not remove cavium, bdk node\n");
+		return ret;
+	}
+	debug("%s deleted 'cavium,bdk' node\n", __func__);
+
+	/*
+	 * Add a new node at root level which would have
+	 * necessary info
+	 */
+	node = fdt_add_subnode(blob, 0, "octeontx_brd");
+	if (node < 0) {
+		printf("Cannot create node octeontx_brd: %s\n",
+		       fdt_strerror(node));
+		return -EIO;
+	}
+
+	/* Populate properties in node */
+	for (i = 0; i < ARRAY_SIZE(octeontx_brd_nodes); i++) {
+		if (fdt_setprop_string(blob, node,
+				       octeontx_brd_nodes[i],
+				       nodes[i])) {
+			/* report the property name, not its value */
+			printf("Can't set %s\n", octeontx_brd_nodes[i]);
+			return -EIO;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Return the FDT base address that was passed by ATF
+ *
+ * @return	FDT base address received from ATF in x1 register
+ *
+ * NOTE(review): no validation happens here — callers rely on ATF
+ * handing over a sane blob at fdt_base_addr.
+ */
+void *board_fdt_blob_setup(void)
+{
+	return (void *)fdt_base_addr;
+}
diff --git a/board/Marvell/octeontx2/board.c b/board/Marvell/octeontx2/board.c
new file mode 100644
index 0000000..50e903d
--- /dev/null
+++ b/board/Marvell/octeontx2/board.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <command.h>
+#include <console.h>
+#include <cpu_func.h>
+#include <dm.h>
+#include <dm/uclass-internal.h>
+#include <env.h>
+#include <init.h>
+#include <malloc.h>
+#include <net.h>
+#include <pci_ids.h>
+#include <errno.h>
+#include <asm/io.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/libfdt.h>
+#include <fdt_support.h>
+#include <asm/arch/smc.h>
+#include <asm/arch/soc.h>
+#include <asm/arch/board.h>
+#include <dm/util.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Delete ethaddr/ethNaddr environment variables (N = 1..19).
+ *
+ * QLM configuration can change between boots, so stale MAC
+ * assignments must not carry over.
+ */
+void cleanup_env_ethaddr(void)
+{
+	char ename[32];
+
+	for (int i = 0; i < 20; i++) {
+		/* bounded write instead of the former sprintf() */
+		snprintf(ename, sizeof(ename), i ? "eth%daddr" : "ethaddr", i);
+		if (env_get(ename))
+			env_set(ename, NULL);
+	}
+}
+
+/*
+ * Derive the MAC address for interface @index from the board's base
+ * MAC address and store it in @mac_addr (zeroed when unavailable).
+ */
+void octeontx2_board_get_mac_addr(u8 index, u8 *mac_addr)
+{
+	u64 tmp_mac, board_mac_addr = fdt_get_board_mac_addr();
+	/*
+	 * Plain local instead of the former 'static': it was reassigned
+	 * from the FDT on every call, so no state survived between calls
+	 * and the trailing decrement had no observable effect.
+	 */
+	int board_mac_num = fdt_get_board_mac_cnt();
+
+	if (!is_zero_ethaddr((u8 *)&board_mac_addr) && board_mac_num) {
+		/* base address + index, byte-swapped into wire order */
+		tmp_mac = board_mac_addr + index;
+		tmp_mac = swab64(tmp_mac) >> 16;
+		memcpy(mac_addr, (u8 *)&tmp_mac, ARP_HLEN);
+	} else {
+		memset(mac_addr, 0, ARP_HLEN);
+	}
+	debug("%s mac %pM\n", __func__, mac_addr);
+}
+
+/*
+ * Quiesce networking hardware before handing off to the OS:
+ * destroy ETH (RVU PF) devices, bring CGX links down, destroy MISC
+ * (CGX/RVU AF) devices, then ask firmware to drop LF<->PF mappings.
+ * The order matters: PFs go before the AF they depend on.
+ */
+void board_quiesce_devices(void)
+{
+	struct uclass *uc_dev;
+	int ret;
+
+	/* Removes all RVU PF devices */
+	ret = uclass_get(UCLASS_ETH, &uc_dev);
+	if (uc_dev)
+		ret = uclass_destroy(uc_dev);
+	if (ret)
+		printf("couldn't remove rvu pf devices\n");
+
+	if (IS_ENABLED(CONFIG_OCTEONTX2_CGX_INTF)) {
+		/* Bring down all cgx lmac links */
+		cgx_intf_shutdown();
+	}
+
+	/* Removes all CGX and RVU AF devices */
+	ret = uclass_get(UCLASS_MISC, &uc_dev);
+	if (uc_dev)
+		ret = uclass_destroy(uc_dev);
+	if (ret)
+		printf("couldn't remove misc (cgx/rvu_af) devices\n");
+
+	/* SMC call - removes all LF<->PF mappings */
+	smc_disable_rvu_lfs(0);
+}
+
+/* Enumerate PCI early — OcteonTX2 peripherals sit behind PCI. */
+int board_early_init_r(void)
+{
+	pci_init();
+	return 0;
+}
+
+/* No board-specific initialization needed at this stage. */
+int board_init(void)
+{
+	return 0;
+}
+
+/* Nothing to do: the timer is presumably set up before U-Boot runs. */
+int timer_init(void)
+{
+	return 0;
+}
+
+int dram_init(void)
+{
+	/* Firmware owns DRAM info; query node 0 over SMC */
+	gd->ram_size = smc_dram_size(0);
+	/* exclude the region below the SDRAM base */
+	gd->ram_size -= CONFIG_SYS_SDRAM_BASE;
+
+	mem_map_fill();
+
+	return 0;
+}
+
+/*
+ * Force-probe the MAC (CGX) and RVU AF PCI devices so they are bound
+ * before the network stack initializes.
+ */
+void board_late_probe_devices(void)
+{
+	struct udevice *dev;
+	const int num_cgx = 3;
+	int ret, idx;
+
+	for (idx = 0; idx < num_cgx; idx++) {
+		ret = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+					 PCI_DEVICE_ID_CAVIUM_CGX, idx, &dev);
+		if (ret)
+			debug("%s CGX%d device not found\n", __func__, idx);
+	}
+
+	ret = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
+				 PCI_DEVICE_ID_CAVIUM_RVU_AF, 0, &dev);
+	if (ret)
+		debug("NIC AF device not found\n");
+}
+
+/**
+ * Board late initialization routine.
+ *
+ * Runs after relocation: refreshes ethaddr variables, derives the shell
+ * prompt from the board model, records board revision/serial from the
+ * DT, forwards the disable_ooo setting to firmware, and saves the
+ * environment only when revision/serial changed.
+ */
+int board_late_init(void)
+{
+	char boardname[32];
+	char boardserial[150], boardrev[150];
+	long val;
+	bool save_env = false;
+	const char *str;
+
+	debug("%s()\n", __func__);
+
+	/*
+	 * Now that pci_init initializes env device.
+	 * Try to cleanup ethaddr env variables, this is needed
+	 * as with each boot, configuration of QLM can change.
+	 */
+	cleanup_env_ethaddr();
+
+	snprintf(boardname, sizeof(boardname), "%s> ", fdt_get_board_model());
+	env_set("prompt", boardname);
+	/* Prefer a user-set fdtcontroladdr, else the ATF-passed blob */
+	set_working_fdt_addr(env_get_hex("fdtcontroladdr", fdt_base_addr));
+
+	str = fdt_get_board_revision();
+	if (str) {
+		snprintf(boardrev, sizeof(boardrev), "%s", str);
+		/* persist only when the stored value actually changed */
+		if (env_get("boardrev") &&
+		    strcmp(boardrev, env_get("boardrev")))
+			save_env = true;
+		env_set("boardrev", boardrev);
+	}
+
+	str = fdt_get_board_serial();
+	if (str) {
+		snprintf(boardserial, sizeof(boardserial), "%s", str);
+		if (env_get("serial#") &&
+		    strcmp(boardserial, env_get("serial#")))
+			save_env = true;
+		env_set("serial#", boardserial);
+	}
+
+	/* forward the out-of-order setting to secure firmware */
+	val = env_get_hex("disable_ooo", 0);
+	smc_configure_ooo(val);
+
+	if (IS_ENABLED(CONFIG_NET_OCTEONTX2))
+		board_late_probe_devices();
+
+	if (save_env)
+		env_save();
+
+	return 0;
+}
+
+/*
+ * Invoked before relocation, so limit to stack variables.
+ */
+int checkboard(void)
+{
+	/* fdt_get_board_model() returns NULL when the BDK node is absent;
+	 * printing NULL via %s is undefined behavior, so guard it.
+	 */
+	const char *model = fdt_get_board_model();
+
+	printf("Board: %s\n", model ? model : "unknown");
+
+	return 0;
+}
+
+/*
+ * Acquire or release boot-flash arbitration via CPC_BOOT_OWNERX slot 3.
+ *
+ * @acquire: true  = request ownership and spin until boot_wait clears,
+ *	     false = clear our request bit.
+ *
+ * NOTE(review): the acquire loop has no timeout — if the other owner
+ * never releases the flash this spins forever; confirm acceptable.
+ */
+void board_acquire_flash_arb(bool acquire)
+{
+	union cpc_boot_ownerx ownerx;
+
+	if (!acquire) {
+		ownerx.u = readl(CPC_BOOT_OWNERX(3));
+		ownerx.s.boot_req = 0;
+		writel(ownerx.u, CPC_BOOT_OWNERX(3));
+	} else {
+		ownerx.u = 0;
+		ownerx.s.boot_req = 1;
+		writel(ownerx.u, CPC_BOOT_OWNERX(3));
+		udelay(1);
+		do {
+			ownerx.u = readl(CPC_BOOT_OWNERX(3));
+		} while (ownerx.s.boot_wait);
+	}
+}
+
+/* Report successful boot to firmware (fail-safe boot-success SMC). */
+int last_stage_init(void)
+{
+	(void)smc_flsf_fw_booted();
+	return 0;
+}
+
+/*
+ * go_uboot <addr>: jump to another U-Boot image already loaded in RAM.
+ *
+ * Flushes 1 MiB of cache at the load address, disables the dcache and
+ * branches to the image, passing 0 in x0 and the FDT pointer in x1
+ * (per the command help, mirroring the ATF handoff convention).
+ */
+static int do_go_uboot(struct cmd_tbl *cmdtp, int flag, int argc,
+		       char *const argv[])
+{
+	typedef void __noreturn (*uboot_entry_t)(ulong, void *);
+	uboot_entry_t entry;
+	ulong addr;
+	void *fdt;
+
+	if (argc < 2)
+		return CMD_RET_USAGE;
+
+	addr = simple_strtoul(argv[1], NULL, 16);
+	fdt = board_fdt_blob_setup();
+	entry = (uboot_entry_t)addr;
+	flush_cache((ulong)addr, 1 << 20);	/* 1MiB should be enough */
+	/* new image starts with caches in a known (off) state */
+	dcache_disable();
+
+	printf("## Starting U-Boot at %p (FDT at %p)...\n", entry, fdt);
+
+	entry(0, fdt);
+
+	/* not reached: entry() is __noreturn */
+	return 0;
+}
+
+U_BOOT_CMD(go_uboot, 2, 0, do_go_uboot,
+	   "Start U-Boot from RAM (pass FDT via x1 register)",
+	   "");
diff --git a/board/Marvell/octeontx2/smc.c b/board/Marvell/octeontx2/smc.c
new file mode 100644
index 0000000..9e31695
--- /dev/null
+++ b/board/Marvell/octeontx2/smc.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <asm/psci.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/arch/smc.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Query secure firmware (SMC OCTEONTX2_DRAM_SIZE) for DRAM size.
+ *
+ * @node:	node to query
+ * @return	DRAM size as reported by the firmware in x0
+ */
+ssize_t smc_dram_size(unsigned int node)
+{
+	struct pt_regs regs;
+
+	regs.regs[0] = OCTEONTX2_DRAM_SIZE;
+	regs.regs[1] = node;
+	smc_call(&regs);
+
+	return regs.regs[0];
+}
+
+/*
+ * Ask secure firmware to remove all RVU LF<->PF mappings on @node
+ * (used during device quiesce before OS handoff).
+ *
+ * @return firmware result from x0
+ */
+ssize_t smc_disable_rvu_lfs(unsigned int node)
+{
+	struct pt_regs regs;
+
+	regs.regs[0] = OCTEONTX2_DISABLE_RVU_LFS;
+	regs.regs[1] = node;
+	smc_call(&regs);
+
+	return regs.regs[0];
+}
+
+/*
+ * Forward the out-of-order configuration value @val (from the
+ * "disable_ooo" env variable) to secure firmware.
+ *
+ * @return firmware result from x0
+ */
+ssize_t smc_configure_ooo(unsigned int val)
+{
+	struct pt_regs regs;
+
+	regs.regs[0] = OCTEONTX2_CONFIG_OOO;
+	regs.regs[1] = val;
+	smc_call(&regs);
+
+	return regs.regs[0];
+}
+
+/*
+ * Notify secure firmware that U-Boot booted successfully
+ * (fail-safe boot-progress mechanism).
+ *
+ * @return firmware result from x0
+ */
+ssize_t smc_flsf_fw_booted(void)
+{
+	/* Zero-init: don't leak stale stack data into the secure call */
+	struct pt_regs regs = { 0 };
+
+	regs.regs[0] = OCTEONTX2_FSAFE_PR_BOOT_SUCCESS;
+	smc_call(&regs);
+
+	return regs.regs[0];
+}
diff --git a/board/Marvell/octeontx2/soc-utils.c b/board/Marvell/octeontx2/soc-utils.c
new file mode 100644
index 0000000..1cba7fb
--- /dev/null
+++ b/board/Marvell/octeontx2/soc-utils.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <malloc.h>
+#include <errno.h>
+#include <asm/io.h>
+#include <linux/compiler.h>
+#include <asm/arch/soc.h>
+#include <asm/arch/board.h>
+#include <dm/util.h>
+
+/*
+ * Classify the run platform from the board model string:
+ * "ASIM-*" = simulator, "EMUL-*" = emulator, otherwise real hardware.
+ */
+int read_platform(void)
+{
+	const char *model = fdt_get_board_model();
+	int platform = PLATFORM_HW;
+
+	if (model) {
+		if (!strncmp(model, "ASIM-", 5))
+			platform = PLATFORM_ASIM;
+		else if (!strncmp(model, "EMUL-", 5))
+			platform = PLATFORM_EMULATOR;
+	}
+
+	return platform;
+}
+
+/* Read the Main ID register (MIDR_EL1) of the executing core. */
+static inline u64 read_midr(void)
+{
+	u64 result;
+
+	asm ("mrs %[rd],MIDR_EL1" : [rd] "=r" (result));
+	return result;
+}
+
+/*
+ * Part-number field of MIDR_EL1.
+ *
+ * NOTE(review): the architectural PartNum field is bits [15:4]; only the
+ * low 8 bits are kept here — presumably enough to distinguish the
+ * supported parts. Confirm if new SoCs are added.
+ */
+u8 read_partnum(void)
+{
+	return ((read_midr() >> 4) & 0xFF);
+}
+
+/* Board name as carried in the DT board-model property (may be NULL). */
+const char *read_board_name(void)
+{
+	return fdt_get_board_model();
+}
+
diff --git a/board/renesas/rcar-common/common.c b/board/renesas/rcar-common/common.c
index 83dd288..9762fb2 100644
--- a/board/renesas/rcar-common/common.c
+++ b/board/renesas/rcar-common/common.c
@@ -42,51 +42,4 @@
 
 	return 0;
 }
-
-#if CONFIG_IS_ENABLED(OF_BOARD_SETUP) && CONFIG_IS_ENABLED(PCI)
-int ft_board_setup(void *blob, struct bd_info *bd)
-{
-	struct udevice *dev;
-	struct uclass *uc;
-	fdt_addr_t regs_addr;
-	int i, off, ret;
-
-	ret = uclass_get(UCLASS_PCI, &uc);
-	if (ret)
-		return ret;
-
-	uclass_foreach_dev(dev, uc) {
-		struct pci_controller hose = { 0 };
-
-		for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
-			if (hose.region_count == MAX_PCI_REGIONS) {
-				printf("maximum number of regions parsed, aborting\n");
-				break;
-			}
-
-			if (bd->bi_dram[i].size) {
-				pci_set_region(&hose.regions[hose.region_count++],
-					       bd->bi_dram[i].start,
-					       bd->bi_dram[i].start,
-					       bd->bi_dram[i].size,
-					       PCI_REGION_MEM |
-					       PCI_REGION_PREFETCH |
-					       PCI_REGION_SYS_MEMORY);
-			}
-		}
-
-		regs_addr = devfdt_get_addr_index(dev, 0);
-		off = fdt_node_offset_by_compat_reg(blob,
-				"renesas,pcie-rcar-gen3", regs_addr);
-		if (off < 0) {
-			printf("Failed to find PCIe node@%llx\n", regs_addr);
-			return off;
-		}
-
-		fdt_pci_dma_ranges(blob, off, &hose);
-	}
-
-	return 0;
-}
-#endif
 #endif
diff --git a/configs/octeontx2_95xx_defconfig b/configs/octeontx2_95xx_defconfig
new file mode 100644
index 0000000..047cb45
--- /dev/null
+++ b/configs/octeontx2_95xx_defconfig
@@ -0,0 +1,105 @@
+CONFIG_ARM=y
+# CONFIG_ARM64_SUPPORT_AARCH32 is not set
+CONFIG_ARCH_OCTEONTX2=y
+CONFIG_SYS_TEXT_BASE=0x04000000
+CONFIG_SYS_MALLOC_F_LEN=0x4000
+CONFIG_NR_DRAM_BANKS=1
+CONFIG_ENV_SIZE=0x8000
+CONFIG_ENV_OFFSET=0xF00000
+CONFIG_ENV_SECT_SIZE=0x10000
+CONFIG_TARGET_OCTEONTX2_95XX=y
+CONFIG_DM_GPIO=y
+CONFIG_DEBUG_UART_BASE=0x87e028000000
+CONFIG_DEBUG_UART_CLOCK=24000000
+CONFIG_DEBUG_UART=y
+CONFIG_FIT=y
+CONFIG_FIT_SIGNATURE=y
+CONFIG_OF_BOARD_SETUP=y
+CONFIG_BOOTDELAY=5
+CONFIG_USE_BOOTARGS=y
+CONFIG_BOOTARGS="console=ttyAMA0,115200n8 earlycon=pl011,0x87e028000000 maxcpus=6 rootwait rw root=/dev/mmcblk0p2 coherent_pool=16M"
+CONFIG_VERSION_VARIABLE=y
+# CONFIG_DISPLAY_CPUINFO is not set
+CONFIG_BOARD_EARLY_INIT_R=y
+CONFIG_HUSH_PARSER=y
+CONFIG_SYS_PROMPT="Marvell> "
+# CONFIG_CMD_BOOTEFI_HELLO_COMPILE is not set
+CONFIG_CMD_MD5SUM=y
+CONFIG_MD5SUM_VERIFY=y
+CONFIG_CMD_MX_CYCLIC=y
+CONFIG_CMD_MEMTEST=y
+CONFIG_SYS_MEMTEST_START=0x04000000
+CONFIG_SYS_MEMTEST_END=0x040f0000
+CONFIG_CMD_SHA1SUM=y
+CONFIG_SHA1SUM_VERIFY=y
+CONFIG_CMD_DM=y
+# CONFIG_CMD_FLASH is not set
+CONFIG_CMD_GPIO=y
+CONFIG_CMD_I2C=y
+CONFIG_CMD_MMC=y
+CONFIG_CMD_BKOPS_ENABLE=y
+CONFIG_CMD_PART=y
+CONFIG_CMD_PCI=y
+CONFIG_CMD_SF_TEST=y
+CONFIG_CMD_DHCP=y
+CONFIG_CMD_TFTPPUT=y
+CONFIG_CMD_TFTPSRV=y
+CONFIG_CMD_RARP=y
+CONFIG_CMD_MII=y
+CONFIG_CMD_PING=y
+CONFIG_CMD_CDP=y
+CONFIG_CMD_SNTP=y
+CONFIG_CMD_DNS=y
+CONFIG_CMD_LINK_LOCAL=y
+CONFIG_CMD_PXE=y
+CONFIG_CMD_TIME=y
+CONFIG_CMD_EXT2=y
+CONFIG_CMD_EXT4=y
+CONFIG_CMD_EXT4_WRITE=y
+CONFIG_CMD_FAT=y
+CONFIG_CMD_FS_GENERIC=y
+CONFIG_EFI_PARTITION=y
+CONFIG_PARTITION_TYPE_GUID=y
+CONFIG_OF_BOARD=y
+CONFIG_ENV_IS_IN_SPI_FLASH=y
+CONFIG_USE_ENV_SPI_BUS=y
+CONFIG_ENV_SPI_BUS=0
+CONFIG_USE_ENV_SPI_CS=y
+CONFIG_ENV_SPI_CS=0
+CONFIG_USE_ENV_SPI_MAX_HZ=y
+CONFIG_ENV_SPI_MAX_HZ=125000000
+CONFIG_USE_ENV_SPI_MODE=y
+CONFIG_ENV_SPI_MODE=0x0
+CONFIG_NET_RANDOM_ETHADDR=y
+CONFIG_DM_I2C=y
+CONFIG_MISC=y
+CONFIG_DM_MMC=y
+CONFIG_MMC_HS400_SUPPORT=y
+CONFIG_MMC_OCTEONTX=y
+CONFIG_MTD=y
+CONFIG_DM_SPI_FLASH=y
+CONFIG_SF_DEFAULT_MODE=0x0
+CONFIG_SF_DEFAULT_SPEED=125000000
+CONFIG_SPI_FLASH_SFDP_SUPPORT=y
+CONFIG_SPI_FLASH_MACRONIX=y
+CONFIG_SPI_FLASH_SPANSION=y
+CONFIG_SPI_FLASH_STMICRO=y
+CONFIG_DM_ETH=y
+CONFIG_PCI=y
+CONFIG_DM_PCI=y
+CONFIG_DM_PCI_COMPAT=y
+CONFIG_PCI_REGION_MULTI_ENTRY=y
+CONFIG_PCI_SRIOV=y
+CONFIG_PCI_ARID=y
+CONFIG_PCI_OCTEONTX=y
+CONFIG_DM_REGULATOR=y
+CONFIG_DM_REGULATOR_FIXED=y
+CONFIG_DM_REGULATOR_GPIO=y
+CONFIG_DM_RTC=y
+CONFIG_DM_SERIAL=y
+CONFIG_DEBUG_UART_SKIP_INIT=y
+CONFIG_PL01X_SERIAL=y
+CONFIG_SPI=y
+CONFIG_DM_SPI=y
+CONFIG_WDT=y
+CONFIG_ERRNO_STR=y
diff --git a/configs/octeontx2_96xx_defconfig b/configs/octeontx2_96xx_defconfig
new file mode 100644
index 0000000..b48733b
--- /dev/null
+++ b/configs/octeontx2_96xx_defconfig
@@ -0,0 +1,131 @@
+CONFIG_ARM=y
+# CONFIG_ARM64_SUPPORT_AARCH32 is not set
+CONFIG_ARCH_OCTEONTX2=y
+CONFIG_SYS_TEXT_BASE=0x04000000
+CONFIG_SYS_MALLOC_F_LEN=0x4000
+CONFIG_NR_DRAM_BANKS=1
+CONFIG_ENV_SIZE=0x8000
+CONFIG_ENV_OFFSET=0xF00000
+CONFIG_ENV_SECT_SIZE=0x10000
+CONFIG_TARGET_OCTEONTX2_96XX=y
+CONFIG_DM_GPIO=y
+CONFIG_DEBUG_UART_BASE=0x87e028000000
+CONFIG_DEBUG_UART_CLOCK=24000000
+CONFIG_DEBUG_UART=y
+CONFIG_AHCI=y
+# CONFIG_SYS_MALLOC_CLEAR_ON_INIT is not set
+CONFIG_FIT=y
+CONFIG_FIT_SIGNATURE=y
+CONFIG_OF_BOARD_SETUP=y
+CONFIG_BOOTDELAY=5
+CONFIG_USE_BOOTARGS=y
+CONFIG_BOOTARGS="console=ttyAMA0,115200n8 earlycon=pl011,0x87e028000000 maxcpus=24 rootwait rw root=/dev/mmcblk0p2 coherent_pool=16M"
+CONFIG_VERSION_VARIABLE=y
+# CONFIG_DISPLAY_CPUINFO is not set
+CONFIG_BOARD_EARLY_INIT_R=y
+CONFIG_HUSH_PARSER=y
+CONFIG_SYS_PROMPT="Marvell> "
+# CONFIG_CMD_BOOTEFI_HELLO_COMPILE is not set
+CONFIG_CMD_MD5SUM=y
+CONFIG_MD5SUM_VERIFY=y
+CONFIG_CMD_MX_CYCLIC=y
+CONFIG_CMD_MEMTEST=y
+CONFIG_CMD_SHA1SUM=y
+CONFIG_SHA1SUM_VERIFY=y
+CONFIG_CMD_DM=y
+# CONFIG_CMD_FLASH is not set
+CONFIG_CMD_GPIO=y
+CONFIG_CMD_I2C=y
+CONFIG_CMD_MMC=y
+CONFIG_CMD_BKOPS_ENABLE=y
+CONFIG_CMD_PART=y
+CONFIG_CMD_PCI=y
+CONFIG_CMD_SF_TEST=y
+CONFIG_CMD_USB=y
+CONFIG_CMD_DHCP=y
+CONFIG_CMD_TFTPPUT=y
+CONFIG_CMD_TFTPSRV=y
+CONFIG_CMD_RARP=y
+CONFIG_CMD_MII=y
+CONFIG_CMD_PING=y
+CONFIG_CMD_CDP=y
+CONFIG_CMD_SNTP=y
+CONFIG_CMD_DNS=y
+CONFIG_CMD_LINK_LOCAL=y
+CONFIG_CMD_PXE=y
+CONFIG_CMD_TIME=y
+CONFIG_CMD_EXT2=y
+CONFIG_CMD_EXT4=y
+CONFIG_CMD_EXT4_WRITE=y
+CONFIG_CMD_FAT=y
+CONFIG_CMD_FS_GENERIC=y
+CONFIG_EFI_PARTITION=y
+CONFIG_PARTITION_TYPE_GUID=y
+CONFIG_OF_BOARD=y
+CONFIG_ENV_IS_IN_SPI_FLASH=y
+CONFIG_USE_ENV_SPI_BUS=y
+CONFIG_ENV_SPI_BUS=0
+CONFIG_USE_ENV_SPI_CS=y
+CONFIG_ENV_SPI_CS=0
+CONFIG_USE_ENV_SPI_MAX_HZ=y
+CONFIG_ENV_SPI_MAX_HZ=125000000
+CONFIG_USE_ENV_SPI_MODE=y
+CONFIG_ENV_SPI_MODE=0x0
+CONFIG_NET_RANDOM_ETHADDR=y
+CONFIG_SCSI_AHCI=y
+CONFIG_AHCI_PCI=y
+CONFIG_DM_I2C=y
+CONFIG_I2C_SET_DEFAULT_BUS_NUM=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_MISC=y
+CONFIG_DM_MMC=y
+CONFIG_MMC_HS400_SUPPORT=y
+CONFIG_MMC_OCTEONTX=y
+CONFIG_MTD=y
+CONFIG_DM_SPI_FLASH=y
+CONFIG_SF_DEFAULT_MODE=0x0
+CONFIG_SF_DEFAULT_SPEED=125000000
+CONFIG_SPI_FLASH_SFDP_SUPPORT=y
+CONFIG_SPI_FLASH_MACRONIX=y
+CONFIG_SPI_FLASH_SPANSION=y
+CONFIG_SPI_FLASH_STMICRO=y
+CONFIG_SPI_FLASH_WINBOND=y
+CONFIG_PHYLIB=y
+CONFIG_PHY_MARVELL=y
+CONFIG_PHY_VITESSE=y
+CONFIG_DM_ETH=y
+CONFIG_E1000=y
+CONFIG_E1000_SPI=y
+CONFIG_CMD_E1000=y
+CONFIG_NVME=y
+CONFIG_PCI=y
+CONFIG_DM_PCI=y
+CONFIG_DM_PCI_COMPAT=y
+CONFIG_PCI_REGION_MULTI_ENTRY=y
+CONFIG_PCI_SRIOV=y
+CONFIG_PCI_ARID=y
+CONFIG_PCI_OCTEONTX=y
+CONFIG_DM_REGULATOR=y
+CONFIG_DM_REGULATOR_FIXED=y
+CONFIG_DM_REGULATOR_GPIO=y
+CONFIG_DM_RTC=y
+CONFIG_SCSI=y
+CONFIG_DM_SCSI=y
+CONFIG_DM_SERIAL=y
+CONFIG_DEBUG_UART_SKIP_INIT=y
+CONFIG_PL01X_SERIAL=y
+CONFIG_SPI=y
+CONFIG_DM_SPI=y
+CONFIG_OCTEON_SPI=y
+CONFIG_USB=y
+CONFIG_DM_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_HOST_ETHER=y
+CONFIG_USB_ETHER_ASIX=y
+CONFIG_USB_ETHER_ASIX88179=y
+CONFIG_USB_ETHER_RTL8152=y
+CONFIG_WDT=y
+CONFIG_ERRNO_STR=y
diff --git a/configs/octeontx_81xx_defconfig b/configs/octeontx_81xx_defconfig
new file mode 100644
index 0000000..ca3286b
--- /dev/null
+++ b/configs/octeontx_81xx_defconfig
@@ -0,0 +1,131 @@
+CONFIG_ARM=y
+# CONFIG_ARM64_SUPPORT_AARCH32 is not set
+CONFIG_ARCH_OCTEONTX=y
+CONFIG_SYS_TEXT_BASE=0x2800000
+CONFIG_SYS_MALLOC_F_LEN=0x4000
+CONFIG_NR_DRAM_BANKS=1
+CONFIG_ENV_SIZE=0x8000
+CONFIG_ENV_OFFSET=0xF00000
+CONFIG_ENV_SECT_SIZE=0x10000
+CONFIG_TARGET_OCTEONTX_81XX=y
+CONFIG_DM_GPIO=y
+CONFIG_DEBUG_UART_BASE=0x87e028000000
+CONFIG_DEBUG_UART_CLOCK=24000000
+CONFIG_DEBUG_UART=y
+CONFIG_AHCI=y
+CONFIG_FIT=y
+CONFIG_FIT_SIGNATURE=y
+CONFIG_OF_BOARD_SETUP=y
+CONFIG_BOOTDELAY=5
+CONFIG_USE_BOOTARGS=y
+CONFIG_BOOTARGS="console=ttyAMA0,115200n8 earlycon=pl011,0x87e028000000 maxcpus=4 rootwait rw root=/dev/sda2 coherent_pool=16M"
+CONFIG_VERSION_VARIABLE=y
+# CONFIG_DISPLAY_CPUINFO is not set
+CONFIG_BOARD_EARLY_INIT_R=y
+CONFIG_HUSH_PARSER=y
+CONFIG_SYS_PROMPT="Marvell> "
+# CONFIG_CMD_BOOTEFI_HELLO_COMPILE is not set
+CONFIG_CMD_MD5SUM=y
+CONFIG_MD5SUM_VERIFY=y
+CONFIG_CMD_MX_CYCLIC=y
+CONFIG_CMD_MEMTEST=y
+CONFIG_SYS_MEMTEST_START=0x2800000
+CONFIG_SYS_MEMTEST_END=0x28f0000
+CONFIG_CMD_SHA1SUM=y
+CONFIG_SHA1SUM_VERIFY=y
+CONFIG_CMD_DM=y
+# CONFIG_CMD_FLASH is not set
+CONFIG_CMD_GPIO=y
+CONFIG_CMD_I2C=y
+CONFIG_CMD_MMC=y
+CONFIG_CMD_BKOPS_ENABLE=y
+CONFIG_CMD_PART=y
+CONFIG_CMD_PCI=y
+CONFIG_CMD_SF_TEST=y
+CONFIG_CMD_USB=y
+CONFIG_CMD_DHCP=y
+CONFIG_CMD_TFTPPUT=y
+CONFIG_CMD_TFTPSRV=y
+CONFIG_CMD_RARP=y
+CONFIG_CMD_MII=y
+CONFIG_CMD_PING=y
+CONFIG_CMD_CDP=y
+CONFIG_CMD_SNTP=y
+CONFIG_CMD_DNS=y
+CONFIG_CMD_LINK_LOCAL=y
+CONFIG_CMD_PXE=y
+CONFIG_CMD_TIME=y
+CONFIG_CMD_EXT2=y
+CONFIG_CMD_EXT4=y
+CONFIG_CMD_EXT4_WRITE=y
+CONFIG_CMD_FAT=y
+CONFIG_CMD_FS_GENERIC=y
+CONFIG_EFI_PARTITION=y
+CONFIG_PARTITION_TYPE_GUID=y
+CONFIG_OF_BOARD=y
+CONFIG_ENV_IS_IN_SPI_FLASH=y
+CONFIG_USE_ENV_SPI_BUS=y
+CONFIG_ENV_SPI_BUS=0
+CONFIG_USE_ENV_SPI_CS=y
+CONFIG_ENV_SPI_CS=0
+CONFIG_USE_ENV_SPI_MAX_HZ=y
+CONFIG_ENV_SPI_MAX_HZ=16000000
+CONFIG_USE_ENV_SPI_MODE=y
+CONFIG_ENV_SPI_MODE=0x0
+CONFIG_NET_RANDOM_ETHADDR=y
+CONFIG_SCSI_AHCI=y
+CONFIG_AHCI_PCI=y
+CONFIG_DM_I2C=y
+CONFIG_MISC=y
+CONFIG_DM_MMC=y
+CONFIG_MMC_OCTEONTX=y
+CONFIG_MTD=y
+CONFIG_DM_SPI_FLASH=y
+CONFIG_SF_DEFAULT_MODE=0x0
+CONFIG_SF_DEFAULT_SPEED=16000000
+CONFIG_SPI_FLASH_SFDP_SUPPORT=y
+CONFIG_SPI_FLASH_MACRONIX=y
+CONFIG_SPI_FLASH_SPANSION=y
+CONFIG_SPI_FLASH_STMICRO=y
+CONFIG_SPI_FLASH_WINBOND=y
+CONFIG_PHYLIB=y
+CONFIG_PHY_AQUANTIA=y
+CONFIG_PHY_BROADCOM=y
+CONFIG_PHY_MARVELL=y
+CONFIG_PHY_MICREL=y
+CONFIG_PHY_REALTEK=y
+CONFIG_PHY_VITESSE=y
+CONFIG_DM_ETH=y
+CONFIG_E1000=y
+CONFIG_E1000_SPI=y
+CONFIG_CMD_E1000=y
+CONFIG_NVME=y
+CONFIG_PCI=y
+CONFIG_DM_PCI=y
+CONFIG_DM_PCI_COMPAT=y
+CONFIG_PCI_REGION_MULTI_ENTRY=y
+CONFIG_PCI_SRIOV=y
+CONFIG_PCI_ARID=y
+CONFIG_PCI_OCTEONTX=y
+CONFIG_DM_REGULATOR=y
+CONFIG_DM_REGULATOR_FIXED=y
+CONFIG_DM_REGULATOR_GPIO=y
+CONFIG_DM_RTC=y
+CONFIG_SCSI=y
+CONFIG_DM_SCSI=y
+CONFIG_DM_SERIAL=y
+CONFIG_DEBUG_UART_SKIP_INIT=y
+CONFIG_PL01X_SERIAL=y
+CONFIG_SPI=y
+CONFIG_DM_SPI=y
+CONFIG_USB=y
+CONFIG_DM_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_HOST_ETHER=y
+CONFIG_USB_ETHER_ASIX=y
+CONFIG_USB_ETHER_ASIX88179=y
+CONFIG_USB_ETHER_RTL8152=y
+CONFIG_WDT=y
+CONFIG_ERRNO_STR=y
diff --git a/configs/octeontx_83xx_defconfig b/configs/octeontx_83xx_defconfig
new file mode 100644
index 0000000..e7dd3f6
--- /dev/null
+++ b/configs/octeontx_83xx_defconfig
@@ -0,0 +1,128 @@
+CONFIG_ARM=y
+# CONFIG_ARM64_SUPPORT_AARCH32 is not set
+CONFIG_ARCH_OCTEONTX=y
+CONFIG_SYS_TEXT_BASE=0x2800000
+CONFIG_SYS_MALLOC_F_LEN=0x4000
+CONFIG_NR_DRAM_BANKS=1
+CONFIG_ENV_SIZE=0x8000
+CONFIG_ENV_OFFSET=0xF00000
+CONFIG_ENV_SECT_SIZE=0x10000
+CONFIG_TARGET_OCTEONTX_83XX=y
+CONFIG_DM_GPIO=y
+CONFIG_DEBUG_UART_BASE=0x87e028000000
+CONFIG_DEBUG_UART_CLOCK=24000000
+CONFIG_DEBUG_UART=y
+CONFIG_AHCI=y
+CONFIG_FIT=y
+CONFIG_FIT_SIGNATURE=y
+CONFIG_OF_BOARD_SETUP=y
+CONFIG_BOOTDELAY=5
+CONFIG_USE_BOOTARGS=y
+CONFIG_BOOTARGS="console=ttyAMA0,115200n8 earlycon=pl011,0x87e028000000 maxcpus=24 rootwait rw root=/dev/sda2 coherent_pool=16M"
+CONFIG_VERSION_VARIABLE=y
+# CONFIG_DISPLAY_CPUINFO is not set
+CONFIG_BOARD_EARLY_INIT_R=y
+CONFIG_HUSH_PARSER=y
+CONFIG_SYS_PROMPT="Marvell> "
+# CONFIG_CMD_BOOTEFI_HELLO_COMPILE is not set
+CONFIG_CMD_MD5SUM=y
+CONFIG_MD5SUM_VERIFY=y
+CONFIG_CMD_MX_CYCLIC=y
+CONFIG_CMD_MEMTEST=y
+CONFIG_CMD_SHA1SUM=y
+CONFIG_SHA1SUM_VERIFY=y
+CONFIG_CMD_DM=y
+# CONFIG_CMD_FLASH is not set
+CONFIG_CMD_GPIO=y
+CONFIG_CMD_I2C=y
+CONFIG_CMD_MMC=y
+CONFIG_CMD_BKOPS_ENABLE=y
+CONFIG_CMD_PART=y
+CONFIG_CMD_PCI=y
+CONFIG_CMD_SF_TEST=y
+CONFIG_CMD_USB=y
+CONFIG_CMD_DHCP=y
+CONFIG_CMD_TFTPPUT=y
+CONFIG_CMD_TFTPSRV=y
+CONFIG_CMD_RARP=y
+CONFIG_CMD_MII=y
+CONFIG_CMD_PING=y
+CONFIG_CMD_CDP=y
+CONFIG_CMD_SNTP=y
+CONFIG_CMD_DNS=y
+CONFIG_CMD_LINK_LOCAL=y
+CONFIG_CMD_PXE=y
+CONFIG_CMD_EXT2=y
+CONFIG_CMD_EXT4=y
+CONFIG_CMD_EXT4_WRITE=y
+CONFIG_CMD_FAT=y
+CONFIG_CMD_FS_GENERIC=y
+CONFIG_EFI_PARTITION=y
+CONFIG_PARTITION_TYPE_GUID=y
+CONFIG_OF_BOARD=y
+CONFIG_ENV_IS_IN_SPI_FLASH=y
+CONFIG_USE_ENV_SPI_BUS=y
+CONFIG_ENV_SPI_BUS=0
+CONFIG_USE_ENV_SPI_CS=y
+CONFIG_ENV_SPI_CS=0
+CONFIG_USE_ENV_SPI_MAX_HZ=y
+CONFIG_ENV_SPI_MAX_HZ=16000000
+CONFIG_USE_ENV_SPI_MODE=y
+CONFIG_ENV_SPI_MODE=0x0
+CONFIG_NET_RANDOM_ETHADDR=y
+CONFIG_SCSI_AHCI=y
+CONFIG_AHCI_PCI=y
+CONFIG_DM_I2C=y
+CONFIG_MISC=y
+CONFIG_DM_MMC=y
+CONFIG_MMC_OCTEONTX=y
+CONFIG_MTD=y
+CONFIG_DM_SPI_FLASH=y
+CONFIG_SF_DEFAULT_MODE=0x0
+CONFIG_SF_DEFAULT_SPEED=16000000
+CONFIG_SPI_FLASH_SFDP_SUPPORT=y
+CONFIG_SPI_FLASH_MACRONIX=y
+CONFIG_SPI_FLASH_SPANSION=y
+CONFIG_SPI_FLASH_STMICRO=y
+CONFIG_SPI_FLASH_WINBOND=y
+CONFIG_PHYLIB=y
+CONFIG_PHY_AQUANTIA=y
+CONFIG_PHY_BROADCOM=y
+CONFIG_PHY_MARVELL=y
+CONFIG_PHY_MICREL=y
+CONFIG_PHY_REALTEK=y
+CONFIG_PHY_VITESSE=y
+CONFIG_DM_ETH=y
+CONFIG_E1000=y
+CONFIG_E1000_SPI=y
+CONFIG_CMD_E1000=y
+CONFIG_NVME=y
+CONFIG_PCI=y
+CONFIG_DM_PCI=y
+CONFIG_DM_PCI_COMPAT=y
+CONFIG_PCI_REGION_MULTI_ENTRY=y
+CONFIG_PCI_SRIOV=y
+CONFIG_PCI_ARID=y
+CONFIG_PCI_OCTEONTX=y
+CONFIG_DM_REGULATOR=y
+CONFIG_DM_REGULATOR_FIXED=y
+CONFIG_DM_REGULATOR_GPIO=y
+CONFIG_DM_RTC=y
+CONFIG_SCSI=y
+CONFIG_DM_SCSI=y
+CONFIG_DM_SERIAL=y
+CONFIG_DEBUG_UART_SKIP_INIT=y
+CONFIG_PL01X_SERIAL=y
+CONFIG_SPI=y
+CONFIG_DM_SPI=y
+CONFIG_USB=y
+CONFIG_DM_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_HOST_ETHER=y
+CONFIG_USB_ETHER_ASIX=y
+CONFIG_USB_ETHER_ASIX88179=y
+CONFIG_USB_ETHER_RTL8152=y
+CONFIG_WDT=y
+CONFIG_ERRNO_STR=y
diff --git a/configs/qemu-x86_defconfig b/configs/qemu-x86_defconfig
index 6493fef..218026b 100644
--- a/configs/qemu-x86_defconfig
+++ b/configs/qemu-x86_defconfig
@@ -1,5 +1,6 @@
 CONFIG_X86=y
 CONFIG_SYS_TEXT_BASE=0xFFF00000
+CONFIG_SYS_MALLOC_F_LEN=0x1000
 CONFIG_NR_DRAM_BANKS=8
 CONFIG_ENV_SIZE=0x40000
 CONFIG_MAX_CPUS=2
diff --git a/configs/sandbox_defconfig b/configs/sandbox_defconfig
index 41de434..18bfdc0 100644
--- a/configs/sandbox_defconfig
+++ b/configs/sandbox_defconfig
@@ -181,6 +181,7 @@
 CONFIG_PCI=y
 CONFIG_DM_PCI=y
 CONFIG_DM_PCI_COMPAT=y
+CONFIG_PCI_REGION_MULTI_ENTRY=y
 CONFIG_PCI_SANDBOX=y
 CONFIG_PHY=y
 CONFIG_PHY_SANDBOX=y
diff --git a/configs/sandbox_flattree_defconfig b/configs/sandbox_flattree_defconfig
index 46e9dff..dd93167 100644
--- a/configs/sandbox_flattree_defconfig
+++ b/configs/sandbox_flattree_defconfig
@@ -136,6 +136,7 @@
 CONFIG_PCI=y
 CONFIG_DM_PCI=y
 CONFIG_DM_PCI_COMPAT=y
+CONFIG_PCI_REGION_MULTI_ENTRY=y
 CONFIG_PCI_SANDBOX=y
 CONFIG_PHY=y
 CONFIG_PHY_SANDBOX=y
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 47cdea1..22bc0d3 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1198,10 +1198,25 @@
 int ahci_probe_scsi_pci(struct udevice *ahci_dev)
 {
 	ulong base;
+	u16 vendor, device;
 
 	base = (ulong)dm_pci_map_bar(ahci_dev, PCI_BASE_ADDRESS_5,
 				     PCI_REGION_MEM);
 
+	/*
+	 * Note:
+	 * Right now, we have only one quirk here, which is not enough to
+	 * introduce a new Kconfig option to select this. Once we have more
+	 * quirks in this AHCI code, we should add a Kconfig option for
+	 * this though.
+	 */
+	dm_pci_read_config16(ahci_dev, PCI_VENDOR_ID, &vendor);
+	dm_pci_read_config16(ahci_dev, PCI_DEVICE_ID, &device);
+
+	/* Cavium SATA maps its AHCI registers at BAR0 instead of BAR5 */
+	if (vendor == PCI_VENDOR_ID_CAVIUM &&
+	    device == PCI_DEVICE_ID_CAVIUM_SATA)
+		base = (uintptr_t)dm_pci_map_bar(ahci_dev, PCI_BASE_ADDRESS_0,
+						 PCI_REGION_MEM);
 	return ahci_probe_scsi(ahci_dev, base);
 }
 #endif
diff --git a/drivers/core/read.c b/drivers/core/read.c
index 8bb456b..86f3f88 100644
--- a/drivers/core/read.c
+++ b/drivers/core/read.c
@@ -10,6 +10,7 @@
 #include <mapmem.h>
 #include <asm/types.h>
 #include <asm/io.h>
+#include <linux/ioport.h>
 
 int dev_read_u32(const struct udevice *dev, const char *propname, u32 *outp)
 {
@@ -359,3 +360,19 @@
 {
 	return ofnode_get_child_count(dev_ofnode(dev));
 }
+
+/**
+ * dev_read_pci_bus_range() - Read the "bus-range" property of a PCI node
+ *
+ * Reads the two-cell "bus-range" device-tree property and stores the first
+ * cell in @res->start and the second in @res->end.
+ *
+ * NOTE(review): the cells are copied straight out of the property data with
+ * no fdt32_to_cpu()/be32_to_cpu() conversion, although FDT property data is
+ * stored big-endian. On a little-endian host this looks like it would yield
+ * byte-swapped bus numbers unless callers convert -- TODO confirm.
+ *
+ * @dev:	device whose node carries the "bus-range" property
+ * @res:	resource filled with the first/last bus numbers
+ * @return 0 on success, -EINVAL if the property is absent or too short
+ */
+int dev_read_pci_bus_range(const struct udevice *dev,
+			   struct resource *res)
+{
+	const u32 *values;
+	int len;
+
+	/* dev_read_prop() returns the raw property data plus its length */
+	values = dev_read_prop(dev, "bus-range", &len);
+	/* need at least two 32-bit cells: <first-bus last-bus> */
+	if (!values || len < sizeof(*values) * 2)
+		return -EINVAL;
+
+	res->start = *values++;
+	res->end = *values;
+
+	return 0;
+}
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index c29d1ea..0c252e3 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -305,6 +305,15 @@
 	  This selects PCI-based MMC controllers.
 	  If you have an MMC controller on a PCI bus, say Y here.
 
+config MMC_OCTEONTX
+	bool "Marvell OcteonTX Multimedia Card Interface support"
+	depends on (ARCH_OCTEONTX || ARCH_OCTEONTX2)
+	depends on DM_MMC
+	help
+	  This selects the OcteonTX Multimedia card Interface.
+	  If you have an OcteonTX/TX2 board with a Multimedia Card slot,
+	  say Y here.
+
 	  If unsure, say N.
 
 config PXA_MMC_GENERIC
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index d375669..22266ec 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -36,6 +36,7 @@
 obj-$(CONFIG_MMC_OMAP_HS)		+= omap_hsmmc.o
 obj-$(CONFIG_MMC_MXC)			+= mxcmmc.o
 obj-$(CONFIG_MMC_MXS)			+= mxsmmc.o
+obj-$(CONFIG_MMC_OCTEONTX)		+= octeontx_hsmmc.o
 obj-$(CONFIG_MMC_PCI)			+= pci_mmc.o
 obj-$(CONFIG_PXA_MMC_GENERIC) += pxa_mmc_gen.o
 obj-$(CONFIG_$(SPL_TPL_)SUPPORT_EMMC_RPMB) += rpmb.o
diff --git a/drivers/mmc/octeontx_hsmmc.c b/drivers/mmc/octeontx_hsmmc.c
new file mode 100644
index 0000000..ddc3669
--- /dev/null
+++ b/drivers/mmc/octeontx_hsmmc.c
@@ -0,0 +1,3897 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+//#define DEBUG
+#include <cpu_func.h>
+#include <dm.h>
+#include <dm/lists.h>
+#include <env.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <log.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <mmc.h>
+#include <part.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <time.h>
+#include <watchdog.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+
+#include <asm/arch/board.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/csrs/csrs-mio_emm.h>
+#include <asm/io.h>
+
+#include <power/regulator.h>
+
+#include "octeontx_hsmmc.h"
+
+#define MMC_TIMEOUT_SHORT	20	/* in ms */
+#define MMC_TIMEOUT_LONG	1000
+#define MMC_TIMEOUT_ERASE	10000
+
+#define MMC_DEFAULT_DATA_IN_TAP			10
+#define MMC_DEFAULT_CMD_IN_TAP			10
+#define MMC_DEFAULT_CMD_OUT_TAP			39
+#define MMC_DEFAULT_DATA_OUT_TAP		39
+#define MMC_DEFAULT_HS200_CMD_IN_TAP		24
+#define MMC_DEFAULT_HS200_DATA_IN_TAP		24
+#define MMC_DEFAULT_HS200_CMD_OUT_TAP	(otx_is_soc(CN95XX) ? 10 : 5)
+#define MMC_DEFAULT_HS200_DATA_OUT_TAP	(otx_is_soc(CN95XX) ? 10 : 5)
+#define MMC_DEFAULT_HS400_CMD_OUT_TAP	(otx_is_soc(CN95XX) ? 10 : 5)
+#define MMC_DEFAULT_HS400_DATA_OUT_TAP	(otx_is_soc(CN95XX) ? 5 : 3)
+#define MMC_DEFAULT_HS200_CMD_OUT_DLY		800	/* Delay in ps */
+#define MMC_DEFAULT_HS200_DATA_OUT_DLY		800	/* Delay in ps */
+#define MMC_DEFAULT_HS400_CMD_OUT_DLY		800	/* Delay in ps */
+#define MMC_DEFAULT_HS400_DATA_OUT_DLY		400	/* Delay in ps */
+#define MMC_DEFAULT_SD_UHS_SDR104_CMD_OUT_TAP	MMC_DEFAULT_HS200_CMD_OUT_TAP
+#define MMC_DEFAULT_SD_UHS_SDR104_DATA_OUT_TAP	MMC_DEFAULT_HS200_DATA_OUT_TAP
+#define MMC_LEGACY_DEFAULT_CMD_OUT_TAP		39
+#define MMC_LEGACY_DEFAULT_DATA_OUT_TAP		39
+#define MMC_SD_LEGACY_DEFAULT_CMD_OUT_TAP	63
+#define MMC_SD_LEGACY_DEFAULT_DATA_OUT_TAP	63
+#define MMC_HS_CMD_OUT_TAP			32
+#define MMC_HS_DATA_OUT_TAP			32
+#define MMC_SD_HS_CMD_OUT_TAP			26
+#define MMC_SD_HS_DATA_OUT_TAP			26
+#define MMC_SD_UHS_SDR25_CMD_OUT_TAP		26
+#define MMC_SD_UHS_SDR25_DATA_OUT_TAP		26
+#define MMC_SD_UHS_SDR50_CMD_OUT_TAP		26
+#define MMC_SD_UHS_SDR50_DATA_OUT_TAP		26
+#define MMC_DEFAULT_TAP_DELAY			4
+#define TOTAL_NO_OF_TAPS			512
+static void octeontx_mmc_switch_to(struct mmc *mmc);
+static int octeontx_mmc_configure_delay(struct mmc *mmc);
+static void octeontx_mmc_set_timing(struct mmc *mmc);
+static void set_wdog(struct mmc *mmc, u64 us);
+static void do_switch(struct mmc *mmc, union mio_emm_switch emm_switch);
+static int octeontx_mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
+				 struct mmc_data *data);
+static int octeontx2_mmc_calc_delay(struct mmc *mmc, int delay);
+static int octeontx_mmc_calibrate_delay(struct mmc *mmc);
+static int octeontx_mmc_set_input_bus_timing(struct mmc *mmc);
+static int octeontx_mmc_set_output_bus_timing(struct mmc *mmc);
+
+static bool host_probed;
+
+/**
+ * Get the slot data structure from a MMC data structure
+ *
+ * @param mmc	pointer to the mmc structure embedded in the slot
+ * @return	pointer to the enclosing octeontx_mmc_slot
+ */
+static inline struct octeontx_mmc_slot *mmc_to_slot(struct mmc *mmc)
+{
+	return container_of(mmc, struct octeontx_mmc_slot, mmc);
+}
+
+/** Get the host data structure from a MMC data structure */
+static inline struct octeontx_mmc_host *mmc_to_host(struct mmc *mmc)
+{
+	return mmc_to_slot(mmc)->host;
+}
+
+/** Get the slot data structure stored as a device's private data */
+static inline struct octeontx_mmc_slot *dev_to_mmc_slot(struct udevice *dev)
+{
+	return dev_get_priv(dev);
+}
+
+/** Get the mmc structure embedded in a device's slot private data */
+static inline struct mmc *dev_to_mmc(struct udevice *dev)
+{
+	return &((struct octeontx_mmc_slot *)dev_get_priv(dev))->mmc;
+}
+
+#ifdef DEBUG
+/**
+ * mmc_reg_str() - Return the name of a MIO_EMM CSR register offset
+ *
+ * Debug helper used by the CSR read/write trace output.
+ *
+ * @param reg	register offset relative to the controller base address
+ * @return	static name string, or "UNKNOWN" if the offset is not known
+ */
+const char *mmc_reg_str(u64 reg)
+{
+	if (reg == MIO_EMM_DMA_CFG())
+		return "MIO_EMM_DMA_CFG";
+	if (reg == MIO_EMM_DMA_ADR())
+		return "MIO_EMM_DMA_ADR";
+	if (reg == MIO_EMM_DMA_INT())
+		return "MIO_EMM_DMA_INT";
+	if (reg == MIO_EMM_CFG())
+		return "MIO_EMM_CFG";
+	if (reg == MIO_EMM_MODEX(0))
+		return "MIO_EMM_MODE0";
+	if (reg == MIO_EMM_MODEX(1))
+		return "MIO_EMM_MODE1";
+	if (reg == MIO_EMM_MODEX(2))
+		return "MIO_EMM_MODE2";
+	if (reg == MIO_EMM_MODEX(3))
+		return "MIO_EMM_MODE3";
+	if (reg == MIO_EMM_IO_CTL())
+		return "MIO_EMM_IO_CTL";
+	if (reg == MIO_EMM_SWITCH())
+		return "MIO_EMM_SWITCH";
+	if (reg == MIO_EMM_DMA())
+		return "MIO_EMM_DMA";
+	if (reg == MIO_EMM_CMD())
+		return "MIO_EMM_CMD";
+	if (reg == MIO_EMM_RSP_STS())
+		return "MIO_EMM_RSP_STS";
+	if (reg == MIO_EMM_RSP_LO())
+		return "MIO_EMM_RSP_LO";
+	if (reg == MIO_EMM_RSP_HI())
+		return "MIO_EMM_RSP_HI";
+	if (reg == MIO_EMM_INT())
+		return "MIO_EMM_INT";
+	if (reg == MIO_EMM_WDOG())
+		return "MIO_EMM_WDOG";
+	if (reg == MIO_EMM_DMA_ARG())
+		return "MIO_EMM_DMA_ARG";
+	/* MIO_EMM_SAMPLE is only compared on OcteonTX builds */
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX)) {
+		if (reg == MIO_EMM_SAMPLE())
+			return "MIO_EMM_SAMPLE";
+	}
+	if (reg == MIO_EMM_STS_MASK())
+		return "MIO_EMM_STS_MASK";
+	if (reg == MIO_EMM_RCA())
+		return "MIO_EMM_RCA";
+	if (reg == MIO_EMM_BUF_IDX())
+		return "MIO_EMM_BUF_IDX";
+	if (reg == MIO_EMM_BUF_DAT())
+		return "MIO_EMM_BUF_DAT";
+	/* The following registers are only compared on OcteonTX2 builds */
+	if (!IS_ENABLED(CONFIG_ARCH_OCTEONTX)) {
+		if (reg == MIO_EMM_CALB())
+			return "MIO_EMM_CALB";
+		if (reg == MIO_EMM_TAP())
+			return "MIO_EMM_TAP";
+		if (reg == MIO_EMM_TIMING())
+			return "MIO_EMM_TIMING";
+		if (reg == MIO_EMM_DEBUG())
+			return "MIO_EMM_DEBUG";
+	}
+
+	return "UNKNOWN";
+}
+#endif
+
+/**
+ * Decode and print the MIO_EMM_RSP_STS register field by field.  Compiles
+ * to an empty function unless DEBUG is defined.
+ *
+ * @param mmc	MMC device whose controller status register is dumped
+ */
+static void octeontx_print_rsp_sts(struct mmc *mmc)
+{
+#ifdef DEBUG
+	union mio_emm_rsp_sts emm_rsp_sts;
+	const struct octeontx_mmc_host *host = mmc_to_host(mmc);
+	static const char * const ctype_xor_str[] = {
+		"No data",
+		"Read data into Dbuf",
+		"Write data from Dbuf",
+		"Reserved"
+	};
+
+	static const char * const rtype_xor_str[] = {
+		"No response",
+		"R1, 48 bits",
+		"R2, 136 bits",
+		"R3, 48 bits",
+		"R4, 48 bits",
+		"R5, 48 bits",
+		"Reserved 6",
+		"Reserved 7"
+	};
+
+	emm_rsp_sts.u = readq(host->base_addr + MIO_EMM_RSP_STS());
+	printf("\nMIO_EMM_RSP_STS:              0x%016llx\n", emm_rsp_sts.u);
+	printf("    60-61: bus_id:              %u\n", emm_rsp_sts.s.bus_id);
+	printf("    59:    cmd_val:             %s\n",
+	       emm_rsp_sts.s.cmd_val ? "yes" : "no");
+	printf("    58:    switch_val:          %s\n",
+	       emm_rsp_sts.s.switch_val ? "yes" : "no");
+	printf("    57:    dma_val:             %s\n",
+	       emm_rsp_sts.s.dma_val ? "yes" : "no");
+	printf("    56:    dma_pend:            %s\n",
+	       emm_rsp_sts.s.dma_pend ? "yes" : "no");
+	printf("    28:    dbuf_err:            %s\n",
+	       emm_rsp_sts.s.dbuf_err ? "yes" : "no");
+	printf("    23:    dbuf:                %u\n", emm_rsp_sts.s.dbuf);
+	printf("    22:    blk_timeout:         %s\n",
+	       emm_rsp_sts.s.blk_timeout ? "yes" : "no");
+	printf("    21:    blk_crc_err:         %s\n",
+	       emm_rsp_sts.s.blk_crc_err ? "yes" : "no");
+	printf("    20:    rsp_busybit:         %s\n",
+	       emm_rsp_sts.s.rsp_busybit ? "yes" : "no");
+	printf("    19:    stp_timeout:         %s\n",
+	       emm_rsp_sts.s.stp_timeout ? "yes" : "no");
+	printf("    18:    stp_crc_err:         %s\n",
+	       emm_rsp_sts.s.stp_crc_err ? "yes" : "no");
+	printf("    17:    stp_bad_sts:         %s\n",
+	       emm_rsp_sts.s.stp_bad_sts ? "yes" : "no");
+	printf("    16:    stp_val:             %s\n",
+	       emm_rsp_sts.s.stp_val ? "yes" : "no");
+	printf("    15:    rsp_timeout:         %s\n",
+	       emm_rsp_sts.s.rsp_timeout ? "yes" : "no");
+	printf("    14:    rsp_crc_err:         %s\n",
+	       emm_rsp_sts.s.rsp_crc_err ? "yes" : "no");
+	printf("    13:    rsp_bad_sts:         %s\n",
+	       emm_rsp_sts.s.rsp_bad_sts ? "yes" : "no");
+	printf("    12:    rsp_val:             %s\n",
+	       emm_rsp_sts.s.rsp_val ? "yes" : "no");
+	printf("    9-11:  rsp_type:            %s\n",
+	       rtype_xor_str[emm_rsp_sts.s.rsp_type]);
+	printf("    7-8:   cmd_type:            %s\n",
+	       ctype_xor_str[emm_rsp_sts.s.cmd_type]);
+	printf("    1-6:   cmd_idx:             %u\n",
+	       emm_rsp_sts.s.cmd_idx);
+	printf("    0:     cmd_done:            %s\n",
+	       emm_rsp_sts.s.cmd_done ? "yes" : "no");
+#endif
+}
+
+/**
+ * Reads a CSR register
+ *
+ * @param[in]	mmc	pointer to mmc data structure
+ * @param	reg	register offset
+ *
+ * @return	value read from the register
+ */
+static inline u64 read_csr(struct mmc *mmc, u64 reg)
+{
+	const struct octeontx_mmc_host *host = mmc_to_host(mmc);
+	u64 value = readq(host->base_addr + reg);
+#ifdef DEBUG_CSR
+	printf("        %s: %s(0x%p) => 0x%llx\n", __func__,
+	       mmc_reg_str(reg), host->base_addr + reg,
+	       value);
+#endif
+	return value;
+}
+
+/**
+ * Writes to a CSR register
+ *
+ * @param[in]	mmc	pointer to mmc data structure
+ * @param	reg	register offset
+ * @param	value	value to write to register
+ */
+static inline void write_csr(struct mmc *mmc, u64 reg, u64 value)
+{
+	const struct octeontx_mmc_host *host = mmc_to_host(mmc);
+	void *addr = host->base_addr + reg;
+
+#ifdef DEBUG_CSR
+	/* Trace every CSR write when DEBUG_CSR is defined */
+	printf("        %s: %s(0x%p) <= 0x%llx\n", __func__, mmc_reg_str(reg),
+	       addr, value);
+#endif
+	writeq(value, addr);
+}
+
+#ifdef DEBUG
+/**
+ * Decode and print an R1 card status word.  Only produces output when
+ * DEBUG_STATUS is also defined; otherwise this compiles to an empty body.
+ *
+ * @param status	R1 status value returned by the card
+ */
+static void mmc_print_status(u32 status)
+{
+#ifdef DEBUG_STATUS
+	static const char * const state[] = {
+		"Idle",		/* 0 */
+		"Ready",	/* 1 */
+		"Ident",	/* 2 */
+		"Standby",	/* 3 */
+		"Tran",		/* 4 */
+		"Data",		/* 5 */
+		"Receive",	/* 6 */
+		"Program",	/* 7 */
+		"Dis",		/* 8 */
+		"Btst",		/* 9 */
+		"Sleep",	/* 10 */
+		"reserved",	/* 11 */
+		"reserved",	/* 12 */
+		"reserved",	/* 13 */
+		"reserved",	/* 14 */
+		"reserved"	/* 15 */ };
+	if (status & R1_APP_CMD)
+		puts("MMC ACMD\n");
+	if (status & R1_SWITCH_ERROR)
+		puts("MMC switch error\n");
+	if (status & R1_READY_FOR_DATA)
+		puts("MMC ready for data\n");
+	printf("MMC %s state\n", state[R1_CURRENT_STATE(status)]);
+	if (status & R1_ERASE_RESET)
+		puts("MMC erase reset\n");
+	if (status & R1_WP_ERASE_SKIP)
+		puts("MMC partial erase due to write protected blocks\n");
+	if (status & R1_CID_CSD_OVERWRITE)
+		puts("MMC CID/CSD overwrite error\n");
+	if (status & R1_ERROR)
+		puts("MMC undefined device error\n");
+	if (status & R1_CC_ERROR)
+		puts("MMC device error\n");
+	if (status & R1_CARD_ECC_FAILED)
+		puts("MMC internal ECC failed to correct data\n");
+	if (status & R1_ILLEGAL_COMMAND)
+		puts("MMC illegal command\n");
+	if (status & R1_COM_CRC_ERROR)
+		puts("MMC CRC of previous command failed\n");
+	if (status & R1_LOCK_UNLOCK_FAILED)
+		puts("MMC sequence or password error in lock/unlock device command\n");
+	if (status & R1_CARD_IS_LOCKED)
+		puts("MMC device locked by host\n");
+	if (status & R1_WP_VIOLATION)
+		puts("MMC attempt to program write protected block\n");
+	if (status & R1_ERASE_PARAM)
+		puts("MMC invalid selection of erase groups for erase\n");
+	if (status & R1_ERASE_SEQ_ERROR)
+		puts("MMC error in sequence of erase commands\n");
+	if (status & R1_BLOCK_LEN_ERROR)
+		puts("MMC block length error\n");
+	if (status & R1_ADDRESS_ERROR)
+		puts("MMC address misalign error\n");
+	if (status & R1_OUT_OF_RANGE)
+		puts("MMC address out of range\n");
+#endif
+}
+#endif
+
+/**
+ * Print out all of the register values where mmc is optional
+ *
+ * @param mmc	MMC device (can be NULL)
+ * @param host	Pointer to host data structure (can be NULL if mmc is !NULL)
+ */
+static void octeontx_mmc_print_registers2(struct mmc *mmc,
+					  struct octeontx_mmc_host *host)
+{
+	struct octeontx_mmc_slot *slot = mmc ? mmc->priv : NULL;
+	union mio_emm_dma_cfg emm_dma_cfg;
+	union mio_emm_dma_adr emm_dma_adr;
+	union mio_emm_dma_int emm_dma_int;
+	union mio_emm_cfg emm_cfg;
+	union mio_emm_modex emm_mode;
+	union mio_emm_switch emm_switch;
+	union mio_emm_dma emm_dma;
+	union mio_emm_cmd emm_cmd;
+	union mio_emm_rsp_sts emm_rsp_sts;
+	union mio_emm_rsp_lo emm_rsp_lo;
+	union mio_emm_rsp_hi emm_rsp_hi;
+	union mio_emm_int emm_int;
+	union mio_emm_wdog emm_wdog;
+	union mio_emm_sample emm_sample;
+	union mio_emm_calb emm_calb;
+	union mio_emm_tap emm_tap;
+	union mio_emm_timing emm_timing;
+	union mio_emm_io_ctl io_ctl;
+	union mio_emm_debug emm_debug;
+	union mio_emm_sts_mask emm_sts_mask;
+	union mio_emm_rca emm_rca;
+	int bus;
+
+	static const char * const bus_width_str[] = {
+		"1-bit data bus (power on)",
+		"4-bit data bus",
+		"8-bit data bus",
+		"reserved (3)",
+		"reserved (4)",
+		"4-bit data bus (dual data rate)",
+		"8-bit data bus (dual data rate)",
+		"reserved (7)",
+		"reserved (8)",
+		"invalid (9)",
+		"invalid (10)",
+		"invalid (11)",
+		"invalid (12)",
+		"invalid (13)",
+		"invalid (14)",
+		"invalid (15)",
+	};
+	static const char * const ctype_xor_str[] = {
+		"No data",
+		"Read data into Dbuf",
+		"Write data from Dbuf",
+		"Reserved"
+	};
+
+	static const char * const rtype_xor_str[] = {
+		"No response",
+		"R1, 48 bits",
+		"R2, 136 bits",
+		"R3, 48 bits",
+		"R4, 48 bits",
+		"R5, 48 bits",
+		"Reserved 6",
+		"Reserved 7"
+	};
+
+	if (!host && mmc)
+		host = mmc_to_host(mmc);
+	/* Nothing can be dumped without a host; avoid a NULL dereference */
+	if (!host)
+		return;
+
+	if (mmc)
+		printf("%s: bus id: %u\n", __func__, slot->bus_id);
+	emm_dma_cfg.u = readq(host->base_addr + MIO_EMM_DMA_CFG());
+	printf("MIO_EMM_DMA_CFG:                0x%016llx\n",
+	       emm_dma_cfg.u);
+	printf("    63:    en:                  %s\n",
+	       emm_dma_cfg.s.en ? "enabled" : "disabled");
+	printf("    62:    rw:                  %s\n",
+	       emm_dma_cfg.s.rw ? "write" : "read");
+	printf("    61:    clr:                 %s\n",
+	       emm_dma_cfg.s.clr ? "clear" : "not clear");
+	printf("    59:    swap32:              %s\n",
+	       emm_dma_cfg.s.swap32 ? "yes" : "no");
+	printf("    58:    swap16:              %s\n",
+	       emm_dma_cfg.s.swap16 ? "yes" : "no");
+	printf("    57:    swap8:               %s\n",
+	       emm_dma_cfg.s.swap8 ? "yes" : "no");
+	printf("    56:    endian:              %s\n",
+	       emm_dma_cfg.s.endian ? "little" : "big");
+	printf("    36-55: size:                %u\n",
+	       emm_dma_cfg.s.size);
+
+	emm_dma_adr.u = readq(host->base_addr + MIO_EMM_DMA_ADR());
+	printf("MIO_EMM_DMA_ADR:              0x%016llx\n", emm_dma_adr.u);
+	printf("    0-49:  adr:                 0x%llx\n",
+	       (u64)emm_dma_adr.s.adr);
+
+	emm_dma_int.u = readq(host->base_addr + MIO_EMM_DMA_INT());
+	printf("\nMIO_EMM_DMA_INT:              0x%016llx\n",
+	       emm_dma_int.u);
+	printf("    1:     FIFO:                %s\n",
+	       emm_dma_int.s.fifo ? "yes" : "no");
+	printf("    0:     Done:                %s\n",
+	       emm_dma_int.s.done ? "yes" : "no");
+
+	emm_cfg.u = readq(host->base_addr + MIO_EMM_CFG());
+	printf("\nMIO_EMM_CFG:                  0x%016llx\n",
+	       emm_cfg.u);
+	printf("    3:     bus_ena3:            %s\n",
+	       emm_cfg.s.bus_ena & 0x08 ? "yes" : "no");
+	printf("    2:     bus_ena2:            %s\n",
+	       emm_cfg.s.bus_ena & 0x04 ? "yes" : "no");
+	printf("    1:     bus_ena1:            %s\n",
+	       emm_cfg.s.bus_ena & 0x02 ? "yes" : "no");
+	printf("    0:     bus_ena0:            %s\n",
+	       emm_cfg.s.bus_ena & 0x01 ? "yes" : "no");
+	for (bus = 0; bus < 4; bus++) {
+		emm_mode.u = readq(host->base_addr + MIO_EMM_MODEX(bus));
+		printf("\nMIO_EMM_MODE%u:               0x%016llx\n",
+		       bus, emm_mode.u);
+		if (!IS_ENABLED(CONFIG_ARCH_OCTEONTX)) {
+			printf("    50:    hs400_timing:        %s\n",
+			       emm_mode.s.hs400_timing ? "yes" : "no");
+			printf("    49:    hs200_timing:        %s\n",
+			       emm_mode.s.hs200_timing ? "yes" : "no");
+		}
+		printf("    48:    hs_timing:           %s\n",
+		       emm_mode.s.hs_timing ? "yes" : "no");
+		printf("    40-42: bus_width:           %s\n",
+		       bus_width_str[emm_mode.s.bus_width]);
+		printf("    32-35: power_class          %u\n",
+		       emm_mode.s.power_class);
+		printf("    16-31: clk_hi:              %u\n",
+		       emm_mode.s.clk_hi);
+		printf("    0-15:  clk_lo:              %u\n",
+		       emm_mode.s.clk_lo);
+	}
+
+	emm_switch.u = readq(host->base_addr + MIO_EMM_SWITCH());
+	printf("\nMIO_EMM_SWITCH:               0x%016llx\n", emm_switch.u);
+	printf("    60-61: bus_id:              %u\n", emm_switch.s.bus_id);
+	printf("    59:    switch_exe:          %s\n",
+	       emm_switch.s.switch_exe ? "yes" : "no");
+	printf("    58:    switch_err0:         %s\n",
+	       emm_switch.s.switch_err0 ? "yes" : "no");
+	printf("    57:    switch_err1:         %s\n",
+	       emm_switch.s.switch_err1 ? "yes" : "no");
+	printf("    56:    switch_err2:         %s\n",
+	       emm_switch.s.switch_err2 ? "yes" : "no");
+	printf("    48:    hs_timing:           %s\n",
+	       emm_switch.s.hs_timing ? "yes" : "no");
+	printf("    42-40: bus_width:           %s\n",
+	       bus_width_str[emm_switch.s.bus_width]);
+	printf("    32-35: power_class:         %u\n",
+	       emm_switch.s.power_class);
+	printf("    16-31: clk_hi:              %u\n",
+	       emm_switch.s.clk_hi);
+	printf("    0-15:  clk_lo:              %u\n", emm_switch.s.clk_lo);
+
+	emm_dma.u = readq(host->base_addr + MIO_EMM_DMA());
+	printf("\nMIO_EMM_DMA:                  0x%016llx\n", emm_dma.u);
+	printf("    60-61: bus_id:              %u\n", emm_dma.s.bus_id);
+	printf("    59:    dma_val:             %s\n",
+	       emm_dma.s.dma_val ? "yes" : "no");
+	printf("    58:    sector:              %s mode\n",
+	       emm_dma.s.sector ? "sector" : "byte");
+	printf("    57:    dat_null:            %s\n",
+	       emm_dma.s.dat_null ? "yes" : "no");
+	printf("    51-56: thres:               %u\n", emm_dma.s.thres);
+	printf("    50:    rel_wr:              %s\n",
+	       emm_dma.s.rel_wr ? "yes" : "no");
+	printf("    49:    rw:                  %s\n",
+	       emm_dma.s.rw ? "write" : "read");
+	printf("    48:    multi:               %s\n",
+	       emm_dma.s.multi ? "yes" : "no");
+	printf("    32-47: block_cnt:           %u\n",
+	       emm_dma.s.block_cnt);
+	printf("    0-31:  card_addr:           0x%x\n",
+	       emm_dma.s.card_addr);
+
+	emm_cmd.u = readq(host->base_addr + MIO_EMM_CMD());
+	printf("\nMIO_EMM_CMD:                  0x%016llx\n", emm_cmd.u);
+	printf("\n  62:    skip_busy:           %s\n",
+	       emm_cmd.s.skip_busy ? "yes" : "no");
+	printf("    60-61: bus_id:              %u\n", emm_cmd.s.bus_id);
+	printf("    59:    cmd_val:             %s\n",
+	       emm_cmd.s.cmd_val ? "yes" : "no");
+	printf("    55:    dbuf:                %u\n", emm_cmd.s.dbuf);
+	printf("    49-54: offset:              %u\n", emm_cmd.s.offset);
+	printf("    41-42: ctype_xor:           %s\n",
+	       ctype_xor_str[emm_cmd.s.ctype_xor]);
+	printf("    38-40: rtype_xor:           %s\n",
+	       rtype_xor_str[emm_cmd.s.rtype_xor]);
+	printf("    32-37: cmd_idx:             %u\n", emm_cmd.s.cmd_idx);
+	printf("    0-31:  arg:                 0x%x\n", emm_cmd.s.arg);
+
+	emm_rsp_sts.u = readq(host->base_addr + MIO_EMM_RSP_STS());
+	printf("\nMIO_EMM_RSP_STS:              0x%016llx\n", emm_rsp_sts.u);
+	printf("    60-61: bus_id:              %u\n", emm_rsp_sts.s.bus_id);
+	printf("    59:    cmd_val:             %s\n",
+	       emm_rsp_sts.s.cmd_val ? "yes" : "no");
+	printf("    58:    switch_val:          %s\n",
+	       emm_rsp_sts.s.switch_val ? "yes" : "no");
+	printf("    57:    dma_val:             %s\n",
+	       emm_rsp_sts.s.dma_val ? "yes" : "no");
+	printf("    56:    dma_pend:            %s\n",
+	       emm_rsp_sts.s.dma_pend ? "yes" : "no");
+	printf("    28:    dbuf_err:            %s\n",
+	       emm_rsp_sts.s.dbuf_err ? "yes" : "no");
+	printf("    23:    dbuf:                %u\n", emm_rsp_sts.s.dbuf);
+	printf("    22:    blk_timeout:         %s\n",
+	       emm_rsp_sts.s.blk_timeout ? "yes" : "no");
+	printf("    21:    blk_crc_err:         %s\n",
+	       emm_rsp_sts.s.blk_crc_err ? "yes" : "no");
+	printf("    20:    rsp_busybit:         %s\n",
+	       emm_rsp_sts.s.rsp_busybit ? "yes" : "no");
+	printf("    19:    stp_timeout:         %s\n",
+	       emm_rsp_sts.s.stp_timeout ? "yes" : "no");
+	printf("    18:    stp_crc_err:         %s\n",
+	       emm_rsp_sts.s.stp_crc_err ? "yes" : "no");
+	printf("    17:    stp_bad_sts:         %s\n",
+	       emm_rsp_sts.s.stp_bad_sts ? "yes" : "no");
+	printf("    16:    stp_val:             %s\n",
+	       emm_rsp_sts.s.stp_val ? "yes" : "no");
+	printf("    15:    rsp_timeout:         %s\n",
+	       emm_rsp_sts.s.rsp_timeout ? "yes" : "no");
+	printf("    14:    rsp_crc_err:         %s\n",
+	       emm_rsp_sts.s.rsp_crc_err ? "yes" : "no");
+	printf("    13:    rsp_bad_sts:         %s\n",
+	       emm_rsp_sts.s.rsp_bad_sts ? "yes" : "no");
+	printf("    12:    rsp_val:             %s\n",
+	       emm_rsp_sts.s.rsp_val ? "yes" : "no");
+	printf("    9-11:  rsp_type:            %s\n",
+	       rtype_xor_str[emm_rsp_sts.s.rsp_type]);
+	printf("    7-8:   cmd_type:            %s\n",
+	       ctype_xor_str[emm_rsp_sts.s.cmd_type]);
+	printf("    1-6:   cmd_idx:             %u\n",
+	       emm_rsp_sts.s.cmd_idx);
+	printf("    0:     cmd_done:            %s\n",
+	       emm_rsp_sts.s.cmd_done ? "yes" : "no");
+
+	emm_rsp_lo.u = readq(host->base_addr + MIO_EMM_RSP_LO());
+	printf("\nMIO_EMM_RSP_STS_LO:           0x%016llx\n", emm_rsp_lo.u);
+
+	emm_rsp_hi.u = readq(host->base_addr + MIO_EMM_RSP_HI());
+	printf("\nMIO_EMM_RSP_STS_HI:           0x%016llx\n", emm_rsp_hi.u);
+
+	emm_int.u = readq(host->base_addr + MIO_EMM_INT());
+	printf("\nMIO_EMM_INT:                  0x%016llx\n", emm_int.u);
+	printf("    6:    switch_err:           %s\n",
+	       emm_int.s.switch_err ? "yes" : "no");
+	printf("    5:    switch_done:          %s\n",
+	       emm_int.s.switch_done ? "yes" : "no");
+	printf("    4:    dma_err:              %s\n",
+	       emm_int.s.dma_err ? "yes" : "no");
+	printf("    3:    cmd_err:              %s\n",
+	       emm_int.s.cmd_err ? "yes" : "no");
+	printf("    2:    dma_done:             %s\n",
+	       emm_int.s.dma_done ? "yes" : "no");
+	printf("    1:    cmd_done:             %s\n",
+	       emm_int.s.cmd_done ? "yes" : "no");
+	printf("    0:    buf_done:             %s\n",
+	       emm_int.s.buf_done ? "yes" : "no");
+
+	emm_wdog.u = readq(host->base_addr + MIO_EMM_WDOG());
+	printf("\nMIO_EMM_WDOG:                 0x%016llx (%u)\n",
+	       emm_wdog.u, emm_wdog.s.clk_cnt);
+
+	/* MIO_EMM_SAMPLE is only read on OcteonTX builds */
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX)) {
+		emm_sample.u = readq(host->base_addr + MIO_EMM_SAMPLE());
+		printf("\nMIO_EMM_SAMPLE:               0x%016llx\n",
+		       emm_sample.u);
+		printf("    16-25: cmd_cnt:             %u\n",
+		       emm_sample.s.cmd_cnt);
+		printf("    0-9:   dat_cnt:             %u\n",
+		       emm_sample.s.dat_cnt);
+	}
+
+	emm_sts_mask.u = readq(host->base_addr + MIO_EMM_STS_MASK());
+	printf("\nMIO_EMM_STS_MASK:             0x%016llx\n", emm_sts_mask.u);
+
+	emm_rca.u = readq(host->base_addr + MIO_EMM_RCA());
+	printf("\nMIO_EMM_RCA:                  0x%016llx\n", emm_rca.u);
+	printf("    0-15:  card_rca:            0x%04x\n",
+	       emm_rca.s.card_rca);
+	/* The remaining registers are only read on OcteonTX2 builds */
+	if (!IS_ENABLED(CONFIG_ARCH_OCTEONTX)) {
+		emm_calb.u = readq(host->base_addr + MIO_EMM_CALB());
+		printf("\nMIO_EMM_CALB:                 0x%016llx\n",
+		       emm_calb.u);
+		printf("       0:  start:               %u\n",
+		       emm_calb.s.start);
+		emm_tap.u = readq(host->base_addr + MIO_EMM_TAP());
+		printf("\nMIO_EMM_TAP:                  0x%016llx\n",
+		       emm_tap.u);
+		printf("     7-0:  delay:               %u\n", emm_tap.s.delay);
+		emm_timing.u = readq(host->base_addr + MIO_EMM_TIMING());
+		printf("\nMIO_EMM_TIMING:               0x%016llx\n",
+		       emm_timing.u);
+		printf("   53-48:  cmd_in_tap:          %u\n",
+		       emm_timing.s.cmd_in_tap);
+		printf("   37-32:  cmd_out_tap:         %u\n",
+		       emm_timing.s.cmd_out_tap);
+		printf("   21-16:  data_in_tap:         %u\n",
+		       emm_timing.s.data_in_tap);
+		printf("     5-0:  data_out_tap:        %u\n",
+		       emm_timing.s.data_out_tap);
+		io_ctl.u = readq(host->base_addr + MIO_EMM_IO_CTL());
+		printf("\nMIO_IO_CTL:                   0x%016llx\n", io_ctl.u);
+		printf("     3-2:  drive:               %u (%u mA)\n",
+		       io_ctl.s.drive, 2 << io_ctl.s.drive);
+		printf("       0:  slew:                %u %s\n", io_ctl.s.slew,
+		       io_ctl.s.slew ? "high" : "low");
+		emm_debug.u = readq(host->base_addr + MIO_EMM_DEBUG());
+		printf("\nMIO_EMM_DEBUG:                0x%016llx\n",
+		       emm_debug.u);
+		printf("      21: rdsync_rst            0x%x\n",
+		       emm_debug.s.rdsync_rst);
+		printf("      20: emmc_clk_disable      0x%x\n",
+		       emm_debug.s.emmc_clk_disable);
+		printf("   19-16: dma_sm:               0x%x\n",
+		       emm_debug.s.dma_sm);
+		printf("   15-12: data_sm:              0x%x\n",
+		       emm_debug.s.data_sm);
+		printf("    11-8: cmd_sm:               0x%x\n",
+		       emm_debug.s.cmd_sm);
+		printf("       0: clk_on:               0x%x\n",
+		       emm_debug.s.clk_on);
+	}
+
+	puts("\n");
+}
+
+/**
+ * Print out all of the register values
+ *
+ * @param mmc	MMC device
+ */
+static void octeontx_mmc_print_registers(struct mmc *mmc)
+{
+/* Register dumping is only compiled in when DEBUG_REGISTERS is defined */
+#ifdef DEBUG_REGISTERS
+	const int print = 1;
+#else
+	const int print = 0;
+#endif
+	if (print)
+		octeontx_mmc_print_registers2(mmc, mmc_to_host(mmc));
+}
+
+/*
+ * Per-command command-type/response-type pairs, indexed by command index
+ * (0-63).  Each entry holds three {c, r} pairs ordered (per the per-entry
+ * comments below): eMMC, SD, and SD application (ACMD) commands.
+ * octeontx_mmc_get_cr_mods() uses the eMMC pair as the baseline and XORs
+ * in the SD or ACMD response type when an SD card is in use.
+ */
+static const struct octeontx_sd_mods octeontx_cr_types[] = {
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD0 */
+{ {0, 3}, {0, 3}, {0, 0} },	/* CMD1 */
+{ {0, 2}, {0, 2}, {0, 0} },	/* CMD2 */
+{ {0, 1}, {0, 3}, {0, 0} },	/* CMD3 SD_CMD_SEND_RELATIVE_ADDR 0, 2 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD4 */
+{ {0, 1}, {0, 1}, {0, 0} },	/* CMD5 */
+{ {0, 1}, {1, 1}, {0, 1} },	/*
+				 * CMD6 SD_CMD_SWITCH_FUNC 1,0
+				 * (ACMD) SD_APP_SET_BUS_WIDTH
+				 */
+{ {0, 1}, {0, 1}, {0, 0} },	/* CMD7 */
+{ {1, 1}, {0, 3}, {0, 0} },	/* CMD8 SD_CMD_SEND_IF_COND 1,2 */
+{ {0, 2}, {0, 2}, {0, 0} },	/* CMD9 */
+{ {0, 2}, {0, 2}, {0, 0} },	/* CMD10 */
+{ {1, 1}, {0, 1}, {1, 1} },	/* CMD11 SD_CMD_SWITCH_UHS18V 1,0 */
+{ {0, 1}, {0, 1}, {0, 0} },	/* CMD12 */
+{ {0, 1}, {0, 1}, {1, 3} },	/* CMD13 (ACMD)) SD_CMD_APP_SD_STATUS 1,2 */
+{ {1, 1}, {1, 1}, {0, 0} },	/* CMD14 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD15 */
+{ {0, 1}, {0, 1}, {0, 0} },	/* CMD16 */
+{ {1, 1}, {1, 1}, {0, 0} },	/* CMD17 */
+{ {1, 1}, {1, 1}, {0, 0} },	/* CMD18 */
+{ {3, 1}, {3, 1}, {0, 0} },	/* CMD19 */
+{ {2, 1}, {0, 0}, {0, 0} },	/* CMD20 */	/* SD 2,0 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD21 */
+{ {0, 0}, {0, 0}, {1, 1} },	/* CMD22 (ACMD) SD_APP_SEND_NUM_WR_BLKS 1,0 */
+{ {0, 1}, {0, 1}, {0, 1} },	/* CMD23 */	/* SD ACMD 1,0 */
+{ {2, 1}, {2, 1}, {2, 1} },	/* CMD24 */
+{ {2, 1}, {2, 1}, {2, 1} },	/* CMD25 */
+{ {2, 1}, {2, 1}, {2, 1} },	/* CMD26 */
+{ {2, 1}, {2, 1}, {2, 1} },	/* CMD27 */
+{ {0, 1}, {0, 1}, {0, 1} },	/* CMD28 */
+{ {0, 1}, {0, 1}, {0, 1} },	/* CMD29 */
+{ {1, 1}, {1, 1}, {1, 1} },	/* CMD30 */
+{ {1, 1}, {1, 1}, {1, 1} },	/* CMD31 */
+{ {0, 0}, {0, 1}, {0, 0} },	/* CMD32 SD_CMD_ERASE_WR_BLK_START 0,1 */
+{ {0, 0}, {0, 1}, {0, 0} },	/* CMD33 SD_CMD_ERASE_WR_BLK_END 0,1 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD34 */
+{ {0, 1}, {0, 1}, {0, 1} },	/* CMD35 */
+{ {0, 1}, {0, 1}, {0, 1} },	/* CMD36 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD37 */
+{ {0, 1}, {0, 1}, {0, 1} },	/* CMD38 */
+{ {0, 4}, {0, 4}, {0, 4} },	/* CMD39 */
+{ {0, 5}, {0, 5}, {0, 5} },	/* CMD40 */
+{ {0, 0}, {0, 0}, {0, 3} },	/* CMD41 (ACMD) SD_CMD_APP_SEND_OP_COND 0,3 */
+{ {2, 1}, {2, 1}, {2, 1} },	/* CMD42 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD43 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD44 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD45 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD46 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD47 */
+{ {0, 0}, {1, 0}, {0, 0} },	/* CMD48 SD_CMD_READ_EXTR_SINGLE */
+{ {0, 0}, {2, 0}, {0, 0} },	/* CMD49 SD_CMD_WRITE_EXTR_SINGLE */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD50 */
+{ {0, 0}, {0, 0}, {1, 1} },	/* CMD51 (ACMD) SD_CMD_APP_SEND_SCR 1,1 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD52 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD53 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD54 */
+{ {0, 1}, {0, 1}, {0, 1} },	/* CMD55 */
+{ {0xff, 0xff}, {0xff, 0xff}, {0xff, 0xff} },	/* CMD56 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD57 */
+{ {0, 0}, {0, 3}, {0, 3} },	/* CMD58 SD_CMD_SPI_READ_OCR 0,3 */
+{ {0, 0}, {0, 1}, {0, 0} },	/* CMD59 SD_CMD_SPI_CRC_ON_OFF 0,1 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD60 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD61 */
+{ {0, 0}, {0, 0}, {0, 0} },	/* CMD62 */
+{ {0, 0}, {0, 0}, {0, 0} }	/* CMD63 */
+};
+
+/**
+ * Returns XOR values needed for SD commands and other quirks
+ *
+ * @param	mmc	mmc device
+ * @param	cmd	command information
+ * @param	data	optional data phase of the command (may be NULL)
+ *
+ * @return octeontx_mmc_cr_mods data structure with various quirks and flags
+ */
+static struct octeontx_mmc_cr_mods
+octeontx_mmc_get_cr_mods(struct mmc *mmc, const struct mmc_cmd *cmd,
+			 const struct mmc_data *data)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	struct octeontx_mmc_cr_mods mods = {0, 0};
+	const struct octeontx_sd_mods *entry =
+					&octeontx_cr_types[cmd->cmdidx & 0x3f];
+	u8 ctype = entry->mmc.c;
+	u8 rtype = entry->mmc.r;
+	u8 want_ctype = 0;
+
+	/* eMMC devices need no SD-specific overrides (except HS200 tuning) */
+	if (IS_MMC(mmc)) {
+#ifdef MMC_SUPPORTS_TUNING
+		if (cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) {
+			if (cmd->resp_type == MMC_RSP_R1)
+				mods.rtype_xor = 1;
+			if (data && data->flags & MMC_DATA_READ)
+				mods.ctype_xor = 1;
+		}
+#endif
+		return mods;
+	}
+
+	/* CMD56 (GEN_CMD): direction is encoded in bit 0 of the argument */
+	if (cmd->cmdidx == 56)
+		ctype = (cmd->cmdarg & 1) ? 1 : 2;
+
+	if (data && (data->flags & MMC_DATA_READ))
+		want_ctype = 1;
+	else if (data && (data->flags & MMC_DATA_WRITE))
+		want_ctype = 2;
+
+	mods.ctype_xor = ctype ^ want_ctype;
+	/* ACMDs have their own response-type column in the quirk table */
+	mods.rtype_xor = rtype ^
+			 (slot->is_acmd ? entry->sdacmd.r : entry->sd.r);
+
+	debug("%s(%s): mmc c: %d, mmc r: %d, desired c: %d, xor c: %d, xor r: %d\n",
+	      __func__, mmc->dev->name, ctype, rtype, want_ctype,
+	      mods.ctype_xor, mods.rtype_xor);
+	return mods;
+}
+
+/**
+ * Keep track of switch commands internally
+ *
+ * Records in slot->want_switch what a CMD6 (SWITCH) write-byte access
+ * intends to change, so the driver can mirror the card's mode.
+ */
+static void octeontx_mmc_track_switch(struct mmc *mmc, u32 cmd_arg)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	const u8 access = (cmd_arg >> 24) & 3;
+	const u8 index = (u8)(cmd_arg >> 16);
+	const u8 value = (u8)(cmd_arg >> 8);
+	u8 timing;
+
+	/* Start from the last committed switch state */
+	slot->want_switch = slot->cached_switch;
+
+	/* Only plain (non-ACMD) write-byte (access mode 3) switches matter */
+	if (slot->is_acmd || access != 3)
+		return;
+
+	if (index == EXT_CSD_BUS_WIDTH) {
+		slot->want_switch.s.bus_width = value;
+	} else if (index == EXT_CSD_POWER_CLASS) {
+		slot->want_switch.s.power_class = value;
+	} else if (index == EXT_CSD_HS_TIMING) {
+		/* Clear all timing flags, then set the one requested */
+		slot->want_switch.s.hs_timing = 0;
+		slot->want_switch.s.hs200_timing = 0;
+		slot->want_switch.s.hs400_timing = 0;
+		timing = value & 0xf;
+		if (timing == 1) {
+			slot->want_switch.s.hs_timing = 1;
+		} else if (timing == 2) {
+			/* HS200/HS400 are skipped on ASIM/emulation targets */
+			if (!slot->is_asim && !slot->is_emul)
+				slot->want_switch.s.hs200_timing = 1;
+		} else if (timing == 3) {
+			if (!slot->is_asim && !slot->is_emul)
+				slot->want_switch.s.hs400_timing = 1;
+		} else if (timing != 0) {
+			pr_err("%s(%s): Unsupported timing mode 0x%x\n",
+			       __func__, mmc->dev->name, value & 0xf);
+		}
+	}
+}
+
+static int octeontx_mmc_print_rsp_errors(struct mmc *mmc,
+					 union mio_emm_rsp_sts rsp_sts)
+{
+	bool err = false;
+	const char *name = mmc->dev->name;
+
+	if (rsp_sts.s.acc_timeout) {
+		pr_warn("%s(%s): acc_timeout\n", __func__, name);
+		err = true;
+	}
+	if (rsp_sts.s.dbuf_err) {
+		pr_warn("%s(%s): dbuf_err\n", __func__, name);
+		err = true;
+	}
+	if (rsp_sts.s.blk_timeout) {
+		pr_warn("%s(%s): blk_timeout\n", __func__, name);
+		err = true;
+	}
+	if (rsp_sts.s.blk_crc_err) {
+		pr_warn("%s(%s): blk_crc_err\n", __func__, name);
+		err = true;
+	}
+	if (rsp_sts.s.stp_timeout) {
+		pr_warn("%s(%s): stp_timeout\n", __func__, name);
+		err = true;
+	}
+	if (rsp_sts.s.stp_crc_err) {
+		pr_warn("%s(%s): stp_crc_err\n", __func__, name);
+		err = true;
+	}
+	if (rsp_sts.s.stp_bad_sts) {
+		pr_warn("%s(%s): stp_bad_sts\n", __func__, name);
+		err = true;
+	}
+	if (err)
+		pr_warn("  rsp_sts: 0x%llx\n", rsp_sts.u);
+
+	return err ? -1 : 0;
+}
+
+/**
+ * Starts a DMA operation for block read/write
+ *
+ * @param	mmc	mmc device
+ * @param	write	true if write operation
+ * @param	clear	true to clear DMA operation
+ * @param	block	starting block number (converted to a byte offset
+ *			below for byte-addressed, non-high-capacity cards)
+ * @param	adr	source or destination DMA address
+ * @param	size	size in blocks
+ * @param	timeout	timeout in ms
+ */
+static void octeontx_mmc_start_dma(struct mmc *mmc, bool write,
+				   bool clear, u32 block, dma_addr_t adr,
+				   u32 size, int timeout)
+{
+	const struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	union mio_emm_dma_cfg emm_dma_cfg;
+	union mio_emm_dma_adr emm_dma_adr;
+	union mio_emm_dma emm_dma;
+
+	/* Clear any interrupts */
+	write_csr(mmc, MIO_EMM_DMA_INT(),
+		  read_csr(mmc, MIO_EMM_DMA_INT()));
+
+	/* Program the DMA engine: direction and size (in 64-bit words - 1) */
+	emm_dma_cfg.u = 0;
+	emm_dma_cfg.s.en = 1;
+	emm_dma_cfg.s.rw = !!write;
+	emm_dma_cfg.s.clr = !!clear;
+	/*
+	 * NOTE(review): the size is derived from read_bl_len even when
+	 * writing -- presumably read and write block lengths always match
+	 * for this controller; confirm.
+	 */
+	emm_dma_cfg.s.size = ((u64)(size * mmc->read_bl_len) / 8) - 1;
+#if __BYTE_ORDER != __BIG_ENDIAN
+	/* Request byte swapping on little-endian hosts */
+	emm_dma_cfg.s.endian = 1;
+#endif
+	emm_dma_adr.u = 0;
+	emm_dma_adr.s.adr = adr;
+	write_csr(mmc, MIO_EMM_DMA_ADR(), emm_dma_adr.u);
+	write_csr(mmc, MIO_EMM_DMA_CFG(), emm_dma_cfg.u);
+
+	/* Describe the card-side transfer */
+	emm_dma.u = 0;
+	emm_dma.s.bus_id = slot->bus_id;
+	emm_dma.s.dma_val = 1;
+	emm_dma.s.rw = !!write;
+	emm_dma.s.sector = mmc->high_capacity ? 1 : 0;
+
+	/*
+	 * Use multi-block mode for >1 block, except for SD cards that do
+	 * not advertise it (bit 1 of scr[0] -- TODO confirm bit meaning)
+	 */
+	if (size > 1 && ((IS_SD(mmc) && (mmc->scr[0] & 2)) || !IS_SD(mmc)))
+		emm_dma.s.multi = 1;
+	else
+		emm_dma.s.multi = 0;
+
+	emm_dma.s.block_cnt = size;
+	/* Byte-addressed cards take a byte offset, not a block number */
+	if (!mmc->high_capacity)
+		block *= mmc->read_bl_len;
+	emm_dma.s.card_addr = block;
+	debug("%s(%s): card address: 0x%x, size: %d, multi: %d\n",
+	      __func__, mmc->dev->name, block, size, emm_dma.s.multi);
+
+	/* Scale the timeout (presumably ms units for set_wdog scaling --
+	 * confirm) with a small margin subtracted
+	 */
+	if (timeout > 0)
+		timeout = (timeout * 1000) - 1000;
+	set_wdog(mmc, timeout);
+
+	debug("  Writing 0x%llx to mio_emm_dma\n", emm_dma.u);
+	/* Kick off the transfer */
+	write_csr(mmc, MIO_EMM_DMA(), emm_dma.u);
+}
+
+/*
+ * NOTE(review): a stale, truncated duplicate of the documentation block
+ * for octeontx_mmc_wait_dma() used to sit here with no function attached
+ * (it was missing the @param write and @param verbose entries).  The DMA
+ * completion wait is implemented by octeontx_mmc_wait_dma() below; see
+ * its up-to-date documentation there.
+ */
+
+/**
+ * Cleanup DMA engine after a failure
+ *
+ * Attempts (up to four times) to abort a stuck DMA operation and waits
+ * up to 100ms per attempt for the pending/valid bits to clear.
+ *
+ * @param	mmc	mmc device
+ * @param	rsp_sts	rsp status
+ */
+static void octeontx_mmc_cleanup_dma(struct mmc *mmc,
+				     union mio_emm_rsp_sts rsp_sts)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	union mio_emm_dma emm_dma;
+	ulong start;
+	int retries = 3;
+
+	do {
+		debug("%s(%s): rsp_sts: 0x%llx, rsp_lo: 0x%llx, dma_int: 0x%llx\n",
+		      __func__, mmc->dev->name, rsp_sts.u,
+		      read_csr(mmc, MIO_EMM_RSP_LO()),
+		      read_csr(mmc, MIO_EMM_DMA_INT()));
+		/*
+		 * Re-issue the DMA with dat_null set; presumably this makes
+		 * the controller discard the remaining data -- confirm with
+		 * the hardware reference manual.
+		 */
+		emm_dma.u = read_csr(mmc, MIO_EMM_DMA());
+		emm_dma.s.dma_val = 1;
+		emm_dma.s.dat_null = 1;
+		emm_dma.s.bus_id = slot->bus_id;
+		write_csr(mmc, MIO_EMM_DMA(), emm_dma.u);
+		/* Give the abort up to 100ms to take effect */
+		start = get_timer(0);
+		do {
+			rsp_sts.u = read_csr(mmc, MIO_EMM_RSP_STS());
+			WATCHDOG_RESET();
+		} while (get_timer(start) < 100 &&
+			 (rsp_sts.s.dma_val || rsp_sts.s.dma_pend));
+	} while (retries-- >= 0 && rsp_sts.s.dma_pend);
+	if (rsp_sts.s.dma_val)
+		pr_err("%s(%s): Error: could not clean up DMA.  RSP_STS: 0x%llx, RSP_LO: 0x%llx\n",
+		       __func__, mmc->dev->name, rsp_sts.u,
+		       read_csr(mmc, MIO_EMM_RSP_LO()));
+	debug("  rsp_sts after clearing up DMA: 0x%llx\n",
+	      read_csr(mmc, MIO_EMM_RSP_STS()));
+}
+
+/**
+ * Waits for a DMA operation to complete
+ *
+ * @param	mmc	mmc device
+ * @param	write	true if this is a write operation
+ * @param	timeout	timeout in ms
+ * @param	verbose	true to print out error information
+ *
+ * @return	0 for success (could be DMA errors), -ETIMEDOUT on timeout
+ *		or -EIO if IO error.
+ */
+static int octeontx_mmc_wait_dma(struct mmc *mmc, bool write, ulong timeout,
+				 bool verbose)
+{
+	struct octeontx_mmc_host *host = mmc_to_host(mmc);
+	ulong start_time = get_timer(0);
+	union mio_emm_dma_int emm_dma_int;
+	union mio_emm_rsp_sts rsp_sts;
+	union mio_emm_dma emm_dma;
+	bool timed_out = false;
+	bool err = false;
+
+	debug("%s(%s, %lu, %d), delay: %uus\n", __func__, mmc->dev->name,
+	      timeout, verbose, host->dma_wait_delay);
+
+	/* Optional host-specific settle delay before the first poll */
+	udelay(host->dma_wait_delay);
+	do {
+		emm_dma_int.u = read_csr(mmc, MIO_EMM_DMA_INT());
+		rsp_sts.u = read_csr(mmc, MIO_EMM_RSP_STS());
+		/* Bail out early on error status bits (different sets for
+		 * reads and writes)
+		 */
+		if (write) {
+			if ((rsp_sts.s.dma_pend && !rsp_sts.s.dma_val) ||
+			    rsp_sts.s.blk_timeout ||
+			    rsp_sts.s.stp_timeout ||
+			    rsp_sts.s.rsp_timeout) {
+				err = true;
+#ifdef DEBUG
+				debug("%s: f1\n", __func__);
+				octeontx_mmc_print_rsp_errors(mmc, rsp_sts);
+#endif
+				break;
+			}
+		} else {
+			if (rsp_sts.s.blk_crc_err ||
+			    (rsp_sts.s.dma_pend && !rsp_sts.s.dma_val)) {
+				err = true;
+#if defined(DEBUG)
+				octeontx_mmc_print_rsp_errors(mmc, rsp_sts);
+#endif
+				break;
+			}
+		}
+		if (rsp_sts.s.dma_pend) {
+			/*
+			 * If this is set then an error has occurred.
+			 * Try and restart the DMA operation.
+			 */
+			emm_dma.u = read_csr(mmc, MIO_EMM_DMA());
+			if (verbose) {
+				pr_err("%s(%s): DMA pending error: rsp_sts: 0x%llx, dma_int: 0x%llx, emm_dma: 0x%llx\n",
+				       __func__, mmc->dev->name, rsp_sts.u,
+				       emm_dma_int.u, emm_dma.u);
+				octeontx_print_rsp_sts(mmc);
+				debug("  MIO_EMM_DEBUG: 0x%llx\n",
+				      read_csr(mmc, MIO_EMM_DEBUG()));
+				pr_err("%s: Trying DMA resume...\n", __func__);
+			}
+			/* Resume with dat_null set (discarding stuck data --
+			 * presumably; confirm with the HRM)
+			 */
+			emm_dma.s.dma_val = 1;
+			emm_dma.s.dat_null = 1;
+			write_csr(mmc, MIO_EMM_DMA(), emm_dma.u);
+			udelay(10);
+		} else if (!rsp_sts.s.dma_val && emm_dma_int.s.done) {
+			/* Normal completion: engine idle and done bit set */
+			break;
+		}
+		WATCHDOG_RESET();
+		timed_out = (get_timer(start_time) > timeout);
+	} while (!timed_out);
+
+	if (timed_out || err) {
+		if (verbose) {
+			pr_err("%s(%s): MMC DMA %s after %lu ms, rsp_sts: 0x%llx, dma_int: 0x%llx, rsp_sts_lo: 0x%llx, emm_dma: 0x%llx\n",
+			       __func__, mmc->dev->name,
+			       timed_out ? "timed out" : "error",
+			       get_timer(start_time), rsp_sts.u,
+			       emm_dma_int.u,
+			       read_csr(mmc, MIO_EMM_RSP_LO()),
+			       read_csr(mmc, MIO_EMM_DMA()));
+			octeontx_print_rsp_sts(mmc);
+		}
+		/* Leave the engine in a sane state for the next command */
+		if (rsp_sts.s.dma_pend)
+			octeontx_mmc_cleanup_dma(mmc, rsp_sts);
+	} else {
+		/* Success: acknowledge (clear) the DMA interrupt bits */
+		write_csr(mmc, MIO_EMM_DMA_INT(),
+			  read_csr(mmc, MIO_EMM_DMA_INT()));
+	}
+
+	return timed_out ? -ETIMEDOUT : (err ? -EIO : 0);
+}
+
+/**
+ * Read blocks from the MMC/SD device
+ *
+ * Multi-block reads use one DMA for the whole request; single-block
+ * reads (and the fallback path) issue one DMA per block.
+ *
+ * @param	mmc	mmc device
+ * @param	cmd	command (cmdarg holds the starting block number)
+ * @param	data	data for read
+ * @param	verbose	true to print out error information
+ *
+ * @return	number of blocks read or 0 if error
+ */
+static int octeontx_mmc_read_blocks(struct mmc *mmc, struct mmc_cmd *cmd,
+				    struct mmc_data *data, bool verbose)
+{
+	struct octeontx_mmc_host *host = mmc_to_host(mmc);
+	union mio_emm_rsp_sts rsp_sts;
+	/* Translate the destination buffer to a bus address for the engine */
+	dma_addr_t dma_addr = (dma_addr_t)dm_pci_virt_to_mem(host->dev,
+							     data->dest);
+	ulong count;
+	ulong blkcnt = data->blocks;
+	ulong start = cmd->cmdarg;	/* starting block number */
+	int timeout = 1000 + blkcnt * 20;
+	bool timed_out = false;
+	bool multi_xfer = cmd->cmdidx == MMC_CMD_READ_MULTIPLE_BLOCK;
+
+	debug("%s(%s): dest: %p, dma address: 0x%llx, blkcnt: %lu, start: %lu\n",
+	      __func__, mmc->dev->name, data->dest, dma_addr, blkcnt, start);
+	debug("%s: rsp_sts: 0x%llx\n", __func__,
+	      read_csr(mmc, MIO_EMM_RSP_STS()));
+	/* use max timeout for multi-block transfers */
+	/* timeout = 0; */
+
+	/*
+	 * If we have a valid SD card in the slot, we set the response bit
+	 * mask to check for CRC errors and timeouts only.
+	 * Otherwise, use the default power on reset value.
+	 * NOTE(review): bit meanings of 0x00b00000/0xe4390080 are per the
+	 * MIO_EMM_STS_MASK register description in the HRM -- confirm.
+	 */
+	write_csr(mmc, MIO_EMM_STS_MASK(),
+		  IS_SD(mmc) ? 0x00b00000ull : 0xe4390080ull);
+	/* The DMA engine writes behind the cache; drop stale lines first */
+	invalidate_dcache_range((u64)data->dest,
+				(u64)data->dest + blkcnt * data->blocksize);
+
+	if (multi_xfer) {
+		octeontx_mmc_start_dma(mmc, false, false, start, dma_addr,
+				       blkcnt, timeout);
+		timed_out = !!octeontx_mmc_wait_dma(mmc, false, timeout,
+						    verbose);
+		rsp_sts.u = read_csr(mmc, MIO_EMM_RSP_STS());
+		if (timed_out || rsp_sts.s.dma_val || rsp_sts.s.dma_pend) {
+			if (!verbose)
+				return 0;
+
+			pr_err("%s(%s): Error: DMA timed out.  rsp_sts: 0x%llx, emm_int: 0x%llx, dma_int: 0x%llx, rsp_lo: 0x%llx\n",
+			       __func__, mmc->dev->name, rsp_sts.u,
+			       read_csr(mmc, MIO_EMM_INT()),
+			       read_csr(mmc, MIO_EMM_DMA_INT()),
+			       read_csr(mmc, MIO_EMM_RSP_LO()));
+			pr_err("%s: block count: %lu, start: 0x%lx\n",
+			       __func__, blkcnt, start);
+			octeontx_mmc_print_registers(mmc);
+			return 0;
+		}
+	} else {
+		/* One single-block DMA per block */
+		count = blkcnt;
+		timeout = 1000;
+		do {
+			octeontx_mmc_start_dma(mmc, false, false, start,
+					       dma_addr, 1, timeout);
+			dma_addr += mmc->read_bl_len;
+			start++;
+
+			timed_out = !!octeontx_mmc_wait_dma(mmc, false,
+							    timeout, verbose);
+			rsp_sts.u = read_csr(mmc, MIO_EMM_RSP_STS());
+			if (timed_out || rsp_sts.s.dma_val ||
+			    rsp_sts.s.dma_pend) {
+				if (verbose) {
+					pr_err("%s: Error: DMA timed out.  rsp_sts: 0x%llx, emm_int: 0x%llx, dma_int: 0x%llx, rsp_lo: 0x%llx\n",
+					       __func__, rsp_sts.u,
+					       read_csr(mmc, MIO_EMM_INT()),
+					       read_csr(mmc, MIO_EMM_DMA_INT()),
+					       read_csr(mmc, MIO_EMM_RSP_LO()));
+					pr_err("%s: block count: 1, start: 0x%lx\n",
+					       __func__, start);
+					octeontx_mmc_print_registers(mmc);
+				}
+				/* Report how many blocks actually completed */
+				return blkcnt - count;
+			}
+			WATCHDOG_RESET();
+		} while (--count);
+	}
+#ifdef DEBUG
+	debug("%s(%s): Read %lu (0x%lx) blocks starting at block %u (0x%x) to address %p (dma address 0x%llx)\n",
+	      __func__, mmc->dev->name, blkcnt, blkcnt,
+	      cmd->cmdarg, cmd->cmdarg, data->dest,
+	      dm_pci_virt_to_mem(host->dev, data->dest));
+	print_buffer(0, data->dest, 1, 0x200, 0);
+#endif
+	return blkcnt;
+}
+
+/**
+ * Poll the card with CMD13 (SEND_STATUS) until it reports ready for data
+ *
+ * @param	mmc	mmc device
+ * @param	timeout	polling timeout in ms
+ *
+ * @return	0 once the card is ready, -ETIMEDOUT otherwise
+ */
+static int octeontx_mmc_poll_ready(struct mmc *mmc, ulong timeout)
+{
+	struct mmc_cmd status_cmd;
+	ulong start_time;
+	bool cmd_failed = false;
+	int ret;
+
+	memset(&status_cmd, 0, sizeof(status_cmd));
+	status_cmd.cmdidx = MMC_CMD_SEND_STATUS;
+	status_cmd.cmdarg = mmc->rca << 16;
+	status_cmd.resp_type = MMC_RSP_R1;
+
+	start_time = get_timer(0);
+	do {
+		ret = octeontx_mmc_send_cmd(mmc, &status_cmd, NULL);
+		if (!ret && (status_cmd.response[0] & R1_READY_FOR_DATA))
+			return 0;
+		if (ret) {
+			pr_err("%s(%s): MMC command error: %d; Retry...\n",
+			       __func__, mmc->dev->name, ret);
+			cmd_failed = true;
+		}
+		WATCHDOG_RESET();
+	} while (get_timer(start_time) < timeout);
+
+	if (cmd_failed)
+		pr_err("%s(%s): MMC command error; Retry timeout\n",
+		       __func__, mmc->dev->name);
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * Write blocks to the MMC/SD device using DMA
+ *
+ * @param	mmc	mmc device
+ * @param	cmd	command (cmdarg holds the starting block number)
+ * @param	data	data to write (src buffer and block count)
+ *
+ * @return	number of blocks written, 0 on error
+ */
+static ulong octeontx_mmc_write_blocks(struct mmc *mmc, struct mmc_cmd *cmd,
+				       struct mmc_data *data)
+{
+	struct octeontx_mmc_host *host = mmc_to_host(mmc);
+	ulong start = cmd->cmdarg;
+	ulong blkcnt = data->blocks;
+	dma_addr_t dma_addr;
+	union mio_emm_rsp_sts rsp_sts;
+	union mio_emm_sts_mask emm_sts_mask;
+	ulong timeout;
+	int count;
+	bool timed_out = false;
+	/* One multi-block DMA when >1 block, unless an SD card lacks
+	 * multi-block support (bit 1 of scr[0] -- TODO confirm bit meaning)
+	 */
+	bool multi_xfer = (blkcnt > 1) &&
+			((IS_SD(mmc) && mmc->scr[0] & 2) || !IS_SD(mmc));
+
+	octeontx_mmc_switch_to(mmc);
+	/* Only check the R1 block-write status bits */
+	emm_sts_mask.u = 0;
+	emm_sts_mask.s.sts_msk = R1_BLOCK_WRITE_MASK;
+	write_csr(mmc, MIO_EMM_STS_MASK(), emm_sts_mask.u);
+
+	if (octeontx_mmc_poll_ready(mmc, 10000)) {
+		pr_err("%s(%s): Ready timed out\n", __func__, mmc->dev->name);
+		return 0;
+	}
+	/* Make sure the data hits memory before the DMA engine reads it */
+	flush_dcache_range((u64)data->src,
+			   (u64)data->src + blkcnt * mmc->write_bl_len);
+	dma_addr = (u64)dm_pci_virt_to_mem(host->dev, (void *)data->src);
+	if (multi_xfer) {
+		timeout = 5000 + 100 * blkcnt;
+		octeontx_mmc_start_dma(mmc, true, false, start, dma_addr,
+				       blkcnt, timeout);
+		timed_out = !!octeontx_mmc_wait_dma(mmc, true, timeout, true);
+		rsp_sts.u = read_csr(mmc, MIO_EMM_RSP_STS());
+		if (timed_out || rsp_sts.s.dma_val || rsp_sts.s.dma_pend) {
+			pr_err("%s(%s): Error: multi-DMA timed out after %lums.  rsp_sts: 0x%llx, emm_int: 0x%llx, emm_dma_int: 0x%llx, rsp_sts_lo: 0x%llx, emm_dma: 0x%llx\n",
+			       __func__, mmc->dev->name, timeout,
+			       rsp_sts.u,
+			       read_csr(mmc, MIO_EMM_INT()),
+			       read_csr(mmc, MIO_EMM_DMA_INT()),
+			       read_csr(mmc, MIO_EMM_RSP_LO()),
+			       read_csr(mmc, MIO_EMM_DMA()));
+			return 0;
+		}
+	} else {
+		/* Fall back to one single-block DMA per block */
+		timeout = 5000;
+		count = blkcnt;
+		do {
+			octeontx_mmc_start_dma(mmc, true, false, start,
+					       dma_addr, 1, timeout);
+			/*
+			 * NOTE(review): the address advances by read_bl_len
+			 * on the write path; presumably read and write block
+			 * lengths always match here -- confirm.
+			 */
+			dma_addr += mmc->read_bl_len;
+			start++;
+
+			timed_out = !!octeontx_mmc_wait_dma(mmc, true, timeout,
+							    true);
+			rsp_sts.u = read_csr(mmc, MIO_EMM_RSP_STS());
+			if (timed_out || rsp_sts.s.dma_val ||
+			    rsp_sts.s.dma_pend) {
+				/*
+				 * Fix: the value printed for "emm_int" was
+				 * read from MIO_EMM_RSP_STS; read MIO_EMM_INT
+				 * instead, matching the label and the
+				 * multi-block error path above.
+				 */
+				pr_err("%s(%s): Error: single-DMA timed out after %lums.  rsp_sts: 0x%llx, emm_int: 0x%llx, emm_dma_int: 0x%llx, rsp_sts_lo: 0x%llx, emm_dma: 0x%llx\n",
+				       __func__, mmc->dev->name, timeout,
+				       rsp_sts.u,
+				       read_csr(mmc, MIO_EMM_INT()),
+				       read_csr(mmc, MIO_EMM_DMA_INT()),
+				       read_csr(mmc, MIO_EMM_RSP_LO()),
+				       read_csr(mmc, MIO_EMM_DMA()));
+				/* Report blocks that actually completed */
+				return blkcnt - count;
+			}
+			WATCHDOG_RESET();
+		} while (--count);
+	}
+
+	return blkcnt;
+}
+
+/**
+ * Send a command to the eMMC/SD device
+ *
+ * @param mmc	mmc device
+ * @param cmd	cmd to send and response
+ * @param data	additional data
+ *
+ * @return	0 for success, otherwise error
+ */
+static int octeontx_mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
+				 struct mmc_data *data)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	const char *name = slot->dev->name;
+	struct octeontx_mmc_cr_mods mods = {0, 0};
+	union mio_emm_rsp_sts rsp_sts;
+	union mio_emm_cmd emm_cmd;
+	union mio_emm_rsp_lo rsp_lo;
+	union mio_emm_buf_idx emm_buf_idx;
+	union mio_emm_buf_dat emm_buf_dat;
+	ulong start;
+	int i;
+	ulong blkcnt;
+
+	/**
+	 * This constant has a 1 bit for each command which should have a short
+	 * timeout and a 0 for each bit with a long timeout.  Currently the
+	 * following commands have a long timeout:
+	 *   CMD6, CMD17, CMD18, CMD24, CMD25, CMD32, CMD33, CMD35, CMD36 and
+	 *   CMD38.
+	 */
+	static const u64 timeout_short = 0xFFFFFFA4FCF9FFDFull;
+	uint timeout;
+
+	/* EXT_CSD reads need the card's RCA programmed into the controller */
+	if (cmd->cmdidx == MMC_CMD_SEND_EXT_CSD) {
+		union mio_emm_rca emm_rca;
+
+		emm_rca.u = 0;
+		emm_rca.s.card_rca = mmc->rca;
+		write_csr(mmc, MIO_EMM_RCA(), emm_rca.u);
+	}
+
+	if (timeout_short & (1ull << cmd->cmdidx))
+		timeout = MMC_TIMEOUT_SHORT;
+	else if (cmd->cmdidx == MMC_CMD_SWITCH && IS_SD(mmc))
+		timeout = 2560;
+	else if (cmd->cmdidx == MMC_CMD_ERASE)
+		timeout = MMC_TIMEOUT_ERASE;
+	else
+		timeout = MMC_TIMEOUT_LONG;
+
+	debug("%s(%s): cmd idx: %u, arg: 0x%x, resp type: 0x%x, timeout: %u\n",
+	      __func__, name, cmd->cmdidx, cmd->cmdarg, cmd->resp_type,
+	      timeout);
+	if (data)
+		debug("  data: addr: %p, flags: 0x%x, blocks: %u, blocksize: %u\n",
+		      data->dest, data->flags, data->blocks, data->blocksize);
+
+	octeontx_mmc_switch_to(mmc);
+
+	/* Clear any interrupts */
+	write_csr(mmc, MIO_EMM_INT(), read_csr(mmc, MIO_EMM_INT()));
+
+	/*
+	 * We need to override the default command types and response types
+	 * when dealing with SD cards.
+	 */
+	mods = octeontx_mmc_get_cr_mods(mmc, cmd, data);
+
+	/* Handle block read/write/stop operations */
+	switch (cmd->cmdidx) {
+	case MMC_CMD_GO_IDLE_STATE:
+		/* The card is reset; any prior tuning is no longer valid */
+		slot->tuned = false;
+		slot->hs200_tuned = false;
+		slot->hs400_tuned = false;
+		break;
+	case MMC_CMD_STOP_TRANSMISSION:
+		return 0;
+	case MMC_CMD_READ_MULTIPLE_BLOCK:
+	case MMC_CMD_READ_SINGLE_BLOCK:
+		pr_debug("%s(%s): Reading blocks\n", __func__, name);
+		blkcnt = octeontx_mmc_read_blocks(mmc, cmd, data, true);
+		return (blkcnt > 0) ? 0 : -1;
+	case MMC_CMD_WRITE_MULTIPLE_BLOCK:
+	case MMC_CMD_WRITE_SINGLE_BLOCK:
+		blkcnt = octeontx_mmc_write_blocks(mmc, cmd, data);
+		return (blkcnt > 0) ? 0 : -1;
+	case MMC_CMD_SELECT_CARD:
+		/* Set the RCA register (is it set automatically?) */
+		if (IS_SD(mmc)) {
+			union mio_emm_rca emm_rca;
+
+			emm_rca.u = 0;
+			emm_rca.s.card_rca = (cmd->cmdarg >> 16);
+			write_csr(mmc, MIO_EMM_RCA(), emm_rca.u);
+			debug("%s: Set SD relative address (RCA) to 0x%x\n",
+			      __func__, emm_rca.s.card_rca);
+		}
+		break;
+
+	case MMC_CMD_SWITCH:
+		if (!data && !slot->is_acmd)
+			octeontx_mmc_track_switch(mmc, cmd->cmdarg);
+		break;
+	}
+
+	emm_cmd.u = 0;
+	emm_cmd.s.cmd_val = 1;
+	emm_cmd.s.bus_id = slot->bus_id;
+	emm_cmd.s.cmd_idx = cmd->cmdidx;
+	emm_cmd.s.arg = cmd->cmdarg;
+	emm_cmd.s.ctype_xor = mods.ctype_xor;
+	emm_cmd.s.rtype_xor = mods.rtype_xor;
+	/* Sub-512-byte transfers use the tail of the 512-byte buffer */
+	if (data && data->blocks == 1 && data->blocksize != 512) {
+		emm_cmd.s.offset =
+			64 - ((data->blocks * data->blocksize) / 8);
+		debug("%s: offset set to %u\n", __func__, emm_cmd.s.offset);
+	}
+
+	if (data && data->flags & MMC_DATA_WRITE) {
+		u8 *src = (u8 *)data->src;
+
+		if (!src) {
+			pr_err("%s(%s): Error: data source for cmd 0x%x is NULL!\n",
+			       __func__, name, cmd->cmdidx);
+			return -1;
+		}
+		if (data->blocksize > 512) {
+			pr_err("%s(%s): Error: data for cmd 0x%x exceeds 512 bytes\n",
+			       __func__, name, cmd->cmdidx);
+			return -1;
+		}
+#ifdef DEBUG
+		debug("%s: Sending %d bytes data\n", __func__, data->blocksize);
+		print_buffer(0, src, 1, data->blocksize, 0);
+#endif
+		/* Copy the payload into the controller's data buffer,
+		 * 64 bits at a time with auto-incrementing index
+		 */
+		emm_buf_idx.u = 0;
+		emm_buf_idx.s.inc = 1;
+		write_csr(mmc, MIO_EMM_BUF_IDX(), emm_buf_idx.u);
+		for (i = 0; i < (data->blocksize + 7) / 8; i++) {
+			memcpy(&emm_buf_dat.u, src, sizeof(emm_buf_dat.u));
+			write_csr(mmc, MIO_EMM_BUF_DAT(),
+				  cpu_to_be64(emm_buf_dat.u));
+			src += sizeof(emm_buf_dat.u);
+		}
+		write_csr(mmc, MIO_EMM_BUF_IDX(), 0);
+	}
+	debug("%s(%s): Sending command %u (emm_cmd: 0x%llx)\n", __func__,
+	      name, cmd->cmdidx, emm_cmd.u);
+	set_wdog(mmc, timeout * 1000);
+	write_csr(mmc, MIO_EMM_CMD(), emm_cmd.u);
+
+	/* Wait for command to finish or time out */
+	start = get_timer(0);
+	do {
+		rsp_sts.u = read_csr(mmc, MIO_EMM_RSP_STS());
+		WATCHDOG_RESET();
+	} while (!rsp_sts.s.cmd_done && !rsp_sts.s.rsp_timeout &&
+		 (get_timer(start) < timeout + 10));
+	octeontx_mmc_print_rsp_errors(mmc, rsp_sts);
+	if (rsp_sts.s.rsp_timeout || !rsp_sts.s.cmd_done) {
+		debug("%s(%s): Error: command %u(0x%x) timed out.  rsp_sts: 0x%llx\n",
+		      __func__, name, cmd->cmdidx, cmd->cmdarg, rsp_sts.u);
+		octeontx_mmc_print_registers(mmc);
+		return -ETIMEDOUT;
+	}
+	if (rsp_sts.s.rsp_crc_err) {
+		debug("%s(%s): RSP CRC error, rsp_sts: 0x%llx, cmdidx: %u, arg: 0x%08x\n",
+		      __func__, name, rsp_sts.u, cmd->cmdidx, cmd->cmdarg);
+		octeontx_mmc_print_registers(mmc);
+		return -1;
+	}
+	if (slot->bus_id != rsp_sts.s.bus_id) {
+		pr_warn("%s(%s): bus id mismatch, got %d, expected %d for command 0x%x(0x%x)\n",
+			__func__, name,
+			rsp_sts.s.bus_id, slot->bus_id,
+			cmd->cmdidx, cmd->cmdarg);
+		goto error;
+	}
+	if (rsp_sts.s.rsp_bad_sts) {
+		rsp_lo.u = read_csr(mmc, MIO_EMM_RSP_LO());
+		debug("%s: Bad response for bus id %d, cmd id %d:\n"
+		      "    rsp_timeout: %d\n"
+		      "    rsp_bad_sts: %d\n"
+		      "    rsp_crc_err: %d\n",
+		      __func__, slot->bus_id, cmd->cmdidx,
+		      rsp_sts.s.rsp_timeout,
+		      rsp_sts.s.rsp_bad_sts,
+		      rsp_sts.s.rsp_crc_err);
+		if (rsp_sts.s.rsp_type == 1 && rsp_sts.s.rsp_bad_sts) {
+			debug("    Response status: 0x%llx\n",
+			      (rsp_lo.u >> 8) & 0xffffffff);
+#ifdef DEBUG
+			mmc_print_status((rsp_lo.u >> 8) & 0xffffffff);
+#endif
+		}
+		goto error;
+	}
+	if (rsp_sts.s.cmd_idx != cmd->cmdidx) {
+		debug("%s(%s): Command response index %d does not match command index %d\n",
+		      __func__, name, rsp_sts.s.cmd_idx, cmd->cmdidx);
+		octeontx_print_rsp_sts(mmc);
+		debug("%s: rsp_lo: 0x%llx\n", __func__,
+		      read_csr(mmc, MIO_EMM_RSP_LO()));
+
+		goto error;
+	}
+
+	/* Remember whether the next command is an application command */
+	slot->is_acmd = (cmd->cmdidx == MMC_CMD_APP_CMD);
+
+	/*
+	 * Fix: this test was "!cmd->resp_type & MMC_RSP_PRESENT", which
+	 * parses as "(!cmd->resp_type) & MMC_RSP_PRESENT" and so never
+	 * fired for a non-zero resp_type.  Parenthesize the mask.
+	 */
+	if (!(cmd->resp_type & MMC_RSP_PRESENT))
+		debug("  Response type: 0x%x, no response expected\n",
+		      cmd->resp_type);
+	/* Get the response if present */
+	if (rsp_sts.s.rsp_val && (cmd->resp_type & MMC_RSP_PRESENT)) {
+		union mio_emm_rsp_hi rsp_hi;
+
+		rsp_lo.u = read_csr(mmc, MIO_EMM_RSP_LO());
+
+		switch (rsp_sts.s.rsp_type) {
+		case 1:
+		case 3:
+		case 4:
+		case 5:
+			/* 48-bit responses: 32 response bits in rsp_lo */
+			cmd->response[0] = (rsp_lo.u >> 8) & 0xffffffffull;
+			debug("  response: 0x%08x\n",
+			      cmd->response[0]);
+			cmd->response[1] = 0;
+			cmd->response[2] = 0;
+			cmd->response[3] = 0;
+			break;
+		case 2:
+			/* 136-bit (R2) responses span rsp_lo and rsp_hi */
+			cmd->response[3] = rsp_lo.u & 0xffffffff;
+			cmd->response[2] = (rsp_lo.u >> 32) & 0xffffffff;
+			rsp_hi.u = read_csr(mmc, MIO_EMM_RSP_HI());
+			cmd->response[1] = rsp_hi.u & 0xffffffff;
+			cmd->response[0] = (rsp_hi.u >> 32) & 0xffffffff;
+			debug("  response: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			      cmd->response[0], cmd->response[1],
+			      cmd->response[2], cmd->response[3]);
+			break;
+		default:
+			pr_err("%s(%s): Unknown response type 0x%x for command %d, arg: 0x%x, rsp_sts: 0x%llx\n",
+			       __func__, name, rsp_sts.s.rsp_type, cmd->cmdidx,
+			       cmd->cmdarg, rsp_sts.u);
+			return -1;
+		}
+	} else {
+		debug("  Response not expected\n");
+	}
+
+	if (data && data->flags & MMC_DATA_READ) {
+		u8 *dest = (u8 *)data->dest;
+
+		if (!dest) {
+			pr_err("%s(%s): Error, destination buffer NULL!\n",
+			       __func__, mmc->dev->name);
+			goto error;
+		}
+		if (data->blocksize > 512) {
+			printf("%s(%s): Error: data size %u exceeds 512\n",
+			       __func__, mmc->dev->name,
+			       data->blocksize);
+			goto error;
+		}
+		/* Drain the controller's data buffer, 64 bits at a time */
+		emm_buf_idx.u = 0;
+		emm_buf_idx.s.inc = 1;
+		write_csr(mmc, MIO_EMM_BUF_IDX(), emm_buf_idx.u);
+		for (i = 0; i < (data->blocksize + 7) / 8; i++) {
+			emm_buf_dat.u = read_csr(mmc, MIO_EMM_BUF_DAT());
+			emm_buf_dat.u = be64_to_cpu(emm_buf_dat.u);
+			memcpy(dest, &emm_buf_dat.u, sizeof(emm_buf_dat.u));
+			dest += sizeof(emm_buf_dat.u);
+		}
+		write_csr(mmc, MIO_EMM_BUF_IDX(), 0);
+#ifdef DEBUG
+		debug("%s: Received %d bytes data\n", __func__,
+		      data->blocksize);
+		print_buffer(0, data->dest, 1, data->blocksize, 0);
+#endif
+	}
+
+	return 0;
+error:
+#ifdef DEBUG
+	octeontx_mmc_print_registers(mmc);
+#endif
+	return -1;
+}
+
+/* DM uclass glue: forward a udevice-level send_cmd to the mmc handler */
+static int octeontx_mmc_dev_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
+				     struct mmc_data *data)
+{
+	struct mmc *mmc = dev_to_mmc(dev);
+
+	return octeontx_mmc_send_cmd(mmc, cmd, data);
+}
+
+#ifdef MMC_SUPPORTS_TUNING
+/* Issue a single R1 command (used as a tuning probe); optionally return
+ * the card status in *statp
+ */
+static int octeontx_mmc_test_cmd(struct mmc *mmc, u32 opcode, int *statp)
+{
+	struct mmc_cmd cmd = {
+		.cmdidx = opcode,
+		.resp_type = MMC_RSP_R1,
+		.cmdarg = mmc->rca << 16,
+	};
+	int ret;
+
+	debug("%s(%s, %u, %p)\n", __func__, mmc->dev->name, opcode, statp);
+
+	ret = octeontx_mmc_send_cmd(mmc, &cmd, NULL);
+	if (ret)
+		debug("%s(%s, %u) returned %d\n", __func__,
+		      mmc->dev->name, opcode, ret);
+	if (statp)
+		*statp = cmd.response[0];
+
+	return ret;
+}
+
+/* Read EXT_CSD into a scratch buffer (tuning sanity check); optionally
+ * return the command status in *statp
+ */
+static int octeontx_mmc_test_get_ext_csd(struct mmc *mmc, u32 opcode,
+					 int *statp)
+{
+	u8 ext_csd[MMC_MAX_BLOCK_LEN];
+	struct mmc_cmd cmd = {
+		.cmdidx = MMC_CMD_SEND_EXT_CSD,
+		.resp_type = MMC_RSP_R1,
+		.cmdarg = 0,
+	};
+	struct mmc_data data = {
+		.dest = (char *)ext_csd,
+		.blocks = 1,
+		.blocksize = MMC_MAX_BLOCK_LEN,
+		.flags = MMC_DATA_READ,
+	};
+	int ret;
+
+	debug("%s(%s, %u, %p)\n",  __func__, mmc->dev->name, opcode, statp);
+
+	ret = octeontx_mmc_send_cmd(mmc, &cmd, &data);
+	if (statp)
+		*statp = cmd.response[0];
+
+	return ret;
+}
+
+/**
+ * Wrapper to set the MIO_EMM_TIMING register
+ *
+ * @param	mmc		pointer to mmc data structure
+ * @param	emm_timing	New emm_timing register value
+ *
+ * On some devices it is possible that changing the data out value can
+ * cause a glitch on an internal fifo.  This works around this problem
+ * by performing a soft-reset immediately before setting the timing register.
+ *
+ * Note: this function should not be called from any function that
+ * performs DMA or block operations since not all registers are
+ * preserved.
+ */
+static void octeontx_mmc_set_emm_timing(struct mmc *mmc,
+					union mio_emm_timing emm_timing)
+{
+	union mio_emm_cfg emm_cfg;
+	struct octeontx_mmc_slot *slot = mmc->priv;
+	union mio_emm_debug emm_debug;
+
+	debug("%s(%s, 0x%llx) din: %u\n", __func__, mmc->dev->name,
+	      emm_timing.u, emm_timing.s.data_in_tap);
+
+	udelay(1);
+	if (slot->host->tap_requires_noclk) {
+		/* Turn off the clock */
+		emm_debug.u = read_csr(mmc, MIO_EMM_DEBUG());
+		emm_debug.s.emmc_clk_disable = 1;
+		write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+		udelay(1);
+		/* Hold the read-sync logic in reset while taps change */
+		emm_debug.s.rdsync_rst = 1;
+		write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+	}
+	/*
+	 * NOTE(review): bus_ena is set to 1 << 3 (bus 3) -- presumably an
+	 * unused bus, detaching the active bus while the timing register
+	 * is rewritten; confirm against the HRM.
+	 */
+	emm_cfg.u = read_csr(mmc, MIO_EMM_CFG());
+	emm_cfg.s.bus_ena = 1 << 3;
+	write_csr(mmc, MIO_EMM_CFG(), emm_cfg.u);
+
+	udelay(1);
+	write_csr(mmc, MIO_EMM_TIMING(), emm_timing.u);
+	udelay(1);
+
+	if (slot->host->tap_requires_noclk) {
+		/* Turn on the clock */
+		emm_debug.s.rdsync_rst = 0;
+		write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+		udelay(1);
+		emm_debug.s.emmc_clk_disable = 0;
+		write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+		udelay(1);
+	}
+	/* Re-enable this slot's real bus */
+	emm_cfg.s.bus_ena = 1 << mmc_to_slot(mmc)->bus_id;
+	write_csr(mmc, MIO_EMM_CFG(), emm_cfg.u);
+}
+
+/*
+ * Fixed 512-byte data pattern used for HS400 tuning: it is written once to
+ * the device-tree-designated tuning block and then repeatedly read back
+ * while sweeping the data input tap (see octeontx_tune_hs400()).
+ */
+static const u8 octeontx_hs400_tuning_block[512] = {
+	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+	0xff, 0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+	0x00, 0xff, 0x00, 0xff, 0x55, 0xaa, 0x55, 0xaa,
+	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+	0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+	0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+	0x01, 0xfe, 0x01, 0xfe, 0xcc, 0xcc, 0xcc, 0xff,
+	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+
+};
+
+/**
+ * Perform tuning in HS400 mode
+ *
+ * Sweeps the data input tap while repeatedly reading the dedicated tuning
+ * block and selects the tap in the middle of the longest run of good
+ * reads.  The result is stored in slot->hs400_taps.
+ *
+ * @param[in]	mmc	mmc data structure
+ *
+ * @return	0 for success (also when no tuning block is configured),
+ *		otherwise error
+ */
+static int octeontx_tune_hs400(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	struct mmc_cmd cmd;
+	struct mmc_data data;
+	union mio_emm_timing emm_timing;
+	/* NOTE(review): VLA sized by the card's read block length; assumed
+	 * to be a single 512-byte block to match the tuning pattern --
+	 * confirm.
+	 */
+	u8 buffer[mmc->read_bl_len];
+	int tap_adj;
+	int err = -1;
+	int tap;
+	int run = 0;
+	int start_run = -1;
+	int best_run = 0;
+	int best_start = -1;
+	bool prev_ok = false;
+	char env_name[64];
+	char how[MAX_NO_OF_TAPS + 1] = "";	/* per-tap pass/fail map */
+
+	/* No HS400 tuning block configured in the device tree */
+	if (slot->hs400_tuning_block == -1)
+		return 0;
+
+	/* The eMMC standard disables all tuning support when operating in
+	 * DDR modes like HS400.  The problem with this is that there are
+	 * many cases where the HS200 tuning does not work for HS400 mode.
+	 * In order to perform this tuning, while in HS200 a block is written
+	 * to a block specified in the device tree (marvell,hs400-tuning-block)
+	 * which is used for tuning in this function by repeatedly reading
+	 * this block and comparing the data and return code.  This function
+	 * chooses the data input tap in the middle of the longest run of
+	 * successful read operations.
+	 */
+
+	/* Start from the taps found by HS200 tuning */
+	emm_timing = slot->hs200_taps;
+	debug("%s(%s): Start ci: %d, co: %d, di: %d, do: %d\n",
+	      __func__, mmc->dev->name, emm_timing.s.cmd_in_tap,
+	      emm_timing.s.cmd_out_tap, emm_timing.s.data_in_tap,
+	      emm_timing.s.data_out_tap);
+	memset(buffer, 0xdb, sizeof(buffer));
+
+	/* The environment may force a specific data input tap */
+	snprintf(env_name, sizeof(env_name), "emmc%d_data_in_tap_hs400",
+		 slot->bus_id);
+	tap = env_get_ulong(env_name, 10, -1L);
+	if (tap >= 0 && tap < MAX_NO_OF_TAPS) {
+		printf("Overriding data input tap for HS400 mode to %d\n", tap);
+		emm_timing.s.data_in_tap = tap;
+		octeontx_mmc_set_emm_timing(mmc, emm_timing);
+		return 0;
+	}
+
+	/* tap == MAX_NO_OF_TAPS is a sentinel failing pass that closes a
+	 * run of good taps extending to the last tap.
+	 */
+	for (tap = 0; tap <= MAX_NO_OF_TAPS; tap++, prev_ok = !err) {
+		if (tap < MAX_NO_OF_TAPS) {
+			debug("%s: Testing data in tap %d\n", __func__, tap);
+			emm_timing.s.data_in_tap = tap;
+			octeontx_mmc_set_emm_timing(mmc, emm_timing);
+
+			cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
+			cmd.cmdarg = slot->hs400_tuning_block;
+			cmd.resp_type = MMC_RSP_R1;
+			data.dest = (void *)buffer;
+			data.blocks = 1;
+			data.blocksize = mmc->read_bl_len;
+			data.flags = MMC_DATA_READ;
+			err = !octeontx_mmc_read_blocks(mmc, &cmd, &data,
+							false);
+			/* A tap passes only if the read succeeded AND the
+			 * data matches the known tuning pattern.
+			 */
+			if (err || memcmp(buffer, octeontx_hs400_tuning_block,
+					  sizeof(buffer))) {
+#ifdef DEBUG
+				if (!err) {
+					debug("%s: data mismatch.  Read:\n",
+					      __func__);
+					print_buffer(0, buffer, 1,
+						     sizeof(buffer), 0);
+					debug("\nExpected:\n");
+					print_buffer(0,
+					    octeontx_hs400_tuning_block, 1,
+					    sizeof(octeontx_hs400_tuning_block),
+					    0);
+				} else {
+					debug("%s: Error %d reading block\n",
+					      __func__, err);
+				}
+#endif
+				err = -EINVAL;
+			} else {
+				debug("%s: tap %d good\n", __func__, tap);
+			}
+			how[tap] = "-+"[!err];
+		} else {
+			err = -EINVAL;
+		}
+
+		/* Track the longest run of consecutive passing taps */
+		if (!err) {
+			if (!prev_ok)
+				start_run = tap;
+		} else if (prev_ok) {
+			run = tap - 1 - start_run;
+			if (start_run >= 0 && run > best_run) {
+				best_start = start_run;
+				best_run = run;
+			}
+		}
+	}
+
+	how[tap - 1] = '\0';
+	if (best_start < 0) {
+		printf("%s(%s): %lldMHz tuning failed for HS400\n",
+		       __func__, mmc->dev->name, slot->clock / 1000000);
+		return -EINVAL;
+	}
+	/* Pick the center of the best run */
+	tap = best_start + best_run / 2;
+
+	/* The environment may override the configured tap adjustment */
+	snprintf(env_name, sizeof(env_name), "emmc%d_data_in_tap_adj_hs400",
+		 slot->bus_id);
+	tap_adj = env_get_ulong(env_name, 10, slot->hs400_tap_adj);
+	/*
+	 * Keep it in range and if out of range force it back in with a small
+	 * buffer.
+	 */
+	if (best_run > 3) {
+		tap = tap + tap_adj;
+		if (tap >= best_start + best_run)
+			tap = best_start + best_run - 2;
+		if (tap <= best_start)
+			tap = best_start + 2;
+	}
+	how[tap] = '@';	/* mark the chosen tap in the debug map */
+	debug("Tuning: %s\n", how);
+	debug("%s(%s): HS400 tap: best run start: %d, length: %d, tap: %d\n",
+	      __func__, mmc->dev->name, best_start, best_run, tap);
+	/* HS400 inherits the HS200 taps except for the tuned data input */
+	slot->hs400_taps = slot->hs200_taps;
+	slot->hs400_taps.s.data_in_tap = tap;
+	slot->hs400_tuned = true;
+	/* Optionally export the results to the environment for debugging */
+	if (env_get_yesno("emmc_export_hs400_taps") > 0) {
+		debug("%s(%s): Exporting HS400 taps\n",
+		      __func__, mmc->dev->name);
+		env_set_ulong("emmc_timing_tap", slot->host->timing_taps);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_data_in_tap_debug",
+			 slot->bus_id);
+		env_set(env_name, how);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_data_in_tap_val",
+			 slot->bus_id);
+		env_set_ulong(env_name, tap);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_data_in_tap_start",
+			 slot->bus_id);
+		env_set_ulong(env_name, best_start);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_data_in_tap_end",
+			 slot->bus_id);
+		env_set_ulong(env_name, best_start + best_run);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_cmd_in_tap",
+			 slot->bus_id);
+		env_set_ulong(env_name, slot->hs400_taps.s.cmd_in_tap);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_cmd_out_tap",
+			 slot->bus_id);
+		env_set_ulong(env_name, slot->hs400_taps.s.cmd_out_tap);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_cmd_out_delay",
+			 slot->bus_id);
+		env_set_ulong(env_name, slot->cmd_out_hs400_delay);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_data_out_tap",
+			 slot->bus_id);
+		env_set_ulong(env_name, slot->hs400_taps.s.data_out_tap);
+		snprintf(env_name, sizeof(env_name),
+			 "emmc%d_hs400_data_out_delay",
+			 slot->bus_id);
+		env_set_ulong(env_name, slot->data_out_hs400_delay);
+	} else {
+		debug("%s(%s): HS400 environment export disabled\n",
+		      __func__, mmc->dev->name);
+	}
+	octeontx_mmc_set_timing(mmc);
+
+	return 0;
+}
+
+/**
+ * Describes one tunable timing parameter and how to exercise it
+ */
+struct adj {
+	const char *name;	/* human-readable parameter name */
+	u8 mask_shift;		/* bit offset of the 6-bit tap field in MIO_EMM_TIMING */
+	int (*test)(struct mmc *mmc, u32 opcode, int *error);	/* per-tap test */
+	u32 opcode;		/* opcode for the test; 0 = use caller's opcode */
+	bool ddr_only;		/* only tune when in DDR mode */
+	bool hs200_only;	/* only tune when in HS200 mode */
+	bool not_hs200_only;	/* skip when in HS200 mode */
+	/* NOTE(review): num_runs is never read; the tuning loop hardcodes
+	 * two repetitions per tap -- confirm whether this field is stale.
+	 */
+	u8 num_runs;
+};
+
+/* Table of timing parameters swept by octeontx_mmc_execute_tuning() */
+struct adj adj[] = {
+	{ "CMD_IN", 48, octeontx_mmc_test_cmd, MMC_CMD_SEND_STATUS,
+	  false, false, false, 2, },
+/*	{ "CMD_OUT", 32, octeontx_mmc_test_cmd, MMC_CMD_SEND_STATUS, },*/
+	{ "DATA_IN(HS200)", 16, mmc_send_tuning,
+		MMC_CMD_SEND_TUNING_BLOCK_HS200, false, true, false, 2, },
+	{ "DATA_IN", 16, octeontx_mmc_test_get_ext_csd, 0, false, false,
+	  true, 2, },
+/*	{ "DATA_OUT", 0, octeontx_mmc_test_cmd, 0, true, false},*/
+	{ NULL, },
+};
+
+/**
+ * Perform tuning tests to find optimal timing
+ *
+ * Sweeps one tap field of MIO_EMM_TIMING across all values, running the
+ * adj->test callback twice per tap, and programs the tap at the center
+ * of the longest passing run (optionally nudged by the HS200 tap
+ * adjustment).
+ *
+ * @param	mmc	mmc device
+ * @param	adj	parameter to tune
+ * @param	opcode	command opcode to use
+ *
+ * @return	0 for success, -EINVAL if tuning failed
+ */
+static int octeontx_mmc_adjust_tuning(struct mmc *mmc, struct adj *adj,
+				      u32 opcode)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	union mio_emm_timing timing;
+	union mio_emm_debug emm_debug;
+	int tap;
+	int err = -1;
+	int run = 0;
+	int count;
+	int start_run = -1;
+	int best_run = 0;
+	int best_start = -1;
+	bool prev_ok = false;
+	u64 tap_status = 0;		/* bitmap of passing taps */
+	const int tap_adj = slot->hs200_tap_adj;
+	char how[MAX_NO_OF_TAPS + 1] = "";	/* per-tap pass/fail map */
+	bool is_hs200 = mmc->selected_mode == MMC_HS_200;
+
+	debug("%s(%s, %s, %d), hs200: %d\n", __func__, mmc->dev->name,
+	      adj->name, opcode, is_hs200);
+	octeontx_mmc_set_emm_timing(mmc,
+				    is_hs200 ? slot->hs200_taps : slot->taps);
+
+#ifdef DEBUG
+	if (opcode == MMC_CMD_SEND_TUNING_BLOCK_HS200) {
+		printf("%s(%s): Before tuning %s, opcode: %d\n",
+		       __func__, mmc->dev->name, adj->name, opcode);
+		octeontx_mmc_print_registers2(mmc, NULL);
+	}
+#endif
+
+	/*
+	 * The algorithm to find the optimal timing is to start
+	 * at the end and work backwards and select the second
+	 * value that passes.  Each test is repeated twice.
+	 */
+	for (tap = 0; tap <= MAX_NO_OF_TAPS; tap++, prev_ok = !err) {
+		if (tap < MAX_NO_OF_TAPS) {
+			if (slot->host->tap_requires_noclk) {
+				/* Turn off the clock */
+				emm_debug.u = read_csr(mmc, MIO_EMM_DEBUG());
+				emm_debug.s.emmc_clk_disable = 1;
+				write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+				udelay(1);
+				emm_debug.s.rdsync_rst = 1;
+				write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+				udelay(1);
+			}
+
+			timing.u = read_csr(mmc, MIO_EMM_TIMING());
+			timing.u &= ~(0x3full << adj->mask_shift);
+			timing.u |= (u64)tap << adj->mask_shift;
+			write_csr(mmc, MIO_EMM_TIMING(), timing.u);
+			debug("%s(%s): Testing ci: %d, co: %d, di: %d, do: %d\n",
+			      __func__, mmc->dev->name, timing.s.cmd_in_tap,
+			      timing.s.cmd_out_tap, timing.s.data_in_tap,
+			      timing.s.data_out_tap);
+
+			if (slot->host->tap_requires_noclk) {
+				/* Turn the clock back on */
+				emm_debug.s.rdsync_rst = 0;
+				write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+				udelay(1);
+				emm_debug.u = read_csr(mmc, MIO_EMM_DEBUG());
+				emm_debug.s.emmc_clk_disable = 0;
+				write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+				udelay(1);
+			}
+			for (count = 0; count < 2; count++) {
+				err = adj->test(mmc, opcode, NULL);
+				if (err) {
+					debug("%s(%s, %s): tap %d failed, count: %d, rsp_sts: 0x%llx, rsp_lo: 0x%llx\n",
+					      __func__, mmc->dev->name,
+					      adj->name, tap, count,
+					      read_csr(mmc,
+						       MIO_EMM_RSP_STS()),
+					      read_csr(mmc,
+						       MIO_EMM_RSP_LO()));
+					debug("%s(%s, %s): tap: %d, do: %d, di: %d, co: %d, ci: %d\n",
+					      __func__, mmc->dev->name,
+					      adj->name, tap,
+					      timing.s.data_out_tap,
+					      timing.s.data_in_tap,
+					      timing.s.cmd_out_tap,
+					      timing.s.cmd_in_tap);
+					break;
+				}
+				debug("%s(%s, %s): tap %d passed, count: %d, rsp_sts: 0x%llx, rsp_lo: 0x%llx\n",
+				      __func__, mmc->dev->name, adj->name, tap,
+				      count,
+				      read_csr(mmc, MIO_EMM_RSP_STS()),
+				      read_csr(mmc, MIO_EMM_RSP_LO()));
+			}
+			tap_status |= (u64)(!err) << tap;
+			how[tap] = "-+"[!err];
+		} else {
+			/*
+			 * Putting the end+1 case in the loop simplifies
+			 * logic, allowing 'prev_ok' to process a sweet
+			 * spot in tuning which extends to the wall.
+			 */
+			err = -EINVAL;
+		}
+		if (!err) {
+			/*
+			 * If no CRC/etc errors in the response, but previous
+			 * failed, note the start of a new run.
+			 */
+			debug("  prev_ok: %d\n", prev_ok);
+			if (!prev_ok)
+				start_run = tap;
+		} else if (prev_ok) {
+			run = tap - 1 - start_run;
+			/* did we just exit a wider sweet spot? */
+			if (start_run >= 0 && run > best_run) {
+				best_start = start_run;
+				best_run = run;
+			}
+		}
+	}
+	how[tap - 1] = '\0';
+	if (best_start < 0) {
+		printf("%s(%s, %s): %lldMHz tuning %s failed\n", __func__,
+		       mmc->dev->name, adj->name, slot->clock / 1000000,
+		       adj->name);
+		return -EINVAL;
+	}
+
+	/* Pick the center of the best run */
+	tap = best_start + best_run / 2;
+	debug("  tap %d is center, start: %d, run: %d\n", tap,
+	      best_start, best_run);
+	/*
+	 * Optionally nudge the chosen HS200 tap by the configured
+	 * adjustment, but only if the adjusted tap was itself observed
+	 * to pass during the sweep.
+	 */
+	if (is_hs200 && (tap + tap_adj >= 0) && (tap + tap_adj < 64) &&
+	    tap_status & (1ULL << (tap + tap_adj))) {
+		debug("Adjusting tap from %d by %d to %d\n",
+		      tap, tap_adj, tap + tap_adj);
+		tap += tap_adj;
+	}
+	how[tap] = '@';	/* mark the chosen tap in the debug map */
+	debug("%s/%s %d/%d/%d %s\n", mmc->dev->name,
+	      adj->name, best_start, tap, best_start + best_run, how);
+
+	/* Record the final tap in the mode's cached timing value */
+	if (is_hs200) {
+		slot->hs200_taps.u &= ~(0x3full << adj->mask_shift);
+		slot->hs200_taps.u |= (u64)tap << adj->mask_shift;
+	} else {
+		slot->taps.u &= ~(0x3full << adj->mask_shift);
+		slot->taps.u |= (u64)tap << adj->mask_shift;
+	}
+
+#ifdef DEBUG
+	if (opcode == MMC_CMD_SEND_TUNING_BLOCK_HS200) {
+		debug("%s(%s, %s): After successful tuning\n",
+		      __func__, mmc->dev->name, adj->name);
+		debug("%s(%s, %s): tap: %d, new do: %d, di: %d, co: %d, ci: %d\n",
+		      __func__, mmc->dev->name, adj->name, tap,
+		      slot->taps.s.data_out_tap,
+		      slot->taps.s.data_in_tap,
+		      slot->taps.s.cmd_out_tap,
+		      slot->taps.s.cmd_in_tap);
+		debug("%s(%s, %s): tap: %d, new do HS200: %d, di: %d, co: %d, ci: %d\n",
+		      __func__, mmc->dev->name, adj->name, tap,
+		      slot->hs200_taps.s.data_out_tap,
+		      slot->hs200_taps.s.data_in_tap,
+		      slot->hs200_taps.s.cmd_out_tap,
+		      slot->hs200_taps.s.cmd_in_tap);
+	}
+#endif
+	octeontx_mmc_set_timing(mmc);
+
+	/* Optionally export the results to the environment for debugging */
+	if (is_hs200 && env_get_yesno("emmc_export_hs200_taps")) {
+		char env_name[64];
+
+		env_set_ulong("emmc_timing_tap", slot->host->timing_taps);
+		switch (opcode) {
+		case MMC_CMD_SEND_TUNING_BLOCK:
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_data_in_tap_debug",
+				 slot->bus_id);
+			env_set(env_name, how);
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_data_in_tap_val", slot->bus_id);
+			env_set_ulong(env_name, tap);
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_data_in_tap_start",
+				 slot->bus_id);
+			env_set_ulong(env_name, best_start);
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_data_in_tap_end",
+				 slot->bus_id);
+			env_set_ulong(env_name, best_start + best_run);
+			break;
+		case MMC_CMD_SEND_STATUS:
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_cmd_in_tap_debug",
+				 slot->bus_id);
+			env_set(env_name, how);
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_cmd_in_tap_val", slot->bus_id);
+			env_set_ulong(env_name, tap);
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_cmd_in_tap_start",
+				 slot->bus_id);
+			env_set_ulong(env_name, best_start);
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_cmd_in_tap_end",
+				 slot->bus_id);
+			env_set_ulong(env_name, best_start + best_run);
+			break;
+		default:
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_data_out_tap", slot->bus_id);
+			env_set_ulong(env_name, slot->data_out_hs200_delay);
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_hs200_cmd_out_tap", slot->bus_id);
+			env_set_ulong(env_name, slot->cmd_out_hs200_delay);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Execute tuning for the current bus mode (dm_mmc_ops .execute_tuning)
+ *
+ * Iterates over the adj[] parameter table, honoring per-parameter
+ * environment overrides, and finally writes/verifies the HS400 tuning
+ * block if one is configured.
+ *
+ * @param	dev	mmc device
+ * @param	opcode	tuning command opcode from the MMC core
+ *
+ * @return	0 for success, otherwise error
+ */
+static int octeontx_mmc_execute_tuning(struct udevice *dev, u32 opcode)
+{
+	struct mmc *mmc = dev_to_mmc(dev);
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	union mio_emm_timing emm_timing;
+	int err;
+	struct adj *a;
+	bool is_hs200;
+	char env_name[64];
+
+	pr_info("%s re-tuning, opcode 0x%x\n", dev->name, opcode);
+
+	/* Tuning is pointless on simulation/emulation platforms */
+	if (slot->is_asim || slot->is_emul)
+		return 0;
+
+	is_hs200 = (mmc->selected_mode == MMC_HS_200);
+	if (is_hs200) {
+		slot->hs200_tuned = false;
+		slot->hs400_tuned = false;
+	} else {
+		slot->tuned = false;
+	}
+	/* NOTE(review): the output/input bus timing setup is performed
+	 * twice (here and again below, in swapped order) -- confirm
+	 * whether both rounds are required or one pair is redundant.
+	 */
+	octeontx_mmc_set_output_bus_timing(mmc);
+	octeontx_mmc_set_input_bus_timing(mmc);
+	/* Seed the cached output taps from the current register value */
+	emm_timing.u = read_csr(mmc, MIO_EMM_TIMING());
+	if (mmc->selected_mode == MMC_HS_200) {
+		slot->hs200_taps.s.cmd_out_tap = emm_timing.s.cmd_out_tap;
+		slot->hs200_taps.s.data_out_tap = emm_timing.s.data_out_tap;
+	} else {
+		slot->taps.s.cmd_out_tap = emm_timing.s.cmd_out_tap;
+		slot->taps.s.data_out_tap = emm_timing.s.data_out_tap;
+	}
+	octeontx_mmc_set_input_bus_timing(mmc);
+	octeontx_mmc_set_output_bus_timing(mmc);
+
+	for (a = adj; a->name; a++) {
+		ulong in_tap;
+
+		/* Environment variables may pin individual taps, skipping
+		 * the tuning sweep for that parameter entirely.
+		 */
+		if (!strcmp(a->name, "CMD_IN")) {
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_cmd_in_tap", slot->bus_id);
+			in_tap = env_get_ulong(env_name, 10, (ulong)-1);
+			if (in_tap != (ulong)-1) {
+				if (mmc->selected_mode == MMC_HS_200 ||
+				    a->hs200_only) {
+					slot->hs200_taps.s.cmd_in_tap = in_tap;
+					slot->hs400_taps.s.cmd_in_tap = in_tap;
+				} else {
+					slot->taps.s.cmd_in_tap = in_tap;
+				}
+				continue;
+			}
+		} else if (a->hs200_only &&
+			   !strcmp(a->name, "DATA_IN(HS200)")) {
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_data_in_tap_hs200", slot->bus_id);
+			in_tap = env_get_ulong(env_name, 10, (ulong)-1);
+			if (in_tap != (ulong)-1) {
+				debug("%s(%s): Overriding HS200 data in tap to %d\n",
+				      __func__, dev->name, (int)in_tap);
+				slot->hs200_taps.s.data_in_tap = in_tap;
+				continue;
+			}
+		} else if (!a->hs200_only && !strcmp(a->name, "DATA_IN")) {
+			snprintf(env_name, sizeof(env_name),
+				 "emmc%d_data_in_tap", slot->bus_id);
+			in_tap = env_get_ulong(env_name, 10, (ulong)-1);
+			if (in_tap != (ulong)-1) {
+				debug("%s(%s): Overriding non-HS200 data in tap to %d\n",
+				      __func__, dev->name, (int)in_tap);
+				slot->taps.s.data_in_tap = in_tap;
+				continue;
+			}
+		}
+
+		debug("%s(%s): Testing: %s, mode: %s, opcode: %u\n", __func__,
+		      dev->name, a->name, mmc_mode_name(mmc->selected_mode),
+		      opcode);
+
+		/* Skip DDR only test when not in DDR mode */
+		if (a->ddr_only && !mmc->ddr_mode) {
+			debug("%s(%s): Skipping %s due to non-DDR mode\n",
+			      __func__, dev->name, a->name);
+			continue;
+		}
+		/* Skip hs200 tests in non-hs200 mode and
+		 * non-hs200 tests in hs200 mode
+		 */
+		if (is_hs200) {
+			if (a->not_hs200_only) {
+				debug("%s(%s): Skipping %s\n", __func__,
+				      dev->name, a->name);
+				continue;
+			}
+		} else {
+			if (a->hs200_only) {
+				debug("%s(%s): Skipping %s\n", __func__,
+				      dev->name, a->name);
+				continue;
+			}
+		}
+
+		err = octeontx_mmc_adjust_tuning(mmc, a, a->opcode ?
+						 a->opcode : opcode);
+		if (err) {
+			pr_err("%s(%s, %u): tuning %s failed\n", __func__,
+			       dev->name, opcode, a->name);
+			return err;
+		}
+	}
+
+	octeontx_mmc_set_timing(mmc);
+	if (is_hs200)
+		slot->hs200_tuned = true;
+	else
+		slot->tuned = true;
+
+	/* Make sure the HS400 tuning block contains the known pattern so
+	 * octeontx_tune_hs400() can verify reads against it later.
+	 */
+	if (slot->hs400_tuning_block != -1) {
+		struct mmc_cmd cmd;
+		struct mmc_data data;
+		u8 buffer[mmc->read_bl_len];
+
+		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
+		cmd.cmdarg = slot->hs400_tuning_block;
+		cmd.resp_type = MMC_RSP_R1;
+		data.dest = (void *)buffer;
+		data.blocks = 1;
+		data.blocksize = mmc->read_bl_len;
+		data.flags = MMC_DATA_READ;
+		/* NOTE(review): on failure err is 1 here, not a negative
+		 * errno like the rest of this function returns -- confirm
+		 * callers only test for non-zero.
+		 */
+		err = octeontx_mmc_read_blocks(mmc, &cmd, &data, true) != 1;
+
+		if (err) {
+			printf("%s: Cannot read HS400 tuning block %u\n",
+			       dev->name, slot->hs400_tuning_block);
+			return err;
+		}
+		if (memcmp(buffer, octeontx_hs400_tuning_block,
+			   sizeof(buffer))) {
+			debug("%s(%s): Writing new HS400 tuning block to block %d\n",
+			      __func__, dev->name, slot->hs400_tuning_block);
+			cmd.cmdidx = MMC_CMD_WRITE_SINGLE_BLOCK;
+			data.src = (void *)octeontx_hs400_tuning_block;
+			data.flags = MMC_DATA_WRITE;
+			err = !octeontx_mmc_write_blocks(mmc, &cmd, &data);
+			if (err) {
+				printf("%s: Cannot write HS400 tuning block %u\n",
+				       dev->name, slot->hs400_tuning_block);
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+#else /* MMC_SUPPORTS_TUNING */
+/**
+ * Stub used when tuning support is compiled out; the timing register is
+ * left at its current value.
+ */
+static void octeontx_mmc_set_emm_timing(struct mmc *mmc,
+					union mio_emm_timing emm_timing)
+{
+}
+#endif /* MMC_SUPPORTS_TUNING */
+
+/**
+ * Calculate the clock period with rounding up
+ *
+ * @param	mmc	mmc device
+ * @return	clock period in system clocks for clk_lo + clk_hi,
+ *		or 0 if the requested clock rate is 0
+ */
+static u32 octeontx_mmc_calc_clk_period(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	struct octeontx_mmc_host *host = slot->host;
+
+	/*
+	 * Guard against a divide-by-zero: mmc->clock may still be 0
+	 * before the core has programmed an initial clock rate.
+	 */
+	if (!mmc->clock)
+		return 0;
+
+	return DIV_ROUND_UP(host->sys_freq, mmc->clock);
+}
+
+/**
+ * Apply the bus mode, width and clock selected by the MMC core
+ * (dm_mmc_ops .set_ios): programs MIO_EMM_SWITCH, configures the
+ * delays and, for HS400, runs the HS400 tuning if not yet done.
+ *
+ * @param	dev	mmc device
+ *
+ * @return	0 for success, otherwise error
+ */
+static int octeontx_mmc_set_ios(struct udevice *dev)
+{
+	struct octeontx_mmc_slot *slot = dev_to_mmc_slot(dev);
+	struct mmc *mmc = &slot->mmc;
+	struct octeontx_mmc_host *host = slot->host;
+	union mio_emm_switch emm_switch;
+	union mio_emm_modex mode;
+	uint clock;
+	int bus_width = 0;
+	int clk_period = 0;
+	int power_class = 10;
+	int err = 0;
+	bool is_hs200 = false;
+	bool is_hs400 = false;
+
+	debug("%s(%s): Entry\n", __func__, dev->name);
+	debug("  clock: %u, bus width: %u, mode: %u\n", mmc->clock,
+	      mmc->bus_width, mmc->selected_mode);
+	debug("  host caps: 0x%x, card caps: 0x%x\n", mmc->host_caps,
+	      mmc->card_caps);
+	octeontx_mmc_switch_to(mmc);
+
+	/* NOTE(review): the f_min fallback computed into 'clock' is never
+	 * used -- octeontx_mmc_calc_clk_period() below reads mmc->clock
+	 * directly, so a zero clock bypasses this guard.  Confirm intent.
+	 */
+	clock = mmc->clock;
+	if (!clock)
+		clock = mmc->cfg->f_min;
+
+	/* Encode bus width: 0 = 1-bit, 1 = 4-bit, 2 = 8-bit */
+	switch (mmc->bus_width) {
+	case 8:
+		bus_width = 2;
+		break;
+	case 4:
+		bus_width = 1;
+		break;
+	case 1:
+		bus_width = 0;
+		break;
+	default:
+		pr_warn("%s(%s): Invalid bus width %d, defaulting to 1\n",
+			__func__, dev->name, mmc->bus_width);
+		bus_width = 0;
+	}
+
+	/* DDR is available for 4/8 bit bus width */
+	if (mmc->ddr_mode && bus_width)
+		bus_width |= 4;
+
+	debug("%s: sys_freq: %llu\n", __func__, host->sys_freq);
+	clk_period = octeontx_mmc_calc_clk_period(mmc);
+
+	/* Split the period evenly between the clock's high and low phases */
+	emm_switch.u = 0;
+	emm_switch.s.bus_width = bus_width;
+	emm_switch.s.power_class = power_class;
+	emm_switch.s.clk_hi = clk_period / 2;
+	emm_switch.s.clk_lo = clk_period / 2;
+
+	debug("%s: last mode: %d, mode: %d, last clock: %u, clock: %u, ddr: %d\n",
+	      __func__, slot->last_mode, mmc->selected_mode,
+	      slot->last_clock, mmc->clock, mmc->ddr_mode);
+	/* Map the selected mode onto the switch-register timing bits */
+	switch (mmc->selected_mode) {
+	case MMC_LEGACY:
+		break;
+	case MMC_HS:
+	case SD_HS:
+	case MMC_HS_52:
+		emm_switch.s.hs_timing = 1;
+		break;
+	case MMC_HS_200:
+		is_hs200 = true;
+		fallthrough;
+	case UHS_SDR12:
+	case UHS_SDR25:
+	case UHS_SDR50:
+	case UHS_SDR104:
+		emm_switch.s.hs200_timing = 1;
+		break;
+	case MMC_HS_400:
+		is_hs400 = true;
+		fallthrough;
+	case UHS_DDR50:
+	case MMC_DDR_52:
+		emm_switch.s.hs400_timing = 1;
+		break;
+	default:
+		pr_err("%s(%s): Unsupported mode 0x%x\n", __func__, dev->name,
+		       mmc->selected_mode);
+		return -1;
+	}
+	emm_switch.s.bus_id = slot->bus_id;
+
+	/* Invalidate the tuning state if mode or clock changed */
+	if (!is_hs200 && !is_hs400 &&
+	    (mmc->selected_mode != slot->last_mode ||
+	     mmc->clock != slot->last_clock) &&
+	    !mmc->ddr_mode) {
+		slot->tuned = false;
+		slot->last_mode = mmc->selected_mode;
+		slot->last_clock = mmc->clock;
+	}
+
+	if (CONFIG_IS_ENABLED(MMC_VERBOSE)) {
+		debug("%s(%s): Setting bus mode to %s\n", __func__, dev->name,
+		      mmc_mode_name(mmc->selected_mode));
+	} else {
+		debug("%s(%s): Setting bus mode to 0x%x\n", __func__, dev->name,
+		      mmc->selected_mode);
+	}
+
+	debug(" Trying switch 0x%llx w%d hs:%d hs200:%d hs400:%d\n",
+	      emm_switch.u, emm_switch.s.bus_width, emm_switch.s.hs_timing,
+	      emm_switch.s.hs200_timing, emm_switch.s.hs400_timing);
+
+	set_wdog(mmc, 1000);
+	do_switch(mmc, emm_switch);
+	/* NOTE(review): fixed settle delay after the switch -- confirm
+	 * 100ms is required rather than polling for completion.
+	 */
+	mdelay(100);
+	mode.u = read_csr(mmc, MIO_EMM_MODEX(slot->bus_id));
+	debug("%s(%s): mode: 0x%llx w:%d, hs:%d, hs200:%d, hs400:%d\n",
+	      __func__, dev->name, mode.u, mode.s.bus_width,
+	      mode.s.hs_timing, mode.s.hs200_timing, mode.s.hs400_timing);
+
+	err = octeontx_mmc_configure_delay(mmc);
+
+#ifdef MMC_SUPPORTS_TUNING
+	if (!err && mmc->selected_mode == MMC_HS_400 && !slot->hs400_tuned) {
+		debug("%s: Tuning HS400 mode\n", __func__);
+		err = octeontx_tune_hs400(mmc);
+	}
+#endif
+
+	return err;
+}
+
+/**
+ * Reads the card-detect GPIO (if wired) and reports card presence
+ *
+ * @param	dev	mmc device
+ *
+ * @return	1 if a card is present, 0 otherwise
+ */
+static int octeontx_mmc_get_cd(struct udevice *dev)
+{
+	struct octeontx_mmc_slot *slot = dev_to_mmc_slot(dev);
+	int present = 1;
+
+	/* Without a card-detect gpio the card is assumed present */
+	if (dm_gpio_is_valid(&slot->cd_gpio))
+		present = dm_gpio_get_value(&slot->cd_gpio) ^
+			  slot->cd_inverted;
+
+	debug("%s(%s): cd: %d\n", __func__, dev->name, present);
+	return present;
+}
+
+/**
+ * Reads the write-protect GPIO (if wired) and reports its state
+ *
+ * @param	dev	mmc device
+ *
+ * @return	1 if write-protected, 0 otherwise
+ */
+static int octeontx_mmc_get_wp(struct udevice *dev)
+{
+	struct octeontx_mmc_slot *slot = dev_to_mmc_slot(dev);
+	int protected = 0;
+
+	/* Without a write-protect gpio the card is assumed writable */
+	if (dm_gpio_is_valid(&slot->wp_gpio))
+		protected = dm_gpio_get_value(&slot->wp_gpio) ^
+			    slot->wp_inverted;
+
+	debug("%s(%s): wp: %d\n", __func__, dev->name, protected);
+	return protected;
+}
+
+/**
+ * Program MIO_EMM_TIMING with the cached taps for the current bus mode
+ *
+ * @param	mmc	mmc device
+ */
+static void octeontx_mmc_set_timing(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	union mio_emm_timing tap_regs;
+
+	if (mmc->selected_mode == MMC_HS_200) {
+		tap_regs = slot->hs200_taps;
+	} else if (mmc->selected_mode == MMC_HS_400) {
+		/* Fall back to the HS200 taps until HS400 has been tuned */
+		if (slot->hs400_tuned)
+			tap_regs = slot->hs400_taps;
+		else
+			tap_regs = slot->hs200_taps;
+	} else {
+		tap_regs = slot->taps;
+	}
+
+	debug("%s(%s):\n  cmd_in_tap: %u\n  cmd_out_tap: %u\n  data_in_tap: %u\n  data_out_tap: %u\n",
+	      __func__, mmc->dev->name, tap_regs.s.cmd_in_tap,
+	      tap_regs.s.cmd_out_tap, tap_regs.s.data_in_tap,
+	      tap_regs.s.data_out_tap);
+
+	octeontx_mmc_set_emm_timing(mmc, tap_regs);
+}
+
+/**
+ * Configure the input/output delays for the current bus mode
+ *
+ * On OcteonTX the sample counts are programmed directly; on later parts
+ * the per-mode tap values are either restored from a previous tuning
+ * run or initialized to mode-specific defaults.
+ *
+ * @param	mmc	mmc device
+ *
+ * @return	0 for success, -1 on an invalid mode
+ */
+static int octeontx_mmc_configure_delay(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	struct octeontx_mmc_host *host __maybe_unused = slot->host;
+	bool __maybe_unused is_hs200;
+	bool __maybe_unused is_hs400;
+
+	debug("%s(%s)\n", __func__, mmc->dev->name);
+
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX)) {
+		/* OcteonTX uses MIO_EMM_SAMPLE instead of timing taps */
+		union mio_emm_sample emm_sample;
+
+		emm_sample.u = 0;
+		emm_sample.s.cmd_cnt = slot->cmd_cnt;
+		emm_sample.s.dat_cnt = slot->dat_cnt;
+		write_csr(mmc, MIO_EMM_SAMPLE(), emm_sample.u);
+	} else {
+		is_hs200 = (mmc->selected_mode == MMC_HS_200);
+		is_hs400 = (mmc->selected_mode == MMC_HS_400);
+
+		/* If this mode has already been tuned, just re-apply it */
+		if ((is_hs200 && slot->hs200_tuned) ||
+		    (is_hs400 && slot->hs400_tuned) ||
+		    (!is_hs200 && !is_hs400 && slot->tuned)) {
+			octeontx_mmc_set_output_bus_timing(mmc);
+		} else {
+			/* Not tuned yet: choose per-mode default taps */
+			int half = MAX_NO_OF_TAPS / 2;
+			int dout, cout;
+
+			switch (mmc->selected_mode) {
+			case MMC_LEGACY:
+				if (IS_SD(mmc)) {
+					cout = MMC_SD_LEGACY_DEFAULT_CMD_OUT_TAP;
+					dout = MMC_SD_LEGACY_DEFAULT_DATA_OUT_TAP;
+				} else {
+					cout = MMC_LEGACY_DEFAULT_CMD_OUT_TAP;
+					dout = MMC_LEGACY_DEFAULT_DATA_OUT_TAP;
+				}
+				break;
+			case MMC_HS:
+				cout = MMC_HS_CMD_OUT_TAP;
+				dout = MMC_HS_DATA_OUT_TAP;
+				break;
+			case SD_HS:
+			case UHS_SDR12:
+			case UHS_SDR25:
+			case UHS_SDR50:
+				cout = MMC_SD_HS_CMD_OUT_TAP;
+				dout = MMC_SD_HS_DATA_OUT_TAP;
+				break;
+			case UHS_SDR104:
+			case UHS_DDR50:
+			case MMC_HS_52:
+			case MMC_DDR_52:
+				cout = MMC_DEFAULT_CMD_OUT_TAP;
+				dout = MMC_DEFAULT_DATA_OUT_TAP;
+				break;
+			case MMC_HS_200:
+				cout = -1;
+				dout = -1;
+				/* Prefer calibrated delays when available */
+				if (host->timing_calibrated) {
+					cout = octeontx2_mmc_calc_delay(
+						mmc, slot->cmd_out_hs200_delay);
+					dout = octeontx2_mmc_calc_delay(
+						mmc,
+						slot->data_out_hs200_delay);
+					debug("%s(%s): Calibrated HS200/HS400 cmd out delay: %dps tap: %d, data out delay: %d, tap: %d\n",
+					      __func__, mmc->dev->name,
+					      slot->cmd_out_hs200_delay, cout,
+					      slot->data_out_hs200_delay, dout);
+				} else {
+					cout = MMC_DEFAULT_HS200_CMD_OUT_TAP;
+					dout = MMC_DEFAULT_HS200_DATA_OUT_TAP;
+				}
+				is_hs200 = true;
+				break;
+			case MMC_HS_400:
+				cout = -1;
+				dout = -1;
+				/* Prefer calibrated delays when available */
+				if (host->timing_calibrated) {
+					if (slot->cmd_out_hs400_delay)
+						cout = octeontx2_mmc_calc_delay(
+							mmc,
+							slot->cmd_out_hs400_delay);
+					if (slot->data_out_hs400_delay)
+						dout = octeontx2_mmc_calc_delay(
+							mmc,
+							slot->data_out_hs400_delay);
+					debug("%s(%s): Calibrated HS200/HS400 cmd out delay: %dps tap: %d, data out delay: %d, tap: %d\n",
+					      __func__, mmc->dev->name,
+					      slot->cmd_out_hs400_delay, cout,
+					      slot->data_out_hs400_delay, dout);
+				} else {
+					cout = MMC_DEFAULT_HS400_CMD_OUT_TAP;
+					dout = MMC_DEFAULT_HS400_DATA_OUT_TAP;
+				}
+				is_hs400 = true;
+				break;
+			default:
+				pr_err("%s(%s): Invalid mode %d\n", __func__,
+				       mmc->dev->name, mmc->selected_mode);
+				return -1;
+			}
+			debug("%s(%s): Not tuned, hs200: %d, hs200 tuned: %d, hs400: %d, hs400 tuned: %d, tuned: %d\n",
+			      __func__, mmc->dev->name, is_hs200,
+			      slot->hs200_tuned,
+			      is_hs400, slot->hs400_tuned, slot->tuned);
+			/* Set some defaults */
+			if (is_hs200) {
+				slot->hs200_taps.u = 0;
+				slot->hs200_taps.s.cmd_out_tap = cout;
+				slot->hs200_taps.s.data_out_tap = dout;
+				slot->hs200_taps.s.cmd_in_tap = half;
+				slot->hs200_taps.s.data_in_tap = half;
+			} else if (is_hs400) {
+				slot->hs400_taps.u = 0;
+				slot->hs400_taps.s.cmd_out_tap = cout;
+				slot->hs400_taps.s.data_out_tap = dout;
+				slot->hs400_taps.s.cmd_in_tap = half;
+				slot->hs400_taps.s.data_in_tap = half;
+			} else {
+				slot->taps.u = 0;
+				slot->taps.s.cmd_out_tap = cout;
+				slot->taps.s.data_out_tap = dout;
+				slot->taps.s.cmd_in_tap = half;
+				slot->taps.s.data_in_tap = half;
+			}
+		}
+
+		if (is_hs200)
+			debug("%s(%s): hs200 taps: ci: %u, co: %u, di: %u, do: %u\n",
+			      __func__, mmc->dev->name,
+			      slot->hs200_taps.s.cmd_in_tap,
+			      slot->hs200_taps.s.cmd_out_tap,
+			      slot->hs200_taps.s.data_in_tap,
+			      slot->hs200_taps.s.data_out_tap);
+		else if (is_hs400)
+			debug("%s(%s): hs400 taps: ci: %u, co: %u, di: %u, do: %u\n",
+			      __func__, mmc->dev->name,
+			      slot->hs400_taps.s.cmd_in_tap,
+			      slot->hs400_taps.s.cmd_out_tap,
+			      slot->hs400_taps.s.data_in_tap,
+			      slot->hs400_taps.s.data_out_tap);
+		else
+			debug("%s(%s): taps: ci: %u, co: %u, di: %u, do: %u\n",
+			      __func__, mmc->dev->name, slot->taps.s.cmd_in_tap,
+			      slot->taps.s.cmd_out_tap,
+			      slot->taps.s.data_in_tap,
+			      slot->taps.s.data_out_tap);
+		octeontx_mmc_set_timing(mmc);
+		debug("%s: Done\n", __func__);
+	}
+
+	return 0;
+}
+
+/**
+ * Sets the MMC watchdog timer in microseconds
+ *
+ * @param	mmc	mmc device
+ * @param	us	timeout in microseconds, 0 for maximum timeout
+ */
+static void set_wdog(struct mmc *mmc, u64 us)
+{
+	union mio_emm_wdog wdog;
+	u64 val;
+
+	/* Convert the timeout from microseconds to MMC clock cycles */
+	val = (us * mmc->clock) / 1000000;
+	/* clk_cnt is a 26-bit field; clamp, or use the maximum when us == 0 */
+	if (val >= (1 << 26) || !us) {
+		if (us)
+			pr_debug("%s: warning: timeout %llu exceeds max value %llu, truncating\n",
+				 __func__, us,
+				 (u64)(((1ULL << 26) - 1) * 1000000ULL) /
+					mmc->clock);
+		val = (1 << 26) - 1;
+	}
+	wdog.u = 0;
+	wdog.s.clk_cnt = val;
+	write_csr(mmc, MIO_EMM_WDOG(), wdog.u);
+}
+
+/**
+ * Set the IO drive strength and slew
+ *
+ * @param	mmc	mmc device
+ */
+static void octeontx_mmc_io_drive_setup(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot;
+	union mio_emm_io_ctl io_ctl;
+
+	/* The OcteonTX (CN8XXX) parts have no I/O drive/slew control */
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX))
+		return;
+
+	slot = mmc_to_slot(mmc);
+	/* Negative values mean the device tree did not specify them */
+	if (slot->drive < 0 || slot->slew < 0)
+		return;
+
+	io_ctl.u = 0;
+	io_ctl.s.drive = slot->drive;
+	io_ctl.s.slew = slot->slew;
+	write_csr(mmc, MIO_EMM_IO_CTL(), io_ctl.u);
+}
+
+/**
+ * Print switch errors
+ *
+ * @param	mmc	mmc device
+ */
+static void check_switch_errors(struct mmc *mmc)
+{
+	union mio_emm_switch emm_switch;
+
+	/* Read back the switch register; the error bits report which of the
+	 * three switch sub-operations (power class, HS timing, bus width)
+	 * the controller failed to apply.
+	 */
+	emm_switch.u = read_csr(mmc, MIO_EMM_SWITCH());
+	if (emm_switch.s.switch_err0)
+		pr_err("%s: Switch power class error\n", mmc->cfg->name);
+	if (emm_switch.s.switch_err1)
+		pr_err("%s: Switch HS timing error\n", mmc->cfg->name);
+	if (emm_switch.s.switch_err2)
+		pr_err("%s: Switch bus width error\n", mmc->cfg->name);
+}
+
+/**
+ * Writes the SWITCH register and waits for the operation to complete
+ *
+ * @param	mmc		mmc device
+ * @param	emm_switch	switch register value to write
+ *
+ * The cached copy of the switch register in the slot is updated and any
+ * switch errors are reported.
+ */
+static void do_switch(struct mmc *mmc, union mio_emm_switch emm_switch)
+{
+	union mio_emm_rsp_sts rsp_sts;
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	int bus_id = emm_switch.s.bus_id;
+	ulong start;
+
+	/* The bus_id field must be written last: first write the register
+	 * with bus_id cleared, then rewrite it with the real bus id.
+	 */
+	if (emm_switch.s.bus_id != 0) {
+		emm_switch.s.bus_id = 0;
+		write_csr(mmc, MIO_EMM_SWITCH(), emm_switch.u);
+		udelay(100);
+		emm_switch.s.bus_id = bus_id;
+	}
+	debug("%s(%s, 0x%llx)\n", __func__, mmc->dev->name, emm_switch.u);
+	write_csr(mmc, MIO_EMM_SWITCH(), emm_switch.u);
+
+	/* Poll for up to ~10ms for the switch operation to finish */
+	start = get_timer(0);
+	do {
+		rsp_sts.u = read_csr(mmc, MIO_EMM_RSP_STS());
+		if (!rsp_sts.s.switch_val)
+			break;
+		udelay(100);
+	} while (get_timer(start) < 10);
+	if (rsp_sts.s.switch_val) {
+		pr_warn("%s(%s): Warning: writing 0x%llx to emm_switch timed out, status: 0x%llx\n",
+			__func__, mmc->dev->name, emm_switch.u, rsp_sts.u);
+	}
+	/* Cache the value once (was previously assigned twice redundantly) */
+	slot->cached_switch = emm_switch;
+	check_switch_errors(mmc);
+	debug("%s: emm_switch: 0x%llx, rsp_lo: 0x%llx\n",
+	      __func__, read_csr(mmc, MIO_EMM_SWITCH()),
+				 read_csr(mmc, MIO_EMM_RSP_LO()));
+}
+
+/**
+ * Given a delay in ps, return the tap delay count
+ *
+ * @param	mmc	mmc data structure
+ * @param	delay	delay in picoseconds
+ *
+ * @return	Number of tap cycles or error if -1
+ */
+static int octeontx2_mmc_calc_delay(struct mmc *mmc, int delay)
+{
+	struct octeontx_mmc_host *host = mmc_to_host(mmc);
+
+	/* Simulator/emulator platforms just use the maximum tap count */
+	if (host->is_asim || host->is_emul)
+		return 63;
+
+	/* timing_taps (ps per tap) is set by octeontx_mmc_calibrate_delay() */
+	if (!host->timing_taps) {
+		pr_err("%s(%s): Error: host timing not calibrated\n",
+		       __func__, mmc->dev->name);
+		return -1;
+	}
+	debug("%s(%s, %d) timing taps: %llu\n", __func__, mmc->dev->name,
+	      delay, host->timing_taps);
+	/* Round up to whole taps and clamp to the 6-bit maximum of 63 */
+	return min_t(int, DIV_ROUND_UP(delay, host->timing_taps), 63);
+}
+
+/**
+ * Calibrates the delay based on the internal clock
+ *
+ * @param	mmc	Pointer to mmc data structure
+ *
+ * @return	0 for success or -ETIMEDOUT on error
+ *
+ * NOTE: On error a default value will be calculated.
+ */
+static int octeontx_mmc_calibrate_delay(struct mmc *mmc)
+{
+	union mio_emm_calb emm_calb;
+	union mio_emm_tap emm_tap;
+	union mio_emm_cfg emm_cfg;
+	union mio_emm_io_ctl emm_io_ctl;
+	union mio_emm_switch emm_switch;
+	union mio_emm_wdog emm_wdog;
+	union mio_emm_sts_mask emm_sts_mask;
+	union mio_emm_debug emm_debug;
+	union mio_emm_timing emm_timing;
+	struct octeontx_mmc_host *host = mmc_to_host(mmc);
+	ulong start;
+	u8 bus_id, bus_ena;
+
+	debug("%s: Calibrating delay\n", __func__);
+	/* Simulator/emulator platforms have no calibration hardware */
+	if (host->is_asim || host->is_emul) {
+		debug("  No calibration for ASIM\n");
+		return 0;
+	}
+	emm_tap.u = 0;
+	if (host->calibrate_glitch) {
+		/* Hardware calibration is unreliable on this silicon
+		 * revision; fall back to a fixed default tap delay.
+		 */
+		emm_tap.s.delay = MMC_DEFAULT_TAP_DELAY;
+	} else {
+		/* Save registers */
+		emm_cfg.u = read_csr(mmc, MIO_EMM_CFG());
+		emm_io_ctl.u = read_csr(mmc, MIO_EMM_IO_CTL());
+		emm_switch.u = read_csr(mmc, MIO_EMM_SWITCH());
+		emm_wdog.u = read_csr(mmc, MIO_EMM_WDOG());
+		emm_sts_mask.u = read_csr(mmc, MIO_EMM_STS_MASK());
+		emm_debug.u = read_csr(mmc, MIO_EMM_DEBUG());
+		emm_timing.u = read_csr(mmc, MIO_EMM_TIMING());
+		bus_ena = emm_cfg.s.bus_ena;
+		/* NOTE(review): bus_id saved here is overwritten below before
+		 * it is ever used.
+		 */
+		bus_id = emm_switch.s.bus_id;
+		/* Disable all buses, then enable only the unused bus 3 so
+		 * calibration does not disturb a real device.
+		 */
+		emm_cfg.s.bus_ena = 0;
+		write_csr(mmc, MIO_EMM_CFG(), emm_cfg.u);
+		udelay(1);
+		emm_cfg.s.bus_ena = 1ULL << 3;
+		write_csr(mmc, MIO_EMM_CFG(), emm_cfg.u);
+		mdelay(1);
+		/* Kick off the hardware tap-delay calibration */
+		emm_calb.u = 0;
+		write_csr(mmc, MIO_EMM_CALB(), emm_calb.u);
+		emm_calb.s.start = 1;
+		write_csr(mmc, MIO_EMM_CALB(), emm_calb.u);
+		start = get_timer(0);
+		/* This should only take 3 microseconds */
+		do {
+			udelay(5);
+			emm_tap.u = read_csr(mmc, MIO_EMM_TAP());
+		} while (!emm_tap.s.delay && get_timer(start) < 10);
+
+		emm_calb.s.start = 0;
+		write_csr(mmc, MIO_EMM_CALB(), emm_calb.u);
+
+		emm_cfg.s.bus_ena = 0;
+		write_csr(mmc, MIO_EMM_CFG(), emm_cfg.u);
+		udelay(1);
+		/* Restore registers */
+		emm_cfg.s.bus_ena = bus_ena;
+		write_csr(mmc, MIO_EMM_CFG(), emm_cfg.u);
+		if (host->tap_requires_noclk) {
+			/* Turn off the clock */
+			emm_debug.u = read_csr(mmc, MIO_EMM_DEBUG());
+			emm_debug.s.emmc_clk_disable = 1;
+			write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+			udelay(1);
+			emm_debug.s.rdsync_rst = 1;
+			write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+			udelay(1);
+		}
+
+		write_csr(mmc, MIO_EMM_TIMING(), emm_timing.u);
+		if (host->tap_requires_noclk) {
+			/* Turn the clock back on */
+			udelay(1);
+			emm_debug.s.rdsync_rst = 0;
+			write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+			udelay(1);
+			emm_debug.s.emmc_clk_disable = 0;
+			write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+		}
+		udelay(1);
+		write_csr(mmc, MIO_EMM_IO_CTL(), emm_io_ctl.u);
+		/* Restore the switch register, writing bus_id last */
+		bus_id = emm_switch.s.bus_id;
+		emm_switch.s.bus_id = 0;
+		write_csr(mmc, MIO_EMM_SWITCH(), emm_switch.u);
+		emm_switch.s.bus_id = bus_id;
+		write_csr(mmc, MIO_EMM_SWITCH(), emm_switch.u);
+		write_csr(mmc, MIO_EMM_WDOG(), emm_wdog.u);
+		write_csr(mmc, MIO_EMM_STS_MASK(), emm_sts_mask.u);
+		write_csr(mmc, MIO_EMM_RCA(), mmc->rca);
+		write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+
+		if (!emm_tap.s.delay) {
+			pr_err("%s: Error: delay calibration failed, timed out.\n",
+			       __func__);
+			/* Set to default value if timed out */
+			emm_tap.s.delay = MMC_DEFAULT_TAP_DELAY;
+			return -ETIMEDOUT;
+		}
+	}
+	/* Picoseconds per tap (NOTE(review): this integer division truncates
+	 * despite the original "round up" comment)
+	 */
+	host->timing_taps = (10 * 1000 * emm_tap.s.delay) / TOTAL_NO_OF_TAPS;
+	debug("%s(%s): timing taps: %llu, delay: %u\n",
+	      __func__, mmc->dev->name, host->timing_taps, emm_tap.s.delay);
+	host->timing_calibrated = true;
+	return 0;
+}
+
+/**
+ * Sets the input (sampling) bus timing for the current mode
+ *
+ * @param	mmc	mmc device
+ *
+ * @return	0 (always succeeds)
+ */
+static int octeontx_mmc_set_input_bus_timing(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX)) {
+		/* OcteonTX uses the simpler SAMPLE register with fixed
+		 * device-tree supplied clock skews.
+		 */
+		union mio_emm_sample sample;
+
+		sample.u = 0;
+		sample.s.cmd_cnt = slot->cmd_clk_skew;
+		sample.s.dat_cnt = slot->dat_clk_skew;
+		write_csr(mmc, MIO_EMM_SAMPLE(), sample.u);
+	} else {
+		/* OcteonTX2 uses per-mode input taps: prefer tuned values,
+		 * fall back to defaults (HS400 can reuse HS200 taps since
+		 * HS400 is entered via HS200 tuning).
+		 */
+		union mio_emm_timing timing;
+
+		timing.u = read_csr(mmc, MIO_EMM_TIMING());
+		if (mmc->selected_mode == MMC_HS_200) {
+			if (slot->hs200_tuned) {
+				timing.s.cmd_in_tap =
+					slot->hs200_taps.s.cmd_in_tap;
+				timing.s.data_in_tap =
+					slot->hs200_taps.s.data_in_tap;
+			} else {
+				pr_warn("%s(%s): Warning: hs200 timing not tuned\n",
+					__func__, mmc->dev->name);
+				timing.s.cmd_in_tap =
+					MMC_DEFAULT_HS200_CMD_IN_TAP;
+				timing.s.data_in_tap =
+					MMC_DEFAULT_HS200_DATA_IN_TAP;
+			}
+		} else if (mmc->selected_mode == MMC_HS_400) {
+			if (slot->hs400_tuned) {
+				timing.s.cmd_in_tap =
+					slot->hs400_taps.s.cmd_in_tap;
+				timing.s.data_in_tap =
+					slot->hs400_taps.s.data_in_tap;
+			} else if (slot->hs200_tuned) {
+				timing.s.cmd_in_tap =
+					slot->hs200_taps.s.cmd_in_tap;
+				timing.s.data_in_tap =
+					slot->hs200_taps.s.data_in_tap;
+			} else {
+				pr_warn("%s(%s): Warning: hs400 timing not tuned\n",
+					__func__, mmc->dev->name);
+				timing.s.cmd_in_tap =
+					MMC_DEFAULT_HS200_CMD_IN_TAP;
+				timing.s.data_in_tap =
+					MMC_DEFAULT_HS200_DATA_IN_TAP;
+			}
+		} else if (slot->tuned) {
+			timing.s.cmd_in_tap = slot->taps.s.cmd_in_tap;
+			timing.s.data_in_tap = slot->taps.s.data_in_tap;
+		} else {
+			timing.s.cmd_in_tap = MMC_DEFAULT_CMD_IN_TAP;
+			timing.s.data_in_tap = MMC_DEFAULT_DATA_IN_TAP;
+		}
+		octeontx_mmc_set_emm_timing(mmc, timing);
+	}
+
+	return 0;
+}
+
+/**
+ * Sets the default bus timing for the current mode.
+ *
+ * @param	mmc	mmc data structure
+ *
+ * @return	0 for success, error otherwise
+ */
+static int octeontx_mmc_set_output_bus_timing(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	union mio_emm_timing timing;
+	int cout_bdelay, dout_bdelay;
+	unsigned int cout_delay, dout_delay;
+	char env_name[32];
+
+	/* OcteonTX (CN8XXX) has no output tap registers */
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX))
+		return 0;
+
+	debug("%s(%s)\n", __func__, mmc->dev->name);
+	if (slot->is_asim || slot->is_emul)
+		return 0;
+
+	octeontx_mmc_calibrate_delay(mmc);
+
+	/* Select output delays (in picoseconds) based on clock speed/mode */
+	if (mmc->clock < 26000000) {
+		cout_delay = 5000;
+		dout_delay = 5000;
+	} else if (mmc->clock <= 52000000) {
+		cout_delay = 2500;
+		dout_delay = 2500;
+	} else if (!mmc_is_mode_ddr(mmc->selected_mode)) {
+		cout_delay = slot->cmd_out_hs200_delay;
+		dout_delay = slot->data_out_hs200_delay;
+	} else {
+		cout_delay = slot->cmd_out_hs400_delay;
+		dout_delay = slot->data_out_hs400_delay;
+	}
+
+	/* Allow an environment override of the data output delay.
+	 * NOTE(review): the variable name always says "hs200" even though
+	 * this override is applied in every mode — confirm intended.
+	 */
+	snprintf(env_name, sizeof(env_name), "mmc%d_hs200_dout_delay_ps",
+		 slot->bus_id);
+	dout_delay = env_get_ulong(env_name, 10, dout_delay);
+	debug("%s: dout_delay: %u\n", __func__, dout_delay);
+
+	/* Convert picosecond delays to tap counts */
+	cout_bdelay = octeontx2_mmc_calc_delay(mmc, cout_delay);
+	dout_bdelay = octeontx2_mmc_calc_delay(mmc, dout_delay);
+
+	debug("%s: cmd output delay: %u, data output delay: %u, cmd bdelay: %d, data bdelay: %d, clock: %d\n",
+	      __func__, cout_delay, dout_delay, cout_bdelay, dout_bdelay,
+	      mmc->clock);
+	if (cout_bdelay < 0 || dout_bdelay < 0) {
+		pr_err("%s: Error: could not calculate command and/or data clock skew\n",
+		       __func__);
+		return -1;
+	}
+	timing.u = read_csr(mmc, MIO_EMM_TIMING());
+	timing.s.cmd_out_tap = cout_bdelay;
+	timing.s.data_out_tap = dout_bdelay;
+	/* Remember the taps for the active mode */
+	if (mmc->selected_mode == MMC_HS_200) {
+		slot->hs200_taps.s.cmd_out_tap = cout_bdelay;
+		slot->hs200_taps.s.data_out_tap = dout_bdelay;
+	} else if (mmc->selected_mode == MMC_HS_400) {
+		slot->hs400_taps.s.cmd_out_tap = cout_bdelay;
+		slot->hs400_taps.s.data_out_tap = dout_bdelay;
+	} else {
+		slot->taps.s.cmd_out_tap = cout_bdelay;
+		slot->taps.s.data_out_tap = dout_bdelay;
+	}
+	octeontx_mmc_set_emm_timing(mmc, timing);
+	debug("%s(%s): bdelay: %d/%d, clock: %d, ddr: %s, timing taps: %llu, do: %d, di: %d, co: %d, ci: %d\n",
+	      __func__, mmc->dev->name, cout_bdelay, dout_bdelay, mmc->clock,
+	      mmc->ddr_mode ? "yes" : "no",
+	      mmc_to_host(mmc)->timing_taps,
+	      timing.s.data_out_tap,
+	      timing.s.data_in_tap,
+	      timing.s.cmd_out_tap,
+	      timing.s.cmd_in_tap);
+
+	return 0;
+}
+
+/* Clamp the slot's requested clock into [f_min, f_max] and apply it */
+static void octeontx_mmc_set_clock(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	uint hz = (uint)slot->clock;
+
+	if (hz > mmc->cfg->f_max)
+		hz = mmc->cfg->f_max;
+	if (hz < mmc->cfg->f_min)
+		hz = mmc->cfg->f_min;
+	debug("%s(%s): f_min: %u, f_max: %u, clock: %u\n", __func__,
+	      mmc->dev->name, mmc->cfg->f_min, mmc->cfg->f_max, hz);
+	slot->clock = hz;
+	mmc->clock = hz;
+}
+
+/**
+ * This switches I/O power as needed when switching between slots.
+ *
+ * @param	mmc	mmc data structure
+ */
+static void octeontx_mmc_switch_io(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	struct octeontx_mmc_host *host = slot->host;
+	struct mmc *last_mmc = host->last_mmc;
+	/* Tracks the regulator left enabled by the previous switch */
+	static struct udevice *last_reg;
+	union mio_emm_cfg emm_cfg;
+	int bus;
+	static bool initialized;
+
+	/* First time? */
+	if (!initialized || mmc != host->last_mmc) {
+		struct mmc *ommc;
+
+		/* Switch to bus 3 which is unused */
+		emm_cfg.u = read_csr(mmc, MIO_EMM_CFG());
+		emm_cfg.s.bus_ena = 1 << 3;
+		write_csr(mmc, MIO_EMM_CFG(), emm_cfg.u);
+
+		/* Turn off all other I/O interfaces with first initialization
+		 * if at least one supply was found.
+		 */
+		for (bus = 0; bus <= OCTEONTX_MAX_MMC_SLOT; bus++) {
+			ommc = &host->slots[bus].mmc;
+
+			/* Handle self case later */
+			if (ommc == mmc || !ommc->vqmmc_supply)
+				continue;
+
+			/* Skip if we're not switching regulators */
+			if (last_reg == mmc->vqmmc_supply)
+				continue;
+
+			/* Turn off other regulators */
+			if (ommc->vqmmc_supply != mmc->vqmmc_supply)
+				regulator_set_enable(ommc->vqmmc_supply, false);
+		}
+		/* Turn ourself on */
+		if (mmc->vqmmc_supply && last_reg != mmc->vqmmc_supply)
+			regulator_set_enable(mmc->vqmmc_supply, true);
+		mdelay(1);	/* Settle time */
+		/* Switch to new bus */
+		emm_cfg.s.bus_ena = 1 << slot->bus_id;
+		write_csr(mmc, MIO_EMM_CFG(), emm_cfg.u);
+		last_reg = mmc->vqmmc_supply;
+		initialized = true;
+		return;
+	}
+
+	/* No change in device */
+	if (last_mmc == mmc)
+		return;
+
+	if (!last_mmc) {
+		pr_warn("%s(%s): No previous slot detected in IO slot switch!\n",
+			__func__, mmc->dev->name);
+		return;
+	}
+
+	debug("%s(%s): last: %s, supply: %p\n", __func__, mmc->dev->name,
+	      last_mmc->dev->name, mmc->vqmmc_supply);
+
+	/* The supply is the same so we do nothing */
+	if (last_mmc->vqmmc_supply == mmc->vqmmc_supply)
+		return;
+
+	/* Turn off the old slot I/O supply */
+	if (last_mmc->vqmmc_supply) {
+		debug("%s(%s): Turning off IO to %s, supply: %s\n",
+		      __func__, mmc->dev->name, last_mmc->dev->name,
+		      last_mmc->vqmmc_supply->name);
+		regulator_set_enable(last_mmc->vqmmc_supply, false);
+	}
+	/* Turn on the new slot I/O supply */
+	if (mmc->vqmmc_supply)  {
+		debug("%s(%s): Turning on IO to slot %d, supply: %s\n",
+		      __func__, mmc->dev->name, slot->bus_id,
+		      mmc->vqmmc_supply->name);
+		regulator_set_enable(mmc->vqmmc_supply, true);
+	}
+	/* Allow power to settle */
+	mdelay(1);
+}
+
+/**
+ * Called to switch between mmc devices
+ *
+ * @param	mmc	new mmc device
+ */
+static void octeontx_mmc_switch_to(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	struct octeontx_mmc_slot *old_slot;
+	struct octeontx_mmc_host *host = slot->host;
+	union mio_emm_switch emm_switch;
+	union mio_emm_sts_mask emm_sts_mask;
+	union mio_emm_rca emm_rca;
+
+	/* Nothing to do if this slot is already selected */
+	if (slot->bus_id == host->last_slotid)
+		return;
+
+	debug("%s(%s) switching from slot %d to slot %d\n", __func__,
+	      mmc->dev->name, host->last_slotid, slot->bus_id);
+	octeontx_mmc_switch_io(mmc);
+
+	/* Preserve the outgoing slot's switch and RCA state */
+	if (host->last_slotid >= 0 && slot->valid) {
+		old_slot = &host->slots[host->last_slotid];
+		old_slot->cached_switch.u = read_csr(mmc, MIO_EMM_SWITCH());
+		old_slot->cached_rca.u = read_csr(mmc, MIO_EMM_RCA());
+	}
+	if (mmc->rca)
+		write_csr(mmc, MIO_EMM_RCA(), mmc->rca);
+	/* Restore the new slot's cached bus configuration */
+	emm_switch = slot->cached_switch;
+	do_switch(mmc, emm_switch);
+	emm_rca.u = 0;
+	emm_rca.s.card_rca = mmc->rca;
+	write_csr(mmc, MIO_EMM_RCA(), emm_rca.u);
+	mdelay(100);
+
+	set_wdog(mmc, 100000);
+	if (octeontx_mmc_set_output_bus_timing(mmc) ||
+	    octeontx_mmc_set_input_bus_timing(mmc))
+		pr_err("%s(%s): Error setting bus timing\n", __func__,
+		       mmc->dev->name);
+	octeontx_mmc_io_drive_setup(mmc);
+
+	emm_sts_mask.u = 0;
+	emm_sts_mask.s.sts_msk = 1 << 7 | 1 << 22 | 1 << 23 | 1 << 19;
+	write_csr(mmc, MIO_EMM_STS_MASK(), emm_sts_mask.u);
+	host->last_slotid = slot->bus_id;
+	host->last_mmc = mmc;
+	mdelay(10);
+}
+
+/**
+ * Perform initial timing configuration
+ *
+ * @param mmc	mmc device
+ *
+ * @return 0 for success
+ *
+ * NOTE: This will need to be updated when new silicon comes out
+ */
+static int octeontx_mmc_init_timing(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	union mio_emm_timing tap;
+
+	/* Simulator/emulator models do not model tap delays */
+	if (slot->is_asim || slot->is_emul)
+		return 0;
+
+	debug("%s(%s)\n", __func__, mmc->dev->name)
+	/* Program the default command/data input and output taps */;
+	tap.u = 0;
+	tap.s.cmd_out_tap = MMC_DEFAULT_CMD_OUT_TAP;
+	tap.s.data_out_tap = MMC_DEFAULT_DATA_OUT_TAP;
+	tap.s.cmd_in_tap = MMC_DEFAULT_CMD_IN_TAP;
+	tap.s.data_in_tap = MMC_DEFAULT_DATA_IN_TAP;
+	octeontx_mmc_set_emm_timing(mmc, tap);
+	return 0;
+}
+
+/**
+ * Perform low-level initialization
+ *
+ * @param	mmc	mmc device
+ *
+ * @return	0 for success, error otherwise
+ */
+static int octeontx_mmc_init_lowlevel(struct mmc *mmc)
+{
+	struct octeontx_mmc_slot *slot = mmc_to_slot(mmc);
+	struct octeontx_mmc_host *host = slot->host;
+	union mio_emm_switch emm_switch;
+	u32 clk_period;
+
+	debug("%s(%s): lowlevel init for slot %d\n", __func__,
+	      mmc->dev->name, slot->bus_id);
+	/* Pulse the bus enable bit to reset this slot's bus */
+	host->emm_cfg.s.bus_ena &= ~(1 << slot->bus_id);
+	write_csr(mmc, MIO_EMM_CFG(), host->emm_cfg.u);
+	udelay(100);
+	host->emm_cfg.s.bus_ena |= 1 << slot->bus_id;
+	write_csr(mmc, MIO_EMM_CFG(), host->emm_cfg.u);
+	udelay(10);
+	/* Start at the minimum (identification) frequency */
+	slot->clock = mmc->cfg->f_min;
+	octeontx_mmc_set_clock(&slot->mmc);
+
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX2)) {
+		/* Work around a clock glitch on affected silicon by forcing
+		 * the clock on before calibration.
+		 */
+		if (host->cond_clock_glitch) {
+			union mio_emm_debug emm_debug;
+
+			emm_debug.u = read_csr(mmc, MIO_EMM_DEBUG());
+			emm_debug.s.clk_on = 1;
+			write_csr(mmc, MIO_EMM_DEBUG(), emm_debug.u);
+		}
+		octeontx_mmc_calibrate_delay(&slot->mmc);
+	}
+
+	clk_period = octeontx_mmc_calc_clk_period(mmc);
+	emm_switch.u = 0;
+	emm_switch.s.power_class = 10;
+	/* 50% duty cycle */
+	emm_switch.s.clk_lo = clk_period / 2;
+	emm_switch.s.clk_hi = clk_period / 2;
+
+	emm_switch.s.bus_id = slot->bus_id;
+	debug("%s: Performing switch\n", __func__);
+	do_switch(mmc, emm_switch);
+	slot->cached_switch.u = emm_switch.u;
+
+	if (!IS_ENABLED(CONFIG_ARCH_OCTEONTX))
+		octeontx_mmc_init_timing(mmc);
+
+	set_wdog(mmc, 1000000); /* Set to 1 second */
+	write_csr(mmc, MIO_EMM_STS_MASK(), 0xe4390080ull);
+	write_csr(mmc, MIO_EMM_RCA(), 1);
+	mdelay(10);
+	debug("%s: done\n", __func__);
+	return 0;
+}
+
+/**
+ * Translates a voltage number to bits in MMC register
+ *
+ * @param	voltage	voltage in microvolts
+ *
+ * @return	MMC register value for voltage
+ */
+static u32 xlate_voltage(u32 voltage)
+{
+	/* Millivolt ranges and their corresponding OCR voltage bits */
+	static const struct {
+		u32 low;
+		u32 high;
+		u32 mask;
+	} ranges[] = {
+		{ 1650, 1950, MMC_VDD_165_195 },
+		{ 2000, 2100, MMC_VDD_20_21 },
+		{ 2100, 2200, MMC_VDD_21_22 },
+		{ 2200, 2300, MMC_VDD_22_23 },
+		{ 2300, 2400, MMC_VDD_23_24 },
+		{ 2400, 2500, MMC_VDD_24_25 },
+		{ 2500, 2600, MMC_VDD_25_26 },
+		{ 2600, 2700, MMC_VDD_26_27 },
+		{ 2700, 2800, MMC_VDD_27_28 },
+		{ 2800, 2900, MMC_VDD_28_29 },
+		{ 2900, 3000, MMC_VDD_29_30 },
+		{ 3000, 3100, MMC_VDD_30_31 },
+		{ 3100, 3200, MMC_VDD_31_32 },
+		{ 3200, 3300, MMC_VDD_32_33 },
+		{ 3300, 3400, MMC_VDD_33_34 },
+		{ 3400, 3500, MMC_VDD_34_35 },
+		{ 3500, 3600, MMC_VDD_35_36 },
+	};
+	u32 volt = 0;
+	u32 i;
+
+	/* Convert to millivolts */
+	voltage /= 1000;
+	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
+		if (voltage >= ranges[i].low && voltage <= ranges[i].high)
+			volt |= ranges[i].mask;
+	}
+
+	return volt;
+}
+
+/**
+ * Check if a slot is valid in the device tree
+ *
+ * @param	dev	slot device to check
+ *
+ * @return	true if status reports "ok" or "okay" or if no status,
+ *		false otherwise.
+ */
+static bool octeontx_mmc_get_valid(struct udevice *dev)
+{
+	const char *status = ofnode_read_string(dev->node, "status");
+
+	/* A missing status property counts as enabled */
+	return !status || !strncmp(status, "ok", 2);
+}
+
+/**
+ * Reads slot configuration from the device tree
+ *
+ * @param	dev	slot device
+ *
+ * @return	0 on success, otherwise error
+ */
+static int octeontx_mmc_get_config(struct udevice *dev)
+{
+	struct octeontx_mmc_slot *slot = dev_to_mmc_slot(dev);
+	uint voltages[2];
+	uint low, high;
+	char env_name[32];
+	int err;
+	ofnode node = dev->node;
+	int bus_width = 1;
+	ulong new_max_freq;
+
+	debug("%s(%s)", __func__, dev->name);
+	slot->cfg.name = dev->name;
+
+	/* Maximum frequency from the device tree, overridable via env */
+	slot->cfg.f_max = ofnode_read_s32_default(dev->node, "max-frequency",
+						  26000000);
+	snprintf(env_name, sizeof(env_name), "mmc_max_frequency%d",
+		 slot->bus_id);
+
+	new_max_freq = env_get_ulong(env_name, 10, slot->cfg.f_max);
+	debug("Reading %s, got %lu\n", env_name, new_max_freq);
+
+	if (new_max_freq != slot->cfg.f_max) {
+		printf("Overriding device tree MMC maximum frequency %u to %lu\n",
+		       slot->cfg.f_max, new_max_freq);
+		slot->cfg.f_max = new_max_freq;
+	}
+	slot->cfg.f_min = 400000;
+	slot->cfg.b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
+
+	/* OcteonTX2-only HS200/HS400 tuning parameters */
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX2)) {
+		slot->hs400_tuning_block =
+			ofnode_read_s32_default(dev->node,
+						"marvell,hs400-tuning-block",
+						-1);
+		debug("%s(%s): mmc HS400 tuning block: %d\n", __func__,
+		      dev->name, slot->hs400_tuning_block);
+
+		slot->hs200_tap_adj =
+			ofnode_read_s32_default(dev->node,
+						"marvell,hs200-tap-adjust", 0);
+		debug("%s(%s): hs200-tap-adjust: %d\n", __func__, dev->name,
+		      slot->hs200_tap_adj);
+		slot->hs400_tap_adj =
+			ofnode_read_s32_default(dev->node,
+						"marvell,hs400-tap-adjust", 0);
+		debug("%s(%s): hs400-tap-adjust: %d\n", __func__, dev->name,
+		      slot->hs400_tap_adj);
+	}
+
+	/* Translate the DT voltage range into OCR bits; default 3.2-3.4V */
+	err = ofnode_read_u32_array(dev->node, "voltage-ranges", voltages, 2);
+	if (err) {
+		slot->cfg.voltages = MMC_VDD_32_33 | MMC_VDD_33_34;
+	} else {
+		low = xlate_voltage(voltages[0]);
+		high = xlate_voltage(voltages[1]);
+		debug("  low voltage: 0x%x (%u), high: 0x%x (%u)\n",
+		      low, voltages[0], high, voltages[1]);
+		if (low > high || !low || !high) {
+			pr_err("Invalid MMC voltage range [%u-%u] specified for %s\n",
+			       low, high, dev->name);
+			return -1;
+		}
+		/* Set every OCR bit between the low and high bounds */
+		slot->cfg.voltages = 0;
+		do {
+			slot->cfg.voltages |= low;
+			low <<= 1;
+		} while (low <= high);
+	}
+	debug("%s: config voltages: 0x%x\n", __func__, slot->cfg.voltages);
+	/* -1 means not specified; see octeontx_mmc_io_drive_setup() */
+	slot->slew = ofnode_read_s32_default(node, "cavium,clk-slew", -1);
+	slot->drive = ofnode_read_s32_default(node, "cavium,drv-strength", -1);
+	gpio_request_by_name(dev, "cd-gpios", 0, &slot->cd_gpio, GPIOD_IS_IN);
+	slot->cd_inverted = ofnode_read_bool(node, "cd-inverted");
+	gpio_request_by_name(dev, "wp-gpios", 0, &slot->wp_gpio, GPIOD_IS_IN);
+	slot->wp_inverted = ofnode_read_bool(node, "wp-inverted");
+	if (slot->cfg.voltages & MMC_VDD_165_195) {
+		slot->is_1_8v = true;
+		slot->is_3_3v = false;
+	} else if (slot->cfg.voltages & (MMC_VDD_30_31 | MMC_VDD_31_32 |
+					 MMC_VDD_33_34 | MMC_VDD_34_35 |
+					 MMC_VDD_35_36)) {
+		slot->is_1_8v = false;
+		slot->is_3_3v = true;
+	}
+
+	bus_width = ofnode_read_u32_default(node, "bus-width", 1);
+	/* Note fall-through */
+	switch (bus_width) {
+	case 8:
+		slot->cfg.host_caps |= MMC_MODE_8BIT;
+		/* fall through */
+	case 4:
+		slot->cfg.host_caps |= MMC_MODE_4BIT;
+		/* fall through */
+	case 1:
+		slot->cfg.host_caps |= MMC_MODE_1BIT;
+		break;
+	}
+	if (ofnode_read_bool(node, "no-1-8-v")) {
+		slot->is_3_3v = true;
+		slot->is_1_8v = false;
+		if (!(slot->cfg.voltages & (MMC_VDD_32_33 | MMC_VDD_33_34)))
+			pr_warn("%s(%s): voltages indicate 3.3v but 3.3v not supported\n",
+				__func__, dev->name);
+	}
+	if (ofnode_read_bool(node, "mmc-ddr-3-3v")) {
+		slot->is_3_3v = true;
+		slot->is_1_8v = false;
+		if (!(slot->cfg.voltages & (MMC_VDD_32_33 | MMC_VDD_33_34)))
+			pr_warn("%s(%s): voltages indicate 3.3v but 3.3v not supported\n",
+				__func__, dev->name);
+	}
+	if (ofnode_read_bool(node, "cap-sd-highspeed") ||
+	    ofnode_read_bool(node, "cap-mmc-highspeed") ||
+	    ofnode_read_bool(node, "sd-uhs-sdr25"))
+		slot->cfg.host_caps |= MMC_MODE_HS;
+	if (slot->cfg.f_max >= 50000000 &&
+	    slot->cfg.host_caps & MMC_MODE_HS)
+		slot->cfg.host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
+	if (ofnode_read_bool(node, "sd-uhs-sdr50"))
+		slot->cfg.host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
+	if (ofnode_read_bool(node, "sd-uhs-ddr50"))
+		slot->cfg.host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz |
+				       MMC_MODE_DDR_52MHz;
+
+	/* HS200/HS400 modes and output delays (real OcteonTX2 silicon only) */
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX2)) {
+		if (!slot->is_asim && !slot->is_emul) {
+			if (ofnode_read_bool(node, "mmc-hs200-1_8v"))
+				slot->cfg.host_caps |= MMC_MODE_HS200 |
+					MMC_MODE_HS_52MHz;
+			if (ofnode_read_bool(node, "mmc-hs400-1_8v"))
+				slot->cfg.host_caps |= MMC_MODE_HS400 |
+					MMC_MODE_HS_52MHz |
+					MMC_MODE_HS200 |
+					MMC_MODE_DDR_52MHz;
+			slot->cmd_out_hs200_delay =
+				ofnode_read_u32_default(node,
+					"marvell,cmd-out-hs200-dly",
+					MMC_DEFAULT_HS200_CMD_OUT_DLY);
+			debug("%s(%s): HS200 cmd out delay: %d\n",
+			      __func__, dev->name, slot->cmd_out_hs200_delay);
+			slot->data_out_hs200_delay =
+				ofnode_read_u32_default(node,
+					"marvell,data-out-hs200-dly",
+					MMC_DEFAULT_HS200_DATA_OUT_DLY);
+			debug("%s(%s): HS200 data out delay: %d\n",
+			      __func__, dev->name, slot->data_out_hs200_delay);
+			slot->cmd_out_hs400_delay =
+				ofnode_read_u32_default(node,
+					"marvell,cmd-out-hs400-dly",
+					MMC_DEFAULT_HS400_CMD_OUT_DLY);
+			debug("%s(%s): HS400 cmd out delay: %d\n",
+			      __func__, dev->name, slot->cmd_out_hs400_delay);
+			slot->data_out_hs400_delay =
+				ofnode_read_u32_default(node,
+					"marvell,data-out-hs400-dly",
+					MMC_DEFAULT_HS400_DATA_OUT_DLY);
+			debug("%s(%s): HS400 data out delay: %d\n",
+			      __func__, dev->name, slot->data_out_hs400_delay);
+		}
+	}
+
+	slot->disable_ddr = ofnode_read_bool(node, "marvell,disable-ddr");
+	slot->non_removable = ofnode_read_bool(node, "non-removable");
+	slot->cmd_clk_skew = ofnode_read_u32_default(node,
+						     "cavium,cmd-clk-skew", 0);
+	slot->dat_clk_skew = ofnode_read_u32_default(node,
+						     "cavium,dat-clk-skew", 0);
+	debug("%s(%s): host caps: 0x%x\n", __func__,
+	      dev->name, slot->cfg.host_caps);
+	return 0;
+}
+
+/**
+ * Probes a MMC slot
+ *
+ * @param	dev	mmc device
+ *
+ * @return	0 for success, error otherwise
+ */
+static int octeontx_mmc_slot_probe(struct udevice *dev)
+{
+	struct octeontx_mmc_slot *slot;
+	struct mmc *mmc;
+	int err;
+
+	debug("%s(%s)\n", __func__, dev->name);
+	/* The host (parent) driver must have probed first; warn if not */
+	if (!host_probed) {
+		pr_err("%s(%s): Error: host not probed yet\n",
+		       __func__, dev->name);
+	}
+	slot = dev_to_mmc_slot(dev);
+	mmc = &slot->mmc;
+	mmc->dev = dev;
+
+	slot->valid = false;
+	if (!octeontx_mmc_get_valid(dev)) {
+		debug("%s(%s): slot is invalid\n", __func__, dev->name);
+		return -ENODEV;
+	}
+
+	debug("%s(%s): Getting config\n", __func__, dev->name);
+	err = octeontx_mmc_get_config(dev);
+	if (err) {
+		pr_err("probe(%s): Error getting config\n", dev->name);
+		return err;
+	}
+
+	debug("%s(%s): mmc bind, mmc: %p\n", __func__, dev->name, &slot->mmc);
+	err = mmc_bind(dev, &slot->mmc, &slot->cfg);
+	if (err) {
+		pr_err("%s(%s): Error binding mmc\n", __func__, dev->name);
+		return -1;
+	}
+
+	/* For some reason, mmc_bind always assigns priv to the device */
+	slot->mmc.priv = slot;
+
+	debug("%s(%s): lowlevel init\n", __func__, dev->name);
+	err = octeontx_mmc_init_lowlevel(mmc);
+	if (err) {
+		pr_err("probe(%s): Low-level init failed\n", dev->name);
+		return err;
+	}
+
+	slot->valid = true;
+
+	debug("%s(%s):\n"
+	      "  base address : %p\n"
+	      "  bus id       : %d\n", __func__, dev->name,
+		slot->base_addr, slot->bus_id);
+
+	return err;
+}
+
+/**
+ * MMC slot driver operations
+ */
+static const struct dm_mmc_ops octeontx_hsmmc_ops = {
+	.send_cmd = octeontx_mmc_dev_send_cmd,
+	.set_ios = octeontx_mmc_set_ios,
+	.get_cd = octeontx_mmc_get_cd,
+	.get_wp = octeontx_mmc_get_wp,
+	/* Tuning is only compiled in when the MMC core supports it */
+#ifdef MMC_SUPPORTS_TUNING
+	.execute_tuning = octeontx_mmc_execute_tuning,
+#endif
+};
+
+/* Slot pseudo-devices are matched by the generic "mmc-slot" compatible */
+static const struct udevice_id octeontx_hsmmc_ids[] = {
+	{ .compatible = "mmc-slot" },
+	{ }
+};
+
+U_BOOT_DRIVER(octeontx_hsmmc_slot) = {
+	.name	= "octeontx_hsmmc_slot",
+	.id	= UCLASS_MMC,
+	.of_match = of_match_ptr(octeontx_hsmmc_ids),
+	.probe = octeontx_mmc_slot_probe,
+	.ops = &octeontx_hsmmc_ops,
+};
+
+/*****************************************************************
+ * PCI host driver
+ *
+ * The PCI host driver contains the resources used by all of the
+ * slot drivers.
+ *
+ * The slot drivers are pseudo drivers.
+ */
+
+/**
+ * Probe the MMC host controller
+ *
+ * @param	dev	mmc host controller device
+ *
+ * @return	0 for success, -1 on error
+ */
+static int octeontx_mmc_host_probe(struct udevice *dev)
+{
+	pci_dev_t bdf = dm_pci_get_bdf(dev);
+	struct octeontx_mmc_host *host = dev_get_priv(dev);
+	union mio_emm_int emm_int;
+	u8 rev;
+
+	debug("%s(%s): Entry host: %p\n", __func__, dev->name, host);
+
+	if (!octeontx_mmc_get_valid(dev)) {
+		debug("%s(%s): mmc host not valid\n", __func__, dev->name);
+		return -ENODEV;
+	}
+	memset(host, 0, sizeof(*host));
+	/* The controller registers live behind PCI BAR 0 */
+	host->base_addr = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+					 PCI_REGION_MEM);
+	if (!host->base_addr) {
+		pr_err("%s: Error: MMC base address not found\n", __func__);
+		return -1;
+	}
+	host->dev = dev;
+	debug("%s(%s): Base address: %p\n", __func__, dev->name,
+	      host->base_addr);
+	if (!dev_has_of_node(dev)) {
+		pr_err("%s: No device tree information found\n", __func__);
+		return -1;
+	}
+	host->node = dev->node;
+	dev->req_seq = PCI_FUNC(bdf);
+	/* No slot selected yet */
+	host->last_slotid = -1;
+	if (otx_is_platform(PLATFORM_ASIM))
+		host->is_asim = true;
+	if (otx_is_platform(PLATFORM_EMULATOR))
+		host->is_emul = true;
+	host->dma_wait_delay =
+		ofnode_read_u32_default(dev->node, "marvell,dma-wait-delay", 1);
+	/* Force reset of eMMC */
+	writeq(0, host->base_addr + MIO_EMM_CFG());
+	debug("%s: Clearing MIO_EMM_CFG\n", __func__);
+	udelay(100);
+	/* Acknowledge any pending interrupts (write-1-to-clear) */
+	emm_int.u = readq(host->base_addr + MIO_EMM_INT());
+	debug("%s: Writing 0x%llx to MIO_EMM_INT\n", __func__, emm_int.u);
+	writeq(emm_int.u, host->base_addr + MIO_EMM_INT());
+
+	debug("%s(%s): Getting I/O clock\n", __func__, dev->name);
+	host->sys_freq = octeontx_get_io_clock();
+	debug("%s(%s): I/O clock %llu\n", __func__, dev->name, host->sys_freq);
+
+	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX2)) {
+		/* Flags for issues to work around */
+		dm_pci_read_config8(dev, PCI_REVISION_ID, &rev);
+		if (otx_is_soc(CN96XX)) {
+			debug("%s: CN96XX revision %d\n", __func__, rev);
+			switch (rev) {
+			case 0:
+				host->calibrate_glitch = true;
+				host->cond_clock_glitch = true;
+				break;
+			case 1:
+				break;
+			case 2:
+				break;
+			case 0x10:	/* C0 */
+				host->hs400_skew_needed = true;
+				debug("HS400 skew support enabled\n");
+				fallthrough;
+			default:
+				debug("CN96XX rev C0+ detected\n");
+				host->tap_requires_noclk = true;
+				break;
+			}
+		} else if (otx_is_soc(CN95XX)) {
+			/* CN95XX pass 1.0 shares the clock glitch erratum */
+			if (!rev)
+				host->cond_clock_glitch = true;
+		}
+	}
+
+	host_probed = true;
+
+	return 0;
+}
+
+/**
+ * This performs some initial setup before a probe occurs.
+ *
+ * @param dev:	MMC slot device
+ *
+ * @return 0 for success, -1 on failure
+ *
+ * Do some pre-initialization before probing a slot.
+ */
+static int octeontx_mmc_host_child_pre_probe(struct udevice *dev)
+{
+	struct octeontx_mmc_host *host = dev_get_priv(dev_get_parent(dev));
+	struct octeontx_mmc_slot *slot;
+	struct mmc_uclass_priv *upriv;
+	ofnode node = dev->node;
+	u32 bus_id;
+	char name[16];
+	int err;
+
+	debug("%s(%s) Pre-Probe\n", __func__, dev->name);
+	/* The "reg" property selects which slot (bus) this child uses */
+	if (ofnode_read_u32(node, "reg", &bus_id)) {
+		pr_err("%s(%s): Error: \"reg\" not found in device tree\n",
+		       __func__, dev->name);
+		return -1;
+	}
+	if (bus_id > OCTEONTX_MAX_MMC_SLOT) {
+		pr_err("%s(%s): Error: \"reg\" out of range of 0..%d\n",
+		       __func__, dev->name, OCTEONTX_MAX_MMC_SLOT);
+		return -1;
+	}
+
+	/* Wire the slot to its host and inherit the host's properties */
+	slot = &host->slots[bus_id];
+	dev->priv = slot;
+	slot->host = host;
+	slot->bus_id = bus_id;
+	slot->dev = dev;
+	slot->base_addr = host->base_addr;
+	slot->is_asim = host->is_asim;
+	slot->is_emul = host->is_emul;
+
+	snprintf(name, sizeof(name), "octeontx-mmc%d", bus_id);
+	err = device_set_name(dev, name);
+
+	/* Allocate the MMC uclass private data if the core has not yet */
+	if (!dev->uclass_priv) {
+		debug("%s(%s): Allocating uclass priv\n", __func__,
+		      dev->name);
+		upriv = calloc(1, sizeof(struct mmc_uclass_priv));
+		if (!upriv)
+			return -ENOMEM;
+		dev->uclass_priv = upriv;
+		/* NOTE(review): this also overwrites the shared uclass-level
+		 * priv pointer for every MMC device — confirm intended.
+		 */
+		dev->uclass->priv = upriv;
+	} else {
+		upriv = dev->uclass_priv;
+	}
+
+	upriv->mmc = &slot->mmc;
+	debug("%s: uclass priv: %p, mmc: %p\n", dev->name, upriv, upriv->mmc);
+
+	debug("%s: ret: %d\n", __func__, err);
+	return err;
+}
+
+static const struct udevice_id octeontx_hsmmc_host_ids[] = {
+	{ .compatible = "cavium,thunder-8890-mmc" },
+	{ }
+};
+
+/* The host is a MISC device; the MMC slots are its pseudo-children */
+U_BOOT_DRIVER(octeontx_hsmmc_host) = {
+	.name	= "octeontx_hsmmc_host",
+	.id	= UCLASS_MISC,
+	.of_match = of_match_ptr(octeontx_hsmmc_host_ids),
+	.probe	= octeontx_mmc_host_probe,
+	.priv_auto_alloc_size = sizeof(struct octeontx_mmc_host),
+	.child_pre_probe = octeontx_mmc_host_child_pre_probe,
+	.flags	= DM_FLAG_PRE_RELOC,
+};
+
+/* Bind the driver to the Cavium/Marvell eMMC PCI function */
+static struct pci_device_id octeontx_mmc_supported[] = {
+	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_EMMC) },
+	{ },
+};
+
+U_BOOT_PCI_DEVICE(octeontx_hsmmc_host, octeontx_mmc_supported);
diff --git a/drivers/mmc/octeontx_hsmmc.h b/drivers/mmc/octeontx_hsmmc.h
new file mode 100644
index 0000000..70844b1
--- /dev/null
+++ b/drivers/mmc/octeontx_hsmmc.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier:    GPL-2.0
+ *
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+#ifndef __OCTEONTX_HSMMC_H__
+#define __OCTEONTX_HSMMC_H__
+#include <asm/gpio.h>
+
+/** Name of our driver */
+#define OCTEONTX_MMC_DRIVER_NAME	"octeontx-hsmmc"
+
+/** Maximum supported MMC slots */
+#define OCTEONTX_MAX_MMC_SLOT		3
+
+#define POWER_ON_TIME			40 /** See SD 4.1 spec figure 6-5 */
+
+/**
+ * Timeout used when waiting for commands to complete.  We need to keep this
+ * above the hardware watchdog timeout which is usually limited to 1000ms
+ */
+#define WATCHDOG_COUNT			(1100)	/* in msecs */
+
+/**
+ * Long timeout for commands which might take a while to complete.
+ */
+#define MMC_TIMEOUT_LONG		1000
+
+/**
+ * Short timeout used for most commands in msecs
+ */
+#define MMC_TIMEOUT_SHORT		20
+
+/* NOTE(review): may duplicate a common kernel/U-Boot define - confirm */
+#define NSEC_PER_SEC			1000000000L
+
+/* Number of delay taps that can be tried during tuning */
+#define MAX_NO_OF_TAPS			64
+
+/* eMMC EXT_CSD POWER_CLASS byte index (JEDEC eMMC spec) */
+#define EXT_CSD_POWER_CLASS		187	/* R/W */
+
+/* default HS400 tuning block number */
+#define DEFAULT_HS400_TUNING_BLOCK	1
+
+/* Forward declaration; full definition follows below */
+struct octeontx_mmc_host;
+
+/** MMC/SD slot data structure */
+struct octeontx_mmc_slot {
+	struct mmc		mmc;
+	struct mmc_config	cfg;
+	struct octeontx_mmc_host *host;	/* Owning host controller */
+	struct udevice		*dev;
+	void			*base_addr;	/** Same as host base_addr */
+	u64			clock;	/* NOTE(review): presumably Hz - confirm */
+	int			bus_id;		/** slot number */
+	uint			bus_width;
+	uint			max_width;
+	int			hs200_tap_adj;
+	int			hs400_tap_adj;
+	int			hs400_tuning_block;
+	struct gpio_desc	cd_gpio;	/* card-detect GPIO */
+	struct gpio_desc	wp_gpio;	/* write-protect GPIO */
+	struct gpio_desc	power_gpio;
+	enum bus_mode		mode;
+	union mio_emm_switch	cached_switch;
+	union mio_emm_switch	want_switch;
+	union mio_emm_rca	cached_rca;
+	union mio_emm_timing	taps;	/* otx2: MIO_EMM_TIMING */
+	union mio_emm_timing	hs200_taps;
+	union mio_emm_timing	hs400_taps;
+	/* These are used to see if our tuning is still valid or not */
+	enum bus_mode		last_mode;
+	u32			last_clock;
+	u32			block_len;
+	u32			block_count;
+	int			cmd_clk_skew;
+	int			dat_clk_skew;
+	uint			cmd_cnt;	/* otx: sample cmd in delay */
+	uint			dat_cnt;	/* otx: sample data in delay */
+	uint			drive;		/* Current drive */
+	uint			slew;		/* clock skew */
+	uint			cmd_out_hs200_delay;
+	uint			data_out_hs200_delay;
+	uint			cmd_out_hs400_delay;
+	uint			data_out_hs400_delay;
+	uint			clk_period;
+	bool			valid:1;
+	bool			is_acmd:1;
+	bool			tuned:1;
+	bool			hs200_tuned:1;
+	bool			hs400_tuned:1;
+	bool			is_1_8v:1;
+	bool			is_3_3v:1;
+	bool			is_ddr:1;
+	bool			is_asim:1;	/* copied from host at pre-probe */
+	bool			is_emul:1;	/* copied from host at pre-probe */
+	bool			cd_inverted:1;
+	bool			wp_inverted:1;
+	bool			disable_ddr:1;
+	bool			non_removable:1;
+};
+
+/* XOR modifiers applied to command/response types - TODO confirm usage */
+struct octeontx_mmc_cr_mods {
+	u8 ctype_xor;
+	u8 rtype_xor;
+};
+
+/* Command (c) and response (r) type codes - presumably per-opcode */
+struct octeontx_mmc_cr {
+	u8 c;
+	u8 r;
+};
+
+/* Per-opcode type codes for eMMC, SD, and SD app (ACMD) commands */
+struct octeontx_sd_mods {
+	struct octeontx_mmc_cr mmc;
+	struct octeontx_mmc_cr sd;
+	struct octeontx_mmc_cr sdacmd;
+};
+
+/** Host controller data structure */
+struct octeontx_mmc_host {
+	struct		udevice *dev;
+	void		*base_addr;	/* controller register base */
+	struct octeontx_mmc_slot slots[OCTEONTX_MAX_MMC_SLOT + 1];
+	pci_dev_t	pdev;
+	u64		sys_freq;
+	union mio_emm_cfg emm_cfg;
+	u64		timing_taps;
+	struct mmc	*last_mmc;	/** Last mmc used */
+	ofnode		node;
+	int		cur_slotid;
+	int		last_slotid;
+	int		max_width;
+	uint		per_tap_delay;
+	uint		num_slots;
+	uint		dma_wait_delay;	/* Delay before polling DMA in usecs */
+	bool		initialized:1;
+	bool		timing_calibrated:1;
+	bool		is_asim:1;	/* running on simulator - see pre-probe */
+	bool		is_emul:1;	/* running on emulator - see pre-probe */
+	bool		calibrate_glitch:1;
+	bool		cond_clock_glitch:1;
+	bool		tap_requires_noclk:1;
+	bool		hs400_skew_needed:1;
+};
+
+/*
+ * NOTE: This was copied from the Linux kernel.
+ *
+ * MMC status in R1, for native mode (SPI bits are different)
+ * Type
+ *	e:error bit
+ *	s:status bit
+ *	r:detected and set for the actual command response
+ *	x:detected and set during command execution. the host must poll
+ *	    the card by sending status command in order to read these bits.
+ * Clear condition
+ *	a:according to the card state
+ *	b:always related to the previous command. Reception of
+ *	    a valid command will clear it (with a delay of one command)
+ *	c:clear by read
+ */
+#define R1_OUT_OF_RANGE		BIT(31)		/* er, c */
+#define R1_ADDRESS_ERROR	BIT(30)		/* erx, c */
+#define R1_BLOCK_LEN_ERROR	BIT(29)		/* er, c */
+#define R1_ERASE_SEQ_ERROR	BIT(28)		/* er, c */
+#define R1_ERASE_PARAM          BIT(27)		/* ex, c */
+#define R1_WP_VIOLATION		BIT(26)		/* erx, c */
+#define R1_CARD_IS_LOCKED	BIT(25)		/* sx, a */
+#define R1_LOCK_UNLOCK_FAILED	BIT(24)		/* erx, c */
+#define R1_COM_CRC_ERROR	BIT(23)		/* er, b */
+/* R1_ILLEGAL_COMMAND is left commented out, presumably because another
+ * header already defines it - TODO confirm (it is still used in the
+ * masks below)
+ */
+/*#define R1_ILLEGAL_COMMAND	BIT(22)*/		/* er, b */
+#define R1_CARD_ECC_FAILED	BIT(21)		/* ex, c */
+#define R1_CC_ERROR		BIT(20)		/* erx, c */
+#define R1_ERROR		BIT(19)		/* erx, c */
+#define R1_UNDERRUN		BIT(18)		/* ex, c */
+#define R1_OVERRUN		BIT(17)		/* ex, c */
+#define R1_CID_CSD_OVERWRITE	BIT(16)		/* erx, c, CID/CSD overwrite */
+#define R1_WP_ERASE_SKIP	BIT(15)		/* sx, c */
+#define R1_CARD_ECC_DISABLED	BIT(14)		/* sx, a */
+#define R1_ERASE_RESET		BIT(13)		/* sr, c */
+#define R1_STATUS(x)		((x) & 0xFFFFE000)
+#define R1_CURRENT_STATE(x)	(((x) & 0x00001E00) >> 9) /* sx, b (4 bits) */
+#define R1_READY_FOR_DATA	BIT(8)		/* sx, a */
+#define R1_SWITCH_ERROR		BIT(7)		/* sx, c */
+
+/*
+ * Aggregate R1 error masks for block reads/writes.
+ *
+ * The expansions are parenthesized so the masks are safe inside larger
+ * expressions: without the outer parentheses an expression such as
+ * "status & R1_BLOCK_READ_MASK" would bind "&" to the first OR operand
+ * only, silently testing the wrong bits.
+ */
+#define R1_BLOCK_READ_MASK	(R1_OUT_OF_RANGE |	\
+				 R1_ADDRESS_ERROR |	\
+				 R1_BLOCK_LEN_ERROR |	\
+				 R1_CARD_IS_LOCKED |	\
+				 R1_COM_CRC_ERROR |	\
+				 R1_ILLEGAL_COMMAND |	\
+				 R1_CARD_ECC_FAILED |	\
+				 R1_CC_ERROR |		\
+				 R1_ERROR)
+#define R1_BLOCK_WRITE_MASK	(R1_OUT_OF_RANGE |	\
+				 R1_ADDRESS_ERROR |	\
+				 R1_BLOCK_LEN_ERROR |	\
+				 R1_WP_VIOLATION |	\
+				 R1_CARD_IS_LOCKED |	\
+				 R1_COM_CRC_ERROR |	\
+				 R1_ILLEGAL_COMMAND |	\
+				 R1_CARD_ECC_FAILED |	\
+				 R1_CC_ERROR |		\
+				 R1_ERROR |		\
+				 R1_UNDERRUN |		\
+				 R1_OVERRUN)
+
+#endif /* __OCTEONTX_HSMMC_H__ */
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 5e0a393..06d39df 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -43,6 +43,35 @@
 	help
 	  Enable PCI memory and I/O space resource allocation and assignment.
 
+config PCI_REGION_MULTI_ENTRY
+	bool "Enable Multiple entries of region type MEMORY in ranges for PCI"
+	depends on PCI || DM_PCI
+	default n
+	help
+	  Enable PCI memory regions to be of multiple entry. Multiple entry
+	  here refers to allow more than one count of address ranges for MEMORY
+	  region type. This helps to add support for SoCs like OcteonTX/TX2
+	  where every peripheral is on the PCI bus.
+
+config PCI_SRIOV
+	bool "Enable Single Root I/O Virtualization support for PCI"
+	depends on PCI || DM_PCI
+	default n
+	help
+	  Say Y here if you want to enable PCI Single Root I/O Virtualization
+	  capability support. This helps to enumerate Virtual Function devices
+	  if available on a PCI Physical Function device and probe for
+	  applicable drivers.
+
+config PCI_ARID
+	bool "Enable Alternate Routing-ID support for PCI"
+	depends on PCI || DM_PCI
+	default n
+	help
+	  Say Y here if you want to enable Alternate Routing-ID capability
+	  support on PCI devices. This helps to skip some devices in BDF
+	  scan that are not present.
+
 config PCIE_ECAM_GENERIC
 	bool "Generic ECAM-based PCI host controller support"
 	default n
@@ -120,6 +149,14 @@
 	  with a total of 5 lanes. Some boards require this for Ethernet
 	  support to work (e.g. beaver, jetson-tk1).
 
+config PCI_OCTEONTX
+	bool "OcteonTX PCI support"
+	depends on (ARCH_OCTEONTX || ARCH_OCTEONTX2)
+	help
+	  Enable support for the OcteonTX/TX2 SoC family ECAM/PEM controllers.
+	  These controllers provide PCI configuration access to all on-board
+	  peripherals, so it should only be disabled for testing purposes.
+
 config PCI_XILINX
 	bool "Xilinx AXI Bridge for PCI Express"
 	depends on DM_PCI
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 9db90fb..8b4d49a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -49,3 +49,4 @@
 obj-$(CONFIG_PCIE_MEDIATEK) += pcie_mediatek.o
 obj-$(CONFIG_PCIE_ROCKCHIP) += pcie_rockchip.o
 obj-$(CONFIG_PCI_BRCMSTB) += pcie_brcmstb.o
+obj-$(CONFIG_PCI_OCTEONTX) += pci_octeontx.o
diff --git a/drivers/pci/pci-uclass.c b/drivers/pci/pci-uclass.c
index 40cc9f1..d8a6647 100644
--- a/drivers/pci/pci-uclass.c
+++ b/drivers/pci/pci-uclass.c
@@ -539,7 +539,8 @@
 		int ret;
 
 		debug("%s: device %s\n", __func__, dev->name);
-		if (dev_read_bool(dev, "pci,no-autoconfig"))
+		if (dev_of_valid(dev) &&
+		    dev_read_bool(dev, "pci,no-autoconfig"))
 			continue;
 		ret = dm_pciauto_config_device(dev);
 		if (ret < 0)
@@ -620,10 +621,19 @@
 {
 	int sub_bus;
 	int ret;
+	int ea_pos;
+	u8 reg;
 
 	debug("%s\n", __func__);
 
-	sub_bus = pci_get_bus_max() + 1;
+	ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
+	if (ea_pos) {
+		dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8),
+				    &reg);
+		sub_bus = reg;
+	} else {
+		sub_bus = pci_get_bus_max() + 1;
+	}
 	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
 	dm_pciauto_prescan_setup_bridge(bus, sub_bus);
 
@@ -633,12 +643,15 @@
 		      ret);
 		return ret;
 	}
-	if (sub_bus != bus->seq) {
-		printf("%s: Internal error, bus '%s' got seq %d, expected %d\n",
-		       __func__, bus->name, bus->seq, sub_bus);
-		return -EPIPE;
+
+	if (!ea_pos) {
+		if (sub_bus != bus->seq) {
+			debug("%s: Internal error, bus '%s' got seq %d, expected %d\n",
+			      __func__, bus->name, bus->seq, sub_bus);
+			return -EPIPE;
+		}
+		sub_bus = pci_get_bus_max();
 	}
-	sub_bus = pci_get_bus_max();
 	dm_pciauto_postscan_setup_bridge(bus, sub_bus);
 
 	return sub_bus;
@@ -696,7 +709,8 @@
 	      find_id->vendor, find_id->device);
 
 	/* Determine optional OF node */
-	pci_dev_find_ofnode(parent, bdf, &node);
+	if (ofnode_valid(dev_ofnode(parent)))
+		pci_dev_find_ofnode(parent, bdf, &node);
 
 	if (ofnode_valid(node) && !ofnode_is_available(node)) {
 		debug("%s: Ignoring disabled device\n", __func__);
@@ -785,6 +799,7 @@
 	ulong header_type;
 	pci_dev_t bdf, end;
 	bool found_multi;
+	int ari_off;
 	int ret;
 
 	found_multi = false;
@@ -858,6 +873,31 @@
 		pplat->vendor = vendor;
 		pplat->device = device;
 		pplat->class = class;
+
+		if (IS_ENABLED(CONFIG_PCI_ARID)) {
+			ari_off = dm_pci_find_ext_capability(dev,
+							     PCI_EXT_CAP_ID_ARI);
+			if (ari_off) {
+				u16 ari_cap;
+
+				/*
+				 * Read Next Function number in ARI Cap
+				 * Register
+				 */
+				dm_pci_read_config16(dev, ari_off + 4,
+						     &ari_cap);
+				/*
+				 * Update next scan on this function number,
+				 * subtract 1 in BDF to satisfy loop increment.
+				 */
+				if (ari_cap & 0xff00) {
+					bdf = PCI_BDF(PCI_BUS(bdf),
+						      PCI_DEV(ari_cap),
+						      PCI_FUNC(ari_cap));
+					bdf = bdf - 0x100;
+				}
+			}
+		}
 	}
 
 	return 0;
@@ -871,8 +911,10 @@
 			   ofnode node)
 {
 	int pci_addr_cells, addr_cells, size_cells;
+	struct bd_info *bd = gd->bd;
 	int cells_per_record;
 	const u32 *prop;
+	int max_regions;
 	int len;
 	int i;
 
@@ -892,7 +934,13 @@
 	hose->region_count = 0;
 	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
 	      cells_per_record);
-	for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) {
+
+	/* Dynamically allocate the regions array */
+	max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;
+	hose->regions = (struct pci_region *)
+		calloc(1, max_regions * sizeof(struct pci_region));
+
+	for (i = 0; i < max_regions; i++, len -= cells_per_record) {
 		u64 pci_addr, addr, size;
 		int space_code;
 		u32 flags;
@@ -927,10 +975,13 @@
 		}
 
 		pos = -1;
-		for (i = 0; i < hose->region_count; i++) {
-			if (hose->regions[i].flags == type)
-				pos = i;
+		if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) {
+			for (i = 0; i < hose->region_count; i++) {
+				if (hose->regions[i].flags == type)
+					pos = i;
+			}
 		}
+
 		if (pos == -1)
 			pos = hose->region_count++;
 		debug(" - type=%d, pos=%d\n", type, pos);
@@ -938,18 +989,10 @@
 	}
 
 	/* Add a region for our local memory */
-#ifdef CONFIG_NR_DRAM_BANKS
-	struct bd_info *bd = gd->bd;
-
 	if (!bd)
 		return;
 
 	for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
-		if (hose->region_count == MAX_PCI_REGIONS) {
-			pr_err("maximum number of regions parsed, aborting\n");
-			break;
-		}
-
 		if (bd->bi_dram[i].size) {
 			pci_set_region(hose->regions + hose->region_count++,
 				       bd->bi_dram[i].start,
@@ -958,19 +1001,6 @@
 				       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
 		}
 	}
-#else
-	phys_addr_t base = 0, size;
-
-	size = gd->ram_size;
-#ifdef CONFIG_SYS_SDRAM_BASE
-	base = CONFIG_SYS_SDRAM_BASE;
-#endif
-	if (gd->pci_ram_top && gd->pci_ram_top < base + size)
-		size = gd->pci_ram_top - base;
-	if (size)
-		pci_set_region(hose->regions + hose->region_count++, base,
-			base, size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
-#endif
 
 	return;
 }
@@ -996,8 +1026,11 @@
 	hose->bus = bus;
 	hose->first_busno = bus->seq;
 	hose->last_busno = bus->seq;
-	hose->skip_auto_config_until_reloc =
-		dev_read_bool(bus, "u-boot,skip-auto-config-until-reloc");
+	if (dev_of_valid(bus)) {
+		hose->skip_auto_config_until_reloc =
+			dev_read_bool(bus,
+				      "u-boot,skip-auto-config-until-reloc");
+	}
 
 	return 0;
 }
@@ -1406,14 +1439,55 @@
 	return bus_addr;
 }
 
+/**
+ * dm_pci_map_ea_virt() - Compute a VF's offset within its PF's EA BAR
+ * @dev:    Physical Function device whose EA capability is parsed
+ * @ea_off: Config-space offset of the Enhanced Allocation capability
+ * @pdata:  Child platdata; provides is_virtfn and virtid
+ *
+ * Returns 0 for a physical function.  For a virtual function, reads the
+ * EA entry's MaxOffset field (BAR size - 1) and returns the linear
+ * offset (virtid - 1) * size to be added to the PF's BAR base.
+ */
+static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off,
+				      struct pci_child_platdata *pdata)
+{
+	phys_addr_t addr = 0;
+
+	/*
+	 * In the case of a Virtual Function device using BAR
+	 * base and size, add offset for VFn BAR(1, 2, 3...n)
+	 */
+	if (pdata->is_virtfn) {
+		size_t sz;
+		u32 ea_entry;
+
+		/* MaxOffset, 1st DW */
+		dm_pci_read_config32(dev, ea_off + 8, &ea_entry);
+		sz = ea_entry & PCI_EA_FIELD_MASK;
+		/* Fill up lower 2 bits */
+		sz |= (~PCI_EA_FIELD_MASK);
+
+		if (ea_entry & PCI_EA_IS_64) {
+			/* MaxOffset 2nd DW */
+			/* NOTE(review): sz is size_t; the high DW would be
+			 * truncated on a 32-bit build - confirm 64-bit only
+			 */
+			dm_pci_read_config32(dev, ea_off + 16, &ea_entry);
+			sz |= ((u64)ea_entry) << 32;
+		}
+
+		addr = (pdata->virtid - 1) * (sz + 1);
+	}
+
+	return addr;
+}
+
 static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags,
-			       int ea_off)
+			       int ea_off, struct pci_child_platdata *pdata)
 {
 	int ea_cnt, i, entry_size;
 	int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2;
 	u32 ea_entry;
 	phys_addr_t addr;
 
+	if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
+		/*
+		 * In the case of a Virtual Function device, device is
+		 * Physical function, so pdata will point to required VF
+		 * specific data.
+		 */
+		if (pdata->is_virtfn)
+			bar_id += PCI_EA_BEI_VF_BAR0;
+	}
+
 	/* EA capability structure header */
 	dm_pci_read_config32(dev, ea_off, &ea_entry);
 	ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK;
@@ -1436,8 +1510,11 @@
 			addr |= ((u64)ea_entry) << 32;
 		}
 
+		if (IS_ENABLED(CONFIG_PCI_SRIOV))
+			addr += dm_pci_map_ea_virt(dev, ea_off, pdata);
+
 		/* size ignored for now */
-		return map_physmem(addr, flags, 0);
+		return map_physmem(addr, 0, flags);
 	}
 
 	return 0;
@@ -1445,29 +1522,42 @@
 
 void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
 {
+	struct pci_child_platdata *pdata = dev_get_parent_platdata(dev);
+	struct udevice *udev = dev;
 	pci_addr_t pci_bus_addr;
 	u32 bar_response;
 	int ea_off;
 
+	if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
+		/*
+		 * In case of Virtual Function devices, use PF udevice
+		 * as EA capability is defined in Physical Function
+		 */
+		if (pdata->is_virtfn)
+			udev = pdata->pfdev;
+	}
+
 	/*
 	 * if the function supports Enhanced Allocation use that instead of
 	 * BARs
+	 * Incase of virtual functions, pdata will help read VF BEI
+	 * and EA entry size.
 	 */
-	ea_off = dm_pci_find_capability(dev, PCI_CAP_ID_EA);
+	ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
 	if (ea_off)
-		return dm_pci_map_ea_bar(dev, bar, flags, ea_off);
+		return dm_pci_map_ea_bar(udev, bar, flags, ea_off, pdata);
 
 	/* read BAR address */
-	dm_pci_read_config32(dev, bar, &bar_response);
+	dm_pci_read_config32(udev, bar, &bar_response);
 	pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);
 
 	/*
 	 * Pass "0" as the length argument to pci_bus_to_virt.  The arg
-	 * isn't actualy used on any platform because u-boot assumes a static
+	 * isn't actually used on any platform because U-Boot assumes a static
 	 * linear mapping.  In the future, this could read the BAR size
 	 * and pass that as the size if needed.
 	 */
-	return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE);
+	return dm_pci_bus_to_virt(udev, pci_bus_addr, flags, 0, MAP_NOCACHE);
 }
 
 static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
@@ -1582,6 +1672,120 @@
 
 	return 0;
 }
+
+#if defined(CONFIG_PCI_SRIOV)
+/**
+ * pci_sriov_init() - Enable SR-IOV and bind drivers for Virtual Functions
+ * @pdev:  Physical Function device carrying the SR-IOV extended capability
+ * @vf_en: Number of VFs requested (clamped to the device's TotalVFs)
+ *
+ * Enables VF enumeration and VF memory space in the SR-IOV control
+ * register, then walks the VF BDF range described by First VF Offset and
+ * VF Stride, binding a driver for each VF and marking its parent platdata
+ * as a virtual function of @pdev.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int pci_sriov_init(struct udevice *pdev, int vf_en)
+{
+	u16 vendor, device;
+	struct udevice *bus;
+	struct udevice *dev;
+	pci_dev_t bdf;
+	u16 ctrl;
+	u16 num_vfs;
+	u16 total_vf;
+	u16 vf_offset;
+	u16 vf_stride;
+	int vf, ret;
+	int pos;
+
+	pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos) {
+		debug("Error: SRIOV capability not found\n");
+		return -ENOENT;
+	}
+
+	dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
+
+	/* Clamp the requested VF count to what the device advertises */
+	dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
+	if (vf_en > total_vf)
+		vf_en = total_vf;
+	dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en);
+
+	/* Enable VFs and their memory space */
+	ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+	dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl);
+
+	dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs);
+	if (num_vfs > vf_en)
+		num_vfs = vf_en;
+
+	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset);
+	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride);
+
+	dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor);
+	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_DID, &device);
+
+	bdf = dm_pci_get_bdf(pdev);
+
+	/*
+	 * Check the return value: on failure 'bus' may be left
+	 * uninitialized, so testing it alone is not safe.
+	 */
+	ret = pci_get_bus(PCI_BUS(bdf), &bus);
+	if (ret)
+		return ret;
+
+	if (!bus)
+		return -ENODEV;
+
+	bdf += PCI_BDF(0, 0, vf_offset);
+
+	for (vf = 0; vf < num_vfs; vf++) {
+		struct pci_child_platdata *pplat;
+		ulong class;
+
+		pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE,
+				    &class, PCI_SIZE_16);
+
+		debug("%s: bus %d/%s: found VF %x:%x\n", __func__,
+		      bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
+
+		/* Find this device in the device tree */
+		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
+
+		if (ret == -ENODEV) {
+			/* Not bound yet - bind a driver by vendor/device ID */
+			struct pci_device_id find_id;
+
+			memset(&find_id, '\0', sizeof(find_id));
+			find_id.vendor = vendor;
+			find_id.device = device;
+			find_id.class = class;
+
+			ret = pci_find_and_bind_driver(bus, &find_id,
+						       bdf, &dev);
+
+			if (ret)
+				return ret;
+		}
+
+		/* Update the platform data */
+		pplat = dev_get_parent_platdata(dev);
+		pplat->devfn = PCI_MASK_BUS(bdf);
+		pplat->vendor = vendor;
+		pplat->device = device;
+		pplat->class = class;
+		pplat->is_virtfn = true;
+		pplat->pfdev = pdev;
+		pplat->virtid = vf * vf_stride + vf_offset;
+
+		/* This line logs the VF device, not the bus */
+		debug("%s: dev %d/%s: found VF %x:%x %x:%x class %lx id %x\n",
+		      __func__, dev->seq, dev->name, PCI_DEV(bdf),
+		      PCI_FUNC(bdf), vendor, device, class, pplat->virtid);
+		bdf += PCI_BDF(0, 0, vf_stride);
+	}
+
+	return 0;
+}
+
+/**
+ * pci_sriov_get_totalvfs() - Read TotalVFs from a PF's SR-IOV capability
+ * @pdev: Physical Function device to query
+ *
+ * Return: TotalVFs on success, -ENOENT if no SR-IOV capability is present
+ */
+int pci_sriov_get_totalvfs(struct udevice *pdev)
+{
+	u16 total_vf;
+	int sriov_off = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+
+	if (!sriov_off) {
+		debug("Error: SRIOV capability not found\n");
+		return -ENOENT;
+	}
+
+	dm_pci_read_config16(pdev, sriov_off + PCI_SRIOV_TOTAL_VF, &total_vf);
+	return total_vf;
+}
+#endif /* SRIOV */
 
 UCLASS_DRIVER(pci) = {
 	.id		= UCLASS_PCI,
diff --git a/drivers/pci/pci_octeontx.c b/drivers/pci/pci_octeontx.c
new file mode 100644
index 0000000..3053754
--- /dev/null
+++ b/drivers/pci/pci_octeontx.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier:    GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <log.h>
+#include <malloc.h>
+#include <pci.h>
+
+#include <asm/io.h>
+
+#include <linux/ioport.h>
+
+/* NOTE(review): gd does not appear to be referenced in this file - confirm */
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * This driver supports multiple types of operations / host bridges / busses:
+ *
+ * OTX_ECAM: Octeon TX & TX2 ECAM (Enhanced Configuration Access Mechanism)
+ *	     Used to access the internal on-chip devices which are connected
+ *	     to internal buses
+ * OTX_PEM:  Octeon TX PEM (PCI Express MAC)
+ *	     Used to access the external (off-chip) PCI devices
+ * OTX2_PEM: Octeon TX2 PEM (PCI Express MAC)
+ *	     Used to access the external (off-chip) PCI devices
+ */
+enum {
+	OTX_ECAM,
+	OTX_PEM,
+	OTX2_PEM,
+};
+
+/**
+ * struct octeontx_pci - Driver private data
+ * @type:	Device type matched via compatible (e.g. OTX_ECAM etc)
+ * @cfg:	Config resource
+ * @bus:	Bus resource
+ */
+struct octeontx_pci {
+	unsigned int type;
+
+	struct resource cfg;
+	struct resource bus;
+};
+
+/**
+ * octeontx_cfg_addr() - Build a config-space address for a B/D/F
+ * @pcie:	Controller private data (provides the config window base)
+ * @bus_offs:	Value added to the bus number before shifting
+ * @shift_offs:	Extra shift applied to the bus/dev/func fields (PEM uses 4)
+ * @bdf:	Bus/device/function to address
+ * @offset:	Register offset within the function's config space
+ *
+ * The fields are kept in 32-bit intermediates to match the hardware
+ * encoding before the window base is added.
+ */
+static uintptr_t octeontx_cfg_addr(struct octeontx_pci *pcie,
+				   int bus_offs, int shift_offs,
+				   pci_dev_t bdf, uint offset)
+{
+	u32 busno = PCI_BUS(bdf) + bus_offs;
+	u32 devno = PCI_DEV(bdf);
+	u32 funcno = PCI_FUNC(bdf);
+	uintptr_t cfg_offs;
+
+	cfg_offs = (busno << (20 + shift_offs)) |
+		   (devno << (15 + shift_offs)) |
+		   (funcno << (12 + shift_offs)) | offset;
+
+	return pcie->cfg.start + cfg_offs;
+}
+
+/*
+ * Read an 8/16/32-bit value from a config-space address.
+ * Returns the value read, or -EINVAL (cast to ulong) for a bad size.
+ */
+static ulong readl_size(uintptr_t addr, enum pci_size_t size)
+{
+	ulong val;
+
+	switch (size) {
+	case PCI_SIZE_8:
+		val = readb(addr);
+		break;
+	case PCI_SIZE_16:
+		val = readw(addr);
+		break;
+	case PCI_SIZE_32:
+		val = readl(addr);
+		break;
+	default:
+		printf("Invalid size\n");
+		return -EINVAL;
+	}
+
+	return val;
+}
+
+/* Write an 8/16/32-bit value to a config-space address */
+static void writel_size(uintptr_t addr, enum pci_size_t size, ulong valuep)
+{
+	switch (size) {
+	case PCI_SIZE_8:
+		writeb(valuep, addr);
+		break;
+	case PCI_SIZE_16:
+		writew(valuep, addr);
+		break;
+	case PCI_SIZE_32:
+		writel(valuep, addr);
+		break;
+	default:
+		printf("Invalid size\n");
+	}
+}
+
+/*
+ * On a PEM link only device 0 exists directly behind the root port
+ * (bus 1); any other device number there is invalid.
+ */
+static bool octeontx_bdf_invalid(pci_dev_t bdf)
+{
+	return PCI_BUS(bdf) == 1 && PCI_DEV(bdf) > 0;
+}
+
+/*
+ * Read config space through the ECAM window.  The bus number is rebased
+ * by (pcie->bus.start - hose->first_busno) so the hardware sees the
+ * controller-relative bus number.
+ */
+static int octeontx_ecam_read_config(const struct udevice *bus, pci_dev_t bdf,
+				     uint offset, ulong *valuep,
+				     enum pci_size_t size)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus);
+	struct pci_controller *hose = dev_get_uclass_priv(bus);
+	uintptr_t address;
+
+	address = octeontx_cfg_addr(pcie, pcie->bus.start - hose->first_busno,
+				    0, bdf, offset);
+	*valuep = readl_size(address, size);
+
+	debug("%02x.%02x.%02x: u%d %x -> %lx\n",
+	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset, *valuep);
+
+	return 0;
+}
+
+/* Write config space through the ECAM window (see read variant above) */
+static int octeontx_ecam_write_config(struct udevice *bus, pci_dev_t bdf,
+				      uint offset, ulong value,
+				      enum pci_size_t size)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus);
+	struct pci_controller *hose = dev_get_uclass_priv(bus);
+	uintptr_t address;
+
+	address = octeontx_cfg_addr(pcie, pcie->bus.start - hose->first_busno,
+				    0, bdf, offset);
+	writel_size(address, size, value);
+
+	debug("%02x.%02x.%02x: u%d %x <- %lx\n",
+	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset, value);
+
+	return 0;
+}
+
+/*
+ * Read config space through an Octeon TX PEM (external PCIe).
+ *
+ * *valuep is preset to all-ones so that invalid BDFs read back as "no
+ * device".  For type-1 headers, the primary/secondary/subordinate bus
+ * registers are translated back into the host's bus numbering by
+ * subtracting bus_offs (the write path adds the same amount).
+ */
+static int octeontx_pem_read_config(const struct udevice *bus, pci_dev_t bdf,
+				    uint offset, ulong *valuep,
+				    enum pci_size_t size)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus);
+	struct pci_controller *hose = dev_get_uclass_priv(bus);
+	uintptr_t address;
+	u8 hdrtype;
+	u8 pri_bus = pcie->bus.start + 1 - hose->first_busno;
+	u32 bus_offs = (pri_bus << 16) | (pri_bus << 8) | (pri_bus << 0);
+
+	/* PEM config space uses an extra shift of 4 in the address encoding */
+	address = octeontx_cfg_addr(pcie, 1 - hose->first_busno, 4,
+				    bdf, 0);
+
+	*valuep = pci_conv_32_to_size(~0UL, offset, size);
+
+	if (octeontx_bdf_invalid(bdf))
+		return -EPERM;
+
+	*valuep = readl_size(address + offset, size);
+
+	hdrtype = readb(address + PCI_HEADER_TYPE);
+	if (hdrtype == PCI_HEADER_TYPE_BRIDGE &&
+	    offset >= PCI_PRIMARY_BUS &&
+	    offset <= PCI_SUBORDINATE_BUS &&
+	    *valuep != pci_conv_32_to_size(~0UL, offset, size))
+		*valuep -= pci_conv_32_to_size(bus_offs, offset, size);
+
+	return 0;
+}
+
+/*
+ * Write config space through an Octeon TX PEM.  Bridge bus-number
+ * registers are translated into the controller's numbering by adding
+ * bus_offs (inverse of the read path).
+ */
+static int octeontx_pem_write_config(struct udevice *bus, pci_dev_t bdf,
+				     uint offset, ulong value,
+				     enum pci_size_t size)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus);
+	struct pci_controller *hose = dev_get_uclass_priv(bus);
+	uintptr_t address;
+	u8 hdrtype;
+	u8 pri_bus = pcie->bus.start + 1 - hose->first_busno;
+	u32 bus_offs = (pri_bus << 16) | (pri_bus << 8) | (pri_bus << 0);
+
+	address = octeontx_cfg_addr(pcie, 1 - hose->first_busno, 4, bdf, 0);
+
+	hdrtype = readb(address + PCI_HEADER_TYPE);
+	if (hdrtype == PCI_HEADER_TYPE_BRIDGE &&
+	    offset >= PCI_PRIMARY_BUS &&
+	    offset <= PCI_SUBORDINATE_BUS &&
+	    value != pci_conv_32_to_size(~0UL, offset, size))
+		value +=  pci_conv_32_to_size(bus_offs, offset, size);
+
+	if (octeontx_bdf_invalid(bdf))
+		return -EPERM;
+
+	writel_size(address + offset, size, value);
+
+	debug("%02x.%02x.%02x: u%d %x (%lx) <- %lx\n",
+	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset,
+	      address, value);
+
+	return 0;
+}
+
+/*
+ * Read config space through an Octeon TX2 PEM.  Unlike TX, no extra
+ * address shift and no bridge bus-number translation is needed.
+ * *valuep is preset to all-ones so invalid BDFs read as "no device".
+ */
+static int octeontx2_pem_read_config(const struct udevice *bus, pci_dev_t bdf,
+				     uint offset, ulong *valuep,
+				     enum pci_size_t size)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus);
+	struct pci_controller *hose = dev_get_uclass_priv(bus);
+	uintptr_t address;
+
+	address = octeontx_cfg_addr(pcie, 1 - hose->first_busno, 0,
+				    bdf, 0);
+
+	*valuep = pci_conv_32_to_size(~0UL, offset, size);
+
+	if (octeontx_bdf_invalid(bdf))
+		return -EPERM;
+
+	*valuep = readl_size(address + offset, size);
+
+	debug("%02x.%02x.%02x: u%d %x (%lx) -> %lx\n",
+	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset,
+	      address, *valuep);
+
+	return 0;
+}
+
+/* Write config space through an Octeon TX2 PEM (see read variant above) */
+static int octeontx2_pem_write_config(struct udevice *bus, pci_dev_t bdf,
+				      uint offset, ulong value,
+				      enum pci_size_t size)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus);
+	struct pci_controller *hose = dev_get_uclass_priv(bus);
+	uintptr_t address;
+
+	address = octeontx_cfg_addr(pcie, 1 - hose->first_busno, 0,
+				    bdf, 0);
+
+	if (octeontx_bdf_invalid(bdf))
+		return -EPERM;
+
+	writel_size(address + offset, size, value);
+
+	debug("%02x.%02x.%02x: u%d %x (%lx) <- %lx\n",
+	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf), size, offset,
+	      address, value);
+
+	return 0;
+}
+
+/* Dispatch a config-space read to the matching bridge implementation */
+int pci_octeontx_read_config(const struct udevice *bus, pci_dev_t bdf,
+			     uint offset, ulong *valuep,
+			     enum pci_size_t size)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus);
+
+	if (pcie->type == OTX_ECAM)
+		return octeontx_ecam_read_config(bus, bdf, offset, valuep,
+						 size);
+	if (pcie->type == OTX_PEM)
+		return octeontx_pem_read_config(bus, bdf, offset, valuep,
+						size);
+	if (pcie->type == OTX2_PEM)
+		return octeontx2_pem_read_config(bus, bdf, offset, valuep,
+						 size);
+
+	return -EIO;
+}
+
+/* Dispatch a config-space write to the matching bridge implementation */
+int pci_octeontx_write_config(struct udevice *bus, pci_dev_t bdf,
+			      uint offset, ulong value,
+			      enum pci_size_t size)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(bus);
+
+	if (pcie->type == OTX_ECAM)
+		return octeontx_ecam_write_config(bus, bdf, offset, value,
+						  size);
+	if (pcie->type == OTX_PEM)
+		return octeontx_pem_write_config(bus, bdf, offset, value,
+						 size);
+	if (pcie->type == OTX2_PEM)
+		return octeontx2_pem_write_config(bus, bdf, offset, value,
+						  size);
+
+	return -EIO;
+}
+
+/* Nothing to extract from the DT here; resources are read in probe */
+static int pci_octeontx_ofdata_to_platdata(struct udevice *dev)
+{
+	return 0;
+}
+
+/**
+ * pci_octeontx_probe() - Cache controller type and DT resources
+ * @dev: Controller device
+ *
+ * Stores the driver-data type (ECAM/PEM flavour), the config-space
+ * resource and the bus-range for later config-space accesses.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int pci_octeontx_probe(struct udevice *dev)
+{
+	struct octeontx_pci *pcie = (struct octeontx_pci *)dev_get_priv(dev);
+	int err;
+
+	pcie->type = dev_get_driver_data(dev);
+
+	err = dev_read_resource(dev, 0, &pcie->cfg);
+	if (err) {
+		debug("Error reading resource: %s\n", fdt_strerror(err));
+		return err;
+	}
+
+	err = dev_read_pci_bus_range(dev, &pcie->bus);
+	if (err) {
+		/* Distinct message so the two failure paths are separable */
+		debug("Error reading bus-range: %s\n", fdt_strerror(err));
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct dm_pci_ops pci_octeontx_ops = {
+	.read_config	= pci_octeontx_read_config,
+	.write_config	= pci_octeontx_write_config,
+};
+
+/*
+ * NOTE(review): this also claims the generic "pci-host-ecam-generic"
+ * compatible - confirm there is no conflict when the generic ECAM
+ * driver is enabled in the same build.
+ */
+static const struct udevice_id pci_octeontx_ids[] = {
+	{ .compatible = "cavium,pci-host-thunder-ecam", .data = OTX_ECAM },
+	{ .compatible = "cavium,pci-host-octeontx-ecam", .data = OTX_ECAM },
+	{ .compatible = "pci-host-ecam-generic", .data = OTX_ECAM },
+	{ .compatible = "cavium,pci-host-thunder-pem", .data = OTX_PEM },
+	{ .compatible = "marvell,pci-host-octeontx2-pem", .data = OTX2_PEM },
+	{ }
+};
+
+U_BOOT_DRIVER(pci_octeontx) = {
+	.name	= "pci_octeontx",
+	.id	= UCLASS_PCI,
+	.of_match = pci_octeontx_ids,
+	.ops	= &pci_octeontx_ops,
+	.ofdata_to_platdata = pci_octeontx_ofdata_to_platdata,
+	.probe	= pci_octeontx_probe,
+	.priv_auto_alloc_size = sizeof(struct octeontx_pci),
+	.flags = DM_FLAG_PRE_RELOC,
+};
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0ebf116..210d9f8 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -139,6 +139,16 @@
 	  The watchdog timer is stopped when initialized.
 	  It performs full SoC reset.
 
+config WDT_OCTEONTX
+	bool "OcteonTX core watchdog support"
+	depends on WDT && (ARCH_OCTEONTX || ARCH_OCTEONTX2)
+	default y
+	imply WATCHDOG
+	help
+	  This enables the OcteonTX watchdog driver, which supports the
+	  watchdog found on OcteonTX/TX2 chipsets and uses the driver
+	  model. Only watchdog reset is supported.
+
 config WDT_OMAP3
 	bool "TI OMAP watchdog timer support"
 	depends on WDT && ARCH_OMAP2PLUS
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 111e258..01b8231 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -26,6 +26,7 @@
 obj-$(CONFIG_WDT_MPC8xx) += mpc8xx_wdt.o
 obj-$(CONFIG_WDT_MT7621) += mt7621_wdt.o
 obj-$(CONFIG_WDT_MTK) += mtk_wdt.o
+obj-$(CONFIG_WDT_OCTEONTX) += octeontx_wdt.o
 obj-$(CONFIG_WDT_OMAP3) += omap_wdt.o
 obj-$(CONFIG_WDT_SBSA) += sbsa_gwdt.o
 obj-$(CONFIG_WDT_K3_RTI) += rti_wdt.o
diff --git a/drivers/watchdog/octeontx_wdt.c b/drivers/watchdog/octeontx_wdt.c
new file mode 100644
index 0000000..1e0670e
--- /dev/null
+++ b/drivers/watchdog/octeontx_wdt.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#include <dm.h>
+#include <errno.h>
+#include <wdt.h>
+#include <asm/io.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* Offset of the core 0 poke register within the watchdog region */
+#define CORE0_POKE_OFFSET	0x50000
+/* Mask of the offset bits replaced when rebasing the DT address */
+#define CORE0_POKE_OFFSET_MASK	0xfffffULL
+
+struct octeontx_wdt {
+	void __iomem *reg;	/* core 0 poke register (set up in probe) */
+};
+
+/*
+ * Service (kick) the watchdog by writing all-ones to the core 0 poke
+ * register computed in probe.
+ */
+static int octeontx_wdt_reset(struct udevice *dev)
+{
+	struct octeontx_wdt *priv = dev_get_priv(dev);
+
+	writeq(~0ULL, priv->reg);
+
+	return 0;
+}
+
+/*
+ * Map the watchdog registers and derive the core 0 poke register address
+ * from the DT-provided base.
+ */
+static int octeontx_wdt_probe(struct udevice *dev)
+{
+	struct octeontx_wdt *priv = dev_get_priv(dev);
+
+	priv->reg = dev_remap_addr(dev);
+	if (!priv->reg)
+		return -EINVAL;
+
+	/*
+	 * Save the core 0 poke register address in reg (it's not 0xa0000 as
+	 * extracted from the DT but 0x50000 instead)
+	 */
+	priv->reg = (void __iomem *)(((u64)priv->reg &
+				      ~CORE0_POKE_OFFSET_MASK) |
+				     CORE0_POKE_OFFSET);
+
+	return 0;
+}
+
+/* Only watchdog servicing (reset) is implemented; no start/stop/expire */
+static const struct wdt_ops octeontx_wdt_ops = {
+	.reset = octeontx_wdt_reset,
+};
+
+/*
+ * NOTE(review): matches the generic "arm,sbsa-gwdt" compatible; if the
+ * generic SBSA watchdog driver is also enabled both may bind - confirm
+ * the intended driver selection.
+ */
+static const struct udevice_id octeontx_wdt_ids[] = {
+	{ .compatible = "arm,sbsa-gwdt" },
+	{}
+};
+
+U_BOOT_DRIVER(wdt_octeontx) = {
+	.name = "wdt_octeontx",
+	.id = UCLASS_WDT,
+	.of_match = octeontx_wdt_ids,
+	.ops = &octeontx_wdt_ops,
+	.priv_auto_alloc_size = sizeof(struct octeontx_wdt),
+	.probe = octeontx_wdt_probe,
+};
diff --git a/include/configs/octeontx2_common.h b/include/configs/octeontx2_common.h
new file mode 100644
index 0000000..7c585ad
--- /dev/null
+++ b/include/configs/octeontx2_common.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ * https://spdx.org/licenses
+ */
+
+#ifndef __OCTEONTX2_COMMON_H__
+#define __OCTEONTX2_COMMON_H__
+
+#define CONFIG_SUPPORT_RAW_INITRD
+
+/** Maximum size of image supported for bootm (and bootable FIT images) */
+#define CONFIG_SYS_BOOTM_LEN		(256 << 20)
+
+/** Memory base address */
+#define CONFIG_SYS_SDRAM_BASE		CONFIG_SYS_TEXT_BASE
+
+/** Stack starting address */
+#define CONFIG_SYS_INIT_SP_ADDR		(CONFIG_SYS_SDRAM_BASE + 0xffff0)
+
+/** Heap size for U-Boot */
+#define CONFIG_SYS_MALLOC_LEN		(CONFIG_ENV_SIZE + 64 * 1024 * 1024)
+
+#define CONFIG_SYS_LOAD_ADDR		CONFIG_SYS_SDRAM_BASE
+
+#define CONFIG_LAST_STAGE_INIT
+
+/* Allow environment variable to be overwritten */
+#define CONFIG_ENV_OVERWRITE
+
+/** Reduce hashes printed out */
+#define CONFIG_TFTP_TSIZE
+
+/* Autoboot options */
+#define CONFIG_RESET_TO_RETRY
+#define CONFIG_BOOT_RETRY_TIME		-1
+#define CONFIG_BOOT_RETRY_MIN		30
+
+/* BOOTP options */
+#define CONFIG_BOOTP_BOOTFILESIZE
+
+/** Extra environment settings */
+#define CONFIG_EXTRA_ENV_SETTINGS	\
+					"loadaddr=20080000\0"	\
+					"ethrotate=yes\0"	\
+					"autoload=0\0"
+
+/** Environment defines */
+#if defined(CONFIG_ENV_IS_IN_MMC)
+#define CONFIG_SYS_MMC_ENV_DEV		0
+#endif
+
+/* Monitor Command Prompt */
+#define CONFIG_SYS_CBSIZE		1024	/** Console I/O Buffer Size */
+#define CONFIG_SYS_BARGSIZE		CONFIG_SYS_CBSIZE
+
+#define CONFIG_SYS_MAXARGS		64	/** max command args */
+
+#define CONFIG_SYS_MMC_MAX_BLK_COUNT	8192
+
+#undef CONFIG_SYS_PROMPT
+#define CONFIG_SYS_PROMPT		env_get("prompt")
+
+#if defined(CONFIG_MMC_OCTEONTX)
+#define MMC_SUPPORTS_TUNING
+/** EMMC specific defines */
+#define CONFIG_SUPPORT_EMMC_BOOT
+#define CONFIG_SUPPORT_EMMC_RPMB
+#endif
+
+#endif /* __OCTEONTX2_COMMON_H__ */
diff --git a/include/configs/octeontx_common.h b/include/configs/octeontx_common.h
new file mode 100644
index 0000000..810b2bd
--- /dev/null
+++ b/include/configs/octeontx_common.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * https://spdx.org/licenses
+ */
+
+#ifndef __OCTEONTX_COMMON_H__
+#define __OCTEONTX_COMMON_H__
+
+#define CONFIG_SUPPORT_RAW_INITRD
+
+/** Maximum size of image supported for bootm (and bootable FIT images) */
+#define CONFIG_SYS_BOOTM_LEN		(256 << 20)
+
+/** Memory base address */
+#define CONFIG_SYS_SDRAM_BASE		CONFIG_SYS_TEXT_BASE
+
+/** Stack starting address */
+#define CONFIG_SYS_INIT_SP_ADDR		(CONFIG_SYS_SDRAM_BASE + 0xffff0)
+
+/** Heap size for U-Boot */
+#define CONFIG_SYS_MALLOC_LEN		(CONFIG_ENV_SIZE + 64 * 1024 * 1024)
+
+#define CONFIG_SYS_LOAD_ADDR		CONFIG_SYS_SDRAM_BASE
+
+/* Allow environment variable to be overwritten */
+#define CONFIG_ENV_OVERWRITE
+
+/** Reduce hashes printed out */
+#define CONFIG_TFTP_TSIZE
+
+/* Autoboot options */
+#define CONFIG_RESET_TO_RETRY
+#define CONFIG_BOOT_RETRY_TIME		-1
+#define CONFIG_BOOT_RETRY_MIN		30
+
+/* BOOTP options */
+#define CONFIG_BOOTP_BOOTFILESIZE
+
+/* AHCI support Definitions */
+#ifdef CONFIG_DM_SCSI
+/** Enable 48-bit SATA addressing */
+# define CONFIG_LBA48
+/** Enable 64-bit addressing */
+# define CONFIG_SYS_64BIT_LBA
+#endif
+
+/***** SPI Defines *********/
+#ifdef CONFIG_DM_SPI_FLASH
+# define CONFIG_SF_DEFAULT_BUS	0
+# define CONFIG_SF_DEFAULT_CS	0
+#endif
+
+/** Extra environment settings */
+#define CONFIG_EXTRA_ENV_SETTINGS	\
+					"loadaddr=20080000\0"	\
+					"autoload=0\0"
+
+/** Environment defines */
+#if defined(CONFIG_ENV_IS_IN_MMC)
+#define CONFIG_SYS_MMC_ENV_DEV		0
+#endif
+
+/* Monitor Command Prompt */
+#define CONFIG_SYS_CBSIZE		1024	/** Console I/O Buffer Size */
+#define CONFIG_SYS_BARGSIZE		CONFIG_SYS_CBSIZE
+
+#define CONFIG_SYS_MAXARGS		64	/** max command args */
+
+#define CONFIG_SYS_MMC_MAX_BLK_COUNT	8192
+
+#undef CONFIG_SYS_PROMPT
+#define CONFIG_SYS_PROMPT		env_get("prompt")
+
+/** EMMC specific defines */
+#if defined(CONFIG_MMC_OCTEONTX)
+#define CONFIG_SUPPORT_EMMC_BOOT
+#define CONFIG_SUPPORT_EMMC_RPMB
+#endif
+
+#if defined(CONFIG_NAND_OCTEONTX)
+/*#define CONFIG_MTD_CONCAT */
+#define CONFIG_SYS_MAX_NAND_DEVICE 8
+#define CONFIG_SYS_NAND_ONFI_DETECTION
+#endif
+
+#endif /* __OCTEONTX_COMMON_H__ */
diff --git a/include/dm/read.h b/include/dm/read.h
index 0a7aacd..67db94a 100644
--- a/include/dm/read.h
+++ b/include/dm/read.h
@@ -680,6 +680,18 @@
  */
 int dev_get_child_count(const struct udevice *dev);
 
+/**
+ * dev_read_pci_bus_range - Read PCI bus-range resource
+ *
+ * Look at the "bus-range" property of a device node and return the PCI bus
+ * range for this node.
+ *
+ * @dev: device to examine
+ * @res: returns the resource holding the first and last bus number
+ * @return 0 if ok, negative on error
+ */
+int dev_read_pci_bus_range(const struct udevice *dev, struct resource *res);
+
 #else /* CONFIG_DM_DEV_READ_INLINE is enabled */
 
 static inline int dev_read_u32(const struct udevice *dev,
diff --git a/include/fdtdec.h b/include/fdtdec.h
index bc79389..152eb07 100644
--- a/include/fdtdec.h
+++ b/include/fdtdec.h
@@ -445,6 +445,19 @@
 			 u32 *bar);
 
 /**
+ * Look at the "bus-range" property of a device node and return the PCI bus
+ * range for this node.
+ * The property must hold two u32 cells: the first and last bus number.
+ * @param blob		FDT blob
+ * @param node		node to examine
+ * @param res		the resource structure to return the bus range
+ * @return 0 if ok, negative on error
+ */
+
+int fdtdec_get_pci_bus_range(const void *blob, int node,
+			     struct fdt_resource *res);
+
+/**
  * Look up a 32-bit integer property in a node and return it. The property
  * must have at least 4 bytes of data. The value of the first cell is
  * returned.
diff --git a/include/pci.h b/include/pci.h
index 2089db9..1c5b366 100644
--- a/include/pci.h
+++ b/include/pci.h
@@ -465,6 +465,9 @@
 #define PCI_EA_FIRST_ENT	4	/* First EA Entry in List */
 #define  PCI_EA_ES		0x00000007 /* Entry Size */
 #define  PCI_EA_BEI		0x000000f0 /* BAR Equivalent Indicator */
+/* 9-14 map to VF BARs 0-5 respectively */
+#define  PCI_EA_BEI_VF_BAR0	9
+#define  PCI_EA_BEI_VF_BAR5	14
 /* Base, MaxOffset registers */
 /* bit 0 is reserved */
 #define  PCI_EA_IS_64		0x00000002	/* 64-bit field flag */
@@ -493,6 +496,17 @@
 #define PCI_EXP_SLTCAP		20	/* Slot Capabilities */
 #define  PCI_EXP_SLTCAP_PSN	0xfff80000 /* Physical Slot Number */
 #define PCI_EXP_LNKCTL2		48	/* Link Control 2 */
+/* Single Root I/O Virtualization Registers */
+#define PCI_SRIOV_CAP		0x04	/* SR-IOV Capabilities */
+#define PCI_SRIOV_CTRL		0x08	/* SR-IOV Control */
+#define  PCI_SRIOV_CTRL_VFE	0x01	/* VF Enable */
+#define  PCI_SRIOV_CTRL_MSE	0x08	/* VF Memory Space Enable */
+#define PCI_SRIOV_INITIAL_VF	0x0c	/* Initial VFs */
+#define PCI_SRIOV_TOTAL_VF	0x0e	/* Total VFs */
+#define PCI_SRIOV_NUM_VF	0x10	/* Number of VFs */
+#define PCI_SRIOV_VF_OFFSET	0x14	/* First VF Offset */
+#define PCI_SRIOV_VF_STRIDE	0x16	/* Following VF Stride */
+#define PCI_SRIOV_VF_DID	0x1a	/* VF Device ID */
 
 /* Include the ID list */
 
@@ -590,8 +604,6 @@
 extern void pci_cfgfunc_config_device(struct pci_controller* hose, pci_dev_t dev,
 				      struct pci_config_table *);
 
-#define MAX_PCI_REGIONS		7
-
 #define INDIRECT_TYPE_NO_PCIE_LINK	1
 
 /**
@@ -632,7 +644,7 @@
 	 * for PCI controllers and a separate UCLASS (or perhaps
 	 * UCLASS_PCI_GENERIC) is used for bridges.
 	 */
-	struct pci_region regions[MAX_PCI_REGIONS];
+	struct pci_region *regions;
 	int region_count;
 
 	struct pci_config_table *config_table;
@@ -892,12 +904,20 @@
  * @vendor:	PCI vendor ID (see pci_ids.h)
  * @device:	PCI device ID (see pci_ids.h)
  * @class:	PCI class, 3 bytes: (base, sub, prog-if)
+ * @is_virtfn:	True for Virtual Function device
+ * @pfdev:	Handle to Physical Function device
+ * @virtid:	Virtual Function Index
  */
 struct pci_child_platdata {
 	int devfn;
 	unsigned short vendor;
 	unsigned short device;
 	unsigned int class;
+
+	/* Variables for CONFIG_PCI_SRIOV */
+	bool is_virtfn;
+	struct udevice *pfdev;
+	int virtid;
 };
 
 /* PCI bus operations */
@@ -1210,6 +1230,25 @@
 	ulong *valuep,
 	enum pci_size_t size);
 
+#if defined(CONFIG_PCI_SRIOV)
+/**
+ * pci_sriov_init() - Scan Virtual Function devices
+ *
+ * @pdev:	Physical Function udevice handle
+ * @vf_en:	Number of Virtual Function devices to enable
+ * @return 0 on success, -ve on error
+ */
+int pci_sriov_init(struct udevice *pdev, int vf_en);
+
+/**
+ * pci_sriov_get_totalvfs() - Get total available Virtual Function devices
+ *
+ * @pdev:	Physical Function udevice handle
+ * @return count on success, -ve on error
+ */
+int pci_sriov_get_totalvfs(struct udevice *pdev);
+#endif
+
 #ifdef CONFIG_DM_PCI_COMPAT
 /* Compatibility with old naming */
 static inline int pci_write_config_dword(pci_dev_t pcidev, int offset,
diff --git a/lib/fdtdec.c b/lib/fdtdec.c
index 30a1c6a..d3b22ec 100644
--- a/lib/fdtdec.c
+++ b/lib/fdtdec.c
@@ -243,6 +243,23 @@
 
 	return 0;
 }
+
+int fdtdec_get_pci_bus_range(const void *blob, int node,
+			     struct fdt_resource *res)
+{
+	const u32 *values;
+	int len;
+
+	/* "bus-range" must hold at least two cells: first and last bus */
+	values = fdt_getprop(blob, node, "bus-range", &len);
+	if (!values || len < (int)(sizeof(*values) * 2))
+		return -EINVAL;
+
+	res->start = fdt32_to_cpu(*values++);
+	res->end = fdt32_to_cpu(*values);
+
+	return 0;
+}
 #endif
 
 uint64_t fdtdec_get_uint64(const void *blob, int node, const char *prop_name,
diff --git a/test/dm/pci.c b/test/dm/pci.c
index fd66ed7..76490be 100644
--- a/test/dm/pci.c
+++ b/test/dm/pci.c
@@ -354,3 +354,25 @@
 	return 0;
 }
 DM_TEST(dm_test_pci_on_bus, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
+
+/*
+ * Test support for multiple memory regions enabled via
+ * CONFIG_PCI_REGION_MULTI_ENTRY. Without this feature, only the
+ * last region of each type is stored. In this test-case we have
+ * two memory regions, the first at 0x3000.0000 and the second at
+ * 0x3100.0000. With multi-entry support, BAR1 must be allocated
+ * in the first region, i.e. at 0x3000.0000.
+ */
+static int dm_test_pci_region_multi(struct unit_test_state *uts)
+{
+	struct udevice *dev;
+	ulong mem_addr;
+
+	/* Test memory BAR1 on bus#1 */
+	ut_assertok(dm_pci_bus_find_bdf(PCI_BDF(1, 0x08, 0), &dev));
+	mem_addr = dm_pci_read_bar32(dev, 1);
+	ut_asserteq(0x30000000, mem_addr);
+
+	return 0;
+}
+DM_TEST(dm_test_pci_region_multi, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);