armv8: layerscape: Convert to use common MMU framework

Drop the platform code that creates static MMU tables and use the
common framework to create MMU tables on the fly. Tested on LS2080ARDB
with both secure and non-secure RAM scenarios.

Signed-off-by: York Sun <york.sun@nxp.com>
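---
With the common armv8 MMU framework, a platform only describes its
address map as a zero-terminated array of struct mm_region entries and
points the global 'mem_map' at it; the common code in cache_v8.c walks
that list and builds the translation tables via setup_pgtables(). A
minimal sketch of such an entry, using a hypothetical 'example_map'
with made-up addresses (not taken from this patch):

	static struct mm_region example_map[] = {
		{
			/* Identity-map 1GB of DRAM as cacheable memory */
			.virt = 0x80000000UL,
			.phys = 0x80000000UL,
			.size = 0x40000000UL,
			.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
		},
		{},	/* list terminator */
	};
	struct mm_region *mem_map = example_map;

The early/final setup code below then only has to choose a buffer for
the tables (gd->arch.tlb_addr, tlb_fillptr, tlb_size), call
setup_pgtables() and program TTBR/TCR/MAIR through set_ttbr_tcr_mair().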
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
index a397f5d..b6ebedc 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
@@ -26,13 +26,7 @@
 
 DECLARE_GLOBAL_DATA_PTR;
 
-static struct mm_region layerscape_mem_map[] = {
-	{
-		/* List terminator */
-		0,
-	}
-};
-struct mm_region *mem_map = layerscape_mem_map;
+struct mm_region *mem_map = early_map;
 
 void cpu_name(char *name)
 {
@@ -56,234 +50,36 @@
 }
 
 #ifndef CONFIG_SYS_DCACHE_OFF
-static void set_pgtable_section(u64 *page_table, u64 index, u64 section,
-			u64 memory_type, u64 attribute)
-{
-       u64 value;
-
-       value = section | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
-       value |= PMD_ATTRINDX(memory_type);
-       value |= attribute;
-       page_table[index] = value;
-}
-
-static void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
-{
-       u64 value;
-
-       value = (u64)table_addr | PTE_TYPE_TABLE;
-       page_table[index] = value;
-}
-
-/*
- * Set the block entries according to the information of the table.
- */
-static int set_block_entry(const struct sys_mmu_table *list,
-			   struct table_info *table)
-{
-	u64 block_size = 0, block_shift = 0;
-	u64 block_addr, index;
-	int j;
-
-	if (table->entry_size == BLOCK_SIZE_L1) {
-		block_size = BLOCK_SIZE_L1;
-		block_shift = SECTION_SHIFT_L1;
-	} else if (table->entry_size == BLOCK_SIZE_L2) {
-		block_size = BLOCK_SIZE_L2;
-		block_shift = SECTION_SHIFT_L2;
-	} else {
-		return -EINVAL;
-	}
-
-	block_addr = list->phys_addr;
-	index = (list->virt_addr - table->table_base) >> block_shift;
-
-	for (j = 0; j < (list->size >> block_shift); j++) {
-		set_pgtable_section(table->ptr,
-				    index,
-				    block_addr,
-				    list->memory_type,
-				    list->attribute);
-		block_addr += block_size;
-		index++;
-	}
-
-	return 0;
-}
-
-/*
- * Find the corresponding table entry for the list.
- */
-static int find_table(const struct sys_mmu_table *list,
-		      struct table_info *table, u64 *level0_table)
-{
-	u64 index = 0, level = 0;
-	u64 *level_table = level0_table;
-	u64 temp_base = 0, block_size = 0, block_shift = 0;
-
-	while (level < 3) {
-		if (level == 0) {
-			block_size = BLOCK_SIZE_L0;
-			block_shift = SECTION_SHIFT_L0;
-		} else if (level == 1) {
-			block_size = BLOCK_SIZE_L1;
-			block_shift = SECTION_SHIFT_L1;
-		} else if (level == 2) {
-			block_size = BLOCK_SIZE_L2;
-			block_shift = SECTION_SHIFT_L2;
-		}
-
-		index = 0;
-		while (list->virt_addr >= temp_base) {
-			index++;
-			temp_base += block_size;
-		}
-
-		temp_base -= block_size;
-
-		if ((level_table[index - 1] & PTE_TYPE_MASK) ==
-		    PTE_TYPE_TABLE) {
-			level_table = (u64 *)(level_table[index - 1] &
-				      ~PTE_TYPE_MASK);
-			level++;
-			continue;
-		} else {
-			if (level == 0)
-				return -EINVAL;
-
-			if ((list->phys_addr + list->size) >
-			    (temp_base + block_size * NUM_OF_ENTRY))
-				return -EINVAL;
-
-			/*
-			 * Check the address and size of the list member is
-			 * aligned with the block size.
-			 */
-			if (((list->phys_addr & (block_size - 1)) != 0) ||
-			    ((list->size & (block_size - 1)) != 0))
-				return -EINVAL;
-
-			table->ptr = level_table;
-			table->table_base = temp_base -
-					    ((index - 1) << block_shift);
-			table->entry_size = block_size;
-
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
 /*
  * To start MMU before DDR is available, we create MMU table in SRAM.
  * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
  * levels of translation tables here to cover 40-bit address space.
  * We use 4KB granule size, with 40 bits physical address, T0SZ=24
- * Level 0 IA[39], table address @0
- * Level 1 IA[38:30], table address @0x1000, 0x2000
- * Level 2 IA[29:21], table address @0x3000, 0x4000
- * Address above 0x5000 is free for other purpose.
+ * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
+ * Note: the debug print in cache_v8.c is not usable for debugging
+ * these early MMU tables because the UART is not yet available.
  */
 static inline void early_mmu_setup(void)
 {
-	unsigned int el, i;
-	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
-	u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
-	u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
-	u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
-	u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);
-
-	struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};
-
-	/* Invalidate all table entries */
-	memset(level0_table, 0, 0x5000);
+	unsigned int el = current_el();
 
-	/* Fill in the table entries */
-	set_pgtable_table(level0_table, 0, level1_table0);
-	set_pgtable_table(level0_table, 1, level1_table1);
-	set_pgtable_table(level1_table0, 0, level2_table0);
+	/* Global data is already set up, but no allocation is available yet */
+	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
+	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
+	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;
 
-#ifdef CONFIG_FSL_LSCH3
-	set_pgtable_table(level1_table0,
-			  CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1,
-			  level2_table1);
-#elif defined(CONFIG_FSL_LSCH2)
-	set_pgtable_table(level1_table0, 1, level2_table1);
-#endif
-	/* Find the table and fill in the block entries */
-	for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) {
-		if (find_table(&early_mmu_table[i],
-			       &table, level0_table) == 0) {
-			/*
-			 * If find_table() returns error, it cannot be dealt
-			 * with here. Breakpoint can be added for debugging.
-			 */
-			set_block_entry(&early_mmu_table[i], &table);
-			/*
-			 * If set_block_entry() returns error, it cannot be
-			 * dealt with here too.
-			 */
-		}
-	}
-
-	el = current_el();
+	/* Create early page tables */
+	setup_pgtables();
 
-	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR,
+	/* point TTBR to the new table */
+	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
+			  get_tcr(el, NULL, NULL) &
+			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
 			  MEMORY_ATTRIBUTES);
+
 	set_sctlr(get_sctlr() | CR_M);
 }
 
-#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
-/*
- * Called from final mmu setup. The phys_addr is new, non-existing
- * address. A new sub table is created @level2_table_secure to cover
- * size of CONFIG_SYS_MEM_RESERVE_SECURE memory.
- */
-static inline int final_secure_ddr(u64 *level0_table,
-				   u64 *level2_table_secure,
-				   phys_addr_t phys_addr)
-{
-	int ret = -EINVAL;
-	struct table_info table = {};
-	struct sys_mmu_table ddr_entry = {
-		0, 0, BLOCK_SIZE_L1, MT_NORMAL,
-		PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
-	};
-	u64 index;
-
-	/* Need to create a new table */
-	ddr_entry.virt_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1);
-	ddr_entry.phys_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1);
-	ret = find_table(&ddr_entry, &table, level0_table);
-	if (ret)
-		return ret;
-	index = (ddr_entry.virt_addr - table.table_base) >> SECTION_SHIFT_L1;
-	set_pgtable_table(table.ptr, index, level2_table_secure);
-	table.ptr = level2_table_secure;
-	table.table_base = ddr_entry.virt_addr;
-	table.entry_size = BLOCK_SIZE_L2;
-	ret = set_block_entry(&ddr_entry, &table);
-	if (ret) {
-		printf("MMU error: could not fill non-secure ddr block entries\n");
-		return ret;
-	}
-	ddr_entry.virt_addr = phys_addr;
-	ddr_entry.phys_addr = phys_addr;
-	ddr_entry.size = CONFIG_SYS_MEM_RESERVE_SECURE;
-	ddr_entry.attribute = PTE_BLOCK_OUTER_SHARE;
-	ret = find_table(&ddr_entry, &table, level0_table);
-	if (ret) {
-		printf("MMU error: could not find secure ddr table\n");
-		return ret;
-	}
-	ret = set_block_entry(&ddr_entry, &table);
-	if (ret)
-		printf("MMU error: could not set secure ddr block entry\n");
-
-	return ret;
-}
-#endif
-
 /*
  * The final tables look similar to early tables, but different in detail.
  * These tables are in DRAM. Sub tables are added to enable cache for
@@ -291,113 +87,59 @@
  *
  * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
  * OCRAM will be not used for this purpose so gd->arch.secure_ram can't be 0.
- *
- * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB.
- * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB.
- * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB.
- *
- * For LSCH3:
- * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB.
- * For LSCH2:
- * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB.
- * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB.
  */
 static inline void final_mmu_setup(void)
 {
+	u64 tlb_addr_save = gd->arch.tlb_addr;
 	unsigned int el = current_el();
-	unsigned int i;
-	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
-	u64 *level1_table0;
-	u64 *level1_table1;
-	u64 *level2_table0;
-	u64 *level2_table1;
-#ifdef CONFIG_FSL_LSCH2
-	u64 *level2_table2;
-#endif
-	struct table_info table = {NULL, 0, BLOCK_SIZE_L0};
-
 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
-	u64 *level2_table_secure;
-
-	if (el == 3) {
-		/*
-		 * Only use gd->arch.secure_ram if the address is recalculated
-		 * Align to 4KB for MMU table
-		 */
-		if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED)
-			level0_table = (u64 *)(gd->arch.secure_ram & ~0xfff);
-		else
-			printf("MMU warning: gd->arch.secure_ram is not maintained, disabled.\n");
-	}
-#endif
-	level1_table0 = level0_table + 512;
-	level1_table1 = level1_table0 + 512;
-	level2_table0 = level1_table1 + 512;
-	level2_table1 = level2_table0 + 512;
-#ifdef CONFIG_FSL_LSCH2
-	level2_table2 = level2_table1 + 512;
-#endif
-	table.ptr = level0_table;
-
-	/* Invalidate all table entries */
-	memset(level0_table, 0, PGTABLE_SIZE);
-
-	/* Fill in the table entries */
-	set_pgtable_table(level0_table, 0, level1_table0);
-	set_pgtable_table(level0_table, 1, level1_table1);
-	set_pgtable_table(level1_table0, 0, level2_table0);
-#ifdef CONFIG_FSL_LSCH3
-	set_pgtable_table(level1_table0,
-			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
-			  level2_table1);
-#elif defined(CONFIG_FSL_LSCH2)
-	set_pgtable_table(level1_table0, 1, level2_table1);
-	set_pgtable_table(level1_table0,
-			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
-			  level2_table2);
+	int index;
 #endif
 
-	/* Find the table and fill in the block entries */
-	for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) {
-		if (find_table(&final_mmu_table[i],
-			       &table, level0_table) == 0) {
-			if (set_block_entry(&final_mmu_table[i],
-					    &table) != 0) {
-				printf("MMU error: could not set block entry for %p\n",
-				       &final_mmu_table[i]);
-			}
+	mem_map = final_map;
 
-		} else {
-			printf("MMU error: could not find the table for %p\n",
-			       &final_mmu_table[i]);
-		}
-	}
-	/* Set the secure memory to secure in MMU */
 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
-	if (el == 3 && gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
-#ifdef CONFIG_FSL_LSCH3
-		level2_table_secure = level2_table1 + 512;
-#elif defined(CONFIG_FSL_LSCH2)
-		level2_table_secure = level2_table2 + 512;
-#endif
-		if (!final_secure_ddr(level0_table,
-				      level2_table_secure,
-				      gd->arch.secure_ram & ~0x3)) {
+	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
+		if (el == 3) {
+			/*
+			 * Only use gd->arch.secure_ram if the address is
+			 * recalculated. Align to 4KB for the MMU table and
+			 * put the page tables in secure RAM.
+			 */
+			index = ARRAY_SIZE(final_map) - 2;
+			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
+			final_map[index].virt = gd->arch.secure_ram & ~0x3;
+			final_map[index].phys = final_map[index].virt;
+			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
+			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
 			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
-			debug("Now MMU table is in secured memory at 0x%llx\n",
-			      gd->arch.secure_ram & ~0x3);
+			tlb_addr_save = gd->arch.tlb_addr;
 		} else {
-			printf("MMU warning: Failed to secure DDR\n");
+			/* Use allocated (board_f.c) memory for TLB */
+			tlb_addr_save = gd->arch.tlb_allocated;
+			gd->arch.tlb_addr = tlb_addr_save;
 		}
 	}
 #endif
 
+	/* Reset the fill ptr */
+	gd->arch.tlb_fillptr = tlb_addr_save;
+
+	/* Create normal system page tables */
+	setup_pgtables();
+
+	/* Create emergency page tables */
+	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
+	gd->arch.tlb_emerg = gd->arch.tlb_addr;
+	setup_pgtables();
+	gd->arch.tlb_addr = tlb_addr_save;
+
 	/* flush new MMU table */
-	flush_dcache_range((ulong)level0_table,
-			   (ulong)level0_table + gd->arch.tlb_size);
+	flush_dcache_range(gd->arch.tlb_addr,
+			   gd->arch.tlb_addr + gd->arch.tlb_size);
 
 	/* point TTBR to the new table */
-	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL,
+	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
 			  MEMORY_ATTRIBUTES);
 	/*
 	 * MMU is already enabled, just need to invalidate TLB to load the
diff --git a/arch/arm/include/asm/arch-fsl-layerscape/cpu.h b/arch/arm/include/asm/arch-fsl-layerscape/cpu.h
index 197b0eb..5fd5e87 100644
--- a/arch/arm/include/asm/arch-fsl-layerscape/cpu.h
+++ b/arch/arm/include/asm/arch-fsl-layerscape/cpu.h
@@ -19,29 +19,6 @@
 
 #ifndef CONFIG_SYS_DCACHE_OFF
 
-#define SECTION_SHIFT_L0		39UL
-#define SECTION_SHIFT_L1		30UL
-#define SECTION_SHIFT_L2		21UL
-#define BLOCK_SIZE_L0			0x8000000000
-#define BLOCK_SIZE_L1			0x40000000
-#define BLOCK_SIZE_L2			0x200000
-#define NUM_OF_ENTRY			512
-#define TCR_EL2_PS_40BIT		(2 << 16)
-
-#define LAYERSCAPE_VA_BITS		(40)
-#define LAYERSCAPE_TCR		(TCR_TG0_4K		| \
-				TCR_EL2_PS_40BIT	| \
-				TCR_SHARED_NON		| \
-				TCR_ORGN_NC		| \
-				TCR_IRGN_NC		| \
-				TCR_T0SZ(LAYERSCAPE_VA_BITS))
-#define LAYERSCAPE_TCR_FINAL	(TCR_TG0_4K		| \
-				TCR_EL2_PS_40BIT	| \
-				TCR_SHARED_OUTER	| \
-				TCR_ORGN_WBWA		| \
-				TCR_IRGN_WBWA		| \
-				TCR_T0SZ(LAYERSCAPE_VA_BITS))
-
 #ifdef CONFIG_FSL_LSCH3
 #define CONFIG_SYS_FSL_CCSR_BASE	0x00000000
 #define CONFIG_SYS_FSL_CCSR_SIZE	0x10000000
@@ -101,174 +78,261 @@
 #define CONFIG_SYS_FSL_DRAM_SIZE3	0x7800000000	/* 480GB */
 #endif
 
-struct sys_mmu_table {
-	u64 virt_addr;
-	u64 phys_addr;
-	u64 size;
-	u64 memory_type;
-	u64 attribute;
-};
-
-struct table_info {
-	u64 *ptr;
-	u64 table_base;
-	u64 entry_size;
-};
-
-static const struct sys_mmu_table early_mmu_table[] = {
+#define EARLY_PGTABLE_SIZE 0x5000
+static struct mm_region early_map[] = {
 #ifdef CONFIG_FSL_LSCH3
 	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
-	  CONFIG_SYS_FSL_CCSR_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_CCSR_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
-	  CONFIG_SYS_FSL_OCRAM_SIZE, MT_NORMAL, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_OCRAM_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
-	  CONFIG_SYS_FSL_QSPI_SIZE1,  MT_NORMAL, PTE_BLOCK_NON_SHARE},
+	  CONFIG_SYS_FSL_QSPI_SIZE1,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
 	/* For IFC Region #1, only the first 4MB is cache-enabled */
 	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
-	  CONFIG_SYS_FSL_IFC_SIZE1_1, MT_NORMAL, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_IFC_SIZE1_1,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
 	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
 	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
-	  MT_DEVICE_NGNRNE, PTE_BLOCK_NON_SHARE },
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
-	  CONFIG_SYS_FSL_IFC_SIZE1, MT_DEVICE_NGNRNE, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_IFC_SIZE1,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
-	  CONFIG_SYS_FSL_DRAM_SIZE1, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE1,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
 	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
 	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
-	  MT_DEVICE_NGNRNE, PTE_BLOCK_NON_SHARE },
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
-	  CONFIG_SYS_FSL_DCSR_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_DCSR_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
-	  CONFIG_SYS_FSL_DRAM_SIZE2, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE2,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 #elif defined(CONFIG_FSL_LSCH2)
 	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
-	  CONFIG_SYS_FSL_CCSR_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_CCSR_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
-	  CONFIG_SYS_FSL_OCRAM_SIZE, MT_NORMAL, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_OCRAM_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
-	  CONFIG_SYS_FSL_DCSR_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_DCSR_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
-	  CONFIG_SYS_FSL_QSPI_SIZE, MT_DEVICE_NGNRNE, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_QSPI_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
-	  CONFIG_SYS_FSL_IFC_SIZE, MT_DEVICE_NGNRNE, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_IFC_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
-	  CONFIG_SYS_FSL_DRAM_SIZE1, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE1,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
-	  CONFIG_SYS_FSL_DRAM_SIZE2, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE2,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 #endif
+	{},	/* list terminator */
 };
 
-static const struct sys_mmu_table final_mmu_table[] = {
+static struct mm_region final_map[] = {
 #ifdef CONFIG_FSL_LSCH3
 	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
-	  CONFIG_SYS_FSL_CCSR_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_CCSR_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
-	  CONFIG_SYS_FSL_OCRAM_SIZE, MT_NORMAL, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_OCRAM_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
-	  CONFIG_SYS_FSL_DRAM_SIZE1, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE1,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
-	  CONFIG_SYS_FSL_QSPI_SIZE1,  MT_NORMAL, PTE_BLOCK_NON_SHARE},
+	  CONFIG_SYS_FSL_QSPI_SIZE1,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
-	  CONFIG_SYS_FSL_QSPI_SIZE2, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_QSPI_SIZE2,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
-	  CONFIG_SYS_FSL_IFC_SIZE2, MT_DEVICE_NGNRNE, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_IFC_SIZE2,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
-	  CONFIG_SYS_FSL_DCSR_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_DCSR_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
-	  CONFIG_SYS_FSL_MC_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_MC_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
-	  CONFIG_SYS_FSL_NI_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_NI_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	/* For QBMAN portal, only the first 64MB is cache-enabled */
 	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
-	  CONFIG_SYS_FSL_QBMAN_SIZE_1, MT_NORMAL,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
+	},
 	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
 	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
 	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
-	  MT_DEVICE_NGNRNE, PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
-	  CONFIG_SYS_PCIE1_PHYS_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_PCIE1_PHYS_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
-	  CONFIG_SYS_PCIE2_PHYS_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_PCIE2_PHYS_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
-	  CONFIG_SYS_PCIE3_PHYS_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_PCIE3_PHYS_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 #ifdef CONFIG_LS2080A
 	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
-	  CONFIG_SYS_PCIE4_PHYS_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_PCIE4_PHYS_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 #endif
 	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
-	  CONFIG_SYS_FSL_WRIOP1_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_WRIOP1_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
-	  CONFIG_SYS_FSL_AIOP1_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_AIOP1_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
-	  CONFIG_SYS_FSL_PEBUF_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_PEBUF_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
-	  CONFIG_SYS_FSL_DRAM_SIZE2, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE2,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 #elif defined(CONFIG_FSL_LSCH2)
 	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
-	  CONFIG_SYS_FSL_BOOTROM_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_BOOTROM_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
-	  CONFIG_SYS_FSL_CCSR_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_CCSR_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
-	  CONFIG_SYS_FSL_OCRAM_SIZE, MT_NORMAL, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_OCRAM_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
-	  CONFIG_SYS_FSL_DCSR_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_DCSR_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
-	  CONFIG_SYS_FSL_QSPI_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_QSPI_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
-	  CONFIG_SYS_FSL_IFC_SIZE, MT_DEVICE_NGNRNE, PTE_BLOCK_NON_SHARE },
+	  CONFIG_SYS_FSL_IFC_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
-	  CONFIG_SYS_FSL_DRAM_SIZE1, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE1,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
-	  CONFIG_SYS_FSL_QBMAN_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_FSL_QBMAN_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
-	  CONFIG_SYS_FSL_DRAM_SIZE2, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE2,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
-	  CONFIG_SYS_PCIE1_PHYS_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_PCIE1_PHYS_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
-	  CONFIG_SYS_PCIE2_PHYS_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_PCIE2_PHYS_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
-	  CONFIG_SYS_PCIE3_PHYS_SIZE, MT_DEVICE_NGNRNE,
-	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN },
+	  CONFIG_SYS_PCIE3_PHYS_SIZE,
+	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
+	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
+	},
 	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
-	  CONFIG_SYS_FSL_DRAM_SIZE3, MT_NORMAL,
-	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS },
+	  CONFIG_SYS_FSL_DRAM_SIZE3,
+	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
+	},
 #endif
-};
+#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
+	{},	/* placeholder for secure memory */
 #endif
+	{},
+};
+#endif	/* !CONFIG_SYS_DCACHE_OFF */
 
 int fsl_qoriq_core_to_cluster(unsigned int core);
 u32 cpu_mask(void);