lmb: make LMB memory map persistent and global

The current LMB APIs for allocating and reserving memory use a
per-caller memory view. Memory allocated by one caller can then be
overwritten by another caller. Make these allocations and reservations
persistent using the alloced list data structure.

Two alloced lists are declared -- one for the available (free) memory,
and one for the used memory. If a list becomes full, it is extended at
runtime.
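
As a rough sketch (the field and type names here are assumptions for
illustration, not necessarily the exact definitions introduced by this
patch), the persistent, global memory map could be organised as:

    /* one persistent instance shared by every caller */
    struct lmb {
            struct alist free_mem;  /* available (free) memory regions */
            struct alist used_mem;  /* allocated/reserved memory regions */
    };

With a single global instance like this, the per-caller struct lmb
arguments can be dropped from the API, as the diff below does for the
stm32mp callers.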

[sjg: Use a stack to store pointer of lmb struct when running lmb tests]

Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
Signed-off-by: Simon Glass <sjg@chromium.org>
[sjg: Optimise the logic to add a region in lmb_add_region_flags()]

diff --git a/arch/arm/mach-stm32mp/dram_init.c b/arch/arm/mach-stm32mp/dram_init.c
index 6024959..e8b0a38 100644
--- a/arch/arm/mach-stm32mp/dram_init.c
+++ b/arch/arm/mach-stm32mp/dram_init.c
@@ -47,7 +47,6 @@
 {
 	phys_size_t size;
 	phys_addr_t reg;
-	struct lmb lmb;
 
 	if (!total_size)
 		return gd->ram_top;
@@ -59,12 +58,11 @@
 	gd->ram_top = clamp_val(gd->ram_top, 0, SZ_4G - 1);
 
 	/* found enough not-reserved memory to relocated U-Boot */
-	lmb_init(&lmb);
-	lmb_add(&lmb, gd->ram_base, gd->ram_top - gd->ram_base);
-	boot_fdt_add_mem_rsv_regions(&lmb, (void *)gd->fdt_blob);
+	lmb_add(gd->ram_base, gd->ram_top - gd->ram_base);
+	boot_fdt_add_mem_rsv_regions((void *)gd->fdt_blob);
 	/* add 8M for reserved memory for display, fdt, gd,... */
 	size = ALIGN(SZ_8M + CONFIG_SYS_MALLOC_LEN + total_size, MMU_SECTION_SIZE),
-	reg = lmb_alloc(&lmb, size, MMU_SECTION_SIZE);
+	reg = lmb_alloc(size, MMU_SECTION_SIZE);
 
 	if (!reg)
 		reg = gd->ram_top - size;
diff --git a/arch/arm/mach-stm32mp/stm32mp1/cpu.c b/arch/arm/mach-stm32mp/stm32mp1/cpu.c
index 478c3ef..a913737 100644
--- a/arch/arm/mach-stm32mp/stm32mp1/cpu.c
+++ b/arch/arm/mach-stm32mp/stm32mp1/cpu.c
@@ -30,8 +30,6 @@
  */
 u8 early_tlb[PGTABLE_SIZE] __section(".data") __aligned(0x4000);
 
-struct lmb lmb;
-
 u32 get_bootmode(void)
 {
 	/* read bootmode from TAMP backup register */
@@ -80,7 +78,7 @@
 	     i < (start >> MMU_SECTION_SHIFT) + (size >> MMU_SECTION_SHIFT);
 	     i++) {
 		option = DCACHE_DEFAULT_OPTION;
-		if (use_lmb && lmb_is_reserved_flags(&lmb, i << MMU_SECTION_SHIFT, LMB_NOMAP))
+		if (use_lmb && lmb_is_reserved_flags(i << MMU_SECTION_SHIFT, LMB_NOMAP))
 			option = 0; /* INVALID ENTRY in TLB */
 		set_section_dcache(i, option);
 	}
@@ -144,7 +142,7 @@
 void enable_caches(void)
 {
 	/* parse device tree when data cache is still activated */
-	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
+	lmb_init_and_reserve(gd->bd, (void *)gd->fdt_blob);
 
 	/* I-cache is already enabled in start.S: icache_enable() not needed */