// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2021 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <config.h>
#include <clock_legacy.h>
#include <cpu_func.h>
#include <env.h>
#include <init.h>
#include <hang.h>
#include <lmb.h>
#include <log.h>
#include <net.h>
#include <vsprintf.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/io.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

#ifdef CONFIG_TFABOOT
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif
#include <linux/mii.h>
#include <dm.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
	CPU_TYPE_ENTRY(LX2162A, LX2162A, 16),
	CPU_TYPE_ENTRY(LX2122A, LX2122A, 12),
	CPU_TYPE_ENTRY(LX2082A, LX2082A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CFG_SYS_FSL_CCSR_BASE, CFG_SYS_FSL_CCSR_BASE,
	  CFG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CFG_SYS_FSL_QSPI_BASE1, CFG_SYS_FSL_QSPI_BASE1,
	  CFG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CFG_SYS_FSL_IFC_BASE1, CFG_SYS_FSL_IFC_BASE1,
	  CFG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CFG_SYS_FSL_IFC_BASE1 + CFG_SYS_FSL_IFC_SIZE1_1,
	  CFG_SYS_FSL_IFC_BASE1 + CFG_SYS_FSL_IFC_SIZE1_1,
	  CFG_SYS_FSL_IFC_SIZE1 - CFG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CFG_SYS_FLASH_BASE, CFG_SYS_FSL_IFC_BASE1,
	  CFG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CFG_SYS_FSL_DRAM_BASE1, CFG_SYS_FSL_DRAM_BASE1,
	  CFG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_XPL_BUILD))
	PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CFG_SYS_FLASH_BASE for NAND boot */
	{ CFG_SYS_FSL_IFC_BASE2, CFG_SYS_FSL_IFC_BASE2,
	  CFG_SYS_FLASH_BASE - CFG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CFG_SYS_FSL_DCSR_BASE, CFG_SYS_FSL_DCSR_BASE,
	  CFG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_DRAM_BASE2, CFG_SYS_FSL_DRAM_BASE2,
	  CFG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CFG_SYS_FSL_DRAM_BASE3
	{ CFG_SYS_FSL_DRAM_BASE3, CFG_SYS_FSL_DRAM_BASE3,
	  CFG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CFG_SYS_FSL_CCSR_BASE, CFG_SYS_FSL_CCSR_BASE,
	  CFG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CFG_SYS_FSL_DCSR_BASE, CFG_SYS_FSL_DCSR_BASE,
	  CFG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_QSPI_BASE, CFG_SYS_FSL_QSPI_BASE,
	  CFG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CFG_SYS_FSL_IFC_BASE, CFG_SYS_FSL_IFC_BASE,
	  CFG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CFG_SYS_FSL_DRAM_BASE1, CFG_SYS_FSL_DRAM_BASE1,
	  CFG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_XPL_BUILD))
	PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CFG_SYS_FSL_DRAM_BASE2, CFG_SYS_FSL_DRAM_BASE2,
	  CFG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};

static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CFG_SYS_FSL_CCSR_BASE, CFG_SYS_FSL_CCSR_BASE,
	  CFG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CFG_SYS_FSL_DRAM_BASE1, CFG_SYS_FSL_DRAM_BASE1,
	  CFG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CFG_SYS_FSL_QSPI_BASE1, CFG_SYS_FSL_QSPI_BASE1,
	  CFG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_QSPI_BASE2, CFG_SYS_FSL_QSPI_BASE2,
	  CFG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CFG_SYS_FSL_IFC_BASE2, CFG_SYS_FSL_IFC_BASE2,
	  CFG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CFG_SYS_FSL_DCSR_BASE, CFG_SYS_FSL_DCSR_BASE,
	  CFG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_MC_BASE, CFG_SYS_FSL_MC_BASE,
	  CFG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_NI_BASE, CFG_SYS_FSL_NI_BASE,
	  CFG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CFG_SYS_FSL_QBMAN_BASE, CFG_SYS_FSL_QBMAN_BASE,
	  CFG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CFG_SYS_FSL_QBMAN_BASE + CFG_SYS_FSL_QBMAN_SIZE_1,
	  CFG_SYS_FSL_QBMAN_BASE + CFG_SYS_FSL_QBMAN_SIZE_1,
	  CFG_SYS_FSL_QBMAN_SIZE - CFG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_PCIE1_PHYS_ADDR, CFG_SYS_PCIE1_PHYS_ADDR,
	  CFG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_PCIE2_PHYS_ADDR, CFG_SYS_PCIE2_PHYS_ADDR,
	  CFG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CFG_SYS_PCIE3_PHYS_ADDR
	{ CFG_SYS_PCIE3_PHYS_ADDR, CFG_SYS_PCIE3_PHYS_ADDR,
	  CFG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CFG_SYS_PCIE4_PHYS_ADDR
	{ CFG_SYS_PCIE4_PHYS_ADDR, CFG_SYS_PCIE4_PHYS_ADDR,
	  CFG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CFG_SYS_FSL_WRIOP1_BASE, CFG_SYS_FSL_WRIOP1_BASE,
	  CFG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_AIOP1_BASE, CFG_SYS_FSL_AIOP1_BASE,
	  CFG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_PEBUF_BASE, CFG_SYS_FSL_PEBUF_BASE,
	  CFG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_DRAM_BASE2, CFG_SYS_FSL_DRAM_BASE2,
	  CFG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CFG_SYS_FSL_DRAM_BASE3
	{ CFG_SYS_FSL_DRAM_BASE3, CFG_SYS_FSL_DRAM_BASE3,
	  CFG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_CCSR_BASE, CFG_SYS_FSL_CCSR_BASE,
	  CFG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_OCRAM_BASE, CFG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CFG_SYS_FSL_DCSR_BASE, CFG_SYS_FSL_DCSR_BASE,
	  CFG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_QSPI_BASE, CFG_SYS_FSL_QSPI_BASE,
	  CFG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CFG_SYS_FSL_IFC_BASE, CFG_SYS_FSL_IFC_BASE,
	  CFG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CFG_SYS_FSL_DRAM_BASE1, CFG_SYS_FSL_DRAM_BASE1,
	  CFG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CFG_SYS_FSL_QBMAN_BASE, CFG_SYS_FSL_QBMAN_BASE,
	  CFG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_FSL_DRAM_BASE2, CFG_SYS_FSL_DRAM_BASE2,
	  CFG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CFG_SYS_PCIE1_PHYS_ADDR, CFG_SYS_PCIE1_PHYS_ADDR,
	  CFG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CFG_SYS_PCIE2_PHYS_ADDR, CFG_SYS_PCIE2_PHYS_ADDR,
	  CFG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CFG_SYS_PCIE3_PHYS_ADDR
	{ CFG_SYS_PCIE3_PHYS_ADDR, CFG_SYS_PCIE3_PHYS_ADDR,
	  CFG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CFG_SYS_FSL_DRAM_BASE3, CFG_SYS_FSL_DRAM_BASE3,
	  CFG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CFG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};
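/*
 * Note on the trailing empty entries above: when CFG_SYS_MEM_RESERVE_SECURE
 * is defined, final_mmu_setup() below fills the second-to-last slot in at
 * runtime with the secure-memory block, while the last all-zero entry always
 * remains the list terminator expected by the MMU code.
 */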

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
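/*
 * Illustrative example of the naming above: for an LS1046A rev 1.0 part with
 * the crypto ("E") fuse set, the loop matches the LS1046A entry,
 * IS_E_PROCESSOR() appends "E" and the revision is appended afterwards,
 * giving "LS1046AE Rev1.0". The caller's buffer must be large enough for the
 * longest name plus the revision suffix.
 */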

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CFG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CFG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CFG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
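/*
 * Sizing sketch (illustrative, derived from the comment and code above):
 * T0SZ=24 with a 4KB granule covers a 40-bit (1 TB) address space, and
 * EARLY_PGTABLE_SIZE (0x5000) budgets five 4KB tables for the three-level
 * early map, placed in OCRAM at EL3 or at the base of DDR otherwise.
 */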

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CFG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CFG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CFG_SYS_PCIE3_PHYS_ADDR
			case CFG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CFG_SYS_PCIE4_PHYS_ADDR
			case CFG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CFG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CFG_SYS_FSL_DRAM_BASE2
		case CFG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CFG_SYS_FSL_DRAM_BASE3
		case CFG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CFG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CFG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
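/*
 * Resulting layout (descriptive summary of the code above): the normal page
 * tables are built first at tlb_addr, an "emergency" set is then built right
 * after them at the fill pointer and recorded in gd->arch.tlb_emerg, and
 * TTBR is finally pointed back at the normal set before the MMU is re-enabled.
 */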

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself, to speed
	 * up the boot. It is not necessary to run if performance is not
	 * critical. Skip if MMU is already enabled by SPL or other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;

	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (IS_ENABLED(CONFIG_SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}
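/*
 * Usage sketch: get_boot_src() below obtains PORSR1 (via an SMC at EL2, or by
 * reading the register directly at EL3 or when the SMC path returns nothing)
 * and passes it here; the RCW_SRC field is then masked out and mapped onto a
 * boot_src value, e.g. an SD-card RCW source decodes to BOOT_SOURCE_SD_MMC.
 */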

enum boot_src get_boot_src(void)
{
	struct arm_smccc_res res;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		arm_smccc_smc(SIP_SVC_RCW, 0, 0, 0, 0, 0, 0, 0, &res);
		if (!res.a0)
			porsr1 = res.a1;
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location arch_env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */

u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
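/*
 * Illustrative decode: each topology cluster word packs one initiator index
 * per byte-wide slot, so (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK
 * selects slot init_id, and the indexed tp_ityp[] entry is only returned when
 * its TP_ITYP_AV (available) bit is set.
 */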

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CFG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CFG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CFG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:  %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(struct bd_info *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(struct bd_info *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_XPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
	return error;
}

int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	struct arm_smccc_res res;
	size_t arglen;
	unsigned int mask;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		arm_smccc_smc(SIP_PREFETCH_DISABLE_64, mask, 0, 0, 0, 0, 0, 0,
			      &res);

		if (res.a0)
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}
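/*
 * Example (assumed hwconfig syntax, matching the parser above): setting the
 * environment variable hwconfig to "core_prefetch:disable=0x2" requests the
 * SIP_PREFETCH_DISABLE_64 service to disable prefetch on core 1; bit 0
 * (core 0) is rejected here before any SMC is issued.
 */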

#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/* some dpmacs in armv8a based freescale layerscape SOCs can be
	 * configured via both serdes(sgmii, 10gbase-r, xlaui etc) bits and via
	 * EC*_PMUX(rgmii) bits in RCW.
	 * e.g. dpmac 17 and 18 in LX2160A can be configured as SGMII from
	 * serdes bits and as RGMII via EC1_PMUX/EC2_PMUX bits
	 * Now if a dpmac is enabled as RGMII through ECx_PMUX then it takes
	 * precedence over SerDes protocol. i.e. in LX2160A if we select serdes
	 * protocol that configures dpmac17 as SGMII and set the EC1_PMUX as
	 * RGMII, then the dpmac is RGMII and not SGMII.
	 *
	 * Therefore, even though fsl_rgmii_init is after fsl_serdes_init
	 * function of SOC, the dpmac will be enabled as RGMII even if it was
	 * also enabled before as SGMII. If ECx_PMUX is not configured for
	 * RGMII, DPMAC will remain configured as SGMII from fsl_serdes_init().
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
	fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CFG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CFG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's time base
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For LS2080A SoC and its personalities, timer controller
	 * offset is different
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;

#endif

	/* Enable clock for timer
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

#if !CONFIG_IS_ENABLED(SYSRESET)
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CFG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(void)
{
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
	/* clear the RST_REQ_MSK and SW_RST_REQ */
	out_le32(rstcr, 0x0);

	/* initiate the sw reset request */
	out_le32(rstcr, 0x1);
#else
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}
#endif

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu();
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory with given memory bank
 * Return aligned memory size on success
 * Return (ram_size + needed size) for failure
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_XPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}
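/*
 * Worked example with illustrative numbers: for ram_size = 0x80000000 (2 GB)
 * and an MC DRAM block of 0x20000000 (512 MB), ram_top becomes 0x60000000,
 * is unchanged by the alignment (assuming CONFIG_SYS_MC_RSV_MEM_ALIGN divides
 * it), and the function returns 0x20000000, i.e. 512 MB reserved at the top.
 */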

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. Secure memory needs to be
	 * allocated from the first region. If the memory extends to the second
	 * region (or the third region if applicable), Management Complex (MC)
	 * memory should be put into the highest region, i.e. the end of DDR
	 * memory. CFG_MAX_MEM_MAPPED is set to the size of first region so
	 * U-Boot doesn't relocate itself into higher address. Should DDR be
	 * configured to skip the first region, this function needs to be
	 * adjusted.
	 */
	if (gd->ram_size > CFG_MAX_MEM_MAPPED) {
		ea_size = CFG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CFG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CFG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CFG_SYS_MEM_RESERVE_SECURE;
	else
1329 printf("Error: Not enough space for secure memory.\n");
York Sun4de24ef2017-03-06 09:02:28 -08001330#endif
1331 /* Check if we have enough memory for MC */
1332 if (rem < board_reserve_ram_top(rem)) {
1333 /* Not enough memory in high region to reserve */
York Sun2db54082017-09-07 10:12:32 -07001334 if (ea_size > board_reserve_ram_top(ea_size))
1335 ea_size -= board_reserve_ram_top(ea_size);
York Sun4de24ef2017-03-06 09:02:28 -08001336 else
1337 printf("Error: Not enough space for reserved memory.\n");
1338 }
1339
1340 return ea_size;
1341}
1342
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001343#ifdef CONFIG_TFABOOT
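/*
 * Ask TF-A for the total DRAM size via the SMC_DRAM_BANK_INFO service:
 * a bank argument of -1 requests the total size (returned in a1), while a
 * bank index returns that bank's start and size in a1/a2 (see
 * tfa_dram_init_banksize() below). Returns 0 if the SMC call fails.
 */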
1344phys_size_t tfa_get_dram_size(void)
1345{
Michael Walle166ea482022-04-22 14:53:27 +05301346 struct arm_smccc_res res;
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001347
Michael Walle166ea482022-04-22 14:53:27 +05301348 arm_smccc_smc(SMC_DRAM_BANK_INFO, -1, 0, 0, 0, 0, 0, 0, &res);
1349 if (res.a0)
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001350 return 0;
1351
Michael Walle166ea482022-04-22 14:53:27 +05301352 return res.a1;
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001353}
1354
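/*
 * Populate gd->bd->bi_dram[] by querying TF-A bank by bank until the total
 * reported by tfa_get_dram_size() has been accounted for. When
 * CONFIG_RESV_RAM is enabled, gd->arch.resv_ram is then pointed at the top
 * of the highest bank large enough to hold the MC reservation.
 */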
1355static int tfa_dram_init_banksize(void)
1356{
1357 int i = 0, ret = 0;
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001358 phys_size_t dram_size = tfa_get_dram_size();
Michael Walle166ea482022-04-22 14:53:27 +05301359 struct arm_smccc_res res;
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001360
1361 debug("dram_size %llx\n", dram_size);
1362
1363 if (!dram_size)
1364 return -EINVAL;
1365
1366 do {
Michael Walle166ea482022-04-22 14:53:27 +05301367 arm_smccc_smc(SMC_DRAM_BANK_INFO, i, 0, 0, 0, 0, 0, 0, &res);
1368 if (res.a0) {
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001369 ret = -EINVAL;
1370 break;
1371 }
1372
Michael Walle166ea482022-04-22 14:53:27 +05301373 debug("bank[%d]: start %lx, size %lx\n", i, res.a1, res.a2);
1374 gd->bd->bi_dram[i].start = res.a1;
1375 gd->bd->bi_dram[i].size = res.a2;
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001376
1377 dram_size -= gd->bd->bi_dram[i].size;
1378
1379 i++;
1380 } while (dram_size);
1381
1382 if (i > 0)
1383 ret = 0;
1384
Simon Glass85ed77d2024-09-29 19:49:46 -06001385#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_XPL_BUILD)
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001386 /* Assign memory for MC */
1387#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1388 if (gd->bd->bi_dram[2].size >=
1389 board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
1390 gd->arch.resv_ram = gd->bd->bi_dram[2].start +
1391 gd->bd->bi_dram[2].size -
1392 board_reserve_ram_top(gd->bd->bi_dram[2].size);
1393 } else
1394#endif
1395 {
1396 if (gd->bd->bi_dram[1].size >=
1397 board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
1398 gd->arch.resv_ram = gd->bd->bi_dram[1].start +
1399 gd->bd->bi_dram[1].size -
1400 board_reserve_ram_top(gd->bd->bi_dram[1].size);
1401 } else if (gd->bd->bi_dram[0].size >
1402 board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
1403 gd->arch.resv_ram = gd->bd->bi_dram[0].start +
1404 gd->bd->bi_dram[0].size -
1405 board_reserve_ram_top(gd->bd->bi_dram[0].size);
1406 }
1407 }
Hou Zhiqiang80de3732020-04-28 10:19:28 +08001408#endif /* CONFIG_RESV_RAM */
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001409
1410 return ret;
1411}
1412#endif
1413
Simon Glass2f949c32017-03-31 08:40:32 -06001414int dram_init_banksize(void)
York Sun4de24ef2017-03-06 09:02:28 -08001415{
1416#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1417 phys_size_t dp_ddr_size;
1418#endif
1419
Rajesh Bhagat1dde2d22018-11-05 18:01:58 +00001420#ifdef CONFIG_TFABOOT
1421 if (!tfa_dram_init_banksize())
1422 return 0;
1423#endif
York Sun4de24ef2017-03-06 09:02:28 -08001424 /*
1425 * gd->ram_size has the total size of DDR memory, less reserved secure
1426 * memory. The DDR extends from the low region to the high region(s),
1427 * presuming no hole is created by the DDR configuration. gd->arch.secure_ram tracks
1428 * the location of secure memory. gd->arch.resv_ram tracks the location
York Sunc9e3e042017-09-28 08:42:12 -07001429 * of reserved memory for Management Complex (MC). Because gd->ram_size
1430 * is reduced by this function if secure memory is reserved, checking
1431 * gd->arch.secure_ram should be done to avoid running it repeatedly.
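 *
 * Bank layout produced below: bank 0 starts at CFG_SYS_SDRAM_BASE and is
 * capped at CFG_SYS_DDR_BLOCK1_SIZE; any remainder goes to bank 1 at
 * CFG_SYS_DDR_BLOCK2_BASE and, where CONFIG_SYS_DDR_BLOCK3_BASE is
 * defined, overflow beyond CONFIG_SYS_DDR_BLOCK2_SIZE goes to bank 2.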
York Sun4de24ef2017-03-06 09:02:28 -08001432 */
York Sunc9e3e042017-09-28 08:42:12 -07001433
Tom Rini6a5dccc2022-11-16 13:10:41 -05001434#ifdef CFG_SYS_MEM_RESERVE_SECURE
York Sunc9e3e042017-09-28 08:42:12 -07001435 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
1436 debug("No need to run again, skip %s\n", __func__);
1437
1438 return 0;
1439 }
1440#endif
1441
Tom Rinibb4dd962022-11-16 13:10:37 -05001442 gd->bd->bi_dram[0].start = CFG_SYS_SDRAM_BASE;
Tom Rini6a5dccc2022-11-16 13:10:41 -05001443 if (gd->ram_size > CFG_SYS_DDR_BLOCK1_SIZE) {
1444 gd->bd->bi_dram[0].size = CFG_SYS_DDR_BLOCK1_SIZE;
1445 gd->bd->bi_dram[1].start = CFG_SYS_DDR_BLOCK2_BASE;
York Sun4de24ef2017-03-06 09:02:28 -08001446 gd->bd->bi_dram[1].size = gd->ram_size -
Tom Rini6a5dccc2022-11-16 13:10:41 -05001447 CFG_SYS_DDR_BLOCK1_SIZE;
York Sun4de24ef2017-03-06 09:02:28 -08001448#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1449 if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
1450 gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
1451 gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
1452 CONFIG_SYS_DDR_BLOCK2_SIZE;
1453 gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
1454 }
1455#endif
1456 } else {
1457 gd->bd->bi_dram[0].size = gd->ram_size;
1458 }
Tom Rini6a5dccc2022-11-16 13:10:41 -05001459#ifdef CFG_SYS_MEM_RESERVE_SECURE
Sumit Garged0deea2017-10-04 03:20:49 +05301460 if (gd->bd->bi_dram[0].size >
Tom Rini6a5dccc2022-11-16 13:10:41 -05001461 CFG_SYS_MEM_RESERVE_SECURE) {
Sumit Garged0deea2017-10-04 03:20:49 +05301462 gd->bd->bi_dram[0].size -=
Tom Rini6a5dccc2022-11-16 13:10:41 -05001463 CFG_SYS_MEM_RESERVE_SECURE;
Sumit Garged0deea2017-10-04 03:20:49 +05301464 gd->arch.secure_ram = gd->bd->bi_dram[0].start +
1465 gd->bd->bi_dram[0].size;
York Sun4de24ef2017-03-06 09:02:28 -08001466 gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
Tom Rini6a5dccc2022-11-16 13:10:41 -05001467 gd->ram_size -= CFG_SYS_MEM_RESERVE_SECURE;
York Sun4de24ef2017-03-06 09:02:28 -08001468 }
Tom Rini6a5dccc2022-11-16 13:10:41 -05001469#endif /* CFG_SYS_MEM_RESERVE_SECURE */
York Sun4de24ef2017-03-06 09:02:28 -08001470
Simon Glass85ed77d2024-09-29 19:49:46 -06001471#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_XPL_BUILD)
York Sun4de24ef2017-03-06 09:02:28 -08001472 /* Assign memory for MC */
1473#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1474 if (gd->bd->bi_dram[2].size >=
1475 board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
1476 gd->arch.resv_ram = gd->bd->bi_dram[2].start +
1477 gd->bd->bi_dram[2].size -
1478 board_reserve_ram_top(gd->bd->bi_dram[2].size);
1479 } else
1480#endif
1481 {
1482 if (gd->bd->bi_dram[1].size >=
1483 board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
1484 gd->arch.resv_ram = gd->bd->bi_dram[1].start +
1485 gd->bd->bi_dram[1].size -
1486 board_reserve_ram_top(gd->bd->bi_dram[1].size);
1487 } else if (gd->bd->bi_dram[0].size >
1488 board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
1489 gd->arch.resv_ram = gd->bd->bi_dram[0].start +
1490 gd->bd->bi_dram[0].size -
1491 board_reserve_ram_top(gd->bd->bi_dram[0].size);
1492 }
1493 }
Hou Zhiqiang80de3732020-04-28 10:19:28 +08001494#endif /* CONFIG_RESV_RAM */
York Sun4de24ef2017-03-06 09:02:28 -08001495
1496#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1497#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1498#error "This SoC shouldn't have DP DDR"
1499#endif
1500 if (soc_has_dp_ddr()) {
1501 /* initialize DP-DDR here */
1502 puts("DP-DDR: ");
1503 /*
1504 * The DDR controller uses 0 as the base address for binding.
1505 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
1506 */
1507 dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
1508 CONFIG_DP_DDR_CTRL,
1509 CONFIG_DP_DDR_NUM_CTRLS,
1510 CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
1511 NULL, NULL, NULL);
1512 if (dp_ddr_size) {
1513 gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
1514 gd->bd->bi_dram[2].size = dp_ddr_size;
1515 } else {
1516 puts("Not detected");
1517 }
1518 }
1519#endif
Simon Glass2f949c32017-03-31 08:40:32 -06001520
Tom Rini6a5dccc2022-11-16 13:10:41 -05001521#ifdef CFG_SYS_MEM_RESERVE_SECURE
York Sunc9e3e042017-09-28 08:42:12 -07001522 debug("%s is called. gd->ram_size is reduced to %lu\n",
1523 __func__, (ulong)gd->ram_size);
1524#endif
1525
Simon Glass2f949c32017-03-31 08:40:32 -06001526 return 0;
York Sun4de24ef2017-03-06 09:02:28 -08001527}
1528
Sughosh Ganu0502b222024-10-15 21:07:12 +05301529#if CONFIG_IS_ENABLED(LMB_ARCH_MEM_MAP)
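/*
 * Register each DRAM bank with LMB, skipping the DP-DDR bank (if any) and
 * trimming the bank that contains gd->arch.resv_ram so the MC reservation
 * at its top is never handed out by LMB.
 */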
1530void lmb_arch_add_memory(void)
York Sun4de24ef2017-03-06 09:02:28 -08001531{
1532 int i;
Michael Walle282d3862020-05-17 12:29:19 +02001533 phys_addr_t ram_start;
York Sun4de24ef2017-03-06 09:02:28 -08001534 phys_size_t ram_size;
York Sun928b6812015-12-07 11:08:58 -08001535
York Sun4de24ef2017-03-06 09:02:28 -08001536 /* Add RAM */
1537 for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
1538#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
1539#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1540#error "This SoC shouldn't have DP DDR"
1541#endif
1542 if (i == 2)
1543 continue; /* skip DP-DDR */
1544#endif
1545 ram_start = gd->bd->bi_dram[i].start;
1546 ram_size = gd->bd->bi_dram[i].size;
1547#ifdef CONFIG_RESV_RAM
1548 if (gd->arch.resv_ram >= ram_start &&
1549 gd->arch.resv_ram < ram_start + ram_size)
1550 ram_size = gd->arch.resv_ram - ram_start;
1551#endif
Sughosh Ganu0502b222024-10-15 21:07:12 +05301552 lmb_add(ram_start, ram_size);
York Sun4de24ef2017-03-06 09:02:28 -08001553 }
York Sun928b6812015-12-07 11:08:58 -08001554}
York Sun4de24ef2017-03-06 09:02:28 -08001555#endif
York Sun729f2d12017-03-06 09:02:34 -08001556
1557/*
1558 * Before the DDR size is known, the early MMU table has DDR mapped as device memory
1559 * to avoid speculative access. To relocate U-Boot to DDR, "normal memory"
1560 * needs to be set for these mappings.
1561 * If a special case configures DDR with holes in the mapping, the holes need
1562 * to be marked as invalid. This is not implemented in this function.
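 *
 * The remapping below is done block by block: block 1 always, then block 2
 * and, when CONFIG_SYS_DDR_BLOCK3_BASE is defined and the memory extends
 * that far, block 3.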
1563 */
1564void update_early_mmu_table(void)
1565{
1566 if (!gd->arch.tlb_addr)
1567 return;
1568
Tom Rini364d0022023-01-10 11:19:45 -05001569 if (gd->ram_size <= CFG_SYS_FSL_DRAM_SIZE1) {
York Sun729f2d12017-03-06 09:02:34 -08001570 mmu_change_region_attr(
Tom Rinibb4dd962022-11-16 13:10:37 -05001571 CFG_SYS_SDRAM_BASE,
York Sun729f2d12017-03-06 09:02:34 -08001572 gd->ram_size,
1573 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1574 PTE_BLOCK_OUTER_SHARE |
1575 PTE_BLOCK_NS |
1576 PTE_TYPE_VALID);
1577 } else {
1578 mmu_change_region_attr(
Tom Rinibb4dd962022-11-16 13:10:37 -05001579 CFG_SYS_SDRAM_BASE,
Tom Rini6a5dccc2022-11-16 13:10:41 -05001580 CFG_SYS_DDR_BLOCK1_SIZE,
York Sun729f2d12017-03-06 09:02:34 -08001581 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1582 PTE_BLOCK_OUTER_SHARE |
1583 PTE_BLOCK_NS |
1584 PTE_TYPE_VALID);
1585#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
1586#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
1587#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
1588#endif
Tom Rini6a5dccc2022-11-16 13:10:41 -05001589 if (gd->ram_size - CFG_SYS_DDR_BLOCK1_SIZE >
York Sun729f2d12017-03-06 09:02:34 -08001590 CONFIG_SYS_DDR_BLOCK2_SIZE) {
1591 mmu_change_region_attr(
Tom Rini6a5dccc2022-11-16 13:10:41 -05001592 CFG_SYS_DDR_BLOCK2_BASE,
York Sun729f2d12017-03-06 09:02:34 -08001593 CONFIG_SYS_DDR_BLOCK2_SIZE,
1594 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1595 PTE_BLOCK_OUTER_SHARE |
1596 PTE_BLOCK_NS |
1597 PTE_TYPE_VALID);
1598 mmu_change_region_attr(
1599 CONFIG_SYS_DDR_BLOCK3_BASE,
1600 gd->ram_size -
Tom Rini6a5dccc2022-11-16 13:10:41 -05001601 CFG_SYS_DDR_BLOCK1_SIZE -
York Sun729f2d12017-03-06 09:02:34 -08001602 CONFIG_SYS_DDR_BLOCK2_SIZE,
1603 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1604 PTE_BLOCK_OUTER_SHARE |
1605 PTE_BLOCK_NS |
1606 PTE_TYPE_VALID);
1607 } else
1608#endif
1609 {
1610 mmu_change_region_attr(
Tom Rini6a5dccc2022-11-16 13:10:41 -05001611 CFG_SYS_DDR_BLOCK2_BASE,
York Sun729f2d12017-03-06 09:02:34 -08001612 gd->ram_size -
Tom Rini6a5dccc2022-11-16 13:10:41 -05001613 CFG_SYS_DDR_BLOCK1_SIZE,
York Sun729f2d12017-03-06 09:02:34 -08001614 PTE_BLOCK_MEMTYPE(MT_NORMAL) |
1615 PTE_BLOCK_OUTER_SHARE |
1616 PTE_BLOCK_NS |
1617 PTE_TYPE_VALID);
1618 }
1619 }
1620}
1621
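/*
 * Weak default; boards may override. Initialise DDR through the Freescale
 * DDR driver and, when not booting via SPL or TF-A (or when building the
 * SPL itself), switch the early device-memory mapping of DDR to normal
 * memory.
 */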
1622__weak int dram_init(void)
1623{
Tom Rini56184602022-02-25 11:19:53 -05001624#ifdef CONFIG_SYS_FSL_DDR
Simon Glass0e0ac202017-04-06 12:47:04 -06001625 fsl_initdram();
Rajesh Bhagat5efbecf2018-11-05 18:01:37 +00001626#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
Simon Glass85ed77d2024-09-29 19:49:46 -06001627 defined(CONFIG_XPL_BUILD)
York Sun729f2d12017-03-06 09:02:34 -08001628 /* This will break-before-make MMU for DDR */
1629 update_early_mmu_table();
1630#endif
Tom Rini56184602022-02-25 11:19:53 -05001631#endif
York Sun729f2d12017-03-06 09:02:34 -08001632
1633 return 0;
1634}
Alex Marginean47568ce2020-01-11 01:05:40 +02001635
1636#ifdef CONFIG_ARCH_MISC_INIT
1637__weak int serdes_misc_init(void)
1638{
1639 return 0;
1640}
1641
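/*
 * Probe the CAAM job ring through the driver model so the crypto engine is
 * initialised early, then let the board apply any SerDes fix-ups via the
 * weak serdes_misc_init() hook.
 */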
1642int arch_misc_init(void)
1643{
Gaurav Jain476c6392022-03-24 11:50:35 +05301644 if (IS_ENABLED(CONFIG_FSL_CAAM)) {
1645 struct udevice *dev;
1646 int ret;
1647
1648 ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(caam_jr), &dev);
1649 if (ret)
Ye Liec346892022-05-11 13:56:20 +05301650 printf("Failed to initialize caam_jr: %d\n", ret);
Gaurav Jain476c6392022-03-24 11:50:35 +05301651 }
Alex Marginean47568ce2020-01-11 01:05:40 +02001652 serdes_misc_init();
1653
1654 return 0;
1655}
1656#endif