blob: 29bf602f3b962537aa5ef53ee0cdf2bea81f64e9 [file] [log] [blame]
Achin Gupta4f6ad662013-10-25 09:08:21 +01001/*
Dan Handleye83b0ca2014-01-14 18:17:09 +00002 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
Achin Gupta4f6ad662013-10-25 09:08:21 +01003 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
Dan Handley2bd4ef22014-04-09 13:14:54 +010031#include <arch.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010032#include <arch_helpers.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010033#include <assert.h>
34#include <bl_common.h>
Vikram Kanigiri96377452014-04-24 11:02:16 +010035#include <cci400.h>
Dan Handley714a0d22014-04-09 13:13:04 +010036#include <debug.h>
Dan Handley2bd4ef22014-04-09 13:14:54 +010037#include <mmio.h>
Jon Medhurstb1eb0932014-02-26 16:27:53 +000038#include <platform.h>
39#include <xlat_tables.h>
Achin Gupta4f6ad662013-10-25 09:08:21 +010040
/*******************************************************************************
 * This array holds the characteristics of the differences between the three
 * FVP platforms (Base, A53_A57 & Foundation). It will be populated during cold
 * boot at each boot stage by the primary before enabling the MMU (to allow cci
 * configuration) & used thereafter. Each BL will have its own copy to allow
 * independent operation.
 *
 * Written by platform_config_setup() below and read back through
 * platform_get_cfgvar(); indices are the CONFIG_* enumerators.
 ******************************************************************************/
static unsigned long platform_config[CONFIG_LIMIT];
49
/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 * _el:		Exception level at which the function will run
 * _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *		be OR'ed with the default TCR value.
 * _tlbi_fct:	Function to invalidate the TLBs at the current
 *		exception level
 *
 * The generated function asserts that it is executing at the expected
 * exception level and that the MMU is currently disabled, then programs
 * MAIR, TCR and TTBR0 before finally setting the enable bits in SCTLR.
 * The dsb()/isb() sequence before the SCTLR write is required so that all
 * translation table writes and the TLB invalidation are visible before
 * translation is turned on.
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(void)					\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Inner & outer WBWA & shareable + T0SZ = 32 */	\
		tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |	\
			TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;		\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) l1_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsb();							\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;	\
		sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;			\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100108
/*
 * Table of regions to map using the MMU, shared by all FVP boot stages.
 * This doesn't include TZRAM as the 'mem_layout' argument passed to
 * configure_mmu_elx() will give the available subset of that.
 * The table is terminated by a zero-filled entry.
 */
const mmap_region_t fvp_mmap[] = {
	{ TZROM_BASE,	TZROM_SIZE,	MT_MEMORY | MT_RO | MT_SECURE },
	{ TZDRAM_BASE,	TZDRAM_SIZE,	MT_MEMORY | MT_RW | MT_SECURE },
	{ FLASH0_BASE,	FLASH0_SIZE,	MT_MEMORY | MT_RO | MT_SECURE },
	{ FLASH1_BASE,	FLASH1_SIZE,	MT_MEMORY | MT_RO | MT_SECURE },
	{ VRAM_BASE,	VRAM_SIZE,	MT_MEMORY | MT_RW | MT_SECURE },
	{ DEVICE0_BASE,	DEVICE0_SIZE,	MT_DEVICE | MT_RW | MT_SECURE },
	{ NSRAM_BASE,	NSRAM_SIZE,	MT_MEMORY | MT_RW | MT_NS },
	{ DEVICE1_BASE,	DEVICE1_SIZE,	MT_DEVICE | MT_RW | MT_SECURE },
	/* 2nd GB as device for now...*/
	{ 0x40000000,	0x40000000,	MT_DEVICE | MT_RW | MT_SECURE },
	{ DRAM_BASE,	DRAM_SIZE,	MT_MEMORY | MT_RW | MT_NS },
	{0}
};
128
/*******************************************************************************
 * Macro generating the code for the function setting up the pagetables as per
 * the platform memory map & initialize the mmu, for the given exception level
 *
 * total_base/total_size:	RW region covering the whole available memory
 * ro_start/ro_limit:		read-only sub-region (e.g. code), end-exclusive
 * coh_start/coh_limit:		device-memory sub-region used for coherent
 *				data, end-exclusive
 *
 * The regions above are added on top of the static fvp_mmap table before
 * the translation tables are generated and the MMU is enabled at this EL.
 ******************************************************************************/
#define DEFINE_CONFIGURE_MMU_EL(_el)					\
	void configure_mmu_el##_el(unsigned long total_base,		\
				   unsigned long total_size,		\
				   unsigned long ro_start,		\
				   unsigned long ro_limit,		\
				   unsigned long coh_start,		\
				   unsigned long coh_limit)		\
	{								\
		mmap_add_region(total_base,				\
				total_size,				\
				MT_MEMORY | MT_RW | MT_SECURE);		\
		mmap_add_region(ro_start, ro_limit - ro_start,		\
				MT_MEMORY | MT_RO | MT_SECURE);		\
		mmap_add_region(coh_start, coh_limit - coh_start,	\
				MT_DEVICE | MT_RW | MT_SECURE);		\
		mmap_add(fvp_mmap);					\
		init_xlat_tables();					\
									\
		enable_mmu_el##_el();					\
	}

/* Define EL1 and EL3 variants of the function initialising the MMU */
DEFINE_CONFIGURE_MMU_EL(1)
DEFINE_CONFIGURE_MMU_EL(3)
Achin Gupta4f6ad662013-10-25 09:08:21 +0100157
158/* Simple routine which returns a configuration variable value */
159unsigned long platform_get_cfgvar(unsigned int var_id)
160{
161 assert(var_id < CONFIG_LIMIT);
162 return platform_config[var_id];
163}
164
165/*******************************************************************************
166 * A single boot loader stack is expected to work on both the Foundation FVP
167 * models and the two flavours of the Base FVP models (AEMv8 & Cortex). The
168 * SYS_ID register provides a mechanism for detecting the differences between
169 * these platforms. This information is stored in a per-BL array to allow the
170 * code to take the correct path.Per BL platform configuration.
171 ******************************************************************************/
172int platform_config_setup(void)
173{
174 unsigned int rev, hbi, bld, arch, sys_id, midr_pn;
175
176 sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID);
177 rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK;
178 hbi = (sys_id >> SYS_ID_HBI_SHIFT) & SYS_ID_HBI_MASK;
179 bld = (sys_id >> SYS_ID_BLD_SHIFT) & SYS_ID_BLD_MASK;
180 arch = (sys_id >> SYS_ID_ARCH_SHIFT) & SYS_ID_ARCH_MASK;
181
James Morrissey40a6f642014-02-10 14:24:36 +0000182 if ((rev != REV_FVP) || (arch != ARCH_MODEL))
183 panic();
Achin Gupta4f6ad662013-10-25 09:08:21 +0100184
185 /*
186 * The build field in the SYS_ID tells which variant of the GIC
187 * memory is implemented by the model.
188 */
189 switch (bld) {
190 case BLD_GIC_VE_MMAP:
191 platform_config[CONFIG_GICD_ADDR] = VE_GICD_BASE;
192 platform_config[CONFIG_GICC_ADDR] = VE_GICC_BASE;
193 platform_config[CONFIG_GICH_ADDR] = VE_GICH_BASE;
194 platform_config[CONFIG_GICV_ADDR] = VE_GICV_BASE;
195 break;
196 case BLD_GIC_A53A57_MMAP:
197 platform_config[CONFIG_GICD_ADDR] = BASE_GICD_BASE;
198 platform_config[CONFIG_GICC_ADDR] = BASE_GICC_BASE;
199 platform_config[CONFIG_GICH_ADDR] = BASE_GICH_BASE;
200 platform_config[CONFIG_GICV_ADDR] = BASE_GICV_BASE;
201 break;
202 default:
203 assert(0);
204 }
205
206 /*
207 * The hbi field in the SYS_ID is 0x020 for the Base FVP & 0x010
208 * for the Foundation FVP.
209 */
210 switch (hbi) {
211 case HBI_FOUNDATION:
212 platform_config[CONFIG_MAX_AFF0] = 4;
213 platform_config[CONFIG_MAX_AFF1] = 1;
214 platform_config[CONFIG_CPU_SETUP] = 0;
215 platform_config[CONFIG_BASE_MMAP] = 0;
Harry Liebel30affd52013-10-30 17:41:48 +0000216 platform_config[CONFIG_HAS_CCI] = 0;
Harry Liebelcef93392014-04-01 19:27:38 +0100217 platform_config[CONFIG_HAS_TZC] = 0;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100218 break;
219 case HBI_FVP_BASE:
220 midr_pn = (read_midr() >> MIDR_PN_SHIFT) & MIDR_PN_MASK;
221 if ((midr_pn == MIDR_PN_A57) || (midr_pn == MIDR_PN_A53))
222 platform_config[CONFIG_CPU_SETUP] = 1;
223 else
224 platform_config[CONFIG_CPU_SETUP] = 0;
225
226 platform_config[CONFIG_MAX_AFF0] = 4;
227 platform_config[CONFIG_MAX_AFF1] = 2;
228 platform_config[CONFIG_BASE_MMAP] = 1;
Harry Liebel30affd52013-10-30 17:41:48 +0000229 platform_config[CONFIG_HAS_CCI] = 1;
Harry Liebelcef93392014-04-01 19:27:38 +0100230 platform_config[CONFIG_HAS_TZC] = 1;
Achin Gupta4f6ad662013-10-25 09:08:21 +0100231 break;
232 default:
233 assert(0);
234 }
235
236 return 0;
237}
238
/*
 * Return the entry point address used for the non-secure image.
 * NS_IMAGE_OFFSET is a platform-defined constant; presumably it points at
 * DRAM where BL33 is loaded — confirm against the platform header.
 */
unsigned long plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}
Sandrine Bailleux3fa98472014-03-31 11:25:18 +0100243
244uint64_t plat_get_syscnt_freq(void)
245{
246 uint64_t counter_base_frequency;
247
248 /* Read the frequency from Frequency modes table */
249 counter_base_frequency = mmio_read_32(SYS_CNTCTL_BASE + CNTFID_OFF);
250
251 /* The first entry of the frequency modes table must not be 0 */
252 assert(counter_base_frequency != 0);
253
254 return counter_base_frequency;
255}
Vikram Kanigiri96377452014-04-24 11:02:16 +0100256
257void fvp_cci_setup(void)
258{
259 unsigned long cci_setup;
260
261 /*
262 * Enable CCI-400 for this cluster. No need
263 * for locks as no other cpu is active at the
264 * moment
265 */
266 cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI);
267 if (cci_setup)
268 cci_enable_coherency(read_mpidr());
269}
270
271
272/*******************************************************************************
273 * Set SPSR and secure state for BL32 image
274 ******************************************************************************/
275void fvp_set_bl32_ep_info(entry_point_info_t *bl32_ep_info)
276{
277 SET_SECURITY_STATE(bl32_ep_info->h.attr, SECURE);
278 /*
279 * The Secure Payload Dispatcher service is responsible for
280 * setting the SPSR prior to entry into the BL32 image.
281 */
282 bl32_ep_info->spsr = 0;
283}
284
285/*******************************************************************************
286 * Set SPSR and secure state for BL33 image
287 ******************************************************************************/
288void fvp_set_bl33_ep_info(entry_point_info_t *bl33_ep_info)
289{
290 unsigned long el_status;
291 unsigned int mode;
292
293 /* Figure out what mode we enter the non-secure world in */
294 el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
295 el_status &= ID_AA64PFR0_ELX_MASK;
296
297 if (el_status)
298 mode = MODE_EL2;
299 else
300 mode = MODE_EL1;
301
302 /*
303 * TODO: Consider the possibility of specifying the SPSR in
304 * the FIP ToC and allowing the platform to have a say as
305 * well.
306 */
307 bl33_ep_info->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
308 SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
309}