/*
 * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl31.h>
#include <bl_common.h>
#include <pl011.h>
#include <bakery_lock.h>
#include <cci400.h>
#include <gic.h>
#include <fvp_pwrc.h>

/*******************************************************************************
 * Declarations of linker defined symbols which will help us find the layout
 * of trusted SRAM
 ******************************************************************************/
extern unsigned long __RO_START__;
extern unsigned long __RO_END__;

extern unsigned long __COHERENT_RAM_START__;
extern unsigned long __COHERENT_RAM_END__;

/*
 * The next 2 constants identify the extents of the code & RO data region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
 */
#define BL31_RO_BASE (unsigned long)(&__RO_START__)
#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)

/*
 * The next 2 constants identify the extents of the coherent memory region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
 * refer to page-aligned addresses.
 */
#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
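
/*
 * Note: both sets of extents above are consumed by configure_mmu() in
 * bl31_plat_arch_setup() below. The page alignment requirement exists because
 * the code/RO data region and the coherent memory region are mapped with
 * different memory attributes, and attributes can only change on a page
 * boundary.
 */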

/*******************************************************************************
 * This data structure holds information copied by BL31 from BL2 to pass
 * control to the normal world software images.
 * TODO: Can this be moved out of device memory?
 ******************************************************************************/
static el_change_info ns_entry_info
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
                section("tzfw_coherent_mem")));

/* Data structure which holds the extents of the trusted SRAM for BL31 */
static meminfo bl31_tzram_layout
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
                section("tzfw_coherent_mem")));

meminfo *bl31_plat_sec_mem_layout(void)
{
        return &bl31_tzram_layout;
}

/*******************************************************************************
 * Return information about passing control to the non-trusted software images
 * to common code. TODO: In the initial architecture, the image after BL31 will
 * always run in the non-secure state. In the final architecture there will be
 * a series of images; this function will need enhancement then.
 ******************************************************************************/
el_change_info *bl31_get_next_image_info(void)
{
        return &ns_entry_info;
}

/*******************************************************************************
 * Perform any BL31 specific platform actions. Here we copy parameters passed
 * by the calling EL (S-EL1 for BL2 & EL3 for BL1) before they are potentially
 * lost. This is done before the MMU is initialized so that the memory layout
 * can be used while creating page tables.
 ******************************************************************************/
void bl31_early_platform_setup(meminfo *mem_layout,
                               void *data)
{
        el_change_info *image_info = (el_change_info *) data;

        /* Setup the BL31 memory layout */
        bl31_tzram_layout.total_base = mem_layout->total_base;
        bl31_tzram_layout.total_size = mem_layout->total_size;
        bl31_tzram_layout.free_base = mem_layout->free_base;
        bl31_tzram_layout.free_size = mem_layout->free_size;
        bl31_tzram_layout.attr = mem_layout->attr;
        bl31_tzram_layout.next = 0;

        /* Save information about jumping into the normal world */
        ns_entry_info.entrypoint = image_info->entrypoint;
        ns_entry_info.spsr = image_info->spsr;
        ns_entry_info.args = image_info->args;
        ns_entry_info.security_state = image_info->security_state;
        ns_entry_info.next = image_info->next;

        /* Initialize the platform config for future decision making */
        platform_config_setup();
}

/*******************************************************************************
 * Initialize the GIC, configure the CLCD and zero out variables needed by the
 * secondaries to boot up correctly.
 ******************************************************************************/
void bl31_platform_setup()
{
        unsigned int reg_val;

        /* Initialize the GIC cpu and distributor interfaces */
        gic_setup();

        /*
         * TODO: Configure the CLCD before handing control to
         * Linux. Need to see if a separate driver is needed
         * instead.
         */
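        /*
         * The writes below follow the Versatile Express config interface:
         * a value is placed in SYS_CFGDATA and SYS_CFGCTRL is then programmed
         * with start (bit 31), write (bit 30), a function code in bits
         * [25:20] (7 is taken here to be the video mux / MUXFPGA function)
         * and the site in bits [19:16].
         */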
        mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGDATA, 0);
        mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGCTRL,
                      (1ull << 31) | (1 << 30) | (7 << 20) | (0 << 16));

        /* Allow access to the System counter timer module */
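        /*
         * CNTACR(n) is understood to gate which registers (count, frequency,
         * virtual offset and the timers themselves) are accessible through
         * timer frame n, while CNTNSAR marks a frame as accessible from the
         * Non-secure world; frames 0 and 1 are opened up here.
         */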
        reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
        reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
        reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
        mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
        mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);

        reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1));
        mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);

        /* Initialize the power controller */
        fvp_pwrc_setup();

        /* Topologies are best known to the platform. */
        plat_setup_topology();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup()
{
        configure_mmu(&bl31_tzram_layout,
                      BL31_RO_BASE,
                      BL31_RO_LIMIT,
                      BL31_COHERENT_RAM_BASE,
                      BL31_COHERENT_RAM_LIMIT);
}

/*******************************************************************************
 * TODO: Move GIC setup to a separate file in case it is needed by other BL
 * stages or ELs.
 * TODO: Revisit whether priorities are set such that no non-secure interrupt
 * can have a higher priority than a secure one, as recommended in the GICv2
 * spec.
 ******************************************************************************/

/*******************************************************************************
 * This function does some minimal GICv3 configuration. The firmware itself
 * does not fully support GICv3 at this time and relies on GICv2 emulation as
 * provided by GICv3. This function allows software (like Linux) in later
 * stages to use full GICv3 features.
 ******************************************************************************/
void gicv3_cpuif_setup(void)
{
        unsigned int scr_val, val, base;

        /*
         * When CPUs come out of reset they have their GICR_WAKER.ProcessorSleep
         * bit set. In order to allow interrupts to get routed to the CPU we
         * need to clear this bit if set and wait for GICR_WAKER.ChildrenAsleep
         * to clear (GICv3 Architecture specification 5.4.23).
         * GICR_WAKER is NOT banked per CPU, so compute the correct base address
         * per CPU.
         *
         * TODO:
         * For GICv4 we also need to adjust the base address based on
         * GICR_TYPER.VLPIS.
         */
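        /*
         * Each redistributor is assumed to occupy a pair of 64KB frames
         * (RD_base followed by SGI_base), which is what the per-cpu shift
         * below accounts for.
         */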
        base = BASE_GICR_BASE +
                (platform_get_core_pos(read_mpidr()) << GICR_PCPUBASE_SHIFT);
        val = gicr_read_waker(base);

        val &= ~WAKER_PS;
        gicr_write_waker(base, val);
        dsb();

        /* We need to wait for ChildrenAsleep to clear. */
        val = gicr_read_waker(base);
        while (val & WAKER_CA) {
                val = gicr_read_waker(base);
        }

        /*
         * We need to set SCR_EL3.NS in order to see the GICv3 non-secure state.
         * Restore SCR_EL3.NS again before exit.
         */
        scr_val = read_scr();
        write_scr(scr_val | SCR_NS_BIT);

        /*
         * By default EL2 and NS-EL1 software should be able to enable GICv3
         * System register access without any configuration at EL3. However,
         * the priority mask programmed through GICC_PMR in GICv2 mode does not
         * carry over to GICv3 mode, so ICC_PMR_EL1 has to be set here as well.
         * Doing that requires enabling System register access, which is left
         * enabled afterwards as it should be harmless and might prevent
         * problems for later software trying to access the GIC System
         * registers.
         */
        val = read_icc_sre_el3();
        write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE);

        val = read_icc_sre_el2();
        write_icc_sre_el2(val | ICC_SRE_EN | ICC_SRE_SRE);

        write_icc_pmr_el1(MAX_PRI_VAL);

        /* Restore SCR_EL3 */
        write_scr(scr_val);
}

/*******************************************************************************
 * This function does some minimal GICv3 configuration when cores go down.
 ******************************************************************************/
void gicv3_cpuif_deactivate(void)
{
        unsigned int val, base;

        /*
         * When taking CPUs down we need to set GICR_WAKER.ProcessorSleep and
         * wait for GICR_WAKER.ChildrenAsleep to get set
         * (GICv3 Architecture specification 5.4.23).
         * GICR_WAKER is NOT banked per CPU, so compute the correct base address
         * per CPU.
         *
         * TODO:
         * For GICv4 we also need to adjust the base address based on
         * GICR_TYPER.VLPIS.
         */
        base = BASE_GICR_BASE +
                (platform_get_core_pos(read_mpidr()) << GICR_PCPUBASE_SHIFT);
        val = gicr_read_waker(base);
        val |= WAKER_PS;
        gicr_write_waker(base, val);
        dsb();

        /* We need to wait for ChildrenAsleep to get set. */
        val = gicr_read_waker(base);
        while ((val & WAKER_CA) == 0) {
                val = gicr_read_waker(base);
        }
}

/*******************************************************************************
 * Enable secure interrupts and use FIQs to route them. Disable legacy bypass
 * and set the priority mask register to allow all interrupts to trickle in.
 ******************************************************************************/
void gic_cpuif_setup(unsigned int gicc_base)
{
        unsigned int val;

        val = gicc_read_iidr(gicc_base);

        /*
         * If the GIC is a GICv3 we need to do a bit of additional setup. We
         * want to allow the default GICv2 behaviour but also allow the next
         * stage to enable full GICv3 features.
         */
        if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
                gicv3_cpuif_setup();
        }

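        /*
         * Enable Group 0 interrupts, signal them using FIQs and disable the
         * legacy IRQ/FIQ bypass paths for both groups.
         */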
        val = ENABLE_GRP0 | FIQ_EN | FIQ_BYP_DIS_GRP0;
        val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;

        gicc_write_pmr(gicc_base, MAX_PRI_VAL);
        gicc_write_ctlr(gicc_base, val);
}

/*******************************************************************************
 * Place the cpu interface in a state where it can never make a cpu exit wfi
 * as a result of an asserted interrupt. This is critical for powering down a
 * cpu.
 ******************************************************************************/
void gic_cpuif_deactivate(unsigned int gicc_base)
{
        unsigned int val;

        /* Disable secure, non-secure interrupts and disable their bypass */
        val = gicc_read_ctlr(gicc_base);
        val &= ~(ENABLE_GRP0 | ENABLE_GRP1);
        val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
        val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
        gicc_write_ctlr(gicc_base, val);

        val = gicc_read_iidr(gicc_base);

        /*
         * If the GIC is a GICv3 we need to do a bit of additional setup. Make
         * sure the RDIST is put to sleep.
         */
        if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
                gicv3_cpuif_deactivate();
        }
}

/*******************************************************************************
 * Per-cpu GIC distributor setup which will be done by all cpus after a cold
 * boot/hotplug. This marks out the secure interrupts & enables them.
 ******************************************************************************/
void gic_pcpu_distif_setup(unsigned int gicd_base)
{
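        /*
         * Mark all SGIs and PPIs as Group 1 (non-secure) first, then pull the
         * secure timer and secure SGIs back into Group 0, program their
         * priority and enable them.
         */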
        gicd_write_igroupr(gicd_base, 0, ~0);

        gicd_clr_igroupr(gicd_base, IRQ_SEC_PHY_TIMER);
        gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_0);
        gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_1);
        gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_2);
        gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_3);
        gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_4);
        gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_5);
        gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_6);
        gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_7);

        gicd_set_ipriorityr(gicd_base, IRQ_SEC_PHY_TIMER, MAX_PRI_VAL);
        gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_0, MAX_PRI_VAL);
        gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_1, MAX_PRI_VAL);
        gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_2, MAX_PRI_VAL);
        gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_3, MAX_PRI_VAL);
        gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_4, MAX_PRI_VAL);
        gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_5, MAX_PRI_VAL);
        gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_6, MAX_PRI_VAL);
        gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_7, MAX_PRI_VAL);

        gicd_set_isenabler(gicd_base, IRQ_SEC_PHY_TIMER);
        gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_0);
        gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_1);
        gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_2);
        gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_3);
        gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_4);
        gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_5);
        gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_6);
        gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_7);
}

/*******************************************************************************
 * Global GIC distributor setup which will be done by the primary cpu after a
 * cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It
 * then enables the secure GIC distributor interface.
 ******************************************************************************/
void gic_distif_setup(unsigned int gicd_base)
{
        unsigned int ctr, num_ints, ctlr;

        /* Disable the distributor before going further */
        ctlr = gicd_read_ctlr(gicd_base);
        ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1);
        gicd_write_ctlr(gicd_base, ctlr);

        /*
         * Mark all interrupts as non-secure to begin with. The number of
         * IGROUPR registers to program is GICD_TYPER.ITLinesNumber + 1.
         */
        num_ints = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK;
        num_ints++;
        for (ctr = 0; ctr < num_ints; ctr++)
                gicd_write_igroupr(gicd_base, ctr << IGROUPR_SHIFT, ~0);

        /* Configure secure interrupts now */
        gicd_clr_igroupr(gicd_base, IRQ_TZ_WDOG);
        gicd_set_ipriorityr(gicd_base, IRQ_TZ_WDOG, MAX_PRI_VAL);
        gicd_set_itargetsr(gicd_base, IRQ_TZ_WDOG,
                           platform_get_core_pos(read_mpidr()));
        gicd_set_isenabler(gicd_base, IRQ_TZ_WDOG);
        gic_pcpu_distif_setup(gicd_base);

        gicd_write_ctlr(gicd_base, ctlr | ENABLE_GRP0);
}

void gic_setup(void)
{
        unsigned int gicd_base, gicc_base;

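        /*
         * The GIC base addresses are taken from the platform config populated
         * by platform_config_setup() during bl31_early_platform_setup() rather
         * than being hard-coded, presumably because they differ between FVP
         * memory maps.
         */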
        gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR);
        gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);

        gic_cpuif_setup(gicc_base);
        gic_distif_setup(gicd_base);
}