/*
 * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl31.h>
#include <bl_common.h>
#include <pl011.h>
#include <bakery_lock.h>
#include <cci400.h>
#include <gic.h>
#include <fvp_pwrc.h>

/*******************************************************************************
 * Declarations of linker defined symbols which will help us find the layout
 * of trusted SRAM
 ******************************************************************************/
extern unsigned long __RO_START__;
extern unsigned long __RO_END__;

extern unsigned long __COHERENT_RAM_START__;
extern unsigned long __COHERENT_RAM_END__;

/*
 * The next 2 constants identify the extents of the code & RO data region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
 */
#define BL31_RO_BASE (unsigned long)(&__RO_START__)
#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)

/*
 * The next 2 constants identify the extents of the coherent memory region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
 * refer to page-aligned addresses.
 */
#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)

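/*
 * Note: the data structures below are placed in the 'tzfw_coherent_mem'
 * section and padded to a cache line on the assumption that they are accessed
 * by CPUs both before and after the MMU and data cache have been enabled
 * (e.g. on the warm boot path), so they must live in memory mapped as
 * device/non-cacheable to stay coherent for all observers.
 */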
/*******************************************************************************
 * This data structure holds information copied by BL31 from BL2 to pass
 * control to the non-trusted software images. A per-cpu entry was created to
 * use the same structure in the warm boot path but that's not the case right
 * now. Persisting with this approach for the time being. TODO: Can this be
 * moved out of device memory?
 ******************************************************************************/
el_change_info ns_entry_info[PLATFORM_CORE_COUNT]
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
		section("tzfw_coherent_mem")));

/* Data structure which holds the extents of the trusted SRAM for BL31 */
static meminfo bl31_tzram_layout
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
		section("tzfw_coherent_mem")));

meminfo *bl31_plat_sec_mem_layout(void)
{
	return &bl31_tzram_layout;
}

/*******************************************************************************
 * Return information about passing control to the non-trusted software images
 * to common code. TODO: In the initial architecture, the image after BL31 will
 * always run in the non-secure state. In the final architecture there will be
 * a series of images. This function will need enhancement then.
 ******************************************************************************/
el_change_info *bl31_get_next_image_info(unsigned long mpidr)
{
	return &ns_entry_info[platform_get_core_pos(mpidr)];
}

/*******************************************************************************
 * Perform any BL31 specific platform actions. Here we copy parameters passed
 * by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they are lost
 * (potentially). This is done before the MMU is initialized so that the memory
 * layout can be used while creating page tables.
 ******************************************************************************/
void bl31_early_platform_setup(meminfo *mem_layout,
			       void *data,
			       unsigned long mpidr)
{
	el_change_info *image_info = (el_change_info *) data;
	unsigned int lin_index = platform_get_core_pos(mpidr);

	/* Setup the BL31 memory layout */
	bl31_tzram_layout.total_base = mem_layout->total_base;
	bl31_tzram_layout.total_size = mem_layout->total_size;
	bl31_tzram_layout.free_base = mem_layout->free_base;
	bl31_tzram_layout.free_size = mem_layout->free_size;
	bl31_tzram_layout.attr = mem_layout->attr;
	bl31_tzram_layout.next = 0;

	/* Save information about jumping into the NS world */
	ns_entry_info[lin_index].entrypoint = image_info->entrypoint;
	ns_entry_info[lin_index].spsr = image_info->spsr;
	ns_entry_info[lin_index].args = image_info->args;
	ns_entry_info[lin_index].security_state = image_info->security_state;
	ns_entry_info[lin_index].next = image_info->next;

	/* Initialize the platform config for future decision making */
	platform_config_setup();
}

/*******************************************************************************
 * Initialize the gic, configure the CLCD and zero out variables needed by the
 * secondaries to boot up correctly.
 ******************************************************************************/
void bl31_platform_setup(void)
{
	unsigned int reg_val;

	/* Initialize the gic cpu and distributor interfaces */
	gic_setup();

	/*
	 * TODO: Configure the CLCD before handing control to
	 * linux. Need to see if a separate driver is needed
	 * instead.
	 */
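	/*
	 * A sketch of what the write below is assumed to do: V2M_SYS_CFGCTRL
	 * follows the Versatile Express config control layout, where bit 31
	 * starts the transaction, bit 30 selects a write, bits [25:20] hold
	 * the function (7, the video mux) and bits [17:16] the site (0, the
	 * motherboard), with the payload taken from V2M_SYS_CFGDATA.
	 */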
	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGDATA, 0);
	mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGCTRL,
		      (1ull << 31) | (1 << 30) | (7 << 20) | (0 << 16));

	/* Allow access to the System counter timer module */
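	/*
	 * The CNTACR bits are assumed to follow the generic timer memory map:
	 * RPCT/RVCT allow reads of the physical/virtual count, RFRQ of the
	 * frequency, RVOFF of the virtual offset, and RWVT/RWPT allow access
	 * to the virtual/physical timer registers of frames 0 and 1.
	 */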
	reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
	reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
	reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
	mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
	mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);

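	/*
	 * CNTNSAR is assumed to gate non-secure access per frame; setting the
	 * NS bits for frames 0 and 1 lets the normal world use both frames.
	 */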
	reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1));
	mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);

	/* Initialize the power controller */
	fvp_pwrc_setup();

	/* Topologies are best known to the platform. */
	plat_setup_topology();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	configure_mmu(&bl31_tzram_layout,
		      BL31_RO_BASE,
		      BL31_RO_LIMIT,
		      BL31_COHERENT_RAM_BASE,
		      BL31_COHERENT_RAM_LIMIT);
}

/*******************************************************************************
 * TODO: Move GIC setup to a separate file in case it is needed by other BL
 * stages or ELs
 * TODO: Revisit if priorities are being set such that no non-secure interrupt
 * can have a higher priority than a secure one as recommended in the GICv2 spec
 ******************************************************************************/

/*******************************************************************************
 * This function does some minimal GICv3 configuration. The Firmware itself does
 * not fully support GICv3 at this time and relies on GICv2 emulation as
 * provided by GICv3. This function allows software (like Linux) in later stages
 * to use full GICv3 features.
 ******************************************************************************/
void gicv3_cpuif_setup(void)
{
	unsigned int scr_val, val, base;

	/*
	 * When CPUs come out of reset they have their GICR_WAKER.ProcessorSleep
	 * bit set. In order to allow interrupts to get routed to the CPU we
	 * need to clear this bit if set and wait for GICR_WAKER.ChildrenAsleep
	 * to clear (GICv3 Architecture specification 5.4.23).
	 * GICR_WAKER is NOT banked per CPU, compute the correct base address
	 * per CPU.
	 *
	 * TODO:
	 * For GICv4 we also need to adjust the Base address based on
	 * GICR_TYPER.VLPIS
	 */
	base = BASE_GICR_BASE +
		(platform_get_core_pos(read_mpidr()) << GICR_PCPUBASE_SHIFT);
	val = gicr_read_waker(base);

	val &= ~WAKER_PS;
	gicr_write_waker(base, val);
	dsb();

	/* We need to wait for ChildrenAsleep to clear. */
	val = gicr_read_waker(base);
	while (val & WAKER_CA) {
		val = gicr_read_waker(base);
	}

	/*
	 * We need to set SCR_EL3.NS in order to see GICv3 non-secure state.
	 * Restore SCR_EL3.NS again before exit.
	 */
	scr_val = read_scr();
	write_scr(scr_val | SCR_NS_BIT);

	/*
	 * By default EL2 and NS-EL1 software should be able to enable GICv3
	 * System register access without any configuration at EL3. But it turns
	 * out that GICC PMR as set in GICv2 mode does not affect GICv3 mode. So
	 * we need to set it here again. In order to do that we need to enable
	 * register access. We leave it enabled as it should be fine and might
	 * prevent problems with later software trying to access GIC System
	 * Registers.
	 */
	val = read_icc_sre_el3();
	write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE);

	val = read_icc_sre_el2();
	write_icc_sre_el2(val | ICC_SRE_EN | ICC_SRE_SRE);

	write_icc_pmr_el1(MAX_PRI_VAL);

	/* Restore SCR_EL3 */
	write_scr(scr_val);
}

/*******************************************************************************
 * This function does some minimal GICv3 configuration when cores go
 * down.
 ******************************************************************************/
void gicv3_cpuif_deactivate(void)
{
	unsigned int val, base;

	/*
	 * When taking CPUs down we need to set GICR_WAKER.ProcessorSleep and
	 * wait for GICR_WAKER.ChildrenAsleep to get set.
	 * (GICv3 Architecture specification 5.4.23).
	 * GICR_WAKER is NOT banked per CPU, compute the correct base address
	 * per CPU.
	 *
	 * TODO:
	 * For GICv4 we also need to adjust the Base address based on
	 * GICR_TYPER.VLPIS
	 */
	base = BASE_GICR_BASE +
		(platform_get_core_pos(read_mpidr()) << GICR_PCPUBASE_SHIFT);
	val = gicr_read_waker(base);
	val |= WAKER_PS;
	gicr_write_waker(base, val);
	dsb();

	/* We need to wait for ChildrenAsleep to set. */
	val = gicr_read_waker(base);
	while ((val & WAKER_CA) == 0) {
		val = gicr_read_waker(base);
	}
}


/*******************************************************************************
 * Enable secure interrupts and use FIQs to route them. Disable legacy bypass
 * and set the priority mask register to allow all interrupts to trickle in.
 ******************************************************************************/
void gic_cpuif_setup(unsigned int gicc_base)
{
	unsigned int val;

	val = gicc_read_iidr(gicc_base);

	/*
	 * If this is a GICv3 implementation, we need to do a bit of additional
	 * setup. We want to keep the default GICv2 behaviour but allow the
	 * next stage to enable full GICv3 features.
	 */
	if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
		gicv3_cpuif_setup();
	}

	val = ENABLE_GRP0 | FIQ_EN | FIQ_BYP_DIS_GRP0;
	val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1;

	gicc_write_pmr(gicc_base, MAX_PRI_VAL);
	gicc_write_ctlr(gicc_base, val);
}

/*******************************************************************************
 * Place the cpu interface in a state where it can never make a cpu exit wfi
 * as a result of an asserted interrupt. This is critical for powering down a
 * cpu.
 ******************************************************************************/
void gic_cpuif_deactivate(unsigned int gicc_base)
{
	unsigned int val;

	/* Disable secure and non-secure interrupts and disable their bypass */
	val = gicc_read_ctlr(gicc_base);
	val &= ~(ENABLE_GRP0 | ENABLE_GRP1);
	val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0;
	val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1;
	gicc_write_ctlr(gicc_base, val);

	val = gicc_read_iidr(gicc_base);

	/*
	 * If this is a GICv3 implementation, we need to do a bit of additional
	 * setup. Make sure the RDIST is put to sleep.
	 */
	if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) {
		gicv3_cpuif_deactivate();
	}
}

/*******************************************************************************
 * Per cpu gic distributor setup which will be done by all cpus after a cold
 * boot/hotplug. This marks out the secure interrupts & enables them.
 ******************************************************************************/
void gic_pcpu_distif_setup(unsigned int gicd_base)
{
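	/*
	 * Mark all 32 banked SGIs/PPIs as Group 1 (non-secure) first; the
	 * calls below are assumed to pull only the secure SGIs and the secure
	 * physical timer PPI back into Group 0, set their priority and enable
	 * them.
	 */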
	gicd_write_igroupr(gicd_base, 0, ~0);

	gicd_clr_igroupr(gicd_base, IRQ_SEC_PHY_TIMER);
	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_0);
	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_1);
	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_2);
	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_3);
	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_4);
	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_5);
	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_6);
	gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_7);

	gicd_set_ipriorityr(gicd_base, IRQ_SEC_PHY_TIMER, MAX_PRI_VAL);
	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_0, MAX_PRI_VAL);
	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_1, MAX_PRI_VAL);
	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_2, MAX_PRI_VAL);
	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_3, MAX_PRI_VAL);
	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_4, MAX_PRI_VAL);
	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_5, MAX_PRI_VAL);
	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_6, MAX_PRI_VAL);
	gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_7, MAX_PRI_VAL);

	gicd_set_isenabler(gicd_base, IRQ_SEC_PHY_TIMER);
	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_0);
	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_1);
	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_2);
	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_3);
	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_4);
	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_5);
	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_6);
	gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_7);
}

/*******************************************************************************
 * Global gic distributor setup which will be done by the primary cpu after a
 * cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It
 * then enables the secure GIC distributor interface.
 ******************************************************************************/
void gic_distif_setup(unsigned int gicd_base)
{
	unsigned int ctr, num_ints, ctlr;

	/* Disable the distributor before going further */
	ctlr = gicd_read_ctlr(gicd_base);
	ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1);
	gicd_write_ctlr(gicd_base, ctlr);

	/*
	 * Mark out non-secure interrupts. Calculate number of
	 * IGROUPR registers to consider. Will be equal to the
	 * number of IT_LINES
	 */
	num_ints = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK;
	num_ints++;
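	/*
	 * Each IGROUPR register covers 32 interrupt IDs; the helper below is
	 * assumed to take an interrupt ID, so 'ctr << IGROUPR_SHIFT' is the
	 * first ID covered by IGROUPR register 'ctr'.
	 */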
	for (ctr = 0; ctr < num_ints; ctr++)
		gicd_write_igroupr(gicd_base, ctr << IGROUPR_SHIFT, ~0);

	/* Configure secure interrupts now */
	gicd_clr_igroupr(gicd_base, IRQ_TZ_WDOG);
	gicd_set_ipriorityr(gicd_base, IRQ_TZ_WDOG, MAX_PRI_VAL);
	gicd_set_itargetsr(gicd_base, IRQ_TZ_WDOG,
			   platform_get_core_pos(read_mpidr()));
	gicd_set_isenabler(gicd_base, IRQ_TZ_WDOG);
	gic_pcpu_distif_setup(gicd_base);

	gicd_write_ctlr(gicd_base, ctlr | ENABLE_GRP0);
}

void gic_setup(void)
{
	unsigned int gicd_base, gicc_base;

	gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR);
	gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);

	gic_cpuif_setup(gicc_base);
	gic_distif_setup(gicd_base);
}