/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <platform.h>
#include <assert.h>

/*******************************************************************************
 * This duplicates what the primary cpu did after a cold boot in BL1. The same
 * needs to be done when a cpu is hotplugged in. This function could also
 * override any EL3 setup done by BL1 as this code resides in rw memory.
 ******************************************************************************/
void bl31_arch_setup(void)
{
	unsigned long tmp_reg = 0;

	/* Enable alignment checks and set the exception endianness to LE */
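	/*
	 * SCTLR_A_BIT enables alignment fault checking, SCTLR_SA_BIT enables
	 * stack pointer alignment checking, and clearing SCTLR_EE_BIT selects
	 * little-endian data accesses at EL3.
	 */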
	tmp_reg = read_sctlr();
	tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT);
	tmp_reg &= ~SCTLR_EE_BIT;
	write_sctlr(tmp_reg);

	/*
	 * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route
	 * external abort and SError interrupts to EL3
	 */
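	/*
	 * SCR_RES1_BITS keeps the reserved-one bits of SCR_EL3 set,
	 * SCR_RW_BIT makes the next lower EL AArch64, SCR_HCE_BIT enables
	 * the HVC instruction, SCR_EA_BIT routes external aborts and SErrors
	 * to EL3, and SCR_FIQ_BIT routes FIQs to EL3.
	 */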
	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_EA_BIT |
		  SCR_FIQ_BIT;
	write_scr(tmp_reg);

	/*
	 * Enable SError and Debug exceptions
	 */
	enable_serror();
	enable_debug_exceptions();

	return;
}

/*******************************************************************************
 * Detect what the security state of the next EL is and set up the minimum
 * required architectural state: program SCTLR to reflect the RES1 bits, and to
 * have MMU and caches disabled
 ******************************************************************************/
void bl31_next_el_arch_setup(uint32_t security_state)
{
	unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1();
	unsigned long current_sctlr, next_sctlr;
	unsigned long el_status;
	unsigned long scr = read_scr();

	/* Use the same endianness as the current BL */
	current_sctlr = read_sctlr();
	next_sctlr = (current_sctlr & SCTLR_EE_BIT);
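	/*
	 * Starting from only the EE bit leaves the M, C and I bits clear, so
	 * the MMU and caches remain disabled for the next EL.
	 */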

	/* Find out which EL we are going to */
	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & ID_AA64PFR0_ELX_MASK;

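	/*
	 * The non-secure world is set up for EL2 only when EL2 is implemented
	 * (el_status is non-zero) and HVCs have been enabled in SCR_EL3;
	 * otherwise SCTLR_EL1 is programmed below.
	 */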
	if (security_state == NON_SECURE) {
		/* Check if EL2 is supported */
		if (el_status && (scr & SCR_HCE_BIT)) {
			/* Set SCTLR EL2 */
			next_sctlr |= SCTLR_EL2_RES1;
			write_sctlr_el2(next_sctlr);
			return;
		}
	}

	/*
	 * SCTLR_EL1 needs the same programming irrespective of the
	 * security state of EL1.
	 */
	next_sctlr |= SCTLR_EL1_RES1;
	write_sctlr_el1(next_sctlr);
}