/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint

func sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */
endfunc sp_min_vector_table
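
/*
 * Note: sp_min_entrypoint below programs this table into both VBAR and
 * MVBAR. When it serves as the Monitor vector table, the entry at
 * offset 0x8 is the one taken on an SMC, hence the branch to handle_smc.
 */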

func handle_smc
	smcc_save_gp_mode_regs
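
	/*
	 * smcc_save_gp_mode_regs (smcc_macros.S) saves the general
	 * purpose registers and the banked registers of the CPU modes
	 * into an smc context allocated on the monitor stack, leaving
	 * r0 pointing at it (see the comment below).
	 */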

	/* r0 points to smc_context */
	mov	r2, r0			/* handle */
	ldcopr	r0, SCR

	/* Save the SCR on the stack */
	push	{r0}
	and	r3, r0, #SCR_NS_BIT	/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
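
	/*
	 * Clearing SCR.NS ensures that, while the runtime service runs,
	 * CP15 accesses and memory transactions are made as Secure. The
	 * caller's SCR value, pushed above, is restored once the handler
	 * returns.
	 */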
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/*
	 * Check the calling convention: per the SMC Calling Convention,
	 * bit 30 of the function ID distinguishes SMC64 calls (e.g.
	 * 0xC4xxxxxx) from SMC32 calls (e.g. 0x84xxxxxx). SMC64 cannot
	 * be handled by this AArch32 payload, so return SMC_UNK for it.
	 */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f			/* SMC32 is detected */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f			/* Skip handling the SMC */
1:
	mov	r1, #0			/* cookie */
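	/*
	 * handle_runtime_svc(smc_fid, cookie, handle, flags): r0 holds
	 * the function ID, r1 the cookie, r2 the smc context and r3 the
	 * caller's security state. On return, r0 points to the context
	 * holding the values to restore.
	 */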
	bl	handle_runtime_svc
2:
	/* r0 points to smc context */

	/* Restore the SCR from the stack */
	pop	{r1}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc

/*
 * The cold boot/reset entrypoint for SP_MIN.
 */
func sp_min_entrypoint

	/*
	 * The caches and TLBs are disabled at reset. If any implementation
	 * allows the caches/TLBs to be hit while they are disabled, ensure
	 * that they are invalidated here.
	 */

	/* Make sure we are in Secure Mode */
	ldcopr	r0, SCR
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

	/*
	 * Set sane values for the Non-secure SCTLR as well.
	 * Switch to the Non-secure state for this.
	 */
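	/*
	 * SCTLR is banked between the security states and, in Monitor
	 * mode, CP15 accesses reach the copy selected by SCR.NS. Setting
	 * the NS bit here therefore makes the SCTLR writes below target
	 * the Non-secure copy.
	 */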
	ldr	r0, =(SCTLR_RES1)
	ldcopr	r1, SCR
	orr	r2, r1, #SCR_NS_BIT
	stcopr	r2, SCR
	isb

	ldcopr	r2, SCTLR
	orr	r0, r0, r2
	stcopr	r0, SCTLR
	isb

	stcopr	r1, SCR
	isb

	/*
	 * Set the CPU endianness before doing anything that might involve
	 * memory reads or writes.
	 */
	ldcopr	r0, SCTLR
	bic	r0, r0, #SCTLR_EE_BIT
	stcopr	r0, SCTLR
	isb

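	/*
	 * reset_handler is provided by TF-A's CPU operations framework:
	 * it looks up the cpu_ops for the running core and invokes the
	 * CPU-specific reset handler, if one is registered.
	 */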
	/* Run the CPU Specific Reset handler */
	bl	reset_handler

	/*
	 * Enable the instruction cache and data access
	 * alignment checks
	 */
	ldcopr	r0, SCTLR
	ldr	r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* Set the secure and monitor vector tables */
	ldr	r0, =sp_min_vector_table
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/*
	 * Enable the SIF bit to disable instruction fetches
	 * from Non-secure memory.
	 */
	ldcopr	r0, SCR
	orr	r0, r0, #SCR_SIF_BIT
	stcopr	r0, SCR

	/*
	 * Enable the SError interrupt now that the exception vectors
	 * have been set up.
	 */
	cpsie	a
	isb

	/*
	 * Enable Non-secure access to the Advanced SIMD registers: clear
	 * NSASEDIS and set the CP10/CP11 enable bits in the NSACR.
	 */
	ldcopr	r0, NSACR
	bic	r0, r0, #NSASEDIS_BIT
	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
	stcopr	r0, NSACR
	isb

	/*
	 * Enable access to Advanced SIMD, Floating point and to the Trace
	 * functionality as well.
	 */
	ldcopr	r0, CPACR
	bic	r0, r0, #ASEDIS_BIT
	bic	r0, r0, #TRCDIS_BIT
	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
	stcopr	r0, CPACR
	isb

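	/*
	 * Set FPEXC.EN: floating point and Advanced SIMD instructions
	 * are UNDEFINED while this bit is clear.
	 */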
	vmrs	r0, FPEXC
	orr	r0, r0, #FPEXC_EN_BIT
	vmsr	FPEXC, r0

	/* Detect whether this is a warm or a cold boot */
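	/*
	 * Per the TF-A porting contract, plat_get_my_entrypoint returns
	 * zero on a cold boot and the warm boot entrypoint address
	 * otherwise.
	 */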
	bl	plat_get_my_entrypoint
	cmp	r0, #0
	/* If a warm boot is detected, jump to the warm boot entrypoint */
	bxne	r0

	/* Set up the C runtime stack */
	bl	plat_set_my_stack

	/* Perform platform-specific memory initialization */
	bl	platform_mem_init

	/* Initialize the C runtime environment */

	/*
	 * Invalidate the RW memory used by the SP_MIN image. This includes
	 * the data and NOBITS sections. This is done to safeguard against
	 * possible corruption of this memory by dirty cache lines in a
	 * system cache, left behind by an earlier boot loader stage.
	 */
	ldr	r0, =__RW_START__
	ldr	r1, =__RW_END__
	sub	r1, r1, r0
	bl	inv_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	r0, =__COHERENT_RAM_START__
	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* Perform platform-specific early architectural setup */
	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb
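
	/*
	 * The SCR must be restored before the SCTLR: with SCR.NS now set
	 * for the Non-secure context, the banked SCTLR write below
	 * targets the Non-secure copy.
	 */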
	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
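	/*
	 * smc_get_next_ctx returns, in r0, a pointer to the smc context
	 * that sp_min_exit consumes below.
	 */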
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint

/*
 * The warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint

	/* Set up the C runtime stack */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled, so
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but the DCache is
	 * disabled, so the stack memory used is
	 * guaranteed to be visible immediately after
	 * the MMU is enabled. Enabling the DCache at
	 * the same time as the MMU can lead to
	 * speculatively fetched and possibly stale
	 * stack memory being read from other caches.
	 * This can lead to coherency issues.
	 * --------------------------------------------
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu
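
	/*
	 * (bl32_plat_enable_mmu is a platform-provided hook; the flags
	 * passed in r0, here DISABLE_DCACHE, request that the MMU be
	 * enabled with the data cache left off, as described above.)
	 */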

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * This function restores the registers from the SMC context and returns
 * to the mode specified in the saved SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
	eret
endfunc sp_min_exit