/*
 * code for switching cores into non-secure state
 *
 * Copyright (c) 2013 Andre Przywara <andre.przywara@linaro.org>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/armv7.h>

.arch_extension sec
Andre Przywara | e776fd2 | 2013-09-19 18:06:40 +0200 | [diff] [blame] | 31 | |
/*
 * The vector table for secure (monitor) state, installed into MVBAR by
 * _nonsec_init. Only the SMC entry at offset 0x08 is ever taken here;
 * all other slots are zero words, since no other secure exception can
 * occur between installing this table and completing the switch.
 */
_monitor_vectors:
	.word 0				/* reset */
	.word 0				/* undefined instruction */
	adr pc, _secure_monitor		/* secure monitor call (SMC) */
	.word 0				/* prefetch abort */
	.word 0				/* data abort */
	.word 0				/* reserved */
	.word 0				/* IRQ */
	.word 0				/* FIQ */
/*
 * secure monitor handler
 * U-boot calls this "software interrupt" in start.S
 * This is executed on a "smc" instruction, we use a "smc #0" to switch
 * to non-secure state.
 * We use only r0 and r1 here, due to constraints in the caller
 * (_nonsec_init keeps its live values in r2, r3 and ip across the smc).
 * Returns with "movs pc, lr", which restores CPSR from SPSR, so the
 * caller resumes in non-secure SVC mode.
 */
	.align	5			/* MVBAR requires 32-byte alignment */
_secure_monitor:
	mrc	p15, 0, r1, c1, c1, 0	@ read SCR
	bic	r1, r1, #0x4e		@ clear IRQ, FIQ, EA, nET bits
	orr	r1, r1, #0x31		@ enable NS, AW, FW bits

	mcr	p15, 0, r1, c1, c1, 0	@ write SCR (with NS bit set)

	movs	pc, lr			@ return to non-secure SVC
Andre Przywara | dd5e8da | 2013-09-19 18:06:41 +0200 | [diff] [blame] | 59 | |
/*
 * Secondary CPUs start here and call the code for the core specific parts
 * of the non-secure and HYP mode transition. The GIC distributor specific
 * code has already been executed by a C function before.
 * Then they go back to wfi and wait to be woken up by the kernel again.
 */
ENTRY(_smp_pen)
	mrs	r0, cpsr
	orr	r0, r0, #0xc0		@ set I and F bits
	msr	cpsr, r0		@ disable interrupts (IRQ and FIQ)
	ldr	r1, =_start
	mcr	p15, 0, r1, c12, c0, 0	@ set VBAR to U-Boot's vectors

	bl	_nonsec_init		@ returns GIC CPU interface addr in r0

	/* Drop the wakeup IPI that brought us here out of the GIC. */
	ldr	r1, [r0, #GICC_IAR]	@ acknowledge IPI
	str	r1, [r0, #GICC_EOIR]	@ signal end of interrupt

	adr	r0, _smp_pen		@ do not use this address again
	b	smp_waitloop		@ wait for IPIs, board specific
ENDPROC(_smp_pen)
| 81 | |
/*
 * Switch a core to non-secure state.
 *
 *  1. initialize the GIC per-core interface
 *  2. allow coprocessor access in non-secure modes
 *  3. switch the cpu mode (by calling "smc #0")
 *
 * Called from smp_pen by secondary cores and directly by the BSP.
 * Do not assume that the stack is available and only use registers
 * r0-r3 and r12.
 *
 * PERIPHBASE is used to get the GIC address. This could be 40 bits long,
 * though, but we check this in C before calling this function.
 *
 * Out:  r0 = address of the GIC CPU interface (used by _smp_pen)
 */
ENTRY(_nonsec_init)
#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
	ldr	r2, =CONFIG_ARM_GIC_BASE_ADDRESS
#else
	mrc	p15, 4, r2, c15, c0, 0	@ read CBAR (PERIPHBASE)
	bfc	r2, #0, #15		@ clear reserved bits [14:0]
#endif
	add	r3, r2, #GIC_DIST_OFFSET @ GIC dist i/f offset
	mvn	r1, #0			@ all bits to 1
	str	r1, [r3, #GICD_IGROUPRn] @ allow private interrupts
					@ (mark SGIs/PPIs as Group 1 = non-secure)

	/*
	 * The GIC CPU interface offset from PERIPHBASE differs between
	 * core types; select it by the primary part number from MIDR.
	 */
	mrc	p15, 0, r0, c0, c0, 0	@ read MIDR
	ldr	r1, =MIDR_PRIMARY_PART_MASK
	and	r0, r0, r1		@ mask out variant and revision

	ldr	r1, =MIDR_CORTEX_A7_R0P0 & MIDR_PRIMARY_PART_MASK
	cmp	r0, r1			@ check for Cortex-A7

	ldr	r1, =MIDR_CORTEX_A15_R0P0 & MIDR_PRIMARY_PART_MASK
	cmpne	r0, r1			@ check for Cortex-A15

	movne	r1, #GIC_CPU_OFFSET_A9	@ GIC CPU offset for A9
	moveq	r1, #GIC_CPU_OFFSET_A15	@ GIC CPU offset for A15/A7
	add	r3, r2, r1		@ r3 = GIC CPU i/f addr

	mov	r1, #1			@ set GICC_CTLR[enable]
	str	r1, [r3, #GICC_CTLR]	@ and clear all other bits
	mov	r1, #0xff
	str	r1, [r3, #GICC_PMR]	@ set priority mask register
					@ (lowest priority: let all irqs through)

	movw	r1, #0x3fff
	movt	r1, #0x0006
	mcr	p15, 0, r1, c1, c1, 2	@ NSACR = all copros to non-sec

	/* The CNTFRQ register of the generic timer needs to be
	 * programmed in secure state. Some primary bootloaders / firmware
	 * omit this, so if the frequency is provided in the configuration,
	 * we do this here instead.
	 * But first check if we have the generic timer.
	 */
#ifdef CONFIG_SYS_CLK_FREQ
	mrc	p15, 0, r0, c0, c1, 1	@ read ID_PFR1
	and	r0, r0, #CPUID_ARM_GENTIMER_MASK @ mask arch timer bits
	cmp	r0, #(1 << CPUID_ARM_GENTIMER_SHIFT)
	ldreq	r1, =CONFIG_SYS_CLK_FREQ
	mcreq	p15, 0, r1, c14, c0, 0	@ write CNTFRQ
#endif

	adr	r1, _monitor_vectors
	mcr	p15, 0, r1, c12, c0, 1	@ set MVBAR to secure vectors

	/* VBAR is banked between security states; preserve our copy. */
	mrc	p15, 0, ip, c12, c0, 0	@ save secure copy of VBAR

	isb				@ ensure cp15 writes land before smc
	smc	#0			@ call into MONITOR mode
					@ (_secure_monitor sets SCR.NS,
					@  returns here in non-secure SVC)

	mcr	p15, 0, ip, c12, c0, 0	@ write non-secure copy of VBAR

	mov	r1, #1
	str	r1, [r3, #GICC_CTLR]	@ enable non-secure CPU i/f
	add	r2, r2, #GIC_DIST_OFFSET
	str	r1, [r2, #GICD_CTLR]	@ allow private interrupts

	mov	r0, r3			@ return GICC address

	bx	lr
ENDPROC(_nonsec_init)
Andre Przywara | dbbe196 | 2013-09-19 18:06:44 +0200 | [diff] [blame^] | 163 | |
#ifdef CONFIG_SMP_PEN_ADDR
/*
 * Default (weak, board-overridable) pen: sleep in wfi until the kernel
 * writes its secondary entry point to CONFIG_SMP_PEN_ADDR, then jump
 * there. r0 holds the previous pen address (set by _smp_pen) so that a
 * spurious wakeup with the old address still in the pen loops again.
 *
 * void __weak smp_waitloop(unsigned previous_address);
 */
ENTRY(smp_waitloop)
	wfi
	ldr	r1, =CONFIG_SMP_PEN_ADDR	@ load start address
	ldr	r1, [r1]
	cmp	r0, r1			@ make sure we don't execute this code
	beq	smp_waitloop		@ again (due to a spurious wakeup)
	mov	pc, r1			@ jump to the kernel's entry point
ENDPROC(smp_waitloop)
	.weak	smp_waitloop
#endif