/*
 * code for switching cores into non-secure state and into HYP mode
 *
 * Copyright (c) 2013 Andre Przywara <andre.przywara@linaro.org>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/armv7.h>
#include <asm/proc-armv/ptrace.h>

.arch_extension sec
.arch_extension virt

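/*
 * Everything below runs in secure state. It is collected in the dedicated
 * ._secure.text section so the linker (and, on boards that need it, the
 * relocation code) can place it separately from the normal U-Boot text.
 */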
	.pushsection ._secure.text, "ax"

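/*
 * The monitor vector table must be 32-byte aligned (the low five bits of
 * MVBAR are reserved), hence the .align 5 below.
 */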
	.align	5
/* the vector table for secure state and HYP mode */
_monitor_vectors:
	.word 0				/* reset */
	.word 0				/* undef */
	adr pc, _secure_monitor		/* smc */
	.word 0				/* pabt */
	.word 0				/* dabt */
	.word 0				/* reserved */
	.word 0				/* irq */
	.word 0				/* fiq */

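/*
 * is_cpu_virt_capable checks ID_PFR1 for the Virtualization Extensions and
 * leaves the condition flags set: EQ means the extensions are implemented,
 * NE means they are not. Callers rely on these flags staying intact.
 */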
.macro is_cpu_virt_capable tmp
	mrc	p15, 0, \tmp, c0, c1, 1		@ read ID_PFR1
	and	\tmp, \tmp, #CPUID_ARM_VIRT_MASK	@ mask virtualization bits
	cmp	\tmp, #(1 << CPUID_ARM_VIRT_SHIFT)
.endm

/*
 * secure monitor handler
 * U-Boot calls this the "software interrupt" in start.S.
 * It is executed on an "smc" instruction; we use "smc #0" to switch
 * to non-secure state.
 * r0, r1, r2: passed to the callee
 * ip: target PC
 */
_secure_monitor:
	mrc	p15, 0, r5, c1, c1, 0		@ read SCR
	bic	r5, r5, #0x4e			@ clear IRQ, FIQ, EA, nET bits
	orr	r5, r5, #0x31			@ enable NS, AW, FW bits

	mov	r6, #SVC_MODE			@ default mode is SVC
	is_cpu_virt_capable r4
#ifdef CONFIG_ARMV7_VIRT
	orreq	r5, r5, #0x100			@ allow HVC instruction
	moveq	r6, #HYP_MODE			@ Enter the kernel as HYP
#endif

	mcr	p15, 0, r5, c1, c1, 0		@ write SCR (with NS bit set)
	isb

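	@ CNTVOFF only exists together with the virtualization extensions;
	@ skip the reset below if they are absent (flags are still valid
	@ from is_cpu_virt_capable above).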
	bne	1f

	@ Reset CNTVOFF to 0 before leaving monitor mode
	mrc	p15, 0, r4, c0, c1, 1		@ read ID_PFR1
	ands	r4, r4, #CPUID_ARM_GENTIMER_MASK	@ test arch timer bits
	movne	r4, #0
	mcrrne	p15, 4, r4, r4, c14		@ Reset CNTVOFF to zero
1:
	mov	lr, ip
	mov	ip, #(F_BIT | I_BIT | A_BIT)	@ Set A, I and F
	tst	lr, #1				@ Check for Thumb PC
	orrne	ip, ip, #T_BIT			@ Set T if Thumb
	orr	ip, ip, r6			@ Slot target mode in
	msr	spsr_cxfs, ip			@ Set full SPSR
	movs	pc, lr				@ ERET to non-secure

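/*
 * _do_nonsec_entry shuffles its arguments into the registers the monitor
 * expects (target PC in ip, the callee's r0-r2 in r0-r2) and traps into the
 * secure monitor above. From C it is used roughly like this (illustrative
 * prototype; the real declaration lives in the U-Boot headers):
 *	void _do_nonsec_entry(void *target_pc, unsigned long r0,
 *			      unsigned long r1, unsigned long r2);
 */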
ENTRY(_do_nonsec_entry)
	mov	ip, r0
	mov	r0, r1
	mov	r1, r2
	mov	r2, r3
	smc	#0
ENDPROC(_do_nonsec_entry)

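/*
 * Helper macros to find the GIC: the distributor and CPU interface live at
 * fixed offsets from PERIPHBASE, which is read from CBAR unless the board
 * provides CONFIG_ARM_GIC_BASE_ADDRESS explicitly.
 */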
.macro get_cbar_addr addr
#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
	ldr	\addr, =CONFIG_ARM_GIC_BASE_ADDRESS
#else
	mrc	p15, 4, \addr, c15, c0, 0	@ read CBAR
	bfc	\addr, #0, #15			@ clear reserved bits
#endif
.endm

.macro get_gicd_addr addr
	get_cbar_addr	\addr
	add	\addr, \addr, #GIC_DIST_OFFSET	@ GIC dist i/f offset
.endm

.macro get_gicc_addr addr, tmp
	get_cbar_addr	\addr
	is_cpu_virt_capable \tmp
	movne	\tmp, #GIC_CPU_OFFSET_A9	@ GIC CPU offset for A9
	moveq	\tmp, #GIC_CPU_OFFSET_A15	@ GIC CPU offset for A15/A7
	add	\addr, \addr, \tmp
.endm

#ifndef CONFIG_ARMV7_PSCI
/*
 * Secondary CPUs start here and call the code for the core-specific parts
 * of the non-secure and HYP mode transition. The GIC distributor-specific
 * code has already been executed by a C function beforehand.
 * Then they go back to wfi and wait to be woken up by the kernel again.
 */
ENTRY(_smp_pen)
	cpsid	i
	cpsid	f

	bl	_nonsec_init

	adr	r0, _smp_pen			@ do not use this address again
	b	smp_waitloop			@ wait for IPIs, board specific
ENDPROC(_smp_pen)
#endif

/*
 * Switch a core to non-secure state.
 *
 *  1. initialize the GIC per-core interface
 *  2. allow coprocessor access in non-secure modes
 *
 * Called from smp_pen by secondary cores and directly by the BSP.
 * Do not assume that the stack is available and only use registers
 * r0-r3 and r12.
 *
 * PERIPHBASE is used to get the GIC address. It could be 40 bits long,
 * but we check this in C before calling this function.
 */
ENTRY(_nonsec_init)
	get_gicd_addr	r3

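	/*
	 * Mark all banked (private) interrupts of this core as group 1
	 * (non-secure) by setting every bit in its GICD_IGROUPR0.
	 */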
	mvn	r1, #0				@ all bits to 1
	str	r1, [r3, #GICD_IGROUPRn]	@ allow private interrupts

	get_gicc_addr	r3, r1

	mov	r1, #3				@ Enable both groups
	str	r1, [r3, #GICC_CTLR]		@ and clear all other bits
	mov	r1, #0xff
	str	r1, [r3, #GICC_PMR]		@ set priority mask register

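	/*
	 * Open up coprocessor access for the non-secure world: bits [13:0]
	 * grant access to CP0-CP13, and bit 18 is the NS_SMP bit that lets
	 * non-secure software write ACTLR.SMP on Cortex-A cores.
	 */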
	mrc	p15, 0, r0, c1, c1, 2
	movw	r1, #0x3fff
	movt	r1, #0x0004
	orr	r0, r0, r1
	mcr	p15, 0, r0, c1, c1, 2		@ NSACR = all copros to non-sec

/* The CNTFRQ register of the generic timer needs to be
 * programmed in secure state. Some primary bootloaders / firmware
 * omit this, so if the frequency is provided in the configuration,
 * we do this here instead.
 * But first check if we have the generic timer.
 */
#ifdef CONFIG_SYS_CLK_FREQ
	mrc	p15, 0, r0, c0, c1, 1		@ read ID_PFR1
	and	r0, r0, #CPUID_ARM_GENTIMER_MASK	@ mask arch timer bits
	cmp	r0, #(1 << CPUID_ARM_GENTIMER_SHIFT)
	ldreq	r1, =CONFIG_SYS_CLK_FREQ
	mcreq	p15, 0, r1, c14, c0, 0		@ write CNTFRQ
#endif

	adr	r1, _monitor_vectors
	mcr	p15, 0, r1, c12, c0, 1		@ set MVBAR to secure vectors
	isb

	mov	r0, r3				@ return GICC address
	bx	lr
ENDPROC(_nonsec_init)

#ifdef CONFIG_SMP_PEN_ADDR
/* void __weak smp_waitloop(unsigned previous_address); */
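/*
 * Spin in wfi until the location at CONFIG_SMP_PEN_ADDR holds an address
 * different from the one this core was started from (passed in r0), then
 * enter the non-secure world at that address via _do_nonsec_entry.
 */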
ENTRY(smp_waitloop)
	wfi
	ldr	r1, =CONFIG_SMP_PEN_ADDR	@ load start address
	ldr	r1, [r1]
	cmp	r0, r1				@ make sure we don't execute this code
	beq	smp_waitloop			@ again (due to a spurious wakeup)
	mov	r0, r1
	b	_do_nonsec_entry
ENDPROC(smp_waitloop)
.weak smp_waitloop
#endif

	.popsection