/*
 * (C) Copyright 2014 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#include "mp.h"

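/*
 * lowlevel_init: early SoC setup invoked from the common ARMv8 start code.
 * It programs the SMMU page size, initializes the GIC secure state, sends
 * secondary cores to the spin-table code below and, on the boot core,
 * opens up TZPC/TZASC access before returning to the caller.
 */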
ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16	/* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

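	/*
	 * Only the boot (master) core continues at label 2 below; all
	 * other cores branch to secondary_boot_func and park themselves
	 * in the spin table until they are released.
	 */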
	branch_if_master x0, x1, 2f

	ldr	x0, =secondary_boot_func
	blr	x0
2:

#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	mov	w0, #1 << 3		/* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/* Configure the TZASCs so that:
	 * a. only Region 0 is used, with global secure write/read enabled
	 * b. Region 0 allows NSAID write/read for all masters
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 * placeholders.
	 */
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	x0, [x1]		/* TZASC-0 Gate Keeper Register */
	orr	x0, x0, #1 << 0		/* Set open_request for Filter 0 */
	str	x0, [x1]

	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	x0, [x1]		/* TZASC-1 Gate Keeper Register */
	orr	x0, x0, #1 << 0		/* Set open_request for Filter 0 */
	str	x0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	x0, [x1]		/* TZASC-0 Region-0 Attributes Register */
	orr	x0, x0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	x0, x0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	x0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	x0, [x1]		/* TZASC-1 Region-0 Attributes Register */
	orr	x0, x0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	x0, x0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	x0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* TZASC-0 Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* TZASC-1 Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	isb
	dsb	sy
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

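/*
 * The helpers below drive the P-state request/status registers of the
 * interconnect's HN-F (home) nodes, which hold the L3 cache.  As used
 * here, HNF0_PSTATE_REQ/STATUS live at 0x04200010/0x04200018 and the
 * eight HN-F nodes are spaced 0x10000 apart.
 */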
hnf_pstate_poll:
	/* x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 */
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6 */
	mov	x1, x0
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* request the new pstate on each HN-F node */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret

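/*
 * Flush the L3 cache by requesting every HN-F node to enter the SFONLY
 * p-state and then the FAM p-state, polling for each transition to
 * complete.  The request values (0x1, 0x3) and the expected status
 * values (0x4, 0xc) match the HNFPSTAT_* encodings used below.
 */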
ENTRY(__asm_flush_l3_cache)
	/*
	 * Return status in x0
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
	mov	x29, lr
	mov	x8, #0

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_cache)

	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
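	/*
	 * Each spin-table element is 64 bytes and, as used by the code
	 * below, is laid out as:
	 *   0x00: entry address, written by software to release the core
	 *   0x08: STATUS, set to 1 once the core has parked itself
	 *   0x10: MPIDR[15:0] of the core that owns the element
	 */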

	.align 2
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = (AFF1_CLUSTERID << 2) | AFF0_CPUID
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * Offset of the spin table element for this core from the start of
	 * the spin table (each element is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this CPU's spin-table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0		/* set with real frequency */
	str	x9, [x11, #16]		/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]		/* STATUS */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICC_BASE
	gic_wait_for_interrupt_m x0, w1
#endif

	bl	secondary_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	secondary_switch_to_el1
#endif

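	/*
	 * Park here until software writes a non-zero entry address into
	 * this core's spin-table element, then byte-swap that address if
	 * the core is running big-endian and branch to it.
	 */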
slave_cpu:
	wfe
	ldr	x0, [x11]
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	br	x0			/* branch to the given address */
ENDPROC(secondary_boot_func)

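/*
 * Helpers to drop a secondary core from EL3 to EL2 and, when
 * CONFIG_ARMV8_SWITCH_TO_EL1 is set, on to EL1.  A core that is not at
 * the expected starting exception level simply returns.
 */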
ENTRY(secondary_switch_to_el2)
	switch_el x0, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x0
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
	switch_el x0, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x0, x1
ENDPROC(secondary_switch_to_el1)

	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region).
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code