/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2022 MediaTek Inc. All rights reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include <asm/cm.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/mipsmtregs.h>
#include "launch.h"
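/*
 * cache_loop: apply cache operation \op to each \line_sz-byte line in
 * the address range [\curr, \end). \curr is clobbered.
 */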
	.macro	cache_loop	curr, end, line_sz, op
10:	cache		\op, 0(\curr)
	PTR_ADDU	\curr, \curr, \line_sz
	bne		\curr, \end, 10b
	.endm

	.set	mt

/*
 * Join the coherent domain
 * a0 = number of cores
 */
LEAF(join_coherent_domain)
	/*
	 * Enable coherence and allow interventions from all other cores.
	 * (Write access enabled via GCR_ACCESS by core 0.)
	 */
	li	t1, 1
	sll	t1, a0
	addiu	t1, -1
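	/* t1 = (1 << a0) - 1: coherence mask with one bit set per core */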

	li	t0, KSEG1ADDR(CONFIG_MIPS_CM_BASE)
	sw	t1, GCR_Cx_COHERENCE(t0)
	ehb

	move	t2, zero

_next_coherent_core:
	sll	t1, t2, GCR_Cx_OTHER_CORENUM_SHIFT
	sw	t1, GCR_Cx_OTHER(t0)

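	/*
	 * Writing a core number to GCR_Cx_OTHER redirects the core-other
	 * register block (GCR_CO_*) to that core's local registers, so the
	 * load below reads the selected core's own coherence control
	 * register. Spin until it reports a non-zero mask, i.e. that core
	 * has joined the coherent domain.
	 */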
_busy_wait_coherent_core:
	lw	t1, GCR_CO_COHERENCE(t0)
	beqz	t1, _busy_wait_coherent_core

	addiu	t2, 1
	bne	t2, a0, _next_coherent_core

	jr	ra
	END(join_coherent_domain)

/*
 * Entry point for all VPEs other than VPE0.
 */
LEAF(launch_vpe_entry)
	mfc0	t0, CP0_EBASE
	and	t0, t0, MIPS_EBASE_CPUNUM
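	/* EBase.CPUNum uniquely identifies each VPE in the system */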

	/* per-VPE cpulaunch_t */
	li	a0, KSEG0ADDR(CPULAUNCH)
	sll	t1, t0, LOG2CPULAUNCH
	addu	a0, t1
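	/* a0 = this VPE's cpulaunch_t; entries are (1 << LOG2CPULAUNCH) bytes apart */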

	/* Set CPU online flag */
	li	t0, LAUNCH_FREADY
	sw	t0, LAUNCH_FLAGS(a0)

	/* Unmask the timer interrupt (IP7), but keep interrupts globally disabled */
	mfc0	t0, CP0_STATUS
	ori	t0, STATUSF_IP7
	mtc0	t0, CP0_STATUS

	/* VPEs executing in wait code do not need a stack */
	li	t9, KSEG0ADDR(LAUNCH_WAITCODE)
	jr	t9
	END(launch_vpe_entry)

/*
 * This function is not executed in place. It is copied into memory,
 * and VPEs other than VPE0 are started executing that in-memory copy.
 */
LEAF(launch_wait_code)
	.globl launch_wait_code_start
launch_wait_code_start:

	move	t0, a0
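	/* t0 = this VPE's cpulaunch_t, passed in a0 by launch_vpe_entry */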

start_poll:
	/* Poll CPU go flag */
	mtc0	zero, CP0_COUNT
	li	t1, LAUNCHPERIOD
	mtc0	t1, CP0_COMPARE
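	/*
	 * Arm the CP0 timer: Count restarts at zero and Compare is set
	 * LAUNCHPERIOD cycles ahead; the loop below polls Count until
	 * that period elapses between flag checks.
	 */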

time_wait:
	/* Busy-wait until Count reaches LAUNCHPERIOD */
	mfc0	t2, CP0_COUNT
	subu	t2, t1
	bltz	t2, time_wait

	/* Check the launch flag */
	lw	t3, LAUNCH_FLAGS(t0)
	and	t3, LAUNCH_FGO
	beqz	t3, start_poll

	/*
	 * Re-mask the timer interrupt and clear any pending Count/Compare
	 * interrupt, to give naive clients a clean timer to start with.
	 */
	mfc0	t1, CP0_STATUS
	ins	t1, zero, STATUSB_IP7, 1
	mtc0	t1, CP0_STATUS

	mfc0	t1, CP0_COUNT
	subu	t1, 1
	mtc0	t1, CP0_COMPARE

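	/*
	 * Hand off to the launched image: entry point in t9, gp/sp/a0
	 * taken from the launch structure, a1-a3 zeroed. LAUNCH_FGONE is
	 * set so the launching CPU can observe that this VPE has left
	 * the wait code.
	 */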
	/* Jump to kernel */
	lw	t9, LAUNCH_PC(t0)
	lw	gp, LAUNCH_GP(t0)
	lw	sp, LAUNCH_SP(t0)
	lw	a0, LAUNCH_A0(t0)
	move	a1, zero
	move	a2, zero
	move	a3, zero
	ori	t3, LAUNCH_FGONE
	sw	t3, LAUNCH_FLAGS(t0)

	jr	t9

	.globl launch_wait_code_end
launch_wait_code_end:
	END(launch_wait_code)

/*
 * Entry point for Core1.
 */
LEAF(launch_core_entry)
	/* Disable caches */
	bal	mips_cache_disable

	/* Initialize L1 cache only */
	li	a0, CONFIG_SYS_ICACHE_SIZE
	li	a1, CONFIG_SYS_ICACHE_LINE_SIZE
	li	a2, CONFIG_SYS_DCACHE_SIZE
	li	a3, CONFIG_SYS_DCACHE_LINE_SIZE

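	/* Zero TagLo so INDEX_STORE_TAG writes clean, invalid tags */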
	mtc0	zero, CP0_TAGLO
	mtc0	zero, CP0_TAGLO, 2
	ehb

	/*
	 * Initialize the I-cache first,
	 */
	li	t0, KSEG0
	addu	t1, t0, a0
	/* clear tag to invalidate */
	cache_loop	t0, t1, a1, INDEX_STORE_TAG_I
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* fill once, so data field parity is correct */
	PTR_LI	t0, KSEG0
	cache_loop	t0, t1, a1, FILL
	/* invalidate again - prudent but not strictly necessary */
	PTR_LI	t0, KSEG0
	cache_loop	t0, t1, a1, INDEX_STORE_TAG_I
#endif

	/*
	 * then initialize D-cache.
	 */
	PTR_LI	t0, KSEG0
	PTR_ADDU t1, t0, a2
	/* clear all tags */
	cache_loop	t0, t1, a3, INDEX_STORE_TAG_D
#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
	/* load from each line (in cached space) */
	PTR_LI	t0, KSEG0
2:	LONG_L	zero, 0(t0)
	PTR_ADDU t0, a3
	bne	t0, t1, 2b
	/* clear all tags */
	PTR_LI	t0, KSEG0
	cache_loop	t0, t1, a3, INDEX_STORE_TAG_D
#endif

	/* Switch the kseg0 CCA (Config.K0) to cacheable coherent */
	mfc0	t0, CP0_CONFIG
	li	t1, CONF_CM_CACHABLE_COW
	ins	t0, t1, 0, 3
	mtc0	t0, CP0_CONFIG

	/* Join the coherent domain (this SoC has two cores) */
	li	a0, 2
	bal	join_coherent_domain

	/* Bootup Core1/VPE1 */
	bal	boot_vpe1

	b	launch_vpe_entry
	END(launch_core_entry)

/*
 * Bootup VPE1.
 * This subroutine must be executed from VPE0 with VPECONF0[MVP] already set.
 */
LEAF(boot_vpe1)
	mfc0	t0, CP0_MVPCONF0

	/* a0 = number of TCs - 1 */
	ext	a0, t0, MVPCONF0_PTC_SHIFT, 8
	beqz	a0, _vpe1_init_done

	/* a1 = number of VPEs - 1 */
	ext	a1, t0, MVPCONF0_PVPE_SHIFT, 4
	beqz	a1, _vpe1_init_done

	/* a2 = current TC No. */
	move	a2, zero

	/* Enter VPE Configuration State */
	mfc0	t0, CP0_MVPCONTROL
	or	t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL
	ehb
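	/*
	 * While MVPControl.VPC is set, the configuration registers of the
	 * TC selected by VPEControl.TargTC can be written with mttc0.
	 */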

_next_tc:
	/* Set the TC number to be used on MTTR and MFTR instructions */
	mfc0	t0, CP0_VPECONTROL
	ins	t0, a2, 0, 8
	mtc0	t0, CP0_VPECONTROL
	ehb

	/* TC0 is already bound */
	beqz	a2, _next_vpe

	/* Halt current TC */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT
	ehb
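	/* The TC is now halted, so its binding and state can be safely modified */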

	/* If there is a spare TC, bind it to the last VPE (VPE[a1]) */
	slt	t1, a1, a2
	bnez	t1, _vpe_bind_tc
	move	t1, a1

	/* Make this TC the exclusive TC (XTC) of the target VPE */
	mftc0	t0, CP0_VPECONF0
	ins	t0, a2, VPECONF0_XTC_SHIFT, 8
	mttc0	t0, CP0_VPECONF0

	move	t1, a2
_vpe_bind_tc:
	/* Bind TC to a VPE */
	mftc0	t0, CP0_TCBIND
	ins	t0, t1, TCBIND_CURVPE_SHIFT, 4
	mttc0	t0, CP0_TCBIND

	/*
	 * Set up CP0_TCSTATUS register:
	 * Disable Coprocessor Usable bits
	 * Disable MDMX/DSP ASE
	 * Clear Dirty TC
	 * not dynamically allocatable
	 * not allocated
	 * Kernel mode
	 * interrupt exempt
	 * ASID 0
	 */
	li	t0, TCSTATUS_IXMT
	mttc0	t0, CP0_TCSTATUS

_next_vpe:
	slt	t1, a1, a2
	bnez	t1, _done_vpe		# No more VPEs

	/* Disable TC multi-threading */
	mftc0	t0, CP0_VPECONTROL
	ins	t0, zero, VPECONTROL_TE_SHIFT, 1
	mttc0	t0, CP0_VPECONTROL

	/* Skip the following configuration for TC0 */
	beqz	a2, _done_vpe

	/* Deactivate VPE, set Master VPE */
	mftc0	t0, CP0_VPECONF0
	ins	t0, zero, VPECONF0_VPA_SHIFT, 1
	or	t0, VPECONF0_MVP
	mttc0	t0, CP0_VPECONF0

	mfc0	t0, CP0_STATUS
	mttc0	t0, CP0_STATUS

	mttc0	zero, CP0_EPC
	mttc0	zero, CP0_CAUSE

	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG
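	/*
	 * VPE1 inherits VPE0's Status and Config, with EPC and Cause
	 * cleared, so both VPEs start from an identical CP0 state.
	 */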

	/*
	 * VPE1 of each core can execute cached, as its L1 I-cache has
	 * already been initialized, and the L2 cache has either been
	 * initialized or "disabled" via CCA override.
	 */
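	/* TCRestart sets the PC from which this TC starts executing */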
	PTR_LA	t0, _start
	mttc0	t0, CP0_TCRESTART

	/* Clear Interrupt Exempt (IXMT), set the Activated (A) bit */
	mftc0	t0, CP0_TCSTATUS
	ins	t0, zero, TCSTATUS_IXMT_SHIFT, 1
	ori	t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Resume TC */
	mttc0	zero, CP0_TCHALT
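	/* The TC begins fetching from TCRestart once its VPE is activated */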

	/* Activate VPE */
	mftc0	t0, CP0_VPECONF0
	ori	t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

_done_vpe:
	addu	a2, 1
	sltu	t0, a0, a2
	beqz	t0, _next_tc

	mfc0	t0, CP0_MVPCONTROL
	/* Enable all activated VPEs to execute */
	ori	t0, MVPCONTROL_EVP
	/* Exit VPE Configuration State */
	ins	t0, zero, MVPCONTROL_VPC_SHIFT, 1
	mtc0	t0, CP0_MVPCONTROL
	ehb

_vpe1_init_done:
	jr	ra
	END(boot_vpe1)