/*
* Copyright (C) 2020 Intel Corporation. All rights reserved
*
* SPDX-License-Identifier: GPL-2.0
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
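
/*
 * Early per-CPU init, entered from the ARMv8 start code. No stack is
 * assumed to be usable this early, so the return address is parked in
 * x29 for the duration of the routine instead of being pushed.
 */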
ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#if defined(CONFIG_XPL_BUILD) && defined(CONFIG_SPL_ATF)
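	/*
	 * When SPL hands off to ATF, CPU_RELEASE_ADDR serves as a spin
	 * table: each CPU polls it and branches to whatever non-zero
	 * entry point has been published there. Until one appears, the
	 * slaves keep spinning while the master falls through to
	 * continue the boot.
	 */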
wait_for_atf:
	ldr	x4, =CPU_RELEASE_ADDR
	ldr	x5, [x4]
	cbz	x5, slave_wait_atf
	br	x5
slave_wait_atf:
	branch_if_slave x0, wait_for_atf
#else
	branch_if_slave x0, 1f
#endif
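	/*
	 * Only the master gets here (slaves are parked above or have
	 * skipped to 1:), so the secure distributor init runs exactly
	 * once.
	 */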
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
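	/*
	 * Per-CPU GIC setup: each CPU programs its own redistributor
	 * (GICv3) or its banked distributor registers and CPU interface
	 * (GICv2).
	 */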
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, 2f

	/*
	 * Slaves should wait for the master to clear the spin table.
	 * This sync prevents slaves from observing a stale spin-table
	 * value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
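	/*
	 * The GICv2 wait helper takes the CPU interface base in x0; the
	 * GICv3 variant appears to poll the ICC system registers and
	 * needs no argument.
	 */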
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
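	/*
	 * armv8_switch_to_el2 takes the continuation address in x4 and
	 * the target execution state in x5; ES_TO_AARCH64 keeps the
	 * slaves in AArch64 across the drop to EL2.
	 */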
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif
#endif /* CONFIG_ARMV8_MULTIENTRY */
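
	/*
	 * Common exit: restore the saved return address and go back to
	 * the caller in the start code (for slaves, only once released).
	 */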
2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)