/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <delay_timer.h>
#include <desc_image_load.h>
#include <dw_ufs.h>
#include <errno.h>
#include <generic_delay_timer.h>
#include <hi3660.h>
#include <mmio.h>
#ifdef SPD_opteed
#include <optee_utils.h>
#endif
#include <platform_def.h>
#include <string.h>
#include <ufs.h>

#include "hikey960_def.h"
#include "hikey960_private.h"

/*
 * The next 2 constants identify the extents of the code & RO data region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
 */
#define BL2_RO_BASE	(unsigned long)(&__RO_START__)
#define BL2_RO_LIMIT	(unsigned long)(&__RO_END__)

#define BL2_RW_BASE	(BL2_RO_LIMIT)

/*
 * The next 2 constants identify the extents of the coherent memory region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
 * page-aligned addresses.
 */
#define BL2_COHERENT_RAM_BASE	(unsigned long)(&__COHERENT_RAM_START__)
#define BL2_COHERENT_RAM_LIMIT	(unsigned long)(&__COHERENT_RAM_END__)

static meminfo_t bl2_el3_tzram_layout;
extern int load_lpm3(void);

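/*
 * Boot mode flags: the low bit (BOOT_MODE_MASK) distinguishes a recovery
 * boot from a normal boot.
 */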
enum {
	BOOT_MODE_RECOVERY = 0,
	BOOT_MODE_NORMAL,
	BOOT_MODE_MASK = 1,
};

/*******************************************************************************
 * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
 * Return 0 on success, -1 otherwise.
 ******************************************************************************/
int plat_hikey960_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
	int i;
	int *buf;

	assert(scp_bl2_image_info->image_size < SCP_BL2_SIZE);

	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");

	INFO("BL2: SCP_BL2: 0x%lx@0x%x\n",
	     scp_bl2_image_info->image_base,
	     scp_bl2_image_info->image_size);

	buf = (int *)scp_bl2_image_info->image_base;

	INFO("BL2: SCP_BL2 HEAD:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
		     buf[i], buf[i+1], buf[i+2], buf[i+3]);

	buf = (int *)(scp_bl2_image_info->image_base +
		      scp_bl2_image_info->image_size - 256);

	INFO("BL2: SCP_BL2 TAIL:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
		     buf[i], buf[i+1], buf[i+2], buf[i+3]);

	INFO("BL2: SCP_BL2 transferred to SCP\n");

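	/*
	 * load_lpm3(), implemented elsewhere in the platform port, is what
	 * actually pushes the image to the LPM3 (low-power management) core.
	 */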
	load_lpm3();
	(void)buf;

	return 0;
}

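/*
 * Reset sequence for the UFS subsystem: re-source and re-enable the reference
 * clock, cycle the controller through a full reset while the PSW MTCMOS power
 * switch and PHY are brought up, reprogram the clock dividers, and finally
 * release the device and controller resets. The register and bit names follow
 * the Hi3660 CRG and UFS sysctrl blocks.
 */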
static void hikey960_ufs_reset(void)
{
	unsigned int data, mask;

	mmio_write_32(CRG_PERDIS7_REG, 1 << 14);
	mmio_clrbits_32(UFS_SYS_PHY_CLK_CTRL_REG, BIT_SYSCTRL_REF_CLOCK_EN);
	do {
		data = mmio_read_32(UFS_SYS_PHY_CLK_CTRL_REG);
	} while (data & BIT_SYSCTRL_REF_CLOCK_EN);
	/* use abb clk */
	mmio_clrbits_32(UFS_SYS_UFS_SYSCTRL_REG, BIT_UFS_REFCLK_SRC_SE1);
	mmio_clrbits_32(UFS_SYS_PHY_ISO_EN_REG, BIT_UFS_REFCLK_ISO_EN);
	mmio_write_32(PCTRL_PERI_CTRL3_REG, (1 << 0) | (1 << 16));
	mdelay(1);
	mmio_write_32(CRG_PEREN7_REG, 1 << 14);
	mmio_setbits_32(UFS_SYS_PHY_CLK_CTRL_REG, BIT_SYSCTRL_REF_CLOCK_EN);

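	/*
	 * Assert the UFS peripheral reset, enable the PSW MTCMOS power switch
	 * and flag power-ready, keeping the UFS device reset asserted while
	 * the clock dividers and PHY are reprogrammed below.
	 */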
	mmio_write_32(CRG_PERRSTEN3_REG, PERI_UFS_BIT);
	do {
		data = mmio_read_32(CRG_PERRSTSTAT3_REG);
	} while ((data & PERI_UFS_BIT) == 0);
	mmio_setbits_32(UFS_SYS_PSW_POWER_CTRL_REG, BIT_UFS_PSW_MTCMOS_EN);
	mdelay(1);
	mmio_setbits_32(UFS_SYS_HC_LP_CTRL_REG, BIT_SYSCTRL_PWR_READY);
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      MASK_UFS_DEVICE_RESET);
	/* clear SC_DIV_UFS_PERIBUS */
	mask = SC_DIV_UFS_PERIBUS << 16;
	mmio_write_32(CRG_CLKDIV17_REG, mask);
	/* set SC_DIV_UFSPHY_CFG(3) */
	mask = SC_DIV_UFSPHY_CFG_MASK << 16;
	data = SC_DIV_UFSPHY_CFG(3);
	mmio_write_32(CRG_CLKDIV16_REG, mask | data);
	data = mmio_read_32(UFS_SYS_PHY_CLK_CTRL_REG);
	data &= ~MASK_SYSCTRL_CFG_CLOCK_FREQ;
	data |= 0x39;
	mmio_write_32(UFS_SYS_PHY_CLK_CTRL_REG, data);
	mmio_clrbits_32(UFS_SYS_PHY_CLK_CTRL_REG, MASK_SYSCTRL_REF_CLOCK_SEL);
	mmio_setbits_32(UFS_SYS_CLOCK_GATE_BYPASS_REG,
			MASK_UFS_CLK_GATE_BYPASS);
	mmio_setbits_32(UFS_SYS_UFS_SYSCTRL_REG, MASK_UFS_SYSCTRL_BYPASS);

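	/*
	 * Remove the power-switch and PHY isolation, de-assert the low-power
	 * reset, release the UFS device reset line and finally take the
	 * controller itself out of reset.
	 */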
	mmio_setbits_32(UFS_SYS_PSW_CLK_CTRL_REG, BIT_SYSCTRL_PSW_CLK_EN);
	mmio_clrbits_32(UFS_SYS_PSW_POWER_CTRL_REG, BIT_UFS_PSW_ISO_CTRL);
	mmio_clrbits_32(UFS_SYS_PHY_ISO_EN_REG, BIT_UFS_PHY_ISO_CTRL);
	mmio_clrbits_32(UFS_SYS_HC_LP_CTRL_REG, BIT_SYSCTRL_LP_ISOL_EN);
	mmio_write_32(CRG_PERRSTDIS3_REG, PERI_ARST_UFS_BIT);
	mmio_setbits_32(UFS_SYS_RESET_CTRL_EN_REG, BIT_SYSCTRL_LP_RESET_N);
	mdelay(1);
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET);
	mdelay(20);
	mmio_write_32(UFS_SYS_UFS_DEVICE_RESET_CTRL_REG,
		      0x03300330);

	mmio_write_32(CRG_PERRSTDIS3_REG, PERI_UFS_BIT);
	do {
		data = mmio_read_32(CRG_PERRSTSTAT3_REG);
	} while (data & PERI_UFS_BIT);
}

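/*
 * Reset and initialise the Synopsys DesignWare UFS host controller.
 * desc_base/desc_size describe the memory region the UFS driver uses for
 * its request descriptors.
 */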
static void hikey960_init_ufs(void)
{
	dw_ufs_params_t ufs_params;

	memset(&ufs_params, 0, sizeof(ufs_params));
	ufs_params.reg_base = UFS_REG_BASE;
	ufs_params.desc_base = HIKEY960_UFS_DESC_BASE;
	ufs_params.desc_size = HIKEY960_UFS_DESC_SIZE;
	hikey960_ufs_reset();
	dw_ufs_init(&ufs_params);
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
uint32_t hikey960_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
#ifndef AARCH32
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#else
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int hyp_status, mode, spsr;

	hyp_status = GET_VIRT_EXT(read_id_pfr1());

	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#endif /* AARCH32 */

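/*
 * Post-image-load hook for the images BL2 has just loaded: set the SPSR for
 * BL32/BL33 entry, pass the primary CPU MPID to BL33 in arg0 and forward
 * SCP_BL2 to the SCP.
 */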
int hikey960_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
	assert(bl_mem_params);

	switch (image_id) {
#ifdef AARCH64
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
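		/*
		 * The OP-TEE header image describes the entry point and the
		 * optional pager (BL32_EXTRA1) and paged (BL32_EXTRA2) images;
		 * parse_optee_header() extracts this into the entry point and
		 * image info structures.
		 */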
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl32_entry();
		break;
#endif

	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl33_entry();
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_hikey960_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return hikey960_bl2_handle_post_image_load(image_id);
}

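/*
 * Early platform setup: pick the debug UART from the board ID (boards
 * reporting ID 5300 route the console to UART5, all others use UART6) and
 * record the Trusted RAM region that BL2 is allowed to use.
 */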
void bl2_el3_early_platform_setup(u_register_t arg1, u_register_t arg2,
				  u_register_t arg3, u_register_t arg4)
{
	unsigned int id, uart_base;

	generic_delay_timer_init();
	hikey960_read_boardid(&id);
	if (id == 5300)
		uart_base = PL011_UART5_BASE;
	else
		uart_base = PL011_UART6_BASE;
	/* Initialize the console to provide early debug support */
	console_init(uart_base, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
	/*
	 * Allow BL2 to see the whole Trusted RAM.
	 */
	bl2_el3_tzram_layout.total_base = BL2_RW_BASE;
	bl2_el3_tzram_layout.total_size = BL31_LIMIT - BL2_RW_BASE;
}

void bl2_el3_plat_arch_setup(void)
{
	hikey960_init_mmu_el3(bl2_el3_tzram_layout.total_base,
			      bl2_el3_tzram_layout.total_size,
			      BL2_RO_BASE,
			      BL2_RO_LIMIT,
			      BL2_COHERENT_RAM_BASE,
			      BL2_COHERENT_RAM_LIMIT);
}

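/*
 * Main platform setup: disable WDT0 if an earlier boot stage left it running,
 * then bring up clocks, PMU, regulators, the TrustZone controller,
 * peripherals, pinmux, UFS and the IO layer used to load the remaining images.
 */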
void bl2_platform_setup(void)
{
	/* disable WDT0 */
	if (mmio_read_32(WDT0_REG_BASE + WDT_LOCK_OFFSET) == WDT_LOCKED) {
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, WDT_UNLOCK);
		mmio_write_32(WDT0_REG_BASE + WDT_CONTROL_OFFSET, 0);
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, 0);
	}
	hikey960_clk_init();
	hikey960_pmu_init();
	hikey960_regulator_enable();
	hikey960_tzc_init();
	hikey960_peri_init();
	hikey960_pinmux_init();
	hikey960_init_ufs();
	hikey960_io_setup();
}