/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <desc_image_load.h>
#include <errno.h>
#include <generic_delay_timer.h>
#include <hi3660.h>
#include <mmio.h>
#ifdef SPD_opteed
#include <optee_utils.h>
#endif
#include <platform_def.h>
#include <string.h>
#include <ufs.h>

#include "hikey960_def.h"
#include "hikey960_private.h"

/*
 * The next 2 constants identify the extents of the code & RO data region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
 */
#define BL2_RO_BASE		(unsigned long)(&__RO_START__)
#define BL2_RO_LIMIT		(unsigned long)(&__RO_END__)

/*
 * The next 2 constants identify the extents of the coherent memory region.
 * These addresses are used by the MMU setup code and therefore they must be
 * page-aligned. It is the responsibility of the linker script to ensure that
 * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols refer to
 * page-aligned addresses.
 */
#define BL2_COHERENT_RAM_BASE	(unsigned long)(&__COHERENT_RAM_START__)
#define BL2_COHERENT_RAM_LIMIT	(unsigned long)(&__COHERENT_RAM_END__)

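/* Extents of the trusted RAM for BL2, as passed in via bl2_early_platform_setup() */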
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);

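/*
 * Provided elsewhere in the platform port (extern); from the call site below
 * it is expected to hand the staged SCP_BL2 image over to the LPM3/MCU.
 */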
extern int load_lpm3(void);

/*******************************************************************************
 * Transfer SCP_BL2 from Trusted RAM using the SCP Download protocol.
 * Return 0 on success, -1 otherwise.
 ******************************************************************************/
int plat_hikey960_bl2_handle_scp_bl2(image_info_t *scp_bl2_image_info)
{
	int i;
	int *buf;

	assert(scp_bl2_image_info->image_size < SCP_BL2_SIZE);

	INFO("BL2: Initiating SCP_BL2 transfer to SCP\n");

	INFO("BL2: SCP_BL2: 0x%lx@0x%x\n",
	     scp_bl2_image_info->image_base,
	     scp_bl2_image_info->image_size);

	buf = (int *)scp_bl2_image_info->image_base;

	INFO("BL2: SCP_BL2 HEAD:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
		     buf[i], buf[i+1], buf[i+2], buf[i+3]);

	buf = (int *)(scp_bl2_image_info->image_base +
		      scp_bl2_image_info->image_size - 256);

	INFO("BL2: SCP_BL2 TAIL:\n");
	for (i = 0; i < 64; i += 4)
		INFO("BL2: SCP_BL2 0x%x 0x%x 0x%x 0x%x\n",
		     buf[i], buf[i+1], buf[i+2], buf[i+3]);

	INFO("BL2: SCP_BL2 transferred to SCP\n");

	load_lpm3();
	(void)buf;

	return 0;
}

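/*******************************************************************************
 * Set up the UFS driver: register base, descriptor memory, and the
 * UFS_FLAGS_SKIPINIT flag, which tells the driver to skip the full
 * controller re-initialization.
 ******************************************************************************/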
void hikey960_init_ufs(void)
{
	ufs_params_t ufs_params;

	memset(&ufs_params, 0, sizeof(ufs_params_t));
	ufs_params.reg_base = UFS_REG_BASE;
	ufs_params.desc_base = HIKEY960_UFS_DESC_BASE;
	ufs_params.desc_size = HIKEY960_UFS_DESC_SIZE;
	ufs_params.flags = UFS_FLAGS_SKIPINIT;
	ufs_init(NULL, &ufs_params);
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
uint32_t hikey960_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
#ifndef AARCH32
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	mode = EL_IMPLEMENTED(2) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#else
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
	unsigned int hyp_status, mode, spsr;

	hyp_status = GET_VIRT_EXT(read_id_pfr1());

	mode = (hyp_status) ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_MODE32(mode, plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
#endif /* AARCH32 */

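/*******************************************************************************
 * Per-image hook invoked after each image is loaded by BL2: fills in the entry
 * point information for BL32 (including OP-TEE header parsing when SPD_opteed
 * is enabled) and BL33, and hands SCP_BL2 over to the platform-specific
 * handler above.
 ******************************************************************************/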
int hikey960_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
	assert(bl_mem_params);

	switch (image_id) {
#ifdef AARCH64
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl32_entry();
		break;
#endif

	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = hikey960_get_spsr_for_bl33_entry();
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_hikey960_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return hikey960_bl2_handle_post_image_load(image_id);
}

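/*******************************************************************************
 * Early platform setup: pick the debug UART from the board ID, initialize the
 * console, and record the memory layout received from the previous stage.
 ******************************************************************************/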
void bl2_early_platform_setup(meminfo_t *mem_layout)
{
	unsigned int id, uart_base;

	generic_delay_timer_init();
	hikey960_read_boardid(&id);
	if (id == 5300)
		uart_base = PL011_UART5_BASE;
	else
		uart_base = PL011_UART6_BASE;

	/* Initialize the console to provide early debug support */
	console_init(uart_base, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;
}

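/*******************************************************************************
 * Architectural setup: enable the MMU at EL1, mapping the BL2 trusted RAM,
 * read-only and coherent memory regions defined above.
 ******************************************************************************/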
void bl2_plat_arch_setup(void)
{
	hikey960_init_mmu_el1(bl2_tzram_layout.total_base,
			      bl2_tzram_layout.total_size,
			      BL2_RO_BASE,
			      BL2_RO_LIMIT,
			      BL2_COHERENT_RAM_BASE,
			      BL2_COHERENT_RAM_LIMIT);
}

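/*******************************************************************************
 * Remaining platform setup: disable watchdog WDT0 if it is currently locked,
 * so it does not fire during the rest of the boot.
 ******************************************************************************/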
void bl2_platform_setup(void)
{
	/* disable WDT0 */
	if (mmio_read_32(WDT0_REG_BASE + WDT_LOCK_OFFSET) == WDT_LOCKED) {
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, WDT_UNLOCK);
		mmio_write_32(WDT0_REG_BASE + WDT_CONTROL_OFFSET, 0);
		mmio_write_32(WDT0_REG_BASE + WDT_LOCK_OFFSET, 0);
	}
}