blob: 0f6c294f1eb22396d2d24afbbe0d35d770f52944 [file] [log] [blame]
Andrew Davis5eb8d572024-02-01 18:24:44 -06001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * K3: R5 Common Architecture initialization
4 *
5 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
6 */
7
Andrew Davisef880db2024-02-01 18:24:45 -06008#include <linux/printk.h>
Andrew Davis5eb8d572024-02-01 18:24:44 -06009#include <linux/types.h>
10#include <asm/hardware.h>
11#include <asm/io.h>
Andrew Davisef880db2024-02-01 18:24:45 -060012#include <image.h>
13#include <fs_loader.h>
14#include <linux/soc/ti/ti_sci_protocol.h>
15#include <spl.h>
16#include <remoteproc.h>
17#include <elf.h>
Andrew Davis5eb8d572024-02-01 18:24:44 -060018
19#include "../common.h"
20
Andrew Davisef880db2024-02-01 18:24:45 -060021#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
/*
 * Indices of the boot-stage images handled by SPL. Each entry pairs with
 * the FIT "os" string at the same index in image_os_match[] below, and
 * with a slot in fit_image_info[].
 */
enum {
	IMAGE_ID_ATF,		/* "arm-trusted-firmware" */
	IMAGE_ID_OPTEE,		/* "tee" */
	IMAGE_ID_SPL,		/* "U-Boot" */
	IMAGE_ID_DM_FW,		/* "DM" (Device Manager firmware) */
	IMAGE_ID_TIFSSTUB_HS,	/* "tifsstub-hs" */
	IMAGE_ID_TIFSSTUB_FS,	/* "tifsstub-fs" */
	IMAGE_ID_T,		/* "tifsstub-gp" (NOTE(review): name looks truncated) */
	IMAGE_AMT,		/* count of image IDs; array size below */
};
32
#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
/*
 * FIT "os" property value for each image, indexed by the IMAGE_ID_* enum
 * above. board_fit_image_post_process() matches incoming FIT nodes against
 * these strings, so the order here must stay in sync with the enum.
 */
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",
	"tee",
	"U-Boot",
	"DM",
	"tifsstub-hs",
	"tifsstub-fs",
	"tifsstub-gp",
};
#endif
44
/*
 * Load address and length of each image, indexed by IMAGE_ID_*. Filled in
 * by board_fit_image_post_process() as FIT images are unpacked; consumed
 * by jump_to_image_no_args().
 */
static struct image_info fit_image_info[IMAGE_AMT];
46
/*
 * Set up the SPL environment and export the firmware-storage variables
 * (storage_interface, fw_* locations) that match the current boot device.
 * No-op unless CONFIG_SPL_ENV_SUPPORT is enabled; unsupported boot devices
 * only get a diagnostic message.
 */
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	u32 boot_device;
	char *boot_part;

	env_init();
	env_relocate();

	boot_device = spl_boot_device();
	if (boot_device == BOOT_DEVICE_MMC2) {
		boot_part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", boot_part);
	} else if (boot_device == BOOT_DEVICE_SPI) {
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
	} else {
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
	}
#endif
}
72
73int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
74{
75 struct udevice *fsdev;
76 char *name = NULL;
77 int size = 0;
78
MD Danish Anwar71c6a012024-03-14 20:03:10 +053079 if (!CONFIG_IS_ENABLED(FS_LOADER))
Andrew Davisef880db2024-02-01 18:24:45 -060080 return 0;
81
82 *loadaddr = 0;
83#ifdef CONFIG_SPL_ENV_SUPPORT
84 switch (spl_boot_device()) {
85 case BOOT_DEVICE_MMC2:
86 name = env_get(name_fw);
87 *loadaddr = env_get_hex(name_loadaddr, *loadaddr);
88 break;
89 default:
90 printf("Loading rproc fw image from device %u not supported!\n",
91 spl_boot_device());
92 return 0;
93 }
94#endif
95 if (!*loadaddr)
96 return 0;
97
98 if (!get_fs_loader(&fsdev)) {
99 size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
100 0, 0);
101 }
102
103 return size;
104}
105
106void release_resources_for_core_shutdown(void)
107{
108 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
109 struct ti_sci_dev_ops *dev_ops = &ti_sci->ops.dev_ops;
110 struct ti_sci_proc_ops *proc_ops = &ti_sci->ops.proc_ops;
111 int ret;
112 u32 i;
113
114 /* Iterate through list of devices to put (shutdown) */
115 for (i = 0; i < ARRAY_SIZE(put_device_ids); i++) {
116 u32 id = put_device_ids[i];
117
118 ret = dev_ops->put_device(ti_sci, id);
119 if (ret)
120 panic("Failed to put device %u (%d)\n", id, ret);
121 }
122
123 /* Iterate through list of cores to put (shutdown) */
124 for (i = 0; i < ARRAY_SIZE(put_core_ids); i++) {
125 u32 id = put_core_ids[i];
126
127 /*
128 * Queue up the core shutdown request. Note that this call
129 * needs to be followed up by an actual invocation of an WFE
130 * or WFI CPU instruction.
131 */
132 ret = proc_ops->proc_shutdown_no_wait(ti_sci, id);
133 if (ret)
134 panic("Failed sending core %u shutdown message (%d)\n",
135 id, ret);
136 }
137}
138
/*
 * SPL hand-off point on the R5 core.
 *
 * Loads ATF onto remoteproc device 1 (assumed to be the boot Cortex-A
 * core), optionally authenticates the ATF/OP-TEE images, starts the A
 * core, and then either jumps to the Device Manager (DM) firmware ELF on
 * this R5 core or parks the R5 in WFE after releasing its resources when
 * no DM firmware is available. Never returns.
 *
 * @spl_image: SPL image descriptor; its entry_point is used as the ATF
 *	       load address when the FIT did not provide one.
 */
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0, shut_cpu = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	/* DM firmware was not in the FIT: try loading it from storage */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
	/* Authenticate ATF */
	void *image_addr = (void *)fit_image_info[IMAGE_ID_ATF].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_ATF].image_start,
	      fit_image_info[IMAGE_ID_ATF].image_len,
	      image_os_match[IMAGE_ID_ATF]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_ATF].image_len);

	/* Authenticate OPTEE */
	image_addr = (void *)fit_image_info[IMAGE_ID_OPTEE].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_OPTEE].image_start,
	      fit_image_info[IMAGE_ID_OPTEE].image_len,
	      image_os_match[IMAGE_ID_OPTEE]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_OPTEE].image_len);
#endif

	/*
	 * No usable DM firmware anywhere (neither in the FIT nor a valid
	 * ELF loaded from storage): start the A core and shut this R5
	 * core down instead of jumping anywhere.
	 */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		shut_cpu = 1;
		goto start_arm64;
	}

	/* Resolve the DM firmware ELF entry point to jump to */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

start_arm64:
	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (shut_cpu) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		/* Shutdown was only queued; WFE makes it take effect */
		while (1)
			asm volatile("wfe");
	}
	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
231#endif
232
Andrew Davis5eb8d572024-02-01 18:24:44 -0600233void disable_linefill_optimization(void)
234{
235 u32 actlr;
236
237 /*
238 * On K3 devices there are 2 conditions where R5F can deadlock:
239 * 1.When software is performing series of store operations to
240 * cacheable write back/write allocate memory region and later
241 * on software execute barrier operation (DSB or DMB). R5F may
242 * hang at the barrier instruction.
243 * 2.When software is performing a mix of load and store operations
244 * within a tight loop and store operations are all writing to
245 * cacheable write back/write allocates memory regions, R5F may
246 * hang at one of the load instruction.
247 *
248 * To avoid the above two conditions disable linefill optimization
249 * inside Cortex R5F.
250 */
251 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
252 actlr |= (1 << 13); /* Set DLFO bit */
253 asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
254}
Andrew Davisef880db2024-02-01 18:24:45 -0600255
Andrew Daviseb937aa2024-02-01 18:24:47 -0600256static void remove_fwl_regions(struct fwl_data fwl_data, size_t num_regions,
257 enum k3_firewall_region_type fwl_type)
258{
259 struct ti_sci_fwl_ops *fwl_ops;
260 struct ti_sci_handle *ti_sci;
261 struct ti_sci_msg_fwl_region region;
262 size_t j;
263
264 ti_sci = get_ti_sci_handle();
265 fwl_ops = &ti_sci->ops.fwl_ops;
266
267 for (j = 0; j < fwl_data.regions; j++) {
268 region.fwl_id = fwl_data.fwl_id;
269 region.region = j;
270 region.n_permission_regs = 3;
271
272 fwl_ops->get_fwl_region(ti_sci, &region);
273
274 /* Don't disable the background regions */
275 if (region.control != 0 &&
276 ((region.control >> K3_FIREWALL_BACKGROUND_BIT) & 1) == fwl_type) {
277 pr_debug("Attempting to disable firewall %5d (%25s)\n",
278 region.fwl_id, fwl_data.name);
279 region.control = 0;
280
281 if (fwl_ops->set_fwl_region(ti_sci, &region))
282 pr_err("Could not disable firewall %5d (%25s)\n",
283 region.fwl_id, fwl_data.name);
284 }
285 }
286}
287
288void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
289{
290 size_t i;
291
292 for (i = 0; i < fwl_data_size; i++) {
293 remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
294 K3_FIREWALL_REGION_FOREGROUND);
295 remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
296 K3_FIREWALL_REGION_BACKGROUND);
297 }
298}
299
Andrew Davisef880db2024-02-01 18:24:45 -0600300#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
301void board_fit_image_post_process(const void *fit, int node, void **p_image,
302 size_t *p_size)
303{
304 int len;
305 int i;
306 const char *os;
307 u32 addr;
308
309 os = fdt_getprop(fit, node, "os", &len);
310 addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);
311
312 debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
313 addr, *p_size, os);
314
315 for (i = 0; i < IMAGE_AMT; i++) {
316 if (!strcmp(os, image_os_match[i])) {
317 fit_image_info[i].image_start = addr;
318 fit_image_info[i].image_len = *p_size;
319 debug("%s: matched image for ID %d\n", __func__, i);
320 break;
321 }
322 }
Kamlesh Gurudasani0908c082024-04-03 17:33:09 +0530323
324 if (i < IMAGE_AMT && i > IMAGE_ID_DM_FW) {
325 int device_type = get_device_type();
326
327 if ((device_type == K3_DEVICE_TYPE_HS_SE &&
328 strcmp(os, "tifsstub-hs")) ||
329 (device_type == K3_DEVICE_TYPE_HS_FS &&
330 strcmp(os, "tifsstub-fs")) ||
331 (device_type == K3_DEVICE_TYPE_GP &&
332 strcmp(os, "tifsstub-gp"))) {
333 *p_size = 0;
334 } else {
335 debug("tifsstub-type: %s\n", os);
336 }
337
338 return;
339 }
340
Andrew Davisef880db2024-02-01 18:24:45 -0600341 /*
342 * Only DM and the DTBs are being authenticated here,
343 * rest will be authenticated when A72 cluster is up
344 */
345 if ((i != IMAGE_ID_ATF) && (i != IMAGE_ID_OPTEE)) {
346 ti_secure_image_check_binary(p_image, p_size);
347 ti_secure_image_post_process(p_image, p_size);
348 } else {
349 ti_secure_image_check_binary(p_image, p_size);
350 }
351}
352#endif