// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: R5 Common Architecture initialization
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/printk.h>
#include <linux/types.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <image.h>
#include <fs_loader.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <spl.h>
#include <remoteproc.h>
#include <elf.h>

#include "../common.h"

#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
enum {
	IMAGE_ID_ATF,
	IMAGE_ID_OPTEE,
	IMAGE_ID_SPL,
	IMAGE_ID_DM_FW,
	IMAGE_ID_TIFSSTUB_HS,
	IMAGE_ID_TIFSSTUB_FS,
	IMAGE_ID_T,
	IMAGE_AMT,
};

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",
	"tee",
	"U-Boot",
	"DM",
	"tifsstub-hs",
	"tifsstub-fs",
	"tifsstub-gp",
};
#endif

static struct image_info fit_image_info[IMAGE_AMT];

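/*
 * init_env() - Initialize the environment and record the boot storage
 *
 * Sets "storage_interface" and the matching device/partition variables
 * based on the detected SPL boot device, so that remote-core firmware
 * can later be loaded from the same media.
 */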
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

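/*
 * load_firmware() - Load a remote-core firmware image via the FS loader
 * @name_fw:       name of the environment variable holding the firmware
 *                 file name
 * @name_loadaddr: name of the environment variable holding the load
 *                 address
 * @loadaddr:      returns the address the firmware was loaded to
 *
 * Return: size of the loaded firmware, or 0 if nothing was loaded (FS
 * loader disabled, unsupported boot device, or no load address set).
 */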
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	if (!CONFIG_IS_ENABLED(FS_LOADER))
		return 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!get_fs_loader(&fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}

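/*
 * release_resources_for_core_shutdown() - Prepare this R5 core for shutdown
 *
 * Powers down the devices listed in put_device_ids and queues TI-SCI
 * shutdown requests for the cores listed in put_core_ids. The caller
 * must follow up with a WFE/WFI instruction for the shutdown to take
 * effect.
 */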
void release_resources_for_core_shutdown(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	struct ti_sci_dev_ops *dev_ops = &ti_sci->ops.dev_ops;
	struct ti_sci_proc_ops *proc_ops = &ti_sci->ops.proc_ops;
	int ret;
	u32 i;

	/* Iterate through list of devices to put (shutdown) */
	for (i = 0; i < ARRAY_SIZE(put_device_ids); i++) {
		u32 id = put_device_ids[i];

		ret = dev_ops->put_device(ti_sci, id);
		if (ret)
			panic("Failed to put device %u (%d)\n", id, ret);
	}

	/* Iterate through list of cores to put (shutdown) */
	for (i = 0; i < ARRAY_SIZE(put_core_ids); i++) {
		u32 id = put_core_ids[i];

		/*
		 * Queue up the core shutdown request. Note that this call
		 * needs to be followed up by an actual invocation of a WFE
		 * or WFI CPU instruction.
		 */
		ret = proc_ops->proc_shutdown_no_wait(ti_sci, id);
		if (ret)
			panic("Failed sending core %u shutdown message (%d)\n",
			      id, ret);
	}
}

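/*
 * jump_to_image_no_args() - Hand off from R5 SPL to ATF/DM firmware
 * @spl_image: description of the image to start
 *
 * Loads ATF onto remoteproc device 1 (the Cortex-A core) and starts it,
 * then either jumps to the Device Manager firmware on this R5 core or
 * shuts the core down if no DM firmware was found.
 */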
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0, shut_cpu = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure the DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
	/* Authenticate ATF */
	void *image_addr = (void *)fit_image_info[IMAGE_ID_ATF].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_ATF].image_start,
	      fit_image_info[IMAGE_ID_ATF].image_len,
	      image_os_match[IMAGE_ID_ATF]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_ATF].image_len);

	/* Authenticate OPTEE */
	image_addr = (void *)fit_image_info[IMAGE_ID_OPTEE].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_OPTEE].image_start,
	      fit_image_info[IMAGE_ID_OPTEE].image_len,
	      image_os_match[IMAGE_ID_OPTEE]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_OPTEE].image_len);
#endif

	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		shut_cpu = 1;
		goto start_arm64;
	}

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

start_arm64:
	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (shut_cpu) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}
	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
#endif

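/*
 * disable_linefill_optimization() - Work around a Cortex-R5F deadlock
 *
 * Sets the DLFO bit in the auxiliary control register; see the erratum
 * description in the function body.
 */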
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are 2 conditions where R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write back/write allocate memory region and then
	 *    executes a barrier operation (DSB or DMB), R5F may hang at
	 *    the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop, and the store operations all write to
	 *    cacheable write back/write allocate memory regions, R5F may
	 *    hang at one of the load instructions.
	 *
	 * To avoid these two conditions, disable the linefill optimization
	 * inside Cortex-R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}

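/*
 * remove_fwl_region() - Disable a single firewall region via TI-SCI
 * @fwl: fwl_data describing the firewall ID and region
 *
 * Reads the current region configuration back from the firmware, clears
 * the enable field in the control word, and writes it back.
 *
 * Return: 0 on success, negative error value if the region could not be
 * disabled.
 */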
int remove_fwl_region(struct fwl_data *fwl)
{
	struct ti_sci_handle *sci = get_ti_sci_handle();
	struct ti_sci_fwl_ops *ops = &sci->ops.fwl_ops;
	struct ti_sci_msg_fwl_region region;
	int ret;

	region.fwl_id = fwl->fwl_id;
	region.region = fwl->regions;
	region.n_permission_regs = 3;

	ops->get_fwl_region(sci, &region);

	/* Zero out the enable field of the firewall */
	region.control = region.control & ~0xF;

	pr_debug("Disabling firewall id: %d region: %d\n",
		 region.fwl_id, region.region);

	ret = ops->set_fwl_region(sci, &region);
	if (ret)
		pr_err("Could not disable firewall\n");
	return ret;
}

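/*
 * remove_fwl_regions() - Disable the firewall regions of a given type
 * @fwl_data: firewall instance to operate on
 * @num_regions: number of regions the firewall owns
 * @fwl_type: whether to disable foreground or background regions
 *
 * Regions whose background bit does not match @fwl_type are left alone,
 * so foreground and background regions can be torn down in separate
 * passes.
 */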
static void remove_fwl_regions(struct fwl_data fwl_data, size_t num_regions,
			       enum k3_firewall_region_type fwl_type)
{
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	struct ti_sci_msg_fwl_region region;
	size_t j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;

	for (j = 0; j < fwl_data.regions; j++) {
		region.fwl_id = fwl_data.fwl_id;
		region.region = j;
		region.n_permission_regs = 3;

		fwl_ops->get_fwl_region(ti_sci, &region);

		/* Only disable regions matching the requested type */
		if (region.control != 0 &&
		    ((region.control >> K3_FIREWALL_BACKGROUND_BIT) & 1) == fwl_type) {
			pr_debug("Attempting to disable firewall %5d (%25s)\n",
				 region.fwl_id, fwl_data.name);
			region.control = 0;

			if (fwl_ops->set_fwl_region(ti_sci, &region))
				pr_err("Could not disable firewall %5d (%25s)\n",
				       region.fwl_id, fwl_data.name);
		}
	}
}

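/*
 * remove_fwl_configs() - Disable the firewalls SPL configured
 * @fwl_data: array of firewalls to disable
 * @fwl_data_size: number of entries in @fwl_data
 *
 * For each firewall, foreground regions are disabled before background
 * regions.
 */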
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	size_t i;

	for (i = 0; i < fwl_data_size; i++) {
		remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
				   K3_FIREWALL_REGION_FOREGROUND);
		remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
				   K3_FIREWALL_REGION_BACKGROUND);
	}
}

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
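/*
 * board_fit_image_post_process() - Record and authenticate FIT images
 * @fit: pointer to the FIT blob
 * @node: offset of the image node being processed
 * @p_image: pointer to the image data, may be updated
 * @p_size: pointer to the image size, may be updated
 *
 * Records the entry address and size of each known image for the later
 * ATF hand-off, drops tifsstub variants that do not match the device
 * type, and performs security post-processing as described in the
 * comments below.
 */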
void board_fit_image_post_process(const void *fit, int node, void **p_image,
				  size_t *p_size)
{
	int len;
	int i;
	const char *os;
	u32 addr;

	os = fdt_getprop(fit, node, "os", &len);
	addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);

	debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
	      addr, *p_size, os);

	for (i = 0; i < IMAGE_AMT; i++) {
		if (!strcmp(os, image_os_match[i])) {
			fit_image_info[i].image_start = addr;
			fit_image_info[i].image_len = *p_size;
			debug("%s: matched image for ID %d\n", __func__, i);
			break;
		}
	}

	if (i < IMAGE_AMT && i > IMAGE_ID_DM_FW) {
		int device_type = get_device_type();

		if ((device_type == K3_DEVICE_TYPE_HS_SE &&
		     strcmp(os, "tifsstub-hs")) ||
		    (device_type == K3_DEVICE_TYPE_HS_FS &&
		     strcmp(os, "tifsstub-fs")) ||
		    (device_type == K3_DEVICE_TYPE_GP &&
		     strcmp(os, "tifsstub-gp"))) {
			*p_size = 0;
		} else {
			debug("tifsstub-type: %s\n", os);
		}

		return;
	}

	/*
	 * Only DM and the DTBs are authenticated here; the rest will be
	 * authenticated when the A72 cluster is up.
	 */
	if ((i != IMAGE_ID_ATF) && (i != IMAGE_ID_OPTEE)) {
		ti_secure_image_check_binary(p_image, p_size);
		ti_secure_image_post_process(p_image, p_size);
	} else {
		ti_secure_image_check_binary(p_image, p_size);
	}
}
#endif