// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: Common Architecture initialization
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <image.h>
#include <init.h>
#include <log.h>
#include <spl.h>
#include <asm/global_data.h>
#include <linux/printk.h>
#include "common.h"
#include <dm.h>
#include <remoteproc.h>
#include <asm/cache.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <fdt_support.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <fs_loader.h>
#include <fs.h>
#include <env.h>
#include <elf.h>
#include <soc.h>

#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
enum {
	IMAGE_ID_ATF,
	IMAGE_ID_OPTEE,
	IMAGE_ID_SPL,
	IMAGE_ID_DM_FW,
	IMAGE_AMT,
};

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",
	"tee",
	"U-Boot",
	"DM",
};
#endif

static struct image_info fit_image_info[IMAGE_AMT];
#endif

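/**
 * get_ti_sci_handle() - Get the TI SCI handle for System Firmware
 *
 * Looks up the ti_sci firmware device and returns its TI SCI handle.
 * Panics if the device cannot be found, so the returned pointer is
 * always valid.
 *
 * Return: pointer to the TI SCI handle
 */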
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_DRIVER_GET(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}

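/**
 * k3_sysfw_print_ver() - Print the System Firmware version information
 *
 * Prints the SYSFW ABI version, firmware revision and firmware description
 * string reported through the TI SCI handle.
 */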
void k3_sysfw_print_ver(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];

	/*
	 * Output System Firmware version info. Note that since the
	 * 'firmware_description' field is not guaranteed to be zero-
	 * terminated, we manually add a '\0' terminator if needed. Further
	 * note that we intentionally avoid the extended printf() formatter
	 * '%.*s' so as not to require a more full-featured printf()
	 * implementation.
	 */
	strncpy(fw_desc, ti_sci->version.firmware_description,
		sizeof(ti_sci->version.firmware_description));
	fw_desc[sizeof(fw_desc) - 1] = '\0';

	printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
	       ti_sci->version.abi_major, ti_sci->version.abi_minor,
	       ti_sci->version.firmware_revision, fw_desc);
}

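/**
 * mmr_unlock() - Unlock a partition of a control MMR region
 * @base:      base address of the control MMR region
 * @partition: index of the partition to unlock
 *
 * Writes the two-step kick sequence to the lock registers of the requested
 * partition. Illustrative call (the base macro and partition number below
 * are placeholders; the real values are SoC- and board-specific):
 *
 *	mmr_unlock(WKUP_CTRL_MMR0_BASE, 7);
 */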
void mmr_unlock(phys_addr_t base, u32 partition)
{
	/* Translate the base address */
	phys_addr_t part_base = base + partition * CTRL_MMR0_PARTITION_SIZE;

	/* Unlock the requested partition if locked using two-step sequence */
	writel(CTRLMMR_LOCK_KICK0_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK0);
	writel(CTRLMMR_LOCK_KICK1_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK1);
}

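/**
 * is_rom_loaded_sysfw() - Check whether ROM already loaded System Firmware
 * @data: ROM extended boot data to inspect
 *
 * Return: true if the boot data carries the K3 ROM header magic and more
 * than one component, false otherwise
 */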
bool is_rom_loaded_sysfw(struct rom_extended_boot_data *data)
{
	if (strncmp(data->header, K3_ROM_BOOT_HEADER_MAGIC, 7))
		return false;

	return data->num_components > 1;
}

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_K3_EARLY_CONS
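/**
 * early_console_init() - Set up an early debug console
 *
 * Binds the serial device selected by CONFIG_K3_EARLY_CONS_IDX as the
 * current console so that output is available before the regular console
 * is ready.
 *
 * Return: 0 on success, negative error code on failure
 */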
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif

#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)

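/**
 * init_env() - Set up environment variables describing firmware storage
 *
 * When SPL environment support is enabled, initializes and relocates the
 * environment and records the storage interface matching the current boot
 * device so that a remote-core firmware image can be located later.
 */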
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

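/**
 * load_firmware() - Load a remote-core firmware image via the FS loader
 * @name_fw:       name of the environment variable holding the firmware
 *                 file name
 * @name_loadaddr: name of the environment variable holding the load address
 * @loadaddr:      returns the address the firmware was loaded to
 *
 * Used below to pull in the MCU R5F DM firmware named by the
 * "name_mcur5f0_0fw"/"addr_mcur5f0_0load" environment variables.
 *
 * Return: number of bytes loaded, or 0 if nothing was loaded
 */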
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	if (!IS_ENABLED(CONFIG_FS_LOADER))
		return 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!get_fs_loader(&fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}

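/**
 * release_resources_for_core_shutdown() - Prepare the boot core for shutdown
 *
 * Powers down the devices listed in put_device_ids[] and queues shutdown
 * requests for the cores listed in put_core_ids[] via TI SCI. The caller
 * must follow up with a WFE/WFI instruction to complete the shutdown.
 */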
void release_resources_for_core_shutdown(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	struct ti_sci_dev_ops *dev_ops = &ti_sci->ops.dev_ops;
	struct ti_sci_proc_ops *proc_ops = &ti_sci->ops.proc_ops;
	int ret;
	u32 i;

	/* Iterate through list of devices to put (shutdown) */
	for (i = 0; i < ARRAY_SIZE(put_device_ids); i++) {
		u32 id = put_device_ids[i];

		ret = dev_ops->put_device(ti_sci, id);
		if (ret)
			panic("Failed to put device %u (%d)\n", id, ret);
	}

	/* Iterate through list of cores to put (shutdown) */
	for (i = 0; i < ARRAY_SIZE(put_core_ids); i++) {
		u32 id = put_core_ids[i];

		/*
		 * Queue up the core shutdown request. Note that this call
		 * needs to be followed up by an actual invocation of a WFE
		 * or WFI CPU instruction.
		 */
		ret = proc_ops->proc_shutdown_no_wait(ti_sci, id);
		if (ret)
			panic("Failed sending core %u shutdown message (%d)\n",
			      id, ret);
	}
}

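/**
 * jump_to_image_no_args() - Start ATF on the Arm64 core and hand over
 * @spl_image: handle to the image loaded by SPL
 *
 * Releases the exclusive devices held by SPL, loads and starts ATF on
 * remoteproc device 1, then either jumps into the device manager (DM)
 * firmware on this core or shuts the core down if no DM firmware is
 * available.
 */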
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0, shut_cpu = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

#if (CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS) && IS_ENABLED(CONFIG_SYS_K3_SPL_ATF))
	/* Authenticate ATF */
	void *image_addr = (void *)fit_image_info[IMAGE_ID_ATF].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_ATF].image_start,
	      fit_image_info[IMAGE_ID_ATF].image_len,
	      image_os_match[IMAGE_ID_ATF]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_ATF].image_len);

	/* Authenticate OPTEE */
	image_addr = (void *)fit_image_info[IMAGE_ID_OPTEE].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_OPTEE].image_start,
	      fit_image_info[IMAGE_ID_OPTEE].image_len,
	      image_os_match[IMAGE_ID_OPTEE]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_OPTEE].image_len);

#endif

	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		shut_cpu = 1;
		goto start_arm64;
	}

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

start_arm64:
	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (shut_cpu) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}
	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
#endif

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
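/**
 * board_fit_image_post_process() - Record and authenticate FIT sub-images
 * @fit:     pointer to the FIT blob being processed
 * @node:    offset of the image node within the FIT
 * @p_image: pointer to the image data pointer
 * @p_size:  pointer to the image size
 *
 * Records the entry address and size of the ATF, OP-TEE, U-Boot and DM
 * images for later use and runs the TI secure image check/post-processing
 * appropriate for the image type.
 */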
void board_fit_image_post_process(const void *fit, int node, void **p_image,
				  size_t *p_size)
{
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
	int len;
	int i;
	const char *os;
	u32 addr;

	os = fdt_getprop(fit, node, "os", &len);
	addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);

	debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
	      addr, *p_size, os);

	for (i = 0; i < IMAGE_AMT; i++) {
		if (!strcmp(os, image_os_match[i])) {
			fit_image_info[i].image_start = addr;
			fit_image_info[i].image_len = *p_size;
			debug("%s: matched image for ID %d\n", __func__, i);
			break;
		}
	}
	/*
	 * Only DM and the DTBs are authenticated here; the rest will be
	 * authenticated when the A72 cluster is up.
	 */
	if ((i != IMAGE_ID_ATF) && (i != IMAGE_ID_OPTEE))
#endif
	{
		ti_secure_image_check_binary(p_image, p_size);
		ti_secure_image_post_process(p_image, p_size);
	}
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
	else
		ti_secure_image_check_binary(p_image, p_size);
#endif
}
#endif

#ifndef CONFIG_SYSRESET
void reset_cpu(void)
{
}
#endif

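/**
 * get_device_type() - Determine the K3 device (security) type
 *
 * Decodes the device type and sub-type fields of the security manager
 * status register to classify the device as GP, TEST, EMU, HS-FS or HS-SE.
 *
 * Return: the detected k3_device_type, or K3_DEVICE_TYPE_BAD if unknown
 */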
enum k3_device_type get_device_type(void)
{
	u32 sys_status = readl(K3_SEC_MGR_SYS_STATUS);

	u32 sys_dev_type = (sys_status & SYS_STATUS_DEV_TYPE_MASK) >>
			   SYS_STATUS_DEV_TYPE_SHIFT;

	u32 sys_sub_type = (sys_status & SYS_STATUS_SUB_TYPE_MASK) >>
			   SYS_STATUS_SUB_TYPE_SHIFT;

	switch (sys_dev_type) {
	case SYS_STATUS_DEV_TYPE_GP:
		return K3_DEVICE_TYPE_GP;
	case SYS_STATUS_DEV_TYPE_TEST:
		return K3_DEVICE_TYPE_TEST;
	case SYS_STATUS_DEV_TYPE_EMU:
		return K3_DEVICE_TYPE_EMU;
	case SYS_STATUS_DEV_TYPE_HS:
		if (sys_sub_type == SYS_STATUS_SUB_TYPE_VAL_FS)
			return K3_DEVICE_TYPE_HS_FS;
		else
			return K3_DEVICE_TYPE_HS_SE;
	default:
		return K3_DEVICE_TYPE_BAD;
	}
}

#if defined(CONFIG_DISPLAY_CPUINFO)
static const char *get_device_type_name(void)
{
	enum k3_device_type type = get_device_type();

	switch (type) {
	case K3_DEVICE_TYPE_GP:
		return "GP";
	case K3_DEVICE_TYPE_TEST:
		return "TEST";
	case K3_DEVICE_TYPE_EMU:
		return "EMU";
	case K3_DEVICE_TYPE_HS_FS:
		return "HS-FS";
	case K3_DEVICE_TYPE_HS_SE:
		return "HS-SE";
	default:
		return "BAD";
	}
}

int print_cpuinfo(void)
{
	struct udevice *soc;
	char name[64];
	int ret;

	printf("SoC: ");

	ret = soc_get(&soc);
	if (ret) {
		printf("UNKNOWN\n");
		return 0;
	}

	ret = soc_get_family(soc, name, 64);
	if (!ret) {
		printf("%s ", name);
	}

	ret = soc_get_revision(soc, name, 64);
	if (!ret) {
		printf("%s ", name);
	}

	printf("%s\n", get_device_type_name());

	return 0;
}
#endif

#ifdef CONFIG_ARM64
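/**
 * board_prep_linux() - Prepare for handing control over to Linux
 * @images: boot image descriptors used by bootm
 *
 * Flushes the kernel image range from the data cache before it is executed.
 */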
void board_prep_linux(struct bootm_headers *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif

#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are two conditions where the R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write-back/write-allocate memory region and later
	 *    executes a barrier operation (DSB or DMB), the R5F may hang
	 *    at the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the store operations all write to
	 *    cacheable write-back/write-allocate memory regions, the R5F
	 *    may hang at one of the load instructions.
	 *
	 * To avoid these two conditions, disable the linefill optimization
	 * inside the Cortex-R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif

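/**
 * remove_fwl_regions() - Disable firewall regions of a given type
 * @fwl_data:    firewall descriptor (ID, name and number of regions)
 * @num_regions: number of regions in the firewall
 * @fwl_type:    whether to act on background or foreground regions
 *
 * Reads back each region of the firewall and, if it is enabled and of the
 * requested type, clears its control word via TI SCI.
 */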
static void remove_fwl_regions(struct fwl_data fwl_data, size_t num_regions,
			       enum k3_firewall_region_type fwl_type)
{
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	struct ti_sci_msg_fwl_region region;
	size_t j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;

	for (j = 0; j < fwl_data.regions; j++) {
		region.fwl_id = fwl_data.fwl_id;
		region.region = j;
		region.n_permission_regs = 3;

		fwl_ops->get_fwl_region(ti_sci, &region);

		/* Only disable enabled regions of the requested type */
		if (region.control != 0 &&
		    ((region.control >> K3_FIREWALL_BACKGROUND_BIT) & 1) == fwl_type) {
			pr_debug("Attempting to disable firewall %5d (%25s)\n",
				 region.fwl_id, fwl_data.name);
			region.control = 0;

			if (fwl_ops->set_fwl_region(ti_sci, &region))
				pr_err("Could not disable firewall %5d (%25s)\n",
				       region.fwl_id, fwl_data.name);
		}
	}
}

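/**
 * remove_fwl_configs() - Disable all removable firewall regions
 * @fwl_data:      array of firewall descriptors
 * @fwl_data_size: number of entries in @fwl_data
 *
 * Disables first the foreground and then the background regions of each
 * firewall in the list.
 */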
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	size_t i;

	for (i = 0; i < fwl_data_size; i++) {
		remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
				   K3_FIREWALL_REGION_FOREGROUND);
		remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
				   K3_FIREWALL_REGION_BACKGROUND);
	}
}

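/**
 * spl_enable_dcache() - Enable the data cache in SPL
 *
 * Sizes DRAM, reserves space for the page table just below the top of the
 * 32-bit addressable RAM (64 KiB aligned) and enables the data cache.
 */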
void spl_enable_dcache(void)
{
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CFG_SYS_SDRAM_BASE;

	dram_init();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	gd->arch.tlb_addr &= ~(0x10000 - 1);
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);
	gd->relocaddr = gd->arch.tlb_addr;

	dcache_enable();
#endif
}

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
void spl_board_prepare_for_boot(void)
{
	dcache_disable();
}

void spl_board_prepare_for_linux(void)
{
	dcache_disable();
}
#endif

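/**
 * misc_init_r() - Miscellaneous late initialization
 *
 * Probes the AM65 CPSW NUSS Ethernet device when it is enabled and
 * defaults to FIT boot on HS-SE devices.
 */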
int misc_init_r(void)
{
	if (IS_ENABLED(CONFIG_TI_AM65_CPSW_NUSS)) {
		struct udevice *dev;
		int ret;

		ret = uclass_get_device_by_driver(UCLASS_MISC,
						  DM_DRIVER_GET(am65_cpsw_nuss),
						  &dev);
		if (ret)
			printf("Failed to probe am65_cpsw_nuss driver\n");
	}

	/* Default FIT boot on HS-SE devices */
	if (get_device_type() == K3_DEVICE_TYPE_HS_SE)
		env_set("boot_fit", "1");

	return 0;
}

/**
 * do_board_detect() - Detect board description
 *
 * Function to detect board description. This is expected to be
 * overridden in the SoC family board file where desired.
 */
void __weak do_board_detect(void)
{
}