blob: bda01527d3fb3ea873da2fdab01beb69015fc68c [file] [log] [blame]
Lokesh Vutla5d83fd22018-11-02 19:51:05 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * K3: Common Architecture initialization
4 *
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
6 * Lokesh Vutla <lokeshvutla@ti.com>
7 */
8
9#include <common.h>
Simon Glassafb02152019-12-28 10:45:01 -070010#include <cpu_func.h>
Simon Glass2dc9c342020-05-10 11:40:01 -060011#include <image.h>
Simon Glass97589732020-05-10 11:40:02 -060012#include <init.h>
Simon Glass0f2af882020-05-10 11:40:05 -060013#include <log.h>
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053014#include <spl.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060015#include <asm/global_data.h>
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053016#include "common.h"
17#include <dm.h>
18#include <remoteproc.h>
Simon Glass274e0b02020-05-10 11:39:56 -060019#include <asm/cache.h>
Lokesh Vutla28cd8242019-03-08 11:47:33 +053020#include <linux/soc/ti/ti_sci_protocol.h>
Lokesh Vutla16cf5d22019-03-08 11:47:34 +053021#include <fdt_support.h>
Lokesh Vutlaa04cf3b2019-09-27 13:32:11 +053022#include <asm/hardware.h>
23#include <asm/io.h>
Keerthy7007adc2020-02-12 13:55:04 +053024#include <fs_loader.h>
25#include <fs.h>
26#include <env.h>
27#include <elf.h>
Dave Gerlachc74227f2020-07-15 23:40:04 -050028#include <soc.h>
Lokesh Vutla28cd8242019-03-08 11:47:33 +053029
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
/* Indices of the boot-time images tracked across FIT post-processing */
enum {
	IMAGE_ID_ATF,
	IMAGE_ID_OPTEE,
	IMAGE_ID_SPL,
	IMAGE_ID_DM_FW,
	IMAGE_AMT,
};

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
/* FIT "os" property values identifying each tracked image, by image ID */
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",
	"tee",
	"U-Boot",
	"DM",
};
#endif

/*
 * Start address and length of each tracked image; filled in by
 * board_fit_image_post_process() and consumed by jump_to_image_no_args().
 */
static struct image_info fit_image_info[IMAGE_AMT];
#endif
50
Lokesh Vutla28cd8242019-03-08 11:47:33 +053051struct ti_sci_handle *get_ti_sci_handle(void)
52{
53 struct udevice *dev;
54 int ret;
55
Lokesh Vutla00a15132019-09-27 13:32:15 +053056 ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
Simon Glass65130cd2020-12-28 20:34:56 -070057 DM_DRIVER_GET(ti_sci), &dev);
Lokesh Vutla28cd8242019-03-08 11:47:33 +053058 if (ret)
59 panic("Failed to get SYSFW (%d)\n", ret);
60
61 return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
62}
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053063
Lokesh Vutla5fafe442020-03-10 16:50:58 +053064void k3_sysfw_print_ver(void)
65{
66 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
67 char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];
68
69 /*
70 * Output System Firmware version info. Note that since the
71 * 'firmware_description' field is not guaranteed to be zero-
72 * terminated we manually add a \0 terminator if needed. Further
73 * note that we intentionally no longer rely on the extended
74 * printf() formatter '%.*s' to not having to require a more
75 * full-featured printf() implementation.
76 */
77 strncpy(fw_desc, ti_sci->version.firmware_description,
78 sizeof(ti_sci->version.firmware_description));
79 fw_desc[sizeof(fw_desc) - 1] = '\0';
80
81 printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
82 ti_sci->version.abi_major, ti_sci->version.abi_minor,
83 ti_sci->version.firmware_revision, fw_desc);
84}
85
Lokesh Vutlaff7ab092020-08-05 22:44:17 +053086void mmr_unlock(phys_addr_t base, u32 partition)
87{
88 /* Translate the base address */
89 phys_addr_t part_base = base + partition * CTRL_MMR0_PARTITION_SIZE;
90
91 /* Unlock the requested partition if locked using two-step sequence */
92 writel(CTRLMMR_LOCK_KICK0_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK0);
93 writel(CTRLMMR_LOCK_KICK1_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK1);
94}
95
Lokesh Vutla8be6bbf2020-08-05 22:44:23 +053096bool is_rom_loaded_sysfw(struct rom_extended_boot_data *data)
97{
98 if (strncmp(data->header, K3_ROM_BOOT_HEADER_MAGIC, 7))
99 return false;
100
101 return data->num_components > 1;
102}
103
Andreas Dannenbergd13ec8c2019-08-15 15:55:28 -0500104DECLARE_GLOBAL_DATA_PTR;
105
#ifdef CONFIG_K3_EARLY_CONS
/**
 * early_console_init() - Bring up the early debug console
 *
 * Looks up the serial device selected by CONFIG_K3_EARLY_CONS_IDX and
 * installs it as the current console so output is available before the
 * regular console initialization runs.
 *
 * Return: 0 on success, negative error code when the serial device
 * cannot be found.
 */
int early_console_init(void)
{
	struct udevice *serial;
	int err;

	gd->baudrate = CONFIG_BAUDRATE;

	err = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &serial);
	if (err) {
		printf("Error getting serial dev for early console! (%d)\n",
		       err);
		return err;
	}

	/* Mark the console usable for the rest of SPL */
	gd->cur_serial_dev = serial;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif
129
Tero Kristo738c5902021-06-11 11:45:19 +0300130#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
Keerthy7007adc2020-02-12 13:55:04 +0530131
/**
 * init_env() - Publish environment variables describing firmware storage
 *
 * When SPL environment support is enabled, initialize and relocate the
 * environment, then set the storage interface/location variables used
 * later to locate remote-core firmware, based on the boot device. Boot
 * devices other than MMC2 and SPI are reported and left unconfigured.
 */
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *boot_part;

	env_init();
	env_relocate();

	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		boot_part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", boot_part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}
157
Keerthy7007adc2020-02-12 13:55:04 +0530158int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
159{
160 struct udevice *fsdev;
161 char *name = NULL;
162 int size = 0;
163
Keerthyfe8f6092022-01-27 13:16:53 +0100164 if (!IS_ENABLED(CONFIG_FS_LOADER))
165 return 0;
166
Keerthy7007adc2020-02-12 13:55:04 +0530167 *loadaddr = 0;
168#ifdef CONFIG_SPL_ENV_SUPPORT
169 switch (spl_boot_device()) {
170 case BOOT_DEVICE_MMC2:
171 name = env_get(name_fw);
172 *loadaddr = env_get_hex(name_loadaddr, *loadaddr);
173 break;
174 default:
175 printf("Loading rproc fw image from device %u not supported!\n",
176 spl_boot_device());
177 return 0;
178 }
179#endif
180 if (!*loadaddr)
181 return 0;
182
Sean Anderson5cd0cb32022-12-29 11:52:59 -0500183 if (!get_fs_loader(&fsdev)) {
Keerthy7007adc2020-02-12 13:55:04 +0530184 size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
185 0, 0);
186 }
187
188 return size;
189}
Keerthy7007adc2020-02-12 13:55:04 +0530190
Andrew Davisc178e6d2023-04-06 11:38:15 -0500191void release_resources_for_core_shutdown(void)
Suman Anna34574102021-07-27 18:24:40 -0500192{
Andrew Davisc178e6d2023-04-06 11:38:15 -0500193 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
194 struct ti_sci_dev_ops *dev_ops = &ti_sci->ops.dev_ops;
195 struct ti_sci_proc_ops *proc_ops = &ti_sci->ops.proc_ops;
196 int ret;
197 u32 i;
198
199 /* Iterate through list of devices to put (shutdown) */
200 for (i = 0; i < ARRAY_SIZE(put_device_ids); i++) {
201 u32 id = put_device_ids[i];
202
203 ret = dev_ops->put_device(ti_sci, id);
204 if (ret)
205 panic("Failed to put device %u (%d)\n", id, ret);
206 }
207
208 /* Iterate through list of cores to put (shutdown) */
209 for (i = 0; i < ARRAY_SIZE(put_core_ids); i++) {
210 u32 id = put_core_ids[i];
211
212 /*
213 * Queue up the core shutdown request. Note that this call
214 * needs to be followed up by an actual invocation of an WFE
215 * or WFI CPU instruction.
216 */
217 ret = proc_ops->proc_shutdown_no_wait(ti_sci, id);
218 if (ret)
219 panic("Failed sending core %u shutdown message (%d)\n",
220 id, ret);
221 }
Suman Anna34574102021-07-27 18:24:40 -0500222}
223
/**
 * jump_to_image_no_args() - Hand over from SPL to ATF and DM firmware
 * @spl_image: loaded image description; entry_point is used as the ATF
 *             entry when FIT post-processing did not record one
 *
 * Releases SPL's exclusive SYSFW devices, loads ATF onto remoteproc
 * device 1 (assumed to be the Cortex-A core), optionally authenticates
 * the ATF/OPTEE images, then either jumps this R5 core into the DM
 * firmware ELF image or, when no DM firmware is available, starts ATF
 * and shuts this core down. Never returns.
 */
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0, shut_cpu = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	/* Load DM firmware from storage only if FIT did not provide it */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

#if (CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS) && IS_ENABLED(CONFIG_SYS_K3_SPL_ATF))
	/* Authenticate ATF */
	void *image_addr = (void *)fit_image_info[IMAGE_ID_ATF].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_ATF].image_start,
	      fit_image_info[IMAGE_ID_ATF].image_len,
	      image_os_match[IMAGE_ID_ATF]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_ATF].image_len);

	/* Authenticate OPTEE */
	image_addr = (void *)fit_image_info[IMAGE_ID_OPTEE].image_start;

	debug("%s: Authenticating image: addr=%lx, size=%ld, os=%s\n", __func__,
	      fit_image_info[IMAGE_ID_OPTEE].image_start,
	      fit_image_info[IMAGE_ID_OPTEE].image_len,
	      image_os_match[IMAGE_ID_OPTEE]);

	ti_secure_image_post_process(&image_addr,
				     (size_t *)&fit_image_info[IMAGE_ID_OPTEE].image_len);

#endif

	/*
	 * No DM firmware from FIT and no valid ELF image loaded from
	 * storage: nothing for this R5 core to run, so shut it down after
	 * starting ATF.
	 */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		shut_cpu = 1;
		goto start_arm64;
	}

	/* Resolve the DM firmware entry point (ELF images get relocated) */
	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

start_arm64:
	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (shut_cpu) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		/* Park here until SYSFW powers this core down */
		while (1)
			asm volatile("wfe");
	}
	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
317#endif
Lokesh Vutla16cf5d22019-03-08 11:47:34 +0530318
#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
/**
 * board_fit_image_post_process() - Record and authenticate FIT images
 * @fit:     pointer to the FIT blob
 * @node:    offset of the image node being processed
 * @p_image: in/out pointer to the image data
 * @p_size:  in/out pointer to the image size
 *
 * When booting ATF from SPL, the entry address and size of each
 * recognized image ("os" property matched against image_os_match) are
 * stashed in fit_image_info for jump_to_image_no_args(). ATF and OPTEE
 * are only integrity-checked here and fully authenticated once the A72
 * cluster is up; all other images are authenticated immediately.
 */
void board_fit_image_post_process(const void *fit, int node, void **p_image,
				  size_t *p_size)
{
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
	int len;
	int i;
	const char *os;
	u32 addr;

	/*
	 * NOTE(review): fdt_getprop() returns NULL when the node has no
	 * "os" property; strcmp() below would then fault. Assumes every
	 * FIT image node carries an "os" property — confirm.
	 */
	os = fdt_getprop(fit, node, "os", &len);
	addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);

	/* NOTE(review): *p_size is size_t but printed with %d */
	debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
	      addr, *p_size, os);

	/* Stash start/len of the images tracked for the ATF boot flow */
	for (i = 0; i < IMAGE_AMT; i++) {
		if (!strcmp(os, image_os_match[i])) {
			fit_image_info[i].image_start = addr;
			fit_image_info[i].image_len = *p_size;
			debug("%s: matched image for ID %d\n", __func__, i);
			break;
		}
	}
	/*
	 * Only DM and the DTBs are being authenticated here,
	 * rest will be authenticated when A72 cluster is up
	 */
	if ((i != IMAGE_ID_ATF) && (i != IMAGE_ID_OPTEE))
#endif
	{
		ti_secure_image_check_binary(p_image, p_size);
		ti_secure_image_post_process(p_image, p_size);
	}
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
	else
		ti_secure_image_check_binary(p_image, p_size);
#endif
}
#endif
359
Lokesh Vutla16cf5d22019-03-08 11:47:34 +0530360#if defined(CONFIG_OF_LIBFDT)
361int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
362{
363 u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
364 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
365 int ret, node, subnode, len, prev_node;
366 u32 range[4], addr, size;
367 const fdt32_t *sub_reg;
368
369 ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
370 msmc_size = msmc_end - msmc_start + 1;
371 debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
372 msmc_start, msmc_size);
373
374 /* find or create "msmc_sram node */
375 ret = fdt_path_offset(blob, parent_path);
376 if (ret < 0)
377 return ret;
378
379 node = fdt_find_or_add_subnode(blob, ret, node_name);
380 if (node < 0)
381 return node;
382
383 ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
384 if (ret < 0)
385 return ret;
386
387 reg[0] = cpu_to_fdt64(msmc_start);
388 reg[1] = cpu_to_fdt64(msmc_size);
389 ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
390 if (ret < 0)
391 return ret;
392
393 fdt_setprop_cell(blob, node, "#address-cells", 1);
394 fdt_setprop_cell(blob, node, "#size-cells", 1);
395
396 range[0] = 0;
397 range[1] = cpu_to_fdt32(msmc_start >> 32);
398 range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
399 range[3] = cpu_to_fdt32(msmc_size);
400 ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
401 if (ret < 0)
402 return ret;
403
404 subnode = fdt_first_subnode(blob, node);
405 prev_node = 0;
406
407 /* Look for invalid subnodes and delete them */
408 while (subnode >= 0) {
409 sub_reg = fdt_getprop(blob, subnode, "reg", &len);
410 addr = fdt_read_number(sub_reg, 1);
411 sub_reg++;
412 size = fdt_read_number(sub_reg, 1);
413 debug("%s: subnode = %d, addr = 0x%x. size = 0x%x\n", __func__,
414 subnode, addr, size);
415 if (addr + size > msmc_size ||
416 !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
417 !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
418 fdt_del_node(blob, subnode);
419 debug("%s: deleting subnode %d\n", __func__, subnode);
420 if (!prev_node)
421 subnode = fdt_first_subnode(blob, node);
422 else
423 subnode = fdt_next_subnode(blob, prev_node);
424 } else {
425 prev_node = subnode;
426 subnode = fdt_next_subnode(blob, prev_node);
427 }
428 }
429
430 return 0;
431}
Andrew F. Davis6c43b522019-09-17 17:15:40 -0400432
#if defined(CONFIG_OF_SYSTEM_SETUP)
/**
 * ft_system_setup() - Kernel device tree fixups run before boot
 * @blob: kernel FDT blob
 * @bd:   board information (unused here)
 *
 * Fix up the MSMC SRAM node, trying the "/bus@100000" parent path first
 * and falling back to the older "/interconnect@100000" path.
 *
 * Return: 0 on success, the fixup's error code otherwise (also printed).
 */
int ft_system_setup(void *blob, struct bd_info *bd)
{
	int err = fdt_fixup_msmc_ram(blob, "/bus@100000", "sram@70000000");

	if (err < 0)
		err = fdt_fixup_msmc_ram(blob, "/interconnect@100000",
					 "sram@70000000");
	if (err)
		printf("%s: fixing up msmc ram failed %d\n", __func__, err);

	return err;
}
#endif
448
Lokesh Vutla16cf5d22019-03-08 11:47:34 +0530449#endif
Lokesh Vutlaa2285322019-06-13 10:29:42 +0530450
#ifndef CONFIG_SYSRESET
/*
 * reset_cpu() - CPU reset hook.
 *
 * Intentionally empty: provided only when no sysreset driver is enabled,
 * so the symbol exists for the generic reset code.
 */
void reset_cpu(void)
{
}
#endif
Lokesh Vutlaa04cf3b2019-09-27 13:32:11 +0530456
Andrew Davisf8c98362022-07-15 11:34:32 -0500457enum k3_device_type get_device_type(void)
458{
459 u32 sys_status = readl(K3_SEC_MGR_SYS_STATUS);
460
461 u32 sys_dev_type = (sys_status & SYS_STATUS_DEV_TYPE_MASK) >>
462 SYS_STATUS_DEV_TYPE_SHIFT;
463
464 u32 sys_sub_type = (sys_status & SYS_STATUS_SUB_TYPE_MASK) >>
465 SYS_STATUS_SUB_TYPE_SHIFT;
466
467 switch (sys_dev_type) {
468 case SYS_STATUS_DEV_TYPE_GP:
469 return K3_DEVICE_TYPE_GP;
470 case SYS_STATUS_DEV_TYPE_TEST:
471 return K3_DEVICE_TYPE_TEST;
472 case SYS_STATUS_DEV_TYPE_EMU:
473 return K3_DEVICE_TYPE_EMU;
474 case SYS_STATUS_DEV_TYPE_HS:
475 if (sys_sub_type == SYS_STATUS_SUB_TYPE_VAL_FS)
476 return K3_DEVICE_TYPE_HS_FS;
477 else
478 return K3_DEVICE_TYPE_HS_SE;
479 default:
480 return K3_DEVICE_TYPE_BAD;
481 }
482}
483
Lokesh Vutlaa04cf3b2019-09-27 13:32:11 +0530484#if defined(CONFIG_DISPLAY_CPUINFO)
Andrew Davisf8c98362022-07-15 11:34:32 -0500485static const char *get_device_type_name(void)
486{
487 enum k3_device_type type = get_device_type();
488
489 switch (type) {
490 case K3_DEVICE_TYPE_GP:
491 return "GP";
492 case K3_DEVICE_TYPE_TEST:
493 return "TEST";
494 case K3_DEVICE_TYPE_EMU:
495 return "EMU";
496 case K3_DEVICE_TYPE_HS_FS:
497 return "HS-FS";
498 case K3_DEVICE_TYPE_HS_SE:
499 return "HS-SE";
500 default:
501 return "BAD";
502 }
503}
504
/**
 * print_cpuinfo() - Print SoC identification on the console
 *
 * Prints the SoC family and silicon revision (when the SoC driver
 * provides them) followed by the device security type.
 *
 * Return: always 0 (an unidentified SoC prints "UNKNOWN", not an error).
 */
int print_cpuinfo(void)
{
	struct udevice *soc;
	char name[64];
	int ret;

	printf("SoC: ");

	ret = soc_get(&soc);
	if (ret) {
		printf("UNKNOWN\n");
		return 0;
	}

	/* Use sizeof(name) so the buffer size is stated in one place */
	ret = soc_get_family(soc, name, sizeof(name));
	if (!ret)
		printf("%s ", name);

	ret = soc_get_revision(soc, name, sizeof(name));
	if (!ret)
		printf("%s ", name);

	printf("%s\n", get_device_type_name());

	return 0;
}
533#endif
Lokesh Vutla362beda2019-10-07 13:52:17 +0530534
#ifdef CONFIG_ARM64
/**
 * board_prep_linux() - Arch hook run right before starting the kernel
 * @images: boot images descriptor carrying the OS start/end addresses
 *
 * Flushes the dcache over the loaded kernel image range (rounded up to
 * the cache line size) so the image is written back to memory before the
 * jump.
 */
void board_prep_linux(struct bootm_headers *images)
{
	ulong flush_end = ROUND(images->os.end, CONFIG_SYS_CACHELINE_SIZE);

	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start, flush_end);
}
#endif
Lokesh Vutla5fbd6fe2019-12-31 15:49:55 +0530545
#ifdef CONFIG_CPU_V7R
/**
 * disable_linefill_optimization() - Work around R5F cache deadlock issues
 *
 * Sets the DLFO bit (bit 13) in the Cortex-R5 auxiliary control register
 * (ACTLR) to disable linefill optimization; see the rationale below.
 */
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are 2 conditions where R5F can deadlock:
	 * 1.When software is performing series of store operations to
	 * cacheable write back/write allocate memory region and later
	 * on software execute barrier operation (DSB or DMB). R5F may
	 * hang at the barrier instruction.
	 * 2.When software is performing a mix of load and store operations
	 * within a tight loop and store operations are all writing to
	 * cacheable write back/write allocates memory regions, R5F may
	 * hang at one of the load instruction.
	 *
	 * To avoid the above two conditions disable linefill optimization
	 * inside Cortex R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit  */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500570
Manorit Chawdhry33f75ee2023-05-05 15:54:00 +0530571static void remove_fwl_regions(struct fwl_data fwl_data, size_t num_regions,
572 enum k3_firewall_region_type fwl_type)
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500573{
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500574 struct ti_sci_fwl_ops *fwl_ops;
575 struct ti_sci_handle *ti_sci;
Manorit Chawdhry33f75ee2023-05-05 15:54:00 +0530576 struct ti_sci_msg_fwl_region region;
577 size_t j;
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500578
579 ti_sci = get_ti_sci_handle();
580 fwl_ops = &ti_sci->ops.fwl_ops;
Manorit Chawdhry33f75ee2023-05-05 15:54:00 +0530581
582 for (j = 0; j < fwl_data.regions; j++) {
583 region.fwl_id = fwl_data.fwl_id;
584 region.region = j;
585 region.n_permission_regs = 3;
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500586
Manorit Chawdhry33f75ee2023-05-05 15:54:00 +0530587 fwl_ops->get_fwl_region(ti_sci, &region);
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500588
Manorit Chawdhry33f75ee2023-05-05 15:54:00 +0530589 /* Don't disable the background regions */
590 if (region.control != 0 &&
591 ((region.control & K3_FIREWALL_BACKGROUND_BIT) ==
592 fwl_type)) {
593 pr_debug("Attempting to disable firewall %5d (%25s)\n",
594 region.fwl_id, fwl_data.name);
595 region.control = 0;
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500596
Manorit Chawdhry33f75ee2023-05-05 15:54:00 +0530597 if (fwl_ops->set_fwl_region(ti_sci, &region))
598 pr_err("Could not disable firewall %5d (%25s)\n",
599 region.fwl_id, fwl_data.name);
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500600 }
601 }
602}
Jan Kiszka7ce99f72020-05-18 07:57:22 +0200603
Manorit Chawdhry33f75ee2023-05-05 15:54:00 +0530604void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
605{
606 size_t i;
607
608 for (i = 0; i < fwl_data_size; i++) {
609 remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
610 K3_FIREWALL_REGION_FOREGROUND);
611 remove_fwl_regions(fwl_data[i], fwl_data[i].regions,
612 K3_FIREWALL_REGION_BACKGROUND);
613 }
614}
615
/**
 * spl_enable_dcache() - Enable the data cache in SPL
 *
 * Sizes DDR via dram_init(), reserves space for the MMU/TLB table just
 * below the top of (32-bit addressable) RAM, records it in the global
 * data, and turns the dcache on. Compiled out when both caches are
 * disabled in the configuration.
 */
void spl_enable_dcache(void)
{
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CFG_SYS_SDRAM_BASE;

	dram_init();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	/* Place the table at the very top of the usable window */
	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

	dcache_enable();
#endif
}
638
639#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
/* Turn the dcache off before SPL hands control to the next stage */
void spl_board_prepare_for_boot(void)
{
	dcache_disable();
}
644
/* Turn the dcache off before SPL jumps directly to Linux */
void spl_board_prepare_for_linux(void)
{
	dcache_disable();
}
649#endif
Vignesh Raghavendra030f4052021-12-24 12:55:29 +0530650
651int misc_init_r(void)
652{
653 if (IS_ENABLED(CONFIG_TI_AM65_CPSW_NUSS)) {
654 struct udevice *dev;
655 int ret;
656
657 ret = uclass_get_device_by_driver(UCLASS_MISC,
658 DM_DRIVER_GET(am65_cpsw_nuss),
659 &dev);
660 if (ret)
661 printf("Failed to probe am65_cpsw_nuss driver\n");
662 }
663
Vignesh Raghavendraae17d362023-04-20 21:42:21 +0530664 /* Default FIT boot on HS-SE devices */
665 if (get_device_type() == K3_DEVICE_TYPE_HS_SE)
Andrew Davisf1d72052022-10-07 11:27:46 -0500666 env_set("boot_fit", "1");
667
Vignesh Raghavendra030f4052021-12-24 12:55:29 +0530668 return 0;
669}
Andrew Davis2dde9a72023-04-06 11:38:17 -0500670
/**
 * do_board_detect() - Detect board description
 *
 * Function to detect board description. This is expected to be
 * overridden in the SoC family board file where desired; this weak
 * default intentionally does nothing.
 */
void __weak do_board_detect(void)
{
}