// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: Common Architecture initialization
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <image.h>
#include <init.h>
#include <log.h>
#include <spl.h>
#include <asm/global_data.h>
#include "common.h"
#include <dm.h>
#include <remoteproc.h>
#include <asm/cache.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <fdt_support.h>
#include <asm/arch/sys_proto.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <fs_loader.h>
#include <fs.h>
#include <env.h>
#include <elf.h>
#include <soc.h>

#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
enum {
	IMAGE_ID_ATF,
	IMAGE_ID_OPTEE,
	IMAGE_ID_SPL,
	IMAGE_ID_DM_FW,
	IMAGE_AMT,
};

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",
	"tee",
	"U-Boot",
	"DM",
};
#endif

static struct image_info fit_image_info[IMAGE_AMT];
#endif

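/*
 * Return the TI-SCI protocol handle from the System Firmware (SYSFW)
 * device; panics if the device cannot be found.
 */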
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_DRIVER_GET(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}

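/* Print the System Firmware ABI version and firmware description string */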
void k3_sysfw_print_ver(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];

	/*
	 * Output System Firmware version info. Note that since the
	 * 'firmware_description' field is not guaranteed to be zero-
	 * terminated we manually add a \0 terminator if needed. Further
	 * note that we intentionally no longer rely on the extended
	 * printf() formatter '%.*s' so as not to require a more
	 * full-featured printf() implementation.
	 */
	strncpy(fw_desc, ti_sci->version.firmware_description,
		sizeof(ti_sci->version.firmware_description));
	fw_desc[sizeof(fw_desc) - 1] = '\0';

	printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
	       ti_sci->version.abi_major, ti_sci->version.abi_minor,
	       ti_sci->version.firmware_revision, fw_desc);
}

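/*
 * Unlock one partition of a control MMR region by writing the two-step
 * KICK register unlock sequence.
 */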
void mmr_unlock(phys_addr_t base, u32 partition)
{
	/* Translate the base address */
	phys_addr_t part_base = base + partition * CTRL_MMR0_PARTITION_SIZE;

	/* Unlock the requested partition if locked using two-step sequence */
	writel(CTRLMMR_LOCK_KICK0_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK0);
	writel(CTRLMMR_LOCK_KICK1_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK1);
}

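/*
 * Check the ROM extended boot data to see whether ROM has already loaded
 * additional components (i.e. SYSFW) besides the SPL image itself.
 */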
bool is_rom_loaded_sysfw(struct rom_extended_boot_data *data)
{
	if (strncmp(data->header, K3_ROM_BOOT_HEADER_MAGIC, 7))
		return false;

	return data->num_components > 1;
}

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_K3_EARLY_CONS
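/*
 * Bring up the serial device selected by CONFIG_K3_EARLY_CONS_IDX so that
 * console output is available before the full console is initialized.
 */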
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif

#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)

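/*
 * Initialize the SPL environment and record the boot-device-specific
 * settings (storage interface, partition/volume) used later for
 * firmware loading.
 */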
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

#ifdef CONFIG_FS_LOADER
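/*
 * Load a remote core firmware image from boot storage into memory. The
 * image name and load address are taken from the environment variables
 * named by the arguments. Returns the result of
 * request_firmware_into_buf(), or 0 if nothing was loaded.
 */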
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}
#else
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	return 0;
}
#endif

__weak void release_resources_for_core_shutdown(void)
{
	debug("%s not implemented...\n", __func__);
}

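/*
 * SPL hand-off point: release exclusive devices, load ATF onto the
 * Cortex-A core (remoteproc 1) and start it, then either jump to the DM
 * firmware ELF on this core or shut this core down if no DM firmware is
 * available.
 */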
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0, shut_cpu = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		shut_cpu = 1;
		goto start_arm64;
	}

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

start_arm64:
	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (shut_cpu) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}
	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
#endif

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
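/*
 * Record the entry point and size of the ATF, OP-TEE, SPL and DM firmware
 * images as they are extracted from the FIT, and apply the TI secure
 * device post-processing when enabled.
 */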
void board_fit_image_post_process(const void *fit, int node, void **p_image,
				  size_t *p_size)
{
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
	int len;
	int i;
	const char *os;
	u32 addr;

	os = fdt_getprop(fit, node, "os", &len);
	addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);

	debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
	      addr, *p_size, os);

	for (i = 0; i < IMAGE_AMT; i++) {
		if (!strcmp(os, image_os_match[i])) {
			fit_image_info[i].image_start = addr;
			fit_image_info[i].image_len = *p_size;
			debug("%s: matched image for ID %d\n", __func__, i);
			break;
		}
	}
#endif

#if IS_ENABLED(CONFIG_TI_SECURE_DEVICE)
	ti_secure_image_post_process(p_image, p_size);
#endif
}
#endif

#if defined(CONFIG_OF_LIBFDT)
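/*
 * Create (or update) an "mmio-sram" node describing the MSMC RAM range
 * reported by System Firmware, and delete subnodes (sysfw, l3cache, or
 * out-of-range entries) that must not be exposed to the OS.
 */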
int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
{
	u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, node, subnode, len, prev_node;
	u32 range[4], addr, size;
	const fdt32_t *sub_reg;

	ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
	msmc_size = msmc_end - msmc_start + 1;
	debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
	      msmc_start, msmc_size);

	/* Find or create the "msmc_sram" node */
	ret = fdt_path_offset(blob, parent_path);
	if (ret < 0)
		return ret;

	node = fdt_find_or_add_subnode(blob, ret, node_name);
	if (node < 0)
		return node;

	ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
	if (ret < 0)
		return ret;

	reg[0] = cpu_to_fdt64(msmc_start);
	reg[1] = cpu_to_fdt64(msmc_size);
	ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
	if (ret < 0)
		return ret;

	fdt_setprop_cell(blob, node, "#address-cells", 1);
	fdt_setprop_cell(blob, node, "#size-cells", 1);

	range[0] = 0;
	range[1] = cpu_to_fdt32(msmc_start >> 32);
	range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
	range[3] = cpu_to_fdt32(msmc_size);
	ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
	if (ret < 0)
		return ret;

	subnode = fdt_first_subnode(blob, node);
	prev_node = 0;

	/* Look for invalid subnodes and delete them */
	while (subnode >= 0) {
		sub_reg = fdt_getprop(blob, subnode, "reg", &len);
		addr = fdt_read_number(sub_reg, 1);
		sub_reg++;
		size = fdt_read_number(sub_reg, 1);
		debug("%s: subnode = %d, addr = 0x%x, size = 0x%x\n", __func__,
		      subnode, addr, size);
		if (addr + size > msmc_size ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
			fdt_del_node(blob, subnode);
			debug("%s: deleting subnode %d\n", __func__, subnode);
			if (!prev_node)
				subnode = fdt_first_subnode(blob, node);
			else
				subnode = fdt_next_subnode(blob, prev_node);
		} else {
			prev_node = subnode;
			subnode = fdt_next_subnode(blob, prev_node);
		}
	}

	return 0;
}

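/* Set the "status" property of the node at the given path to "disabled" */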
int fdt_disable_node(void *blob, char *node_path)
{
	int offs;
	int ret;

	offs = fdt_path_offset(blob, node_path);
	if (offs < 0) {
		printf("Node %s not found.\n", node_path);
		return offs;
	}
	ret = fdt_setprop_string(blob, offs, "status", "disabled");
	if (ret < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(ret));
		return ret;
	}
	return 0;
}

#endif

#ifndef CONFIG_SYSRESET
void reset_cpu(void)
{
}
#endif

#if defined(CONFIG_DISPLAY_CPUINFO)
int print_cpuinfo(void)
{
	struct udevice *soc;
	char name[64];
	int ret;

	printf("SoC: ");

	ret = soc_get(&soc);
	if (ret) {
		printf("UNKNOWN\n");
		return 0;
	}

	ret = soc_get_family(soc, name, 64);
	if (!ret) {
		printf("%s ", name);
	}

	ret = soc_get_revision(soc, name, 64);
	if (!ret) {
		printf("%s\n", name);
	}

	return 0;
}
#endif

bool soc_is_j721e(void)
{
	u32 soc;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	return soc == J721E;
}

bool soc_is_j7200(void)
{
	u32 soc;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	return soc == J7200;
}

#ifdef CONFIG_ARM64
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif

#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are 2 conditions where R5F can deadlock:
	 * 1. When software performs a series of store operations to
	 *    cacheable write back/write allocate memory regions and later
	 *    executes a barrier operation (DSB or DMB), R5F may hang at
	 *    the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the store operations all write to
	 *    cacheable write back/write allocate memory regions, R5F may
	 *    hang at one of the load instructions.
	 *
	 * To avoid the above two conditions disable linefill optimization
	 * inside Cortex R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif

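/*
 * Walk the given firewall list and disable every region that is still
 * enabled (control register non-zero).
 */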
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	struct ti_sci_msg_fwl_region region;
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	size_t i, j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;
	for (i = 0; i < fwl_data_size; i++) {
		for (j = 0; j < fwl_data[i].regions; j++) {
			region.fwl_id = fwl_data[i].fwl_id;
			region.region = j;
			region.n_permission_regs = 3;

			fwl_ops->get_fwl_region(ti_sci, &region);

			if (region.control != 0) {
				pr_debug("Attempting to disable firewall %5d (%25s)\n",
					 region.fwl_id, fwl_data[i].name);
				region.control = 0;

				if (fwl_ops->set_fwl_region(ti_sci, &region))
					pr_err("Could not disable firewall %5d (%25s)\n",
					       region.fwl_id, fwl_data[i].name);
			}
		}
	}
}

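/*
 * Set up a page table at the top of the (32-bit addressable) DRAM and
 * enable the data cache for SPL.
 */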
void spl_enable_dcache(void)
{
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CONFIG_SYS_SDRAM_BASE;

	dram_init_banksize();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

	dcache_enable();
#endif
}

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
void spl_board_prepare_for_boot(void)
{
	dcache_disable();
}

void spl_board_prepare_for_linux(void)
{
	dcache_disable();
}
#endif