// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: Common Architecture initialization
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <image.h>
#include <init.h>
#include <log.h>
#include <spl.h>
#include <asm/global_data.h>
#include "common.h"
#include <dm.h>
#include <remoteproc.h>
#include <asm/cache.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <fdt_support.h>
#include <asm/arch/sys_proto.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <fs_loader.h>
#include <fs.h>
#include <env.h>
#include <elf.h>
#include <soc.h>

#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
enum {
	IMAGE_ID_ATF,
	IMAGE_ID_OPTEE,
	IMAGE_ID_SPL,
	IMAGE_ID_DM_FW,
	IMAGE_AMT,
};

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
static const char *image_os_match[IMAGE_AMT] = {
	"arm-trusted-firmware",
	"tee",
	"U-Boot",
	"DM",
};
#endif

static struct image_info fit_image_info[IMAGE_AMT];
#endif

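/*
 * get_ti_sci_handle() - Get a handle to the TI SCI (System Firmware) driver.
 *
 * Probes the ti_sci firmware device via driver model and returns its
 * protocol handle. Panics if the device cannot be obtained.
 */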
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_DRIVER_GET(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}

void k3_sysfw_print_ver(void)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	char fw_desc[sizeof(ti_sci->version.firmware_description) + 1];

	/*
	 * Output System Firmware version info. Note that since the
	 * 'firmware_description' field is not guaranteed to be zero-
	 * terminated, we manually add a '\0' terminator if needed. Further
	 * note that we intentionally avoid the extended printf() formatter
	 * '%.*s' so that a more full-featured printf() implementation is
	 * not required.
	 */
	strncpy(fw_desc, ti_sci->version.firmware_description,
		sizeof(ti_sci->version.firmware_description));
	fw_desc[sizeof(fw_desc) - 1] = '\0';

	printf("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
	       ti_sci->version.abi_major, ti_sci->version.abi_minor,
	       ti_sci->version.firmware_revision, fw_desc);
}

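/*
 * mmr_unlock() - Unlock a partition of a control MMR region.
 * @base:      base address of the control MMR region
 * @partition: partition number to unlock
 *
 * Control MMR partitions stay write-protected until both KICK registers
 * of the partition are written with their unlock values.
 */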
void mmr_unlock(phys_addr_t base, u32 partition)
{
	/* Translate the base address */
	phys_addr_t part_base = base + partition * CTRL_MMR0_PARTITION_SIZE;

	/* Unlock the requested partition if locked using two-step sequence */
	writel(CTRLMMR_LOCK_KICK0_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK0);
	writel(CTRLMMR_LOCK_KICK1_UNLOCK_VAL, part_base + CTRLMMR_LOCK_KICK1);
}

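/*
 * is_rom_loaded_sysfw() - Check if ROM has already loaded System Firmware.
 *
 * Returns true if the extended boot data left behind by ROM carries the
 * expected header magic and lists more than one component, meaning ROM
 * loaded SYSFW in addition to the bootloader image.
 */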
bool is_rom_loaded_sysfw(struct rom_extended_boot_data *data)
{
	if (strncmp(data->header, K3_ROM_BOOT_HEADER_MAGIC, 7))
		return false;

	return data->num_components > 1;
}

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_K3_EARLY_CONS
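/*
 * early_console_init() - Bring up an early serial console in SPL.
 *
 * Looks up the serial device selected by CONFIG_K3_EARLY_CONS_IDX and
 * marks the console as ready so output is available before the regular
 * console initialization runs.
 */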
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif

#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)

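/*
 * init_env() - Set up the SPL environment for firmware loading.
 *
 * When SPL environment support is enabled, initializes and relocates the
 * environment, then records where remote-core firmware should be loaded
 * from (MMC partition or UBI volume) based on the boot device.
 */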
void init_env(void)
{
#ifdef CONFIG_SPL_ENV_SUPPORT
	char *part;

	env_init();
	env_relocate();
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		part = env_get("bootpart");
		env_set("storage_interface", "mmc");
		env_set("fw_dev_part", part);
		break;
	case BOOT_DEVICE_SPI:
		env_set("storage_interface", "ubi");
		env_set("fw_ubi_mtdpart", "UBI");
		env_set("fw_ubi_volume", "UBI0");
		break;
	default:
		printf("%s from device %u not supported!\n",
		       __func__, spl_boot_device());
		return;
	}
#endif
}

#ifdef CONFIG_FS_LOADER
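/*
 * load_firmware() - Load a remote-core firmware image via the FS loader.
 * @name_fw:       environment variable holding the firmware file name
 * @name_loadaddr: environment variable holding the load address
 * @loadaddr:      returns the address the firmware was loaded to
 *
 * Returns the number of bytes loaded, or 0 if nothing was loaded.
 */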
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	struct udevice *fsdev;
	char *name = NULL;
	int size = 0;

	*loadaddr = 0;
#ifdef CONFIG_SPL_ENV_SUPPORT
	switch (spl_boot_device()) {
	case BOOT_DEVICE_MMC2:
		name = env_get(name_fw);
		*loadaddr = env_get_hex(name_loadaddr, *loadaddr);
		break;
	default:
		printf("Loading rproc fw image from device %u not supported!\n",
		       spl_boot_device());
		return 0;
	}
#endif
	if (!*loadaddr)
		return 0;

	if (!uclass_get_device(UCLASS_FS_FIRMWARE_LOADER, 0, &fsdev)) {
		size = request_firmware_into_buf(fsdev, name, (void *)*loadaddr,
						 0, 0);
	}

	return size;
}
#else
int load_firmware(char *name_fw, char *name_loadaddr, u32 *loadaddr)
{
	return 0;
}
#endif

__weak void start_non_linux_remote_cores(void)
{
}

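/*
 * jump_to_image_no_args() - Hand over from SPL to ATF on the Cortex-A core.
 *
 * SPL override of the generic hook: releases exclusive devices, loads and
 * starts ATF on remoteproc 1, then either shuts this core down or loads
 * and jumps to the DM firmware ELF image so the R5 continues running it.
 */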
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	typedef void __noreturn (*image_entry_noargs_t)(void);
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	u32 loadaddr = 0;
	int ret, size = 0;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	ret = rproc_init();
	if (ret)
		panic("rproc failed to be initialized (%d)\n", ret);

	init_env();

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		start_non_linux_remote_cores();
		size = load_firmware("name_mcur5f0_0fw", "addr_mcur5f0_0load",
				     &loadaddr);
	}

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	if (!fit_image_info[IMAGE_ID_ATF].image_start)
		fit_image_info[IMAGE_ID_ATF].image_start =
			spl_image->entry_point;

	ret = rproc_load(1, fit_image_info[IMAGE_ID_ATF].image_start, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	if (!fit_image_info[IMAGE_ID_DM_FW].image_len &&
	    !(size > 0 && valid_elf_image(loadaddr))) {
		debug("Shutting down...\n");
		release_resources_for_core_shutdown();

		while (1)
			asm volatile("wfe");
	}

	if (!fit_image_info[IMAGE_ID_DM_FW].image_start) {
		loadaddr = load_elf_image_phdr(loadaddr);
	} else {
		loadaddr = fit_image_info[IMAGE_ID_DM_FW].image_start;
		if (valid_elf_image(loadaddr))
			loadaddr = load_elf_image_phdr(loadaddr);
	}

	debug("%s: jumping to address %x\n", __func__, loadaddr);

	image_entry_noargs_t image_entry = (image_entry_noargs_t)loadaddr;

	image_entry();
}
#endif

#if CONFIG_IS_ENABLED(FIT_IMAGE_POST_PROCESS)
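/*
 * board_fit_image_post_process() - Record and post-process FIT images.
 *
 * Called for each image extracted from the FIT: remembers the entry
 * address and size of the ATF, OP-TEE, SPL and DM firmware images for
 * later use in jump_to_image_no_args(), and runs secure-device image
 * processing when enabled.
 */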
void board_fit_image_post_process(const void *fit, int node, void **p_image,
				  size_t *p_size)
{
#if IS_ENABLED(CONFIG_SYS_K3_SPL_ATF)
	int len;
	int i;
	const char *os;
	u32 addr;

	os = fdt_getprop(fit, node, "os", &len);
	addr = fdt_getprop_u32_default_node(fit, node, 0, "entry", -1);

	debug("%s: processing image: addr=%x, size=%d, os=%s\n", __func__,
	      addr, *p_size, os);

	for (i = 0; i < IMAGE_AMT; i++) {
		if (!strcmp(os, image_os_match[i])) {
			fit_image_info[i].image_start = addr;
			fit_image_info[i].image_len = *p_size;
			debug("%s: matched image for ID %d\n", __func__, i);
			break;
		}
	}
#endif

#if IS_ENABLED(CONFIG_TI_SECURE_DEVICE)
	ti_secure_image_post_process(p_image, p_size);
#endif
}
#endif

#if defined(CONFIG_OF_LIBFDT)
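/*
 * fdt_fixup_msmc_ram() - Update the MSMC SRAM node with the usable range.
 * @blob:        FDT blob to update
 * @parent_path: path to the parent of the SRAM node
 * @node_name:   name of the SRAM node to create or update
 *
 * Queries System Firmware for the MSMC region available to the host and
 * rewrites the mmio-sram node accordingly, deleting subnodes that fall
 * outside the usable range or are reserved (sysfw, l3cache).
 */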
int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
{
	u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, node, subnode, len, prev_node;
	u32 range[4], addr, size;
	const fdt32_t *sub_reg;

	ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
	msmc_size = msmc_end - msmc_start + 1;
	debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
	      msmc_start, msmc_size);

	/* Find or create the "msmc_sram" node */
	ret = fdt_path_offset(blob, parent_path);
	if (ret < 0)
		return ret;

	node = fdt_find_or_add_subnode(blob, ret, node_name);
	if (node < 0)
		return node;

	ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
	if (ret < 0)
		return ret;

	reg[0] = cpu_to_fdt64(msmc_start);
	reg[1] = cpu_to_fdt64(msmc_size);
	ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
	if (ret < 0)
		return ret;

	fdt_setprop_cell(blob, node, "#address-cells", 1);
	fdt_setprop_cell(blob, node, "#size-cells", 1);

	range[0] = 0;
	range[1] = cpu_to_fdt32(msmc_start >> 32);
	range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
	range[3] = cpu_to_fdt32(msmc_size);
	ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
	if (ret < 0)
		return ret;

	subnode = fdt_first_subnode(blob, node);
	prev_node = 0;

	/* Look for invalid subnodes and delete them */
	while (subnode >= 0) {
		sub_reg = fdt_getprop(blob, subnode, "reg", &len);
		addr = fdt_read_number(sub_reg, 1);
		sub_reg++;
		size = fdt_read_number(sub_reg, 1);
		debug("%s: subnode = %d, addr = 0x%x, size = 0x%x\n", __func__,
		      subnode, addr, size);
		if (addr + size > msmc_size ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
			fdt_del_node(blob, subnode);
			debug("%s: deleting subnode %d\n", __func__, subnode);
			if (!prev_node)
				subnode = fdt_first_subnode(blob, node);
			else
				subnode = fdt_next_subnode(blob, prev_node);
		} else {
			prev_node = subnode;
			subnode = fdt_next_subnode(blob, prev_node);
		}
	}

	return 0;
}

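/*
 * fdt_disable_node() - Mark a device tree node as disabled.
 * @blob:      FDT blob to update
 * @node_path: full path of the node to disable
 *
 * Sets the node's "status" property to "disabled", returning a negative
 * libfdt error code if the node is missing or the property cannot be set.
 */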
int fdt_disable_node(void *blob, char *node_path)
{
	int offs;
	int ret;

	offs = fdt_path_offset(blob, node_path);
	if (offs < 0) {
		printf("Node %s not found.\n", node_path);
		return offs;
	}
	ret = fdt_setprop_string(blob, offs, "status", "disabled");
	if (ret < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(ret));
		return ret;
	}
	return 0;
}

#endif

#ifndef CONFIG_SYSRESET
void reset_cpu(void)
{
}
#endif

#if defined(CONFIG_DISPLAY_CPUINFO)
int print_cpuinfo(void)
{
	struct udevice *soc;
	char name[64];
	int ret;

	printf("SoC: ");

	ret = soc_get(&soc);
	if (ret) {
		printf("UNKNOWN\n");
		return 0;
	}

	ret = soc_get_family(soc, name, 64);
	if (!ret)
		printf("%s ", name);

	ret = soc_get_revision(soc, name, 64);
	if (!ret)
		printf("%s\n", name);

	return 0;
}
#endif

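/*
 * The SoC type is identified from the part number field of the JTAG ID
 * register in the WKUP control MMR region.
 */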
bool soc_is_j721e(void)
{
	u32 soc;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	return soc == J721E;
}

bool soc_is_j7200(void)
{
	u32 soc;

	soc = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	return soc == J7200;
}

#ifdef CONFIG_ARM64
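/*
 * board_prep_linux() - Flush the loaded kernel image from the data cache
 * so it is written out to memory before control is handed over.
 */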
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif

#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are two conditions where the R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write-back/write-allocate memory region and then
	 *    executes a barrier operation (DSB or DMB), the R5F may hang
	 *    at the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the store operations all write to
	 *    cacheable write-back/write-allocate memory regions, the R5F
	 *    may hang at one of the load instructions.
	 *
	 * To avoid both conditions, disable linefill optimization inside
	 * the Cortex-R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
}
#endif

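/*
 * remove_fwl_configs() - Disable a set of firewall regions.
 * @fwl_data:      table of firewall IDs and region counts to process
 * @fwl_data_size: number of entries in @fwl_data
 *
 * Reads back each listed firewall region via TI SCI and, if it is
 * currently enabled, clears its control word to disable it.
 */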
void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
{
	struct ti_sci_msg_fwl_region region;
	struct ti_sci_fwl_ops *fwl_ops;
	struct ti_sci_handle *ti_sci;
	size_t i, j;

	ti_sci = get_ti_sci_handle();
	fwl_ops = &ti_sci->ops.fwl_ops;
	for (i = 0; i < fwl_data_size; i++) {
		for (j = 0; j < fwl_data[i].regions; j++) {
			region.fwl_id = fwl_data[i].fwl_id;
			region.region = j;
			region.n_permission_regs = 3;

			fwl_ops->get_fwl_region(ti_sci, &region);

			if (region.control != 0) {
				pr_debug("Attempting to disable firewall %5d (%25s)\n",
					 region.fwl_id, fwl_data[i].name);
				region.control = 0;

				if (fwl_ops->set_fwl_region(ti_sci, &region))
					pr_err("Could not disable firewall %5d (%25s)\n",
					       region.fwl_id, fwl_data[i].name);
			}
		}
	}
}

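/*
 * spl_enable_dcache() - Enable the data cache in SPL.
 *
 * Sizes DRAM, carves the MMU page table out of the top of the 32-bit
 * addressable portion of RAM and enables the data cache. The cache is
 * disabled again in the spl_board_prepare_for_*() hooks below before
 * handing over control.
 */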
void spl_enable_dcache(void)
{
#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
	phys_addr_t ram_top = CONFIG_SYS_SDRAM_BASE;

	dram_init_banksize();

	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;

	ram_top += get_effective_memsize();
	/* keep ram_top in the 32-bit address space */
	if (ram_top >= 0x100000000)
		ram_top = (phys_addr_t) 0x100000000;

	gd->arch.tlb_addr = ram_top - gd->arch.tlb_size;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

	dcache_enable();
#endif
}

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
void spl_board_prepare_for_boot(void)
{
	dcache_disable();
}

void spl_board_prepare_for_linux(void)
{
	dcache_disable();
}
#endif