blob: 7af60a7f2f8294f83ddd60f2119b3065cd71d65d [file] [log] [blame]
Lokesh Vutla5d83fd22018-11-02 19:51:05 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * K3: Common Architecture initialization
4 *
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
6 * Lokesh Vutla <lokeshvutla@ti.com>
7 */
8
9#include <common.h>
Simon Glassafb02152019-12-28 10:45:01 -070010#include <cpu_func.h>
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053011#include <spl.h>
12#include "common.h"
13#include <dm.h>
14#include <remoteproc.h>
Lokesh Vutla28cd8242019-03-08 11:47:33 +053015#include <linux/soc/ti/ti_sci_protocol.h>
Lokesh Vutla16cf5d22019-03-08 11:47:34 +053016#include <fdt_support.h>
Andreas Dannenberg31175f82019-06-07 19:24:42 +053017#include <asm/arch/sys_proto.h>
Lokesh Vutlaa04cf3b2019-09-27 13:32:11 +053018#include <asm/hardware.h>
19#include <asm/io.h>
Lokesh Vutla28cd8242019-03-08 11:47:33 +053020
21struct ti_sci_handle *get_ti_sci_handle(void)
22{
23 struct udevice *dev;
24 int ret;
25
Lokesh Vutla00a15132019-09-27 13:32:15 +053026 ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
27 DM_GET_DRIVER(ti_sci), &dev);
Lokesh Vutla28cd8242019-03-08 11:47:33 +053028 if (ret)
29 panic("Failed to get SYSFW (%d)\n", ret);
30
31 return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
32}
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053033
Andreas Dannenbergd13ec8c2019-08-15 15:55:28 -050034DECLARE_GLOBAL_DATA_PTR;
35
36#ifdef CONFIG_K3_EARLY_CONS
37int early_console_init(void)
38{
39 struct udevice *dev;
40 int ret;
41
42 gd->baudrate = CONFIG_BAUDRATE;
43
44 ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
45 &dev);
46 if (ret) {
47 printf("Error getting serial dev for early console! (%d)\n",
48 ret);
49 return ret;
50 }
51
52 gd->cur_serial_dev = dev;
53 gd->flags |= GD_FLG_SERIAL_READY;
54 gd->have_console = 1;
55
56 return 0;
57}
58#endif
59
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053060#ifdef CONFIG_SYS_K3_SPL_ATF
61void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
62{
Lokesh Vutla005476d2019-06-07 19:24:43 +053063 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053064 int ret;
65
Lokesh Vutla005476d2019-06-07 19:24:43 +053066 /* Release all the exclusive devices held by SPL before starting ATF */
67 ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);
68
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053069 /*
70 * It is assumed that remoteproc device 1 is the corresponding
Andreas Dannenberg376c0fe2019-02-04 12:58:47 -060071 * Cortex-A core which runs ATF. Make sure DT reflects the same.
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053072 */
73 ret = rproc_dev_init(1);
Andreas Dannenberg376c0fe2019-02-04 12:58:47 -060074 if (ret)
75 panic("%s: ATF failed to initialize on rproc (%d)\n", __func__,
76 ret);
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053077
78 ret = rproc_load(1, spl_image->entry_point, 0x200);
Andreas Dannenberg376c0fe2019-02-04 12:58:47 -060079 if (ret)
80 panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053081
Andreas Dannenberg376c0fe2019-02-04 12:58:47 -060082 /* Add an extra newline to differentiate the ATF logs from SPL */
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053083 printf("Starting ATF on ARM64 core...\n\n");
84
85 ret = rproc_start(1);
Andreas Dannenberg376c0fe2019-02-04 12:58:47 -060086 if (ret)
87 panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053088
Andreas Dannenberg31175f82019-06-07 19:24:42 +053089 debug("Releasing resources...\n");
90 release_resources_for_core_shutdown();
91
92 debug("Finalizing core shutdown...\n");
Lokesh Vutla5d83fd22018-11-02 19:51:05 +053093 while (1)
94 asm volatile("wfe");
95}
96#endif
Lokesh Vutla16cf5d22019-03-08 11:47:34 +053097
98#if defined(CONFIG_OF_LIBFDT)
99int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
100{
101 u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
102 struct ti_sci_handle *ti_sci = get_ti_sci_handle();
103 int ret, node, subnode, len, prev_node;
104 u32 range[4], addr, size;
105 const fdt32_t *sub_reg;
106
107 ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
108 msmc_size = msmc_end - msmc_start + 1;
109 debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
110 msmc_start, msmc_size);
111
112 /* find or create "msmc_sram node */
113 ret = fdt_path_offset(blob, parent_path);
114 if (ret < 0)
115 return ret;
116
117 node = fdt_find_or_add_subnode(blob, ret, node_name);
118 if (node < 0)
119 return node;
120
121 ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
122 if (ret < 0)
123 return ret;
124
125 reg[0] = cpu_to_fdt64(msmc_start);
126 reg[1] = cpu_to_fdt64(msmc_size);
127 ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
128 if (ret < 0)
129 return ret;
130
131 fdt_setprop_cell(blob, node, "#address-cells", 1);
132 fdt_setprop_cell(blob, node, "#size-cells", 1);
133
134 range[0] = 0;
135 range[1] = cpu_to_fdt32(msmc_start >> 32);
136 range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
137 range[3] = cpu_to_fdt32(msmc_size);
138 ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
139 if (ret < 0)
140 return ret;
141
142 subnode = fdt_first_subnode(blob, node);
143 prev_node = 0;
144
145 /* Look for invalid subnodes and delete them */
146 while (subnode >= 0) {
147 sub_reg = fdt_getprop(blob, subnode, "reg", &len);
148 addr = fdt_read_number(sub_reg, 1);
149 sub_reg++;
150 size = fdt_read_number(sub_reg, 1);
151 debug("%s: subnode = %d, addr = 0x%x. size = 0x%x\n", __func__,
152 subnode, addr, size);
153 if (addr + size > msmc_size ||
154 !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
155 !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
156 fdt_del_node(blob, subnode);
157 debug("%s: deleting subnode %d\n", __func__, subnode);
158 if (!prev_node)
159 subnode = fdt_first_subnode(blob, node);
160 else
161 subnode = fdt_next_subnode(blob, prev_node);
162 } else {
163 prev_node = subnode;
164 subnode = fdt_next_subnode(blob, prev_node);
165 }
166 }
167
168 return 0;
169}
Andrew F. Davis6c43b522019-09-17 17:15:40 -0400170
/**
 * fdt_disable_node() - Set a device tree node's status to "disabled"
 * @blob:      FDT blob to modify
 * @node_path: full path of the node to disable
 *
 * Return: 0 on success, negative libfdt error code on failure.
 */
int fdt_disable_node(void *blob, char *node_path)
{
	int node;
	int err;

	node = fdt_path_offset(blob, node_path);
	if (node < 0) {
		printf("Node %s not found.\n", node_path);
		return node;
	}

	err = fdt_setprop_string(blob, node, "status", "disabled");
	if (err < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(err));
		return err;
	}

	return 0;
}
189
Lokesh Vutla16cf5d22019-03-08 11:47:34 +0530190#endif
Lokesh Vutlaa2285322019-06-13 10:29:42 +0530191
#ifndef CONFIG_SYSRESET
/*
 * Stub: without a sysreset driver there is no generic way to reset
 * these SoCs, so reset_cpu() intentionally does nothing here.
 */
void reset_cpu(ulong ignored)
{
}
#endif
Lokesh Vutlaa04cf3b2019-09-27 13:32:11 +0530197
#if defined(CONFIG_DISPLAY_CPUINFO)
/**
 * print_cpuinfo() - Print the SoC family and silicon revision
 *
 * Decodes the JTAG device ID registers in the WKUP control module to
 * report the SoC family (AM654/J721E) and silicon revision, e.g.
 * "SoC: AM654 SR 1.0".
 *
 * Return: always 0.
 */
int print_cpuinfo(void)
{
	u32 soc, rev;
	char *name;

	soc = (readl(CTRLMMR_WKUP_JTAG_DEVICE_ID) &
	       DEVICE_ID_FAMILY_MASK) >> DEVICE_ID_FAMILY_SHIFT;
	rev = (readl(CTRLMMR_WKUP_JTAG_ID) &
	       JTAG_ID_VARIANT_MASK) >> JTAG_ID_VARIANT_SHIFT;

	printf("SoC: ");
	switch (soc) {
	case AM654:
		name = "AM654";
		break;
	case J721E:
		name = "J721E";
		break;
	default:
		name = "Unknown Silicon";
		break;
	}
	printf("%s SR ", name);

	switch (rev) {
	case REV_PG1_0:
		name = "1.0";
		break;
	case REV_PG2_0:
		name = "2.0";
		break;
	default:
		name = "Unknown Revision";
		break;
	}
	printf("%s\n", name);

	return 0;
}
#endif
Lokesh Vutla362beda2019-10-07 13:52:17 +0530237
#ifdef CONFIG_ARM64
/*
 * board_prep_linux() - Arch hook run just before jumping to the kernel.
 *
 * Flushes the dcache over the loaded kernel image range so the freshly
 * written Image is visible in memory; the end address is rounded up to
 * a full cache line (CONFIG_SYS_CACHELINE_SIZE).
 */
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif
Lokesh Vutla5fbd6fe2019-12-31 15:49:55 +0530248
#ifdef CONFIG_CPU_V7R
/**
 * disable_linefill_optimization() - Work around R5F cache deadlocks
 *
 * Performs a read-modify-write of the Cortex-R5F Auxiliary Control
 * Register (cp15 c1, c0, 1) to set the DLFO bit (bit 13), disabling
 * the linefill optimization. See the errata rationale below.
 */
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are 2 conditions where R5F can deadlock:
	 * 1.When software is performing series of store operations to
	 *   cacheable write back/write allocate memory region and later
	 *   on software execute barrier operation (DSB or DMB). R5F may
	 *   hang at the barrier instruction.
	 * 2.When software is performing a mix of load and store operations
	 *   within a tight loop and store operations are all writing to
	 *   cacheable write back/write allocates memory regions, R5F may
	 *   hang at one of the load instruction.
	 *
	 * To avoid the above two conditions disable linefill optimization
	 * inside Cortex R5F.
	 */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));	/* read ACTLR */
	actlr |= (1 << 13); /* Set DLFO bit */
	asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));	/* write ACTLR */
}
#endif
Andrew F. Davisf0bcb662020-01-10 14:35:21 -0500273
274void remove_fwl_configs(struct fwl_data *fwl_data, size_t fwl_data_size)
275{
276 struct ti_sci_msg_fwl_region region;
277 struct ti_sci_fwl_ops *fwl_ops;
278 struct ti_sci_handle *ti_sci;
279 size_t i, j;
280
281 ti_sci = get_ti_sci_handle();
282 fwl_ops = &ti_sci->ops.fwl_ops;
283 for (i = 0; i < fwl_data_size; i++) {
284 for (j = 0; j < fwl_data[i].regions; j++) {
285 region.fwl_id = fwl_data[i].fwl_id;
286 region.region = j;
287 region.n_permission_regs = 3;
288
289 fwl_ops->get_fwl_region(ti_sci, &region);
290
291 if (region.control != 0) {
292 pr_debug("Attempting to disable firewall %5d (%25s)\n",
293 region.fwl_id, fwl_data[i].name);
294 region.control = 0;
295
296 if (fwl_ops->set_fwl_region(ti_sci, &region))
297 pr_err("Could not disable firewall %5d (%25s)\n",
298 region.fwl_id, fwl_data[i].name);
299 }
300 }
301 }
302}