// SPDX-License-Identifier: GPL-2.0+
/*
 * K3: Common Architecture initialization
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <spl.h>
#include "common.h"
#include <dm.h>
#include <remoteproc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <fdt_support.h>
#include <asm/arch/sys_proto.h>
#include <asm/hardware.h>
#include <asm/io.h>

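/*
 * Get the TI SCI handle exposed by System Firmware (SYSFW). This probes the
 * ti_sci firmware device via the driver model; boot cannot proceed without
 * SYSFW, so failure to obtain the handle is fatal.
 */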
struct ti_sci_handle *get_ti_sci_handle(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_FIRMWARE,
					  DM_GET_DRIVER(ti_sci), &dev);
	if (ret)
		panic("Failed to get SYSFW (%d)\n", ret);

	return (struct ti_sci_handle *)ti_sci_get_handle_from_sysfw(dev);
}

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_K3_EARLY_CONS
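/*
 * Bring up an early console on the serial device selected by
 * CONFIG_K3_EARLY_CONS_IDX, so output is available before the regular
 * console is set up.
 */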
int early_console_init(void)
{
	struct udevice *dev;
	int ret;

	gd->baudrate = CONFIG_BAUDRATE;

	ret = uclass_get_device_by_seq(UCLASS_SERIAL, CONFIG_K3_EARLY_CONS_IDX,
				       &dev);
	if (ret) {
		printf("Error getting serial dev for early console! (%d)\n",
		       ret);
		return ret;
	}

	gd->cur_serial_dev = dev;
	gd->flags |= GD_FLG_SERIAL_READY;
	gd->have_console = 1;

	return 0;
}
#endif

#ifdef CONFIG_SYS_K3_SPL_ATF
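/*
 * SPL hand-off point: instead of jumping directly to the next image, load
 * and start ATF on the Cortex-A core (remoteproc device 1) through the
 * remoteproc framework, then park this core.
 */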
void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
{
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret;

	/* Release all the exclusive devices held by SPL before starting ATF */
	ti_sci->ops.dev_ops.release_exclusive_devices(ti_sci);

	/*
	 * It is assumed that remoteproc device 1 is the corresponding
	 * Cortex-A core which runs ATF. Make sure DT reflects the same.
	 */
	ret = rproc_dev_init(1);
	if (ret)
		panic("%s: ATF failed to initialize on rproc (%d)\n", __func__,
		      ret);

	ret = rproc_load(1, spl_image->entry_point, 0x200);
	if (ret)
		panic("%s: ATF failed to load on rproc (%d)\n", __func__, ret);

	/* Add an extra newline to differentiate the ATF logs from SPL */
	printf("Starting ATF on ARM64 core...\n\n");

	ret = rproc_start(1);
	if (ret)
		panic("%s: ATF failed to start on rproc (%d)\n", __func__, ret);

	debug("Releasing resources...\n");
	release_resources_for_core_shutdown();

	debug("Finalizing core shutdown...\n");
	while (1)
		asm volatile("wfe");
}
#endif

#if defined(CONFIG_OF_LIBFDT)
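/*
 * Fix up the MSMC SRAM node in the device tree: query the usable MSMC range
 * from System Firmware, (re)create an "mmio-sram" node named node_name under
 * parent_path with matching "reg" and "ranges" properties, and delete
 * subnodes that fall outside that range or belong to SYSFW/L3 cache. A board
 * fixup would call it along the lines of (path and node name below are only
 * illustrative):
 *
 *	fdt_fixup_msmc_ram(blob, "/bus@100000", "sram@70000000");
 */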
int fdt_fixup_msmc_ram(void *blob, char *parent_path, char *node_name)
{
	u64 msmc_start = 0, msmc_end = 0, msmc_size, reg[2];
	struct ti_sci_handle *ti_sci = get_ti_sci_handle();
	int ret, node, subnode, len, prev_node;
	u32 range[4], addr, size;
	const fdt32_t *sub_reg;

	ti_sci->ops.core_ops.query_msmc(ti_sci, &msmc_start, &msmc_end);
	msmc_size = msmc_end - msmc_start + 1;
	debug("%s: msmc_start = 0x%llx, msmc_size = 0x%llx\n", __func__,
	      msmc_start, msmc_size);

	/* Find or create the "msmc_sram" node */
	ret = fdt_path_offset(blob, parent_path);
	if (ret < 0)
		return ret;

	node = fdt_find_or_add_subnode(blob, ret, node_name);
	if (node < 0)
		return node;

	ret = fdt_setprop_string(blob, node, "compatible", "mmio-sram");
	if (ret < 0)
		return ret;

	reg[0] = cpu_to_fdt64(msmc_start);
	reg[1] = cpu_to_fdt64(msmc_size);
	ret = fdt_setprop(blob, node, "reg", reg, sizeof(reg));
	if (ret < 0)
		return ret;

	fdt_setprop_cell(blob, node, "#address-cells", 1);
	fdt_setprop_cell(blob, node, "#size-cells", 1);

	range[0] = 0;
	range[1] = cpu_to_fdt32(msmc_start >> 32);
	range[2] = cpu_to_fdt32(msmc_start & 0xffffffff);
	range[3] = cpu_to_fdt32(msmc_size);
	ret = fdt_setprop(blob, node, "ranges", range, sizeof(range));
	if (ret < 0)
		return ret;

	subnode = fdt_first_subnode(blob, node);
	prev_node = 0;

	/* Look for invalid subnodes and delete them */
	while (subnode >= 0) {
		sub_reg = fdt_getprop(blob, subnode, "reg", &len);
		addr = fdt_read_number(sub_reg, 1);
		sub_reg++;
		size = fdt_read_number(sub_reg, 1);
		debug("%s: subnode = %d, addr = 0x%x. size = 0x%x\n", __func__,
		      subnode, addr, size);
		if (addr + size > msmc_size ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "sysfw", 5) ||
		    !strncmp(fdt_get_name(blob, subnode, &len), "l3cache", 7)) {
			fdt_del_node(blob, subnode);
			debug("%s: deleting subnode %d\n", __func__, subnode);
			if (!prev_node)
				subnode = fdt_first_subnode(blob, node);
			else
				subnode = fdt_next_subnode(blob, prev_node);
		} else {
			prev_node = subnode;
			subnode = fdt_next_subnode(blob, prev_node);
		}
	}

	return 0;
}

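/*
 * Mark the device tree node at node_path as "disabled". A missing node is
 * not treated as an error, so callers can unconditionally disable nodes
 * that may not exist on every board.
 */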
int fdt_disable_node(void *blob, char *node_path)
{
	int offs;
	int ret;

	offs = fdt_path_offset(blob, node_path);
	if (offs < 0) {
		debug("Node %s not found.\n", node_path);
		return 0;
	}
	ret = fdt_setprop_string(blob, offs, "status", "disabled");
	if (ret < 0) {
		printf("Could not add status property to node %s: %s\n",
		       node_path, fdt_strerror(ret));
		return ret;
	}
	return 0;
}

#endif

#ifndef CONFIG_SYSRESET
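/*
 * Empty stub: when CONFIG_SYSRESET is enabled, a sysreset driver provides
 * the CPU reset implementation instead.
 */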
void reset_cpu(ulong ignored)
{
}
#endif

#if defined(CONFIG_DISPLAY_CPUINFO)
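/*
 * Identify the SoC family and silicon revision from the WKUP CTRL_MMR JTAG
 * ID registers and print them, e.g. "SoC: AM654 PG 1.0".
 */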
int print_cpuinfo(void)
{
	u32 soc, rev;
	char *name;

	soc = (readl(CTRLMMR_WKUP_JTAG_DEVICE_ID) &
		DEVICE_ID_FAMILY_MASK) >> DEVICE_ID_FAMILY_SHIFT;
	rev = (readl(CTRLMMR_WKUP_JTAG_ID) &
		JTAG_ID_VARIANT_MASK) >> JTAG_ID_VARIANT_SHIFT;

	printf("SoC: ");
	switch (soc) {
	case AM654:
		name = "AM654";
		break;
	case J721E:
		name = "J721E";
		break;
	default:
		name = "Unknown Silicon";
	}

	printf("%s PG ", name);
	switch (rev) {
	case REV_PG1_0:
		name = "1.0";
		break;
	case REV_PG2_0:
		name = "2.0";
		break;
	default:
		name = "Unknown Revision";
	}
	printf("%s\n", name);

	return 0;
}
#endif

#ifdef CONFIG_ARM64
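/*
 * Flush the loaded kernel image range from the data cache before handing
 * over to Linux, so the image is visible in memory when the kernel is
 * entered with caches disabled.
 */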
void board_prep_linux(bootm_headers_t *images)
{
	debug("Linux kernel Image start = 0x%lx end = 0x%lx\n",
	      images->os.start, images->os.end);
	__asm_flush_dcache_range(images->os.start,
				 ROUND(images->os.end,
				       CONFIG_SYS_CACHELINE_SIZE));
}
#endif

#ifdef CONFIG_CPU_V7R
void disable_linefill_optimization(void)
{
	u32 actlr;

	/*
	 * On K3 devices there are two conditions where the R5F can deadlock:
	 * 1. When software performs a series of store operations to a
	 *    cacheable write-back/write-allocate memory region and then
	 *    executes a barrier operation (DSB or DMB), the R5F may hang
	 *    at the barrier instruction.
	 * 2. When software performs a mix of load and store operations
	 *    within a tight loop and the stores all write to cacheable
	 *    write-back/write-allocate memory regions, the R5F may hang
	 *    at one of the load instructions.
	 *
	 * To avoid both conditions, disable the linefill optimization in
	 * the Cortex-R5F.
	 */
267 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
268 actlr |= (1 << 13); /* Set DLFO bit */
269 asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
270}
271#endif