// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments' K3 M4 Remoteproc driver
4 *
5 * Copyright (C) 2024 Texas Instruments Incorporated - http://www.ti.com/
6 * Hari Nagalla <hnagalla@ti.com>
7 */
8
9#include <dm.h>
10#include <log.h>
11#include <malloc.h>
12#include <remoteproc.h>
13#include <errno.h>
14#include <clk.h>
15#include <reset.h>
16#include <asm/io.h>
17#include <power-domain.h>
18#include <dm/device_compat.h>
19#include <linux/err.h>
20#include <linux/sizes.h>
21#include <linux/soc/ti/ti_sci_protocol.h>
22#include "ti_sci_proc.h"
23#include <mach/security.h>
24
/**
 * struct k3_m4_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_m4_mem {
	void __iomem *cpu_addr;		/* mapped via map_physmem(), uncached */
	phys_addr_t bus_addr;		/* SoC bus address from DT "reg" entry */
	phys_addr_t dev_addr;		/* address as seen by the M4 core itself */
	size_t size;
};
38
/**
 * struct k3_m4_mem_data - memory definitions for m4 remote core
 * @name: name for this memory entry, matched against DT reg-names
 * @dev_addr: device address for the memory entry
 */
struct k3_m4_mem_data {
	const char *name;
	const u32 dev_addr;
};
48
/**
 * struct k3_m4_boot_data - internal data structure used for boot
 * @boot_align_addr: Boot vector address alignment granularity
 */
struct k3_m4_boot_data {
	u32 boot_align_addr;
};
56
/**
 * struct k3_m4_privdata - Structure representing Remote processor data.
 * @m4_rst: m4 rproc reset control data
 * @tsp: Pointer to TISCI proc control handle
 * @data: Pointer to M4-specific boot data structure (from driver match data)
 * @mem: Array of available memories
 * @num_mems: Number of available memories
 */
struct k3_m4_privdata {
	struct reset_ctl m4_rst;
	struct ti_sci_proc tsp;
	struct k3_m4_boot_data *data;
	struct k3_m4_mem *mem;
	int num_mems;
};
72
73/*
74 * The M4 cores have a local reset that affects only the CPU, and a
75 * generic module reset that powers on the device and allows the M4 internal
76 * memories to be accessed while the local reset is asserted. This function is
77 * used to release the global reset on M4F to allow loading into the M4F
78 * internal RAMs. This helper function is invoked in k3_m4_load() before any
79 * actual firmware loading happens and is undone only in k3_m4_stop(). The local
80 * reset cannot be released on M4 cores until after the firmware images are loaded.
81 */
82static int k3_m4_prepare(struct udevice *dev)
83{
84 struct k3_m4_privdata *m4 = dev_get_priv(dev);
85 int ret;
86
87 ret = ti_sci_proc_power_domain_on(&m4->tsp);
88 if (ret)
89 dev_err(dev, "cannot enable internal RAM loading, ret = %d\n",
90 ret);
91
92 return ret;
93}
94
95/*
96 * This function is the counterpart to k3_m4_prepare() and is used to assert
97 * the global reset on M4 cores. This completes the second step of powering
98 * down the M4 cores. The cores themselves are halted through the local reset
99 * in first step. This function is invoked in k3_m4_stop() after the local
100 * reset is asserted.
101 */
102static int k3_m4_unprepare(struct udevice *dev)
103{
104 struct k3_m4_privdata *m4 = dev_get_priv(dev);
105
106 return ti_sci_proc_power_domain_off(&m4->tsp);
107}
108
109/**
110 * k3_m4_load() - Load up the Remote processor image
111 * @dev: rproc device pointer
112 * @addr: Address at which image is available
113 * @size: size of the image
114 *
115 * Return: 0 if all goes good, else appropriate error message.
116 */
117static int k3_m4_load(struct udevice *dev, ulong addr, ulong size)
118{
119 struct k3_m4_privdata *m4 = dev_get_priv(dev);
120 void *image_addr = (void *)addr;
121 int ret;
122
123 ret = ti_sci_proc_request(&m4->tsp);
124 if (ret)
125 return ret;
126
127 ret = k3_m4_prepare(dev);
128 if (ret) {
129 dev_err(dev, "Prepare failed for core %d\n",
130 m4->tsp.proc_id);
131 goto proc_release;
132 }
133
134 ti_secure_image_post_process(&image_addr, &size);
135
136 ret = rproc_elf_load_image(dev, addr, size);
137 if (ret < 0) {
138 dev_err(dev, "Loading elf failed %d\n", ret);
139 goto unprepare;
140 }
141
142unprepare:
143 if (ret)
144 k3_m4_unprepare(dev);
145proc_release:
146 ti_sci_proc_release(&m4->tsp);
147 return ret;
148}
149
150/**
151 * k3_m4_start() - Start the remote processor
152 * @dev: rproc device pointer
153 *
154 * Return: 0 if all went ok, else return appropriate error
155 */
156static int k3_m4_start(struct udevice *dev)
157{
158 struct k3_m4_privdata *m4 = dev_get_priv(dev);
159 int ret;
160
161 ret = ti_sci_proc_request(&m4->tsp);
162 if (ret)
163 return ret;
164
165 ret = reset_deassert(&m4->m4_rst);
166
167 ti_sci_proc_release(&m4->tsp);
168
169 return ret;
170}
171
172static int k3_m4_stop(struct udevice *dev)
173{
174 struct k3_m4_privdata *m4 = dev_get_priv(dev);
175
176 ti_sci_proc_request(&m4->tsp);
177 reset_assert(&m4->m4_rst);
178 k3_m4_unprepare(dev);
179 ti_sci_proc_release(&m4->tsp);
180
181 return 0;
182}
183
184static void *k3_m4_da_to_va(struct udevice *dev, ulong da, ulong len)
185{
186 struct k3_m4_privdata *m4 = dev_get_priv(dev);
187 phys_addr_t bus_addr, dev_addr;
188 void __iomem *va = NULL;
189 size_t size;
190 u32 offset;
191 int i;
192
193 if (len <= 0)
194 return NULL;
195
196 for (i = 0; i < m4->num_mems; i++) {
197 bus_addr = m4->mem[i].bus_addr;
198 dev_addr = m4->mem[i].dev_addr;
199 size = m4->mem[i].size;
200
201 if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
202 offset = da - dev_addr;
203 va = m4->mem[i].cpu_addr + offset;
204 return (__force void *)va;
205 }
206
207 if (da >= bus_addr && (da + len) <= (bus_addr + size)) {
208 offset = da - bus_addr;
209 va = m4->mem[i].cpu_addr + offset;
210 return (__force void *)va;
211 }
212 }
213
214 /* Assume it is DDR region and return da */
215 return map_physmem(da, len, MAP_NOCACHE);
216}
217
/* Remoteproc uclass operations for the K3 M4F cores. */
static const struct dm_rproc_ops k3_m4_ops = {
	.load = k3_m4_load,
	.start = k3_m4_start,
	.stop = k3_m4_stop,
	.device_to_virt = k3_m4_da_to_va,
};
224
225static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
226{
227 u32 ids[2];
228 int ret;
229
230 tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
231 if (IS_ERR(tsp->sci)) {
232 dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
233 return PTR_ERR(tsp->sci);
234 }
235
236 ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
237 if (ret) {
238 dev_err(dev, "Proc IDs not populated %d\n", ret);
239 return ret;
240 }
241
242 tsp->ops = &tsp->sci->ops.proc_ops;
243 tsp->proc_id = ids[0];
244 tsp->host_id = ids[1];
245 tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
246 TI_SCI_RESOURCE_NULL);
247 if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
248 dev_err(dev, "Device ID not populated %d\n", ret);
249 return -ENODEV;
250 }
251
252 return 0;
253}
254
/* Internal RAMs of the AM64x M4F core and their M4-view addresses. */
static const struct k3_m4_mem_data am6_m4_mems[] = {
	{ .name = "iram", .dev_addr = 0x0 },
	{ .name = "dram", .dev_addr = 0x30000 },
};
259
260static int k3_m4_of_get_memories(struct udevice *dev)
261{
262 struct k3_m4_privdata *m4 = dev_get_priv(dev);
263 int i;
264
265 m4->num_mems = ARRAY_SIZE(am6_m4_mems);
266 m4->mem = calloc(m4->num_mems, sizeof(*m4->mem));
267 if (!m4->mem)
268 return -ENOMEM;
269
270 for (i = 0; i < m4->num_mems; i++) {
271 m4->mem[i].bus_addr = dev_read_addr_size_name(dev,
272 am6_m4_mems[i].name,
273 (fdt_addr_t *)&m4->mem[i].size);
274 if (m4->mem[i].bus_addr == FDT_ADDR_T_NONE) {
275 dev_err(dev, "%s bus address not found\n",
276 am6_m4_mems[i].name);
277 return -EINVAL;
278 }
279 m4->mem[i].cpu_addr = map_physmem(m4->mem[i].bus_addr,
280 m4->mem[i].size,
281 MAP_NOCACHE);
282 m4->mem[i].dev_addr = am6_m4_mems[i].dev_addr;
283 }
284
285 return 0;
286}
287
288/**
289 * k3_of_to_priv() - generate private data from device tree
290 * @dev: corresponding k3 m4 processor device
291 * @m4: pointer to driver specific private data
292 *
293 * Return: 0 if all goes good, else appropriate error message.
294 */
295static int k3_m4_of_to_priv(struct udevice *dev, struct k3_m4_privdata *m4)
296{
297 int ret;
298
299 ret = reset_get_by_index(dev, 0, &m4->m4_rst);
300 if (ret) {
301 dev_err(dev, "reset_get() failed: %d\n", ret);
302 return ret;
303 }
304
305 ret = ti_sci_proc_of_to_priv(dev, &m4->tsp);
306 if (ret)
307 return ret;
308
309 ret = k3_m4_of_get_memories(dev);
310 if (ret)
311 return ret;
312
313 m4->data = (struct k3_m4_boot_data *)dev_get_driver_data(dev);
314
315 return 0;
316}
317
318/**
319 * k3_m4_probe() - Basic probe
320 * @dev: corresponding k3 remote processor device
321 *
322 * Return: 0 if all goes good, else appropriate error message.
323 */
324static int k3_m4_probe(struct udevice *dev)
325{
326 struct k3_m4_privdata *m4;
327 int ret;
328
329 m4 = dev_get_priv(dev);
330 ret = k3_m4_of_to_priv(dev, m4);
331 if (ret)
332 return ret;
333
334 /*
335 * The M4 local resets are deasserted by default on Power-On-Reset.
336 * Assert the local resets to ensure the M4s don't execute bogus code
337 * in .load() callback when the module reset is released to support
338 * internal memory loading. This is needed for M4 cores.
339 */
340 reset_assert(&m4->m4_rst);
341
342 return 0;
343}
344
345static int k3_m4_remove(struct udevice *dev)
346{
347 struct k3_m4_privdata *m4 = dev_get_priv(dev);
348
349 free(m4->mem);
350
351 return 0;
352}
353
/* Boot parameters for M4F cores: boot vector must be 1 KiB aligned. */
static const struct k3_m4_boot_data m4_data = {
	.boot_align_addr = SZ_1K,
};
357
/* Device-tree compatibles handled by this driver. */
static const struct udevice_id k3_m4_ids[] = {
	{ .compatible = "ti,am64-m4fss", .data = (ulong)&m4_data, },
	{}
};
362
/* U-Boot driver model registration for the K3 M4F remoteproc. */
U_BOOT_DRIVER(k3_m4) = {
	.name = "k3_m4",
	.of_match = k3_m4_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_m4_ops,
	.probe = k3_m4_probe,
	.remove = k3_m4_remove,
	.priv_auto = sizeof(struct k3_m4_privdata),
};