// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments' K3 DSP Remoteproc driver
 *
 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 *	Suman Anna <s-anna@ti.com>
 */

#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <remoteproc.h>
#include <errno.h>
#include <clk.h>
#include <reset.h>
#include <asm/io.h>
#include <asm/system.h>
#include <power-domain.h>
#include <dm/device_compat.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include "ti_sci_proc.h"
#include <mach/security.h>

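/* Mask to derive a DSP-local device address from the SoC-level bus address */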
#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK	(SZ_16M - 1)

/**
 * struct k3_dsp_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_dsp_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	phys_addr_t dev_addr;
	size_t size;
};

/**
 * struct k3_dsp_boot_data - internal data structure used for boot
 * @boot_align_addr: Boot vector address alignment granularity
 * @uses_lreset: Flag to denote the need for local reset management
 */
struct k3_dsp_boot_data {
	u32 boot_align_addr;
	bool uses_lreset;
};

/**
 * struct k3_dsp_privdata - Structure representing Remote processor data.
 * @dsp_rst: DSP reset control data
 * @tsp: Pointer to TISCI proc control handle
 * @data: Pointer to DSP specific boot data structure
 * @mem: Array of available memories
 * @num_mems: Number of available memories
 * @cached_addr: Cached memory address
 * @cached_size: Cached memory size
 * @in_use: Flag to tell if the core is already in use
 */
struct k3_dsp_privdata {
	struct reset_ctl dsp_rst;
	struct ti_sci_proc tsp;
	struct k3_dsp_boot_data *data;
	struct k3_dsp_mem *mem;
	int num_mems;
	void __iomem *cached_addr;
	size_t cached_size;
	bool in_use;
};
74
Suman Annabe7fa2d2020-03-10 16:05:56 -050075/*
76 * The C66x DSP cores have a local reset that affects only the CPU, and a
77 * generic module reset that powers on the device and allows the DSP internal
78 * memories to be accessed while the local reset is asserted. This function is
79 * used to release the global reset on C66x DSPs to allow loading into the DSP
80 * internal RAMs. This helper function is invoked in k3_dsp_load() before any
81 * actual firmware loading and is undone only in k3_dsp_stop(). The local reset
82 * on C71x cores is a no-op and the global reset cannot be released on C71x
83 * cores until after the firmware images are loaded, so this function does
84 * nothing for C71x cores.
85 */
86static int k3_dsp_prepare(struct udevice *dev)
87{
88 struct k3_dsp_privdata *dsp = dev_get_priv(dev);
89 struct k3_dsp_boot_data *data = dsp->data;
90 int ret;
91
92 /* local reset is no-op on C71x processors */
93 if (!data->uses_lreset)
94 return 0;
95
96 ret = ti_sci_proc_power_domain_on(&dsp->tsp);
97 if (ret)
98 dev_err(dev, "cannot enable internal RAM loading, ret = %d\n",
99 ret);
100
101 return ret;
102}

/*
 * This function is the counterpart to k3_dsp_prepare() and is used to assert
 * the global reset on C66x DSP cores (no-op for C71x DSP cores). This completes
 * the second step of powering down the C66x DSP cores. The cores themselves
 * are halted through the local reset in the first step. This function is
 * invoked in k3_dsp_stop() after the local reset is asserted.
 */
static int k3_dsp_unprepare(struct udevice *dev)
{
	struct k3_dsp_privdata *dsp = dev_get_priv(dev);
	struct k3_dsp_boot_data *data = dsp->data;

	/* local reset is no-op on C71x processors */
	if (!data->uses_lreset)
		return 0;

	return ti_sci_proc_power_domain_off(&dsp->tsp);
}

/**
 * k3_dsp_load() - Load up the Remote processor image
 * @dev: rproc device pointer
 * @addr: Address at which image is available
 * @size: size of the image
 *
 * Return: 0 if all goes good, else appropriate error code.
 */
static int k3_dsp_load(struct udevice *dev, ulong addr, ulong size)
{
	struct k3_dsp_privdata *dsp = dev_get_priv(dev);
	struct k3_dsp_boot_data *data = dsp->data;
	u32 boot_vector;
	void *image_addr = (void *)addr;
	int ret;

	if (dsp->in_use) {
		dev_err(dev,
			"Invalid op: Trying to load/start on already running core %d\n",
			dsp->tsp.proc_id);
		return -EINVAL;
	}

	dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
	ret = ti_sci_proc_request(&dsp->tsp);
	if (ret)
		return ret;

	ret = k3_dsp_prepare(dev);
	if (ret) {
		dev_err(dev, "DSP prepare failed for core %d\n",
			dsp->tsp.proc_id);
		goto proc_release;
	}

	ti_secure_image_post_process(&image_addr, &size);

	ret = rproc_elf_load_image(dev, addr, size);
	if (ret < 0) {
		dev_err(dev, "Loading elf failed %d\n", ret);
		goto unprepare;
	}

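	/*
	 * Perform a final cache maintenance pass over the last region the
	 * ELF loader wrote to (tracked in k3_dsp_da_to_va()). The low-level
	 * invalidate is called directly here since the generic dcache ops
	 * are compiled out when CONFIG_SYS_DISABLE_DCACHE_OPS is set.
	 */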
	if (dsp->cached_addr && IS_ENABLED(CONFIG_SYS_DISABLE_DCACHE_OPS)) {
		dev_dbg(dev, "final flush 0x%lx to 0x%lx\n",
			(ulong)dsp->cached_addr, dsp->cached_size);
		__asm_invalidate_dcache_range((u64)dsp->cached_addr,
					      (u64)dsp->cached_addr + (u64)dsp->cached_size);
	}

	boot_vector = rproc_elf_get_boot_addr(dev, addr);
	if (boot_vector & (data->boot_align_addr - 1)) {
		ret = -EINVAL;
		dev_err(dev, "Boot vector 0x%x not aligned on 0x%x boundary\n",
			boot_vector, data->boot_align_addr);
		goto unprepare;
	}

	dev_dbg(dev, "%s: Boot vector = 0x%x\n", __func__, boot_vector);

	ret = ti_sci_proc_set_config(&dsp->tsp, boot_vector, 0, 0);
unprepare:
	if (ret)
		k3_dsp_unprepare(dev);
proc_release:
	ti_sci_proc_release(&dsp->tsp);
	return ret;
}

/**
 * k3_dsp_start() - Start the remote processor
 * @dev: rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_dsp_start(struct udevice *dev)
{
	struct k3_dsp_privdata *dsp = dev_get_priv(dev);
	struct k3_dsp_boot_data *data = dsp->data;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = ti_sci_proc_request(&dsp->tsp);
	if (ret)
		return ret;

	if (!data->uses_lreset) {
		ret = ti_sci_proc_power_domain_on(&dsp->tsp);
		if (ret)
			goto proc_release;
	}

	ret = reset_deassert(&dsp->dsp_rst);
	if (ret) {
		if (!data->uses_lreset)
			ti_sci_proc_power_domain_off(&dsp->tsp);
		goto proc_release;
	}

	dsp->in_use = true;
proc_release:
	ti_sci_proc_release(&dsp->tsp);

	return ret;
}

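/**
 * k3_dsp_stop() - Stop the remote processor
 * @dev: rproc device pointer
 *
 * Halts the core by asserting its reset and turning off its power domain,
 * and marks it as available for loading again.
 *
 * Return: always 0
 */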
static int k3_dsp_stop(struct udevice *dev)
{
	struct k3_dsp_privdata *dsp = dev_get_priv(dev);

	dev_dbg(dev, "%s\n", __func__);

	dsp->in_use = false;
	ti_sci_proc_request(&dsp->tsp);
	reset_assert(&dsp->dsp_rst);
	ti_sci_proc_power_domain_off(&dsp->tsp);
	ti_sci_proc_release(&dsp->tsp);

	return 0;
}

/**
 * k3_dsp_init() - Initialize the remote processor
 * @dev: rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_dsp_init(struct udevice *dev)
{
	dev_dbg(dev, "%s\n", __func__);

	return 0;
}

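/**
 * k3_dsp_reset() - Reset the remote processor (currently a no-op)
 * @dev: rproc device pointer
 *
 * Return: always 0
 */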
static int k3_dsp_reset(struct udevice *dev)
{
	dev_dbg(dev, "%s\n", __func__);

	return 0;
}

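/**
 * k3_dsp_da_to_va() - Convert a device address to a CPU virtual address
 * @dev: rproc device pointer
 * @da: device address (DSP internal memory address or SoC bus address)
 * @len: length of the region being accessed
 *
 * The translated region is also recorded in dsp->cached_addr/cached_size so
 * that k3_dsp_load() can perform cache maintenance on it before releasing
 * the core. Addresses that do not fall within any of the DSP internal
 * memories are assumed to be in DDR and are mapped uncached.
 *
 * Return: CPU virtual address usable for loading, or NULL if len is zero
 */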
static void *k3_dsp_da_to_va(struct udevice *dev, ulong da, ulong len)
{
	struct k3_dsp_privdata *dsp = dev_get_priv(dev);
	phys_addr_t bus_addr, dev_addr;
	size_t size;
	u32 offset;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	if (len <= 0)
		return NULL;

	if (dsp->cached_addr && IS_ENABLED(CONFIG_SYS_DISABLE_DCACHE_OPS)) {
		dev_dbg(dev, "flush 0x%lx to 0x%lx\n", (ulong)dsp->cached_addr,
			dsp->cached_size);
		__asm_invalidate_dcache_range((u64)dsp->cached_addr,
					      (u64)dsp->cached_addr + (u64)dsp->cached_size);
	}

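	/* Record the new region so it can be cache-maintained later */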
	dsp->cached_size = len;
	dsp->cached_addr = NULL;

	for (i = 0; i < dsp->num_mems; i++) {
		bus_addr = dsp->mem[i].bus_addr;
		dev_addr = dsp->mem[i].dev_addr;
		size = dsp->mem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			dsp->cached_addr = dsp->mem[i].cpu_addr + offset;
		}

		if (da >= bus_addr && (da + len) <= (bus_addr + size)) {
			offset = da - bus_addr;
			dsp->cached_addr = dsp->mem[i].cpu_addr + offset;
		}
	}

	/* Assume it is DDR region and return da */
	if (!dsp->cached_addr)
		dsp->cached_addr = map_physmem(da, len, MAP_NOCACHE);

	return dsp->cached_addr;
}

static const struct dm_rproc_ops k3_dsp_ops = {
	.init = k3_dsp_init,
	.load = k3_dsp_load,
	.start = k3_dsp_start,
	.stop = k3_dsp_stop,
	.reset = k3_dsp_reset,
	.device_to_virt = k3_dsp_da_to_va,
};

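/**
 * ti_sci_proc_of_to_priv() - Parse TI-SCI processor control data from DT
 * @dev: corresponding k3 dsp processor device
 * @tsp: TI-SCI processor control handle to populate
 *
 * Reads the "ti,sci", "ti,sci-proc-ids" and "ti,sci-dev-id" properties and
 * fills in the TI-SCI handle, ops and processor/host/device identifiers.
 *
 * Return: 0 if all goes good, else appropriate error code.
 */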
static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
{
	u32 ids[2];
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(tsp->sci)) {
		dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
		return PTR_ERR(tsp->sci);
	}

	ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
	if (ret) {
		dev_err(dev, "Proc IDs not populated %d\n", ret);
		return ret;
	}

	tsp->ops = &tsp->sci->ops.proc_ops;
	tsp->proc_id = ids[0];
	tsp->host_id = ids[1];
	tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
					   TI_SCI_RESOURCE_NULL);
	if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
		dev_err(dev, "Device ID not populated %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

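/**
 * k3_dsp_of_get_memories() - Parse and map the DSP internal memories
 * @dev: corresponding k3 dsp processor device
 *
 * Parses the "l2sram", "l1pram" and "l1dram" memory regions from the device
 * tree, maps them for loading and derives the corresponding device addresses.
 * Regions that do not exist on a given core are left empty.
 *
 * Return: 0 if all goes good, else appropriate error code.
 */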
static int k3_dsp_of_get_memories(struct udevice *dev)
{
	static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"};
	struct k3_dsp_privdata *dsp = dev_get_priv(dev);
	int i;

	dev_dbg(dev, "%s\n", __func__);

	dsp->num_mems = ARRAY_SIZE(mem_names);
	dsp->mem = calloc(dsp->num_mems, sizeof(*dsp->mem));
	if (!dsp->mem)
		return -ENOMEM;

	for (i = 0; i < dsp->num_mems; i++) {
		/* C71x cores only have an L1P cache, there are no L1P SRAMs */
		if (((device_is_compatible(dev, "ti,j721e-c71-dsp")) ||
		     (device_is_compatible(dev, "ti,j721s2-c71-dsp")) ||
		     (device_is_compatible(dev, "ti,am62a-c7xv-dsp"))) &&
		    !strcmp(mem_names[i], "l1pram")) {
			dsp->mem[i].bus_addr = FDT_ADDR_T_NONE;
			dsp->mem[i].dev_addr = FDT_ADDR_T_NONE;
			dsp->mem[i].cpu_addr = NULL;
			dsp->mem[i].size = 0;
			continue;
		}
		if (device_is_compatible(dev, "ti,am62a-c7xv-dsp") &&
		    !strcmp(mem_names[i], "l1dram")) {
			dsp->mem[i].bus_addr = FDT_ADDR_T_NONE;
			dsp->mem[i].dev_addr = FDT_ADDR_T_NONE;
			dsp->mem[i].cpu_addr = NULL;
			dsp->mem[i].size = 0;
			continue;
		}
		dsp->mem[i].bus_addr = dev_read_addr_size_name(dev, mem_names[i],
							       (fdt_addr_t *)&dsp->mem[i].size);
		if (dsp->mem[i].bus_addr == FDT_ADDR_T_NONE) {
			dev_err(dev, "%s bus address not found\n", mem_names[i]);
			return -EINVAL;
		}
		dsp->mem[i].cpu_addr = map_physmem(dsp->mem[i].bus_addr,
						   dsp->mem[i].size,
						   MAP_NOCACHE);
		dsp->mem[i].dev_addr = dsp->mem[i].bus_addr &
					KEYSTONE_RPROC_LOCAL_ADDRESS_MASK;

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da %pa\n",
			mem_names[i], &dsp->mem[i].bus_addr,
			dsp->mem[i].size, dsp->mem[i].cpu_addr,
			&dsp->mem[i].dev_addr);
	}

	return 0;
}

/**
 * k3_dsp_of_to_priv() - generate private data from device tree
 * @dev: corresponding k3 dsp processor device
 * @dsp: pointer to driver specific private data
 *
 * Return: 0 if all goes good, else appropriate error code.
 */
static int k3_dsp_of_to_priv(struct udevice *dev, struct k3_dsp_privdata *dsp)
{
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = reset_get_by_index(dev, 0, &dsp->dsp_rst);
	if (ret) {
		dev_err(dev, "reset_get() failed: %d\n", ret);
		return ret;
	}

	ret = ti_sci_proc_of_to_priv(dev, &dsp->tsp);
	if (ret)
		return ret;

	ret = k3_dsp_of_get_memories(dev);
	if (ret)
		return ret;

	dsp->data = (struct k3_dsp_boot_data *)dev_get_driver_data(dev);

	return 0;
}

/**
 * k3_dsp_probe() - Basic probe
 * @dev: corresponding k3 remote processor device
 *
 * Return: 0 if all goes good, else appropriate error code.
 */
static int k3_dsp_probe(struct udevice *dev)
{
	struct k3_dsp_privdata *dsp;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	dsp = dev_get_priv(dev);

	ret = k3_dsp_of_to_priv(dev, dsp);
	if (ret) {
		dev_dbg(dev, "%s: Probe failed with error %d\n", __func__, ret);
		return ret;
	}

	/*
	 * The DSP local resets are deasserted by default on Power-On-Reset.
	 * Assert the local resets to ensure the DSPs don't execute bogus code
	 * in .load() callback when the module reset is released to support
	 * internal memory loading. This is needed for C66x DSPs, and is a
	 * no-op on C71x DSPs.
	 */
	reset_assert(&dsp->dsp_rst);

	dev_dbg(dev, "Remoteproc successfully probed\n");

	return 0;
}

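/**
 * k3_dsp_remove() - Remove callback, free the internal memory bookkeeping
 * @dev: corresponding k3 remote processor device
 *
 * Return: always 0
 */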
static int k3_dsp_remove(struct udevice *dev)
{
	struct k3_dsp_privdata *dsp = dev_get_priv(dev);

	free(dsp->mem);

	return 0;
}

static const struct k3_dsp_boot_data c66_data = {
	.boot_align_addr = SZ_1K,
	.uses_lreset = true,
};

static const struct k3_dsp_boot_data c71_data = {
	.boot_align_addr = SZ_2M,
	.uses_lreset = false,
};

static const struct udevice_id k3_dsp_ids[] = {
	{ .compatible = "ti,j721e-c66-dsp", .data = (ulong)&c66_data, },
	{ .compatible = "ti,j721e-c71-dsp", .data = (ulong)&c71_data, },
	{ .compatible = "ti,j721s2-c71-dsp", .data = (ulong)&c71_data, },
	{ .compatible = "ti,am62a-c7xv-dsp", .data = (ulong)&c71_data, },
	{}
};

U_BOOT_DRIVER(k3_dsp) = {
	.name = "k3_dsp",
	.of_match = k3_dsp_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_dsp_ops,
	.probe = k3_dsp_probe,
	.remove = k3_dsp_remove,
	.priv_auto = sizeof(struct k3_dsp_privdata),
};