blob: ef2c18722494f4056204f4ab96f346b5eb760b60 [file] [log] [blame]
Lokesh Vutla58633f12019-09-04 16:01:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments' K3 R5 Remoteproc driver
4 *
Suman Anna5d56d252020-08-17 18:15:08 -05005 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
Lokesh Vutla58633f12019-09-04 16:01:34 +05306 * Lokesh Vutla <lokeshvutla@ti.com>
Suman Anna5d56d252020-08-17 18:15:08 -05007 * Suman Anna <s-anna@ti.com>
Lokesh Vutla58633f12019-09-04 16:01:34 +05308 */
9
Lokesh Vutla58633f12019-09-04 16:01:34 +053010#include <dm.h>
Simon Glass0f2af882020-05-10 11:40:05 -060011#include <log.h>
Simon Glass9bc15642020-02-03 07:36:16 -070012#include <malloc.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053013#include <remoteproc.h>
14#include <errno.h>
15#include <clk.h>
16#include <reset.h>
17#include <asm/io.h>
Simon Glass9bc15642020-02-03 07:36:16 -070018#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070019#include <linux/err.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053020#include <linux/kernel.h>
21#include <linux/soc/ti/ti_sci_protocol.h>
22#include "ti_sci_proc.h"
Manorit Chawdhry3b779d12024-05-21 16:26:46 +053023#include <mach/security.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053024
25/*
26 * R5F's view of this address can either be for ATCM or BTCM with the other
27 * at address 0x0 based on loczrama signal.
28 */
29#define K3_R5_TCM_DEV_ADDR 0x41010000
30
31/* R5 TI-SCI Processor Configuration Flags */
32#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
33#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
34#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
35#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
36#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
37#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
38#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
39#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
40#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR 0x10000000
Suman Anna5d56d252020-08-17 18:15:08 -050041/* Available from J7200 SoCs onwards */
42#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
Hari Nagalla89322b82024-05-09 09:20:35 -050043#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000
44
Lokesh Vutla58633f12019-09-04 16:01:34 +053045
46/* R5 TI-SCI Processor Control Flags */
47#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
48
49/* R5 TI-SCI Processor Status Flags */
50#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
51#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
52#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
53#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
54
55#define NR_CORES 2
56
/* Cluster operational modes, selectable via the "ti,cluster-mode" DT property */
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,
	CLUSTER_MODE_LOCKSTEP,
	CLUSTER_MODE_SINGLECPU,
	CLUSTER_MODE_SINGLECORE,	/* forced for "ti,am62-r5fss" devices */
};
63
/**
 * struct k3_r5f_ip_data - internal data structure used for IP variations
 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
 * @is_single_core: flag to denote a single-core-only R5F IP variant
 */
struct k3_r5f_ip_data {
	bool tcm_is_double;
	bool tcm_ecc_autoinit;
	bool is_single_core;
};
74
/**
 * struct k3_r5f_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5f_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
88
/**
 * struct k3_r5f_core - K3 R5 core structure
 * @dev: cached device pointer
 * @cluster: pointer to the parent cluster.
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ipdata: cached pointer to R5F IP specific feature data
 * @mem: Array of available internal memories
 * @num_mems: Number of available memories
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 * @in_use: flag to tell if the core is already in use.
 */
struct k3_r5f_core {
	struct udevice *dev;
	struct k3_r5f_cluster *cluster;
	struct reset_ctl reset;
	struct ti_sci_proc tsp;
	struct k3_r5f_ip_data *ipdata;
	struct k3_r5f_mem *mem;
	int num_mems;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
	bool in_use;
};
116
/**
 * struct k3_r5f_cluster - K3 R5F Cluster structure
 * @mode: Mode to configure the Cluster - Split, LockStep or Single-CPU/Core
 * @cores: Array of pointers to R5 cores within the cluster
 */
struct k3_r5f_cluster {
	enum cluster_mode mode;
	struct k3_r5f_core *cores[NR_CORES];
};
126
127static bool is_primary_core(struct k3_r5f_core *core)
128{
129 return core == core->cluster->cores[0];
130}
131
132static int k3_r5f_proc_request(struct k3_r5f_core *core)
133{
134 struct k3_r5f_cluster *cluster = core->cluster;
135 int i, ret;
136
137 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
138 for (i = 0; i < NR_CORES; i++) {
139 ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
140 if (ret)
141 goto proc_release;
142 }
143 } else {
144 ret = ti_sci_proc_request(&core->tsp);
145 }
146
147 return 0;
148
149proc_release:
150 while (i >= 0) {
151 ti_sci_proc_release(&cluster->cores[i]->tsp);
152 i--;
153 }
154 return ret;
155}
156
157static void k3_r5f_proc_release(struct k3_r5f_core *core)
158{
159 struct k3_r5f_cluster *cluster = core->cluster;
160 int i;
161
162 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
163 for (i = 0; i < NR_CORES; i++)
164 ti_sci_proc_release(&cluster->cores[i]->tsp);
165 else
166 ti_sci_proc_release(&core->tsp);
167}
168
/*
 * Power up all cores and deassert their local resets so the cluster can be
 * operated in LockStep mode. On any failure the steps already performed are
 * unrolled in reverse order.
 */
static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
{
	int ret, c;

	debug("%s\n", __func__);

	/* Power on the PSC module of every core, highest index (Core1) first */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
		if (ret)
			goto unroll_module_reset;
	}

	/* deassert local reset on all applicable cores */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = reset_deassert(&cluster->cores[c]->reset);
		if (ret)
			goto unroll_local_reset;
	}

	return 0;

unroll_local_reset:
	/* Re-assert local reset on the failed core and all higher-indexed ones */
	while (c < NR_CORES) {
		reset_assert(&cluster->cores[c]->reset);
		c++;
	}
	c = 0;
unroll_module_reset:
	/* Power off all cores from index c upwards */
	while (c < NR_CORES) {
		ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
		c++;
	}

	return ret;
}
204
205static int k3_r5f_split_release(struct k3_r5f_core *core)
206{
207 int ret;
208
Sean Andersonf13dc372020-09-15 10:45:08 -0400209 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530210
211 ret = ti_sci_proc_power_domain_on(&core->tsp);
212 if (ret) {
213 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
214 ret);
215 return ret;
216 }
217
218 ret = reset_deassert(&core->reset);
219 if (ret) {
220 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
221 ret);
222 if (ti_sci_proc_power_domain_off(&core->tsp))
223 dev_warn(core->dev, "module-reset assert back failed\n");
224 }
225
226 return ret;
227}
228
229static int k3_r5f_prepare(struct udevice *dev)
230{
231 struct k3_r5f_core *core = dev_get_priv(dev);
232 struct k3_r5f_cluster *cluster = core->cluster;
233 int ret = 0;
234
235 dev_dbg(dev, "%s\n", __func__);
236
237 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
238 ret = k3_r5f_lockstep_release(cluster);
239 else
240 ret = k3_r5f_split_release(core);
241
242 if (ret)
243 dev_err(dev, "Unable to enable cores for TCM loading %d\n",
244 ret);
245
246 return ret;
247}
248
249static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
250{
251 struct k3_r5f_cluster *cluster = core->cluster;
252
253 if (core->in_use) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400254 dev_err(core->dev,
255 "Invalid op: Trying to load/start on already running core %d\n",
Lokesh Vutla58633f12019-09-04 16:01:34 +0530256 core->tsp.proc_id);
257 return -EINVAL;
258 }
259
260 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400261 dev_err(core->dev,
262 "Secondary core is not probed in this cluster\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530263 return -EAGAIN;
264 }
265
266 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400267 dev_err(core->dev,
268 "Invalid op: Trying to start secondary core %d in lockstep mode\n",
Lokesh Vutla58633f12019-09-04 16:01:34 +0530269 core->tsp.proc_id);
270 return -EINVAL;
271 }
272
273 if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
274 if (!core->cluster->cores[0]->in_use) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400275 dev_err(core->dev,
276 "Invalid seq: Enable primary core before loading secondary core\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530277 return -EINVAL;
278 }
279 }
280
281 return 0;
282}
283
Suman Anna5d56d252020-08-17 18:15:08 -0500284/* Zero out TCMs so that ECC can be effective on all TCM addresses */
285void k3_r5f_init_tcm_memories(struct k3_r5f_core *core, bool auto_inited)
286{
287 if (core->ipdata->tcm_ecc_autoinit && auto_inited)
288 return;
289
290 if (core->atcm_enable)
291 memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
292 if (core->btcm_enable)
293 memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
294}
295
Lokesh Vutla58633f12019-09-04 16:01:34 +0530296/**
297 * k3_r5f_load() - Load up the Remote processor image
298 * @dev: rproc device pointer
299 * @addr: Address at which image is available
300 * @size: size of the image
301 *
302 * Return: 0 if all goes good, else appropriate error message.
303 */
304static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
305{
306 struct k3_r5f_core *core = dev_get_priv(dev);
Suman Anna5d56d252020-08-17 18:15:08 -0500307 u64 boot_vector;
308 u32 ctrl, sts, cfg = 0;
309 bool mem_auto_init;
Manorit Chawdhry3b779d12024-05-21 16:26:46 +0530310 void *image_addr = (void *)addr;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530311 int ret;
312
313 dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
314
315 ret = k3_r5f_core_sanity_check(core);
316 if (ret)
317 return ret;
318
319 ret = k3_r5f_proc_request(core);
320 if (ret)
321 return ret;
Suman Anna5d56d252020-08-17 18:15:08 -0500322
323 ret = ti_sci_proc_get_status(&core->tsp, &boot_vector, &cfg, &ctrl,
324 &sts);
325 if (ret)
326 return ret;
327 mem_auto_init = !(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530328
329 ret = k3_r5f_prepare(dev);
330 if (ret) {
331 dev_err(dev, "R5f prepare failed for core %d\n",
332 core->tsp.proc_id);
333 goto proc_release;
334 }
335
Suman Anna5d56d252020-08-17 18:15:08 -0500336 k3_r5f_init_tcm_memories(core, mem_auto_init);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530337
Manorit Chawdhry3b779d12024-05-21 16:26:46 +0530338 ti_secure_image_post_process(&image_addr, &size);
339
Lokesh Vutla58633f12019-09-04 16:01:34 +0530340 ret = rproc_elf_load_image(dev, addr, size);
341 if (ret < 0) {
342 dev_err(dev, "Loading elf failedi %d\n", ret);
343 goto proc_release;
344 }
345
346 boot_vector = rproc_elf_get_boot_addr(dev, addr);
347
Suman Anna5d56d252020-08-17 18:15:08 -0500348 dev_dbg(dev, "%s: Boot vector = 0x%llx\n", __func__, boot_vector);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530349
350 ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);
351
352proc_release:
353 k3_r5f_proc_release(core);
354
355 return ret;
356}
357
358static int k3_r5f_core_halt(struct k3_r5f_core *core)
359{
360 int ret;
361
362 ret = ti_sci_proc_set_control(&core->tsp,
363 PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
364 if (ret)
365 dev_err(core->dev, "Core %d failed to stop\n",
366 core->tsp.proc_id);
367
368 return ret;
369}
370
371static int k3_r5f_core_run(struct k3_r5f_core *core)
372{
373 int ret;
374
375 ret = ti_sci_proc_set_control(&core->tsp,
376 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
377 if (ret) {
378 dev_err(core->dev, "Core %d failed to start\n",
379 core->tsp.proc_id);
380 return ret;
381 }
382
383 return 0;
384}
385
/**
 * k3_r5f_start() - Start the remote processor
 * @dev: rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_r5f_start(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret, c;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			/* Run Core1 before Core0 in LockStep mode */
			for (c = NR_CORES - 1; c >= 0; c--) {
				ret = k3_r5f_core_run(cluster->cores[c]);
				if (ret)
					goto unroll_core_run;
			}
		} else {
			dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
				core->tsp.proc_id);
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		ret = k3_r5f_core_run(core);
		if (ret)
			goto proc_release;
	}

	core->in_use = true;

	k3_r5f_proc_release(core);
	return 0;

unroll_core_run:
	/* Halt the failed core and the higher-indexed ones already running */
	while (c < NR_CORES) {
		k3_r5f_core_halt(cluster->cores[c]);
		c++;
	}
proc_release:
	k3_r5f_proc_release(core);

	return ret;
}
442
443static int k3_r5f_split_reset(struct k3_r5f_core *core)
444{
445 int ret;
446
Sean Andersonf13dc372020-09-15 10:45:08 -0400447 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530448
449 if (reset_assert(&core->reset))
450 ret = -EINVAL;
451
452 if (ti_sci_proc_power_domain_off(&core->tsp))
453 ret = -EINVAL;
454
455 return ret;
456}
457
458static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
459{
460 int ret = 0, c;
461
Sean Andersonf13dc372020-09-15 10:45:08 -0400462 debug("%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530463
464 for (c = 0; c < NR_CORES; c++)
465 if (reset_assert(&cluster->cores[c]->reset))
466 ret = -EINVAL;
467
468 /* disable PSC modules on all applicable cores */
469 for (c = 0; c < NR_CORES; c++)
470 if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
471 ret = -EINVAL;
472
473 return ret;
474}
475
476static int k3_r5f_unprepare(struct udevice *dev)
477{
478 struct k3_r5f_core *core = dev_get_priv(dev);
479 struct k3_r5f_cluster *cluster = core->cluster;
480 int ret;
481
482 dev_dbg(dev, "%s\n", __func__);
483
484 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
485 if (is_primary_core(core))
486 ret = k3_r5f_lockstep_reset(cluster);
487 } else {
488 ret = k3_r5f_split_reset(core);
489 }
490
491 if (ret)
492 dev_warn(dev, "Unable to enable cores for TCM loading %d\n",
493 ret);
494
495 return 0;
496}
497
/**
 * k3_r5f_stop() - Halt the remote processor and put it back into reset
 * @dev: rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_r5f_stop(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int c, ret;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	core->in_use = false;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			/* Halt Core0 first, then Core1 */
			for (c = 0; c < NR_CORES; c++)
				k3_r5f_core_halt(cluster->cores[c]);
		} else {
			dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		k3_r5f_core_halt(core);
	}

	ret = k3_r5f_unprepare(dev);
proc_release:
	k3_r5f_proc_release(core);
	return ret;
}
530
531static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
532{
533 struct k3_r5f_core *core = dev_get_priv(dev);
534 void __iomem *va = NULL;
535 phys_addr_t bus_addr;
536 u32 dev_addr, offset;
537 ulong mem_size;
538 int i;
539
540 dev_dbg(dev, "%s\n", __func__);
541
542 if (size <= 0)
543 return NULL;
544
545 for (i = 0; i < core->num_mems; i++) {
546 bus_addr = core->mem[i].bus_addr;
547 dev_addr = core->mem[i].dev_addr;
548 mem_size = core->mem[i].size;
549
550 if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
551 offset = da - bus_addr;
552 va = core->mem[i].cpu_addr + offset;
553 return (__force void *)va;
554 }
555
556 if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
557 offset = da - dev_addr;
558 va = core->mem[i].cpu_addr + offset;
559 return (__force void *)va;
560 }
561 }
562
563 /* Assume it is DDR region and return da */
564 return map_physmem(da, size, MAP_NOCACHE);
565}
566
/* No device-specific initialization needed; mandatory rproc op stub */
static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}
571
/* No device-specific reset handling needed; mandatory rproc op stub */
static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}
576
/* Remoteproc uclass operations for the K3 R5F cores */
static const struct dm_rproc_ops k3_r5f_rproc_ops = {
	.init = k3_r5f_init,
	.reset = k3_r5f_reset,
	.start = k3_r5f_start,
	.stop = k3_r5f_stop,
	.load = k3_r5f_load,
	.device_to_virt = k3_r5f_da_to_va,
};
585
586static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
587{
588 struct k3_r5f_cluster *cluster = core->cluster;
589 u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
Suman Anna9ff29302020-03-10 20:24:29 -0500590 bool lockstep_permitted;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530591 u64 boot_vec = 0;
592 int ret;
593
Sean Andersonf13dc372020-09-15 10:45:08 -0400594 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530595
596 ret = ti_sci_proc_request(&core->tsp);
597 if (ret < 0)
598 return ret;
599
600 /* Do not touch boot vector now. Load will take care of it. */
601 clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;
602
603 ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
604 if (ret)
605 goto out;
606
607 /* Sanity check for Lockstep mode */
Suman Anna9ff29302020-03-10 20:24:29 -0500608 lockstep_permitted = !!(sts &
609 PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
Hari Nagalla89322b82024-05-09 09:20:35 -0500610 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && is_primary_core(core) &&
611 !lockstep_permitted) {
612 dev_err(core->dev, "LockStep mode not permitted on this \
613 device\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530614 ret = -EINVAL;
615 goto out;
616 }
617
618 /* Primary core only configuration */
619 if (is_primary_core(core)) {
620 /* always enable ARM mode */
621 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
622 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
623 set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
Suman Anna9ff29302020-03-10 20:24:29 -0500624 else if (lockstep_permitted)
Lokesh Vutla58633f12019-09-04 16:01:34 +0530625 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
626 }
627
Hari Nagalla89322b82024-05-09 09:20:35 -0500628 if (core->ipdata->is_single_core)
629 set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
630
Lokesh Vutla58633f12019-09-04 16:01:34 +0530631 if (core->atcm_enable)
632 set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
633 else
634 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
635
636 if (core->btcm_enable)
637 set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
638 else
639 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
640
641 if (core->loczrama)
642 set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
643 else
644 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
645
646 ret = k3_r5f_core_halt(core);
647 if (ret)
648 goto out;
649
650 ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
651out:
652 ti_sci_proc_release(&core->tsp);
653 return ret;
654}
655
656static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
657{
658 u32 ids[2];
659 int ret;
660
661 dev_dbg(dev, "%s\n", __func__);
662
663 tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
664 if (IS_ERR(tsp->sci)) {
665 dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
666 return PTR_ERR(tsp->sci);
667 }
668
669 ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
670 if (ret) {
671 dev_err(dev, "Proc IDs not populated %d\n", ret);
672 return ret;
673 }
674
675 tsp->ops = &tsp->sci->ops.proc_ops;
676 tsp->proc_id = ids[0];
677 tsp->host_id = ids[1];
678 tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
679 TI_SCI_RESOURCE_NULL);
680 if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
681 dev_err(dev, "Device ID not populated %d\n", ret);
682 return -ENODEV;
683 }
684
685 return 0;
686}
687
688static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
689{
690 int ret;
691
Sean Andersonf13dc372020-09-15 10:45:08 -0400692 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530693
Suman Annaa45e6db2021-01-26 18:20:56 -0600694 core->atcm_enable = dev_read_u32_default(core->dev, "ti,atcm-enable", 0);
695 core->btcm_enable = dev_read_u32_default(core->dev, "ti,btcm-enable", 1);
696 core->loczrama = dev_read_u32_default(core->dev, "ti,loczrama", 1);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530697
698 ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
699 if (ret)
700 return ret;
701
702 ret = reset_get_by_index(core->dev, 0, &core->reset);
703 if (ret) {
704 dev_err(core->dev, "Reset lines not available: %d\n", ret);
705 return ret;
706 }
707
Suman Anna5d56d252020-08-17 18:15:08 -0500708 core->ipdata = (struct k3_r5f_ip_data *)dev_get_driver_data(core->dev);
709
Lokesh Vutla58633f12019-09-04 16:01:34 +0530710 return 0;
711}
712
/**
 * k3_r5f_core_of_get_memories() - Map the core's ATCM and BTCM memories
 * @core: K3 R5F core being operated upon
 *
 * Reads the "atcm"/"btcm" address entries, maps them for CPU access and
 * derives the device-view address of each TCM from the loczrama setting.
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
{
	static const char * const mem_names[] = {"atcm", "btcm"};
	struct udevice *dev = core->dev;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	core->num_mems = ARRAY_SIZE(mem_names);
	core->mem = calloc(core->num_mems, sizeof(*core->mem));
	if (!core->mem)
		return -ENOMEM;

	for (i = 0; i < core->num_mems; i++) {
		core->mem[i].bus_addr = dev_read_addr_size_name(dev,
								mem_names[i],
					(fdt_addr_t *)&core->mem[i].size);
		if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
			dev_err(dev, "%s bus address not found\n",
				mem_names[i]);
			return -EINVAL;
		}
		core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
						    core->mem[i].size,
						    MAP_NOCACHE);
		/* loczrama selects which TCM appears at device address 0x0 */
		if (!strcmp(mem_names[i], "atcm")) {
			core->mem[i].dev_addr = core->loczrama ?
							0 : K3_R5_TCM_DEV_ADDR;
		} else {
			core->mem[i].dev_addr = core->loczrama ?
							K3_R5_TCM_DEV_ADDR : 0;
		}

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
			mem_names[i], &core->mem[i].bus_addr,
			core->mem[i].size, core->mem[i].cpu_addr,
			core->mem[i].dev_addr);
	}

	return 0;
}
754
Suman Anna5d56d252020-08-17 18:15:08 -0500755/*
756 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
757 * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
758 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
759 * LockStep-mode. The newer revisions of the R5FSS IP maximizes these TCMs by
760 * leveraging the Core1 TCMs as well in certain modes where they would have
761 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
762 * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
763 * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
764 * dts representation reflects this increased size on supported SoCs. The Core0
765 * TCM sizes therefore have to be adjusted to only half the original size in
766 * Split mode.
767 */
768static void k3_r5f_core_adjust_tcm_sizes(struct k3_r5f_core *core)
769{
770 struct k3_r5f_cluster *cluster = core->cluster;
771
772 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
773 return;
774
775 if (!core->ipdata->tcm_is_double)
776 return;
777
778 if (core == cluster->cores[0]) {
779 core->mem[0].size /= 2;
780 core->mem[1].size /= 2;
781
782 dev_dbg(core->dev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
783 core->mem[0].size, core->mem[1].size);
784 }
785}
786
/**
 * k3_r5f_probe() - Basic probe
 * @dev: corresponding k3 remote processor device
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int k3_r5f_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
	struct k3_r5f_core *core = dev_get_priv(dev);
	bool r_state;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	core->dev = dev;
	ret = k3_r5f_of_to_priv(core);
	if (ret)
		return ret;

	core->cluster = cluster;
	/* Assume Primary core gets probed first */
	if (!cluster->cores[0])
		cluster->cores[0] = core;
	else
		cluster->cores[1] = core;

	ret = k3_r5f_core_of_get_memories(core);
	if (ret) {
		dev_err(dev, "Rproc getting internal memories failed\n");
		return ret;
	}

	/*
	 * The PM functionality is not supported by the firmware during
	 * SPL execution with the separated DM firmware image. The following
	 * piece of code is not compiled in that case.
	 */
	if (!IS_ENABLED(CONFIG_K3_DM_FW)) {
		/* Query current power state; a running core is left untouched */
		ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci,
						       core->tsp.dev_id,
						       &r_state, &core->in_use);
		if (ret)
			return ret;

		if (core->in_use) {
			dev_info(dev, "Core %d is already in use. No rproc commands work\n",
				 core->tsp.proc_id);
			return 0;
		}

		/* Make sure Local reset is asserted. Redundant? */
		reset_assert(&core->reset);
	}

	ret = k3_r5f_rproc_configure(core);
	if (ret) {
		dev_err(dev, "rproc configure failed %d\n", ret);
		return ret;
	}

	k3_r5f_core_adjust_tcm_sizes(core);

	dev_dbg(dev, "Remoteproc successfully probed\n");

	return 0;
}
854
855static int k3_r5f_remove(struct udevice *dev)
856{
857 struct k3_r5f_core *core = dev_get_priv(dev);
858
859 free(core->mem);
860
861 ti_sci_proc_release(&core->tsp);
862
863 return 0;
864}
865
/* IP feature data for AM654/J721E class R5F subsystems */
static const struct k3_r5f_ip_data k3_data = {
	.tcm_is_double = false,
	.tcm_ecc_autoinit = false,
	.is_single_core = false,
};

/* IP feature data for J7200/J721S2 class R5F subsystems (larger TCMs) */
static const struct k3_r5f_ip_data j7200_j721s2_data = {
	.tcm_is_double = true,
	.tcm_ecc_autoinit = true,
	.is_single_core = false,
};

/* IP feature data for AM62 class single-core R5F subsystems */
static const struct k3_r5f_ip_data am62_data = {
	.tcm_is_double = false,
	.tcm_ecc_autoinit = false,
	.is_single_core = true,
};

/* Match table mapping each core compatible to its IP feature data */
static const struct udevice_id k3_r5f_rproc_ids[] = {
	{ .compatible = "ti,am654-r5f", .data = (ulong)&k3_data, },
	{ .compatible = "ti,j721e-r5f", .data = (ulong)&k3_data, },
	{ .compatible = "ti,j7200-r5f", .data = (ulong)&j7200_j721s2_data, },
	{ .compatible = "ti,j721s2-r5f", .data = (ulong)&j7200_j721s2_data, },
	{ .compatible = "ti,am62-r5f", .data = (ulong)&am62_data, },
	{}
};

/* K3 R5F core device driver (one instance per R5F core) */
U_BOOT_DRIVER(k3_r5f_rproc) = {
	.name = "k3_r5f_rproc",
	.of_match = k3_r5f_rproc_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_r5f_rproc_ops,
	.probe = k3_r5f_probe,
	.remove = k3_r5f_remove,
	.priv_auto = sizeof(struct k3_r5f_core),
};
902
903static int k3_r5f_cluster_probe(struct udevice *dev)
904{
905 struct k3_r5f_cluster *cluster = dev_get_priv(dev);
906
907 dev_dbg(dev, "%s\n", __func__);
908
Suman Annaa45e6db2021-01-26 18:20:56 -0600909 cluster->mode = dev_read_u32_default(dev, "ti,cluster-mode",
Lokesh Vutla58633f12019-09-04 16:01:34 +0530910 CLUSTER_MODE_LOCKSTEP);
911
Hari Nagalla89322b82024-05-09 09:20:35 -0500912 if (device_is_compatible(dev, "ti,am62-r5fss")) {
913 cluster->mode = CLUSTER_MODE_SINGLECORE;
914 return 0;
915 }
916
Lokesh Vutla58633f12019-09-04 16:01:34 +0530917 if (device_get_child_count(dev) != 2) {
918 dev_err(dev, "Invalid number of R5 cores");
919 return -EINVAL;
920 }
921
922 dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
923 __func__, cluster->mode ? "lockstep" : "split");
924
925 return 0;
926}
927
/* Match table for the R5F cluster (subsystem) parent device */
static const struct udevice_id k3_r5fss_ids[] = {
	{ .compatible = "ti,am654-r5fss"},
	{ .compatible = "ti,j721e-r5fss"},
	{ .compatible = "ti,j7200-r5fss"},
	{ .compatible = "ti,j721s2-r5fss"},
	{ .compatible = "ti,am62-r5fss"},
	{}
};

/* K3 R5F cluster driver; its children are the individual R5F core devices */
U_BOOT_DRIVER(k3_r5fss) = {
	.name = "k3_r5fss",
	.of_match = k3_r5fss_ids,
	.id = UCLASS_MISC,
	.probe = k3_r5f_cluster_probe,
	.priv_auto = sizeof(struct k3_r5f_cluster),
	.flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,
};