blob: d78b3fa1bbd109d5f7a8ecf0d82219df3a065f9c [file] [log] [blame]
Lokesh Vutla58633f12019-09-04 16:01:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments' K3 R5 Remoteproc driver
4 *
Suman Anna5d56d252020-08-17 18:15:08 -05005 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
Lokesh Vutla58633f12019-09-04 16:01:34 +05306 * Lokesh Vutla <lokeshvutla@ti.com>
Suman Anna5d56d252020-08-17 18:15:08 -05007 * Suman Anna <s-anna@ti.com>
Lokesh Vutla58633f12019-09-04 16:01:34 +05308 */
9
Lokesh Vutla58633f12019-09-04 16:01:34 +053010#include <dm.h>
Simon Glass0f2af882020-05-10 11:40:05 -060011#include <log.h>
Simon Glass9bc15642020-02-03 07:36:16 -070012#include <malloc.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053013#include <remoteproc.h>
14#include <errno.h>
15#include <clk.h>
16#include <reset.h>
17#include <asm/io.h>
Simon Glass9bc15642020-02-03 07:36:16 -070018#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070019#include <linux/err.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053020#include <linux/kernel.h>
21#include <linux/soc/ti/ti_sci_protocol.h>
22#include "ti_sci_proc.h"
Manorit Chawdhry3b779d12024-05-21 16:26:46 +053023#include <mach/security.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053024
25/*
26 * R5F's view of this address can either be for ATCM or BTCM with the other
27 * at address 0x0 based on loczrama signal.
28 */
29#define K3_R5_TCM_DEV_ADDR 0x41010000
30
31/* R5 TI-SCI Processor Configuration Flags */
32#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
33#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
34#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
35#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
36#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
37#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
38#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
39#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
40#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR 0x10000000
Suman Anna5d56d252020-08-17 18:15:08 -050041/* Available from J7200 SoCs onwards */
42#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
Hari Nagalla89322b82024-05-09 09:20:35 -050043#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000
44
Lokesh Vutla58633f12019-09-04 16:01:34 +053045/* R5 TI-SCI Processor Control Flags */
46#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
47
48/* R5 TI-SCI Processor Status Flags */
49#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
50#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
51#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
52#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
53
54#define NR_CORES 2
55
/* Modes the R5F sub-system (cluster) can be configured in */
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,		/* both cores run independent applications */
	CLUSTER_MODE_LOCKSTEP,		/* both cores run the same image in lockstep */
	CLUSTER_MODE_SINGLECPU,		/* NOTE(review): not referenced in this file; presumably Core0-only usage of a dual-core cluster — confirm */
	CLUSTER_MODE_SINGLECORE,	/* SoCs with a lone R5F core (e.g. AM62x) */
};
62
/**
 * struct k3_r5f_ip_data - internal data structure used for IP variations
 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
 * @is_single_core: flag to denote an IP instance with a single R5F core
 */
struct k3_r5f_ip_data {
	bool tcm_is_double;
	bool tcm_ecc_autoinit;
	bool is_single_core;
};
73
/**
 * struct k3_r5f_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5f_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
87
/**
 * struct k3_r5f_core - K3 R5 core structure
 * @dev: cached device pointer
 * @cluster: pointer to the parent cluster.
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ipdata: cached pointer to R5F IP specific feature data
 * @mem: Array of available internal memories
 * @num_mems: Number of available memories
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 * @in_use: flag to tell if the core is already in use.
 */
struct k3_r5f_core {
	struct udevice *dev;
	struct k3_r5f_cluster *cluster;
	struct reset_ctl reset;
	struct ti_sci_proc tsp;
	struct k3_r5f_ip_data *ipdata;
	struct k3_r5f_mem *mem;
	int num_mems;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
	bool in_use;
};
115
/**
 * struct k3_r5f_cluster - K3 R5F Cluster structure
 * @mode: Mode to configure the Cluster - Split, LockStep or Single-core
 * @cores: Array of pointers to R5 cores within the cluster
 */
struct k3_r5f_cluster {
	enum cluster_mode mode;
	struct k3_r5f_core *cores[NR_CORES];
};
125
126static bool is_primary_core(struct k3_r5f_core *core)
127{
128 return core == core->cluster->cores[0];
129}
130
131static int k3_r5f_proc_request(struct k3_r5f_core *core)
132{
133 struct k3_r5f_cluster *cluster = core->cluster;
134 int i, ret;
135
136 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
137 for (i = 0; i < NR_CORES; i++) {
138 ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
139 if (ret)
140 goto proc_release;
141 }
142 } else {
143 ret = ti_sci_proc_request(&core->tsp);
144 }
145
146 return 0;
147
148proc_release:
149 while (i >= 0) {
150 ti_sci_proc_release(&cluster->cores[i]->tsp);
151 i--;
152 }
153 return ret;
154}
155
156static void k3_r5f_proc_release(struct k3_r5f_core *core)
157{
158 struct k3_r5f_cluster *cluster = core->cluster;
159 int i;
160
161 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
162 for (i = 0; i < NR_CORES; i++)
163 ti_sci_proc_release(&cluster->cores[i]->tsp);
164 else
165 ti_sci_proc_release(&core->tsp);
166}
167
/*
 * Power up all cores of the cluster and deassert their local resets, in
 * descending core order (Core1 before Core0) as required for lockstep.
 * On failure, everything already brought up is rolled back.
 */
static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
{
	int ret, c;

	debug("%s\n", __func__);

	/* power up the PSC module of each core, Core1 first */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
		if (ret)
			goto unroll_module_reset;
	}

	/* deassert local reset on all applicable cores */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = reset_deassert(&cluster->cores[c]->reset);
		if (ret)
			goto unroll_local_reset;
	}

	return 0;

unroll_local_reset:
	/* re-assert local reset on cores released so far (c upward) */
	while (c < NR_CORES) {
		reset_assert(&cluster->cores[c]->reset);
		c++;
	}
	c = 0;	/* all cores were powered on; power them all back off below */
unroll_module_reset:
	while (c < NR_CORES) {
		ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
		c++;
	}

	return ret;
}
203
204static int k3_r5f_split_release(struct k3_r5f_core *core)
205{
206 int ret;
207
Sean Andersonf13dc372020-09-15 10:45:08 -0400208 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530209
210 ret = ti_sci_proc_power_domain_on(&core->tsp);
211 if (ret) {
212 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
213 ret);
214 return ret;
215 }
216
217 ret = reset_deassert(&core->reset);
218 if (ret) {
219 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
220 ret);
221 if (ti_sci_proc_power_domain_off(&core->tsp))
222 dev_warn(core->dev, "module-reset assert back failed\n");
223 }
224
225 return ret;
226}
227
228static int k3_r5f_prepare(struct udevice *dev)
229{
230 struct k3_r5f_core *core = dev_get_priv(dev);
231 struct k3_r5f_cluster *cluster = core->cluster;
232 int ret = 0;
233
234 dev_dbg(dev, "%s\n", __func__);
235
236 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
237 ret = k3_r5f_lockstep_release(cluster);
238 else
239 ret = k3_r5f_split_release(core);
240
241 if (ret)
242 dev_err(dev, "Unable to enable cores for TCM loading %d\n",
243 ret);
244
245 return ret;
246}
247
248static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
249{
250 struct k3_r5f_cluster *cluster = core->cluster;
251
252 if (core->in_use) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400253 dev_err(core->dev,
254 "Invalid op: Trying to load/start on already running core %d\n",
Lokesh Vutla58633f12019-09-04 16:01:34 +0530255 core->tsp.proc_id);
256 return -EINVAL;
257 }
258
259 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400260 dev_err(core->dev,
261 "Secondary core is not probed in this cluster\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530262 return -EAGAIN;
263 }
264
265 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400266 dev_err(core->dev,
267 "Invalid op: Trying to start secondary core %d in lockstep mode\n",
Lokesh Vutla58633f12019-09-04 16:01:34 +0530268 core->tsp.proc_id);
269 return -EINVAL;
270 }
271
272 if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
273 if (!core->cluster->cores[0]->in_use) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400274 dev_err(core->dev,
275 "Invalid seq: Enable primary core before loading secondary core\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530276 return -EINVAL;
277 }
278 }
279
280 return 0;
281}
282
Suman Anna5d56d252020-08-17 18:15:08 -0500283/* Zero out TCMs so that ECC can be effective on all TCM addresses */
284void k3_r5f_init_tcm_memories(struct k3_r5f_core *core, bool auto_inited)
285{
286 if (core->ipdata->tcm_ecc_autoinit && auto_inited)
287 return;
288
289 if (core->atcm_enable)
290 memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
291 if (core->btcm_enable)
292 memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
293}
294
Lokesh Vutla58633f12019-09-04 16:01:34 +0530295/**
296 * k3_r5f_load() - Load up the Remote processor image
297 * @dev: rproc device pointer
298 * @addr: Address at which image is available
299 * @size: size of the image
300 *
301 * Return: 0 if all goes good, else appropriate error message.
302 */
303static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
304{
305 struct k3_r5f_core *core = dev_get_priv(dev);
Suman Anna5d56d252020-08-17 18:15:08 -0500306 u64 boot_vector;
307 u32 ctrl, sts, cfg = 0;
308 bool mem_auto_init;
Manorit Chawdhry3b779d12024-05-21 16:26:46 +0530309 void *image_addr = (void *)addr;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530310 int ret;
311
312 dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
313
314 ret = k3_r5f_core_sanity_check(core);
315 if (ret)
316 return ret;
317
318 ret = k3_r5f_proc_request(core);
319 if (ret)
320 return ret;
Suman Anna5d56d252020-08-17 18:15:08 -0500321
322 ret = ti_sci_proc_get_status(&core->tsp, &boot_vector, &cfg, &ctrl,
323 &sts);
324 if (ret)
325 return ret;
326 mem_auto_init = !(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530327
328 ret = k3_r5f_prepare(dev);
329 if (ret) {
330 dev_err(dev, "R5f prepare failed for core %d\n",
331 core->tsp.proc_id);
332 goto proc_release;
333 }
334
Suman Anna5d56d252020-08-17 18:15:08 -0500335 k3_r5f_init_tcm_memories(core, mem_auto_init);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530336
Manorit Chawdhry3b779d12024-05-21 16:26:46 +0530337 ti_secure_image_post_process(&image_addr, &size);
338
Lokesh Vutla58633f12019-09-04 16:01:34 +0530339 ret = rproc_elf_load_image(dev, addr, size);
340 if (ret < 0) {
341 dev_err(dev, "Loading elf failedi %d\n", ret);
342 goto proc_release;
343 }
344
345 boot_vector = rproc_elf_get_boot_addr(dev, addr);
346
Suman Anna5d56d252020-08-17 18:15:08 -0500347 dev_dbg(dev, "%s: Boot vector = 0x%llx\n", __func__, boot_vector);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530348
349 ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);
350
351proc_release:
352 k3_r5f_proc_release(core);
353
354 return ret;
355}
356
357static int k3_r5f_core_halt(struct k3_r5f_core *core)
358{
359 int ret;
360
361 ret = ti_sci_proc_set_control(&core->tsp,
362 PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
363 if (ret)
364 dev_err(core->dev, "Core %d failed to stop\n",
365 core->tsp.proc_id);
366
367 return ret;
368}
369
370static int k3_r5f_core_run(struct k3_r5f_core *core)
371{
372 int ret;
373
374 ret = ti_sci_proc_set_control(&core->tsp,
375 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
376 if (ret) {
377 dev_err(core->dev, "Core %d failed to start\n",
378 core->tsp.proc_id);
379 return ret;
380 }
381
382 return 0;
383}
384
/**
 * k3_r5f_start() - Start the remote processor
 * @dev:	rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_r5f_start(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret, c;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			/* release Core1 before Core0 for lockstep start */
			for (c = NR_CORES - 1; c >= 0; c--) {
				ret = k3_r5f_core_run(cluster->cores[c]);
				if (ret)
					goto unroll_core_run;
			}
		} else {
			dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
				core->tsp.proc_id);
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		ret = k3_r5f_core_run(core);
		if (ret)
			goto proc_release;
	}

	core->in_use = true;

	k3_r5f_proc_release(core);
	return 0;

unroll_core_run:
	/* halt the cores started so far (indices c..NR_CORES-1) */
	while (c < NR_CORES) {
		k3_r5f_core_halt(cluster->cores[c]);
		c++;
	}
proc_release:
	k3_r5f_proc_release(core);

	return ret;
}
441
442static int k3_r5f_split_reset(struct k3_r5f_core *core)
443{
444 int ret;
445
Sean Andersonf13dc372020-09-15 10:45:08 -0400446 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530447
448 if (reset_assert(&core->reset))
449 ret = -EINVAL;
450
451 if (ti_sci_proc_power_domain_off(&core->tsp))
452 ret = -EINVAL;
453
454 return ret;
455}
456
457static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
458{
459 int ret = 0, c;
460
Sean Andersonf13dc372020-09-15 10:45:08 -0400461 debug("%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530462
463 for (c = 0; c < NR_CORES; c++)
464 if (reset_assert(&cluster->cores[c]->reset))
465 ret = -EINVAL;
466
467 /* disable PSC modules on all applicable cores */
468 for (c = 0; c < NR_CORES; c++)
469 if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
470 ret = -EINVAL;
471
472 return ret;
473}
474
475static int k3_r5f_unprepare(struct udevice *dev)
476{
477 struct k3_r5f_core *core = dev_get_priv(dev);
478 struct k3_r5f_cluster *cluster = core->cluster;
479 int ret;
480
481 dev_dbg(dev, "%s\n", __func__);
482
483 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
484 if (is_primary_core(core))
485 ret = k3_r5f_lockstep_reset(cluster);
486 } else {
487 ret = k3_r5f_split_reset(core);
488 }
489
490 if (ret)
491 dev_warn(dev, "Unable to enable cores for TCM loading %d\n",
492 ret);
493
494 return 0;
495}
496
497static int k3_r5f_stop(struct udevice *dev)
498{
499 struct k3_r5f_core *core = dev_get_priv(dev);
500 struct k3_r5f_cluster *cluster = core->cluster;
501 int c, ret;
502
503 dev_dbg(dev, "%s\n", __func__);
504
505 ret = k3_r5f_proc_request(core);
506 if (ret)
507 return ret;
508
509 core->in_use = false;
510
511 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
512 if (is_primary_core(core)) {
513 for (c = 0; c < NR_CORES; c++)
514 k3_r5f_core_halt(cluster->cores[c]);
515 } else {
516 dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
517 ret = -EINVAL;
518 goto proc_release;
519 }
520 } else {
521 k3_r5f_core_halt(core);
522 }
523
524 ret = k3_r5f_unprepare(dev);
525proc_release:
526 k3_r5f_proc_release(core);
527 return ret;
528}
529
530static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
531{
532 struct k3_r5f_core *core = dev_get_priv(dev);
533 void __iomem *va = NULL;
534 phys_addr_t bus_addr;
535 u32 dev_addr, offset;
536 ulong mem_size;
537 int i;
538
539 dev_dbg(dev, "%s\n", __func__);
540
541 if (size <= 0)
542 return NULL;
543
544 for (i = 0; i < core->num_mems; i++) {
545 bus_addr = core->mem[i].bus_addr;
546 dev_addr = core->mem[i].dev_addr;
547 mem_size = core->mem[i].size;
548
549 if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
550 offset = da - bus_addr;
551 va = core->mem[i].cpu_addr + offset;
552 return (__force void *)va;
553 }
554
555 if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
556 offset = da - dev_addr;
557 va = core->mem[i].cpu_addr + offset;
558 return (__force void *)va;
559 }
560 }
561
562 /* Assume it is DDR region and return da */
563 return map_physmem(da, size, MAP_NOCACHE);
564}
565
/* Nothing to do at rproc init time; all setup happens at probe/load */
static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}
570
/* No standalone reset operation; resets are managed via start/stop paths */
static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}
575
/* Remoteproc uclass operations for the K3 R5F cores */
static const struct dm_rproc_ops k3_r5f_rproc_ops = {
	.init = k3_r5f_init,
	.reset = k3_r5f_reset,
	.start = k3_r5f_start,
	.stop = k3_r5f_stop,
	.load = k3_r5f_load,
	.device_to_virt = k3_r5f_da_to_va,
};
584
585static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
586{
587 struct k3_r5f_cluster *cluster = core->cluster;
588 u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
Suman Anna9ff29302020-03-10 20:24:29 -0500589 bool lockstep_permitted;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530590 u64 boot_vec = 0;
591 int ret;
592
Sean Andersonf13dc372020-09-15 10:45:08 -0400593 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530594
595 ret = ti_sci_proc_request(&core->tsp);
596 if (ret < 0)
597 return ret;
598
599 /* Do not touch boot vector now. Load will take care of it. */
600 clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;
601
602 ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
603 if (ret)
604 goto out;
605
606 /* Sanity check for Lockstep mode */
Suman Anna9ff29302020-03-10 20:24:29 -0500607 lockstep_permitted = !!(sts &
608 PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
Hari Nagalla89322b82024-05-09 09:20:35 -0500609 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && is_primary_core(core) &&
610 !lockstep_permitted) {
611 dev_err(core->dev, "LockStep mode not permitted on this \
612 device\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530613 ret = -EINVAL;
614 goto out;
615 }
616
617 /* Primary core only configuration */
618 if (is_primary_core(core)) {
619 /* always enable ARM mode */
620 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
621 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
622 set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
Suman Anna9ff29302020-03-10 20:24:29 -0500623 else if (lockstep_permitted)
Lokesh Vutla58633f12019-09-04 16:01:34 +0530624 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
625 }
626
Hari Nagalla89322b82024-05-09 09:20:35 -0500627 if (core->ipdata->is_single_core)
628 set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
629
Lokesh Vutla58633f12019-09-04 16:01:34 +0530630 if (core->atcm_enable)
631 set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
632 else
633 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
634
635 if (core->btcm_enable)
636 set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
637 else
638 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
639
640 if (core->loczrama)
641 set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
642 else
643 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
644
645 ret = k3_r5f_core_halt(core);
646 if (ret)
647 goto out;
648
649 ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
650out:
651 ti_sci_proc_release(&core->tsp);
652 return ret;
653}
654
655static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
656{
657 u32 ids[2];
658 int ret;
659
660 dev_dbg(dev, "%s\n", __func__);
661
662 tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
663 if (IS_ERR(tsp->sci)) {
664 dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
665 return PTR_ERR(tsp->sci);
666 }
667
668 ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
669 if (ret) {
670 dev_err(dev, "Proc IDs not populated %d\n", ret);
671 return ret;
672 }
673
674 tsp->ops = &tsp->sci->ops.proc_ops;
675 tsp->proc_id = ids[0];
676 tsp->host_id = ids[1];
677 tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
678 TI_SCI_RESOURCE_NULL);
679 if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
680 dev_err(dev, "Device ID not populated %d\n", ret);
681 return -ENODEV;
682 }
683
684 return 0;
685}
686
687static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
688{
689 int ret;
690
Sean Andersonf13dc372020-09-15 10:45:08 -0400691 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530692
Suman Annaa45e6db2021-01-26 18:20:56 -0600693 core->atcm_enable = dev_read_u32_default(core->dev, "ti,atcm-enable", 0);
694 core->btcm_enable = dev_read_u32_default(core->dev, "ti,btcm-enable", 1);
695 core->loczrama = dev_read_u32_default(core->dev, "ti,loczrama", 1);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530696
697 ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
698 if (ret)
699 return ret;
700
701 ret = reset_get_by_index(core->dev, 0, &core->reset);
702 if (ret) {
703 dev_err(core->dev, "Reset lines not available: %d\n", ret);
704 return ret;
705 }
706
Suman Anna5d56d252020-08-17 18:15:08 -0500707 core->ipdata = (struct k3_r5f_ip_data *)dev_get_driver_data(core->dev);
708
Lokesh Vutla58633f12019-09-04 16:01:34 +0530709 return 0;
710}
711
712static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
713{
714 static const char * const mem_names[] = {"atcm", "btcm"};
715 struct udevice *dev = core->dev;
716 int i;
717
718 dev_dbg(dev, "%s\n", __func__);
719
720 core->num_mems = ARRAY_SIZE(mem_names);
721 core->mem = calloc(core->num_mems, sizeof(*core->mem));
722 if (!core->mem)
723 return -ENOMEM;
724
725 for (i = 0; i < core->num_mems; i++) {
726 core->mem[i].bus_addr = dev_read_addr_size_name(dev,
727 mem_names[i],
728 (fdt_addr_t *)&core->mem[i].size);
729 if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
730 dev_err(dev, "%s bus address not found\n",
731 mem_names[i]);
732 return -EINVAL;
733 }
734 core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
735 core->mem[i].size,
736 MAP_NOCACHE);
737 if (!strcmp(mem_names[i], "atcm")) {
738 core->mem[i].dev_addr = core->loczrama ?
739 0 : K3_R5_TCM_DEV_ADDR;
740 } else {
741 core->mem[i].dev_addr = core->loczrama ?
742 K3_R5_TCM_DEV_ADDR : 0;
743 }
744
745 dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
746 mem_names[i], &core->mem[i].bus_addr,
747 core->mem[i].size, core->mem[i].cpu_addr,
748 core->mem[i].dev_addr);
749 }
750
751 return 0;
752}
753
Suman Anna5d56d252020-08-17 18:15:08 -0500754/*
755 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
756 * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
757 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
758 * LockStep-mode. The newer revisions of the R5FSS IP maximizes these TCMs by
759 * leveraging the Core1 TCMs as well in certain modes where they would have
760 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
761 * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
762 * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
763 * dts representation reflects this increased size on supported SoCs. The Core0
764 * TCM sizes therefore have to be adjusted to only half the original size in
765 * Split mode.
766 */
767static void k3_r5f_core_adjust_tcm_sizes(struct k3_r5f_core *core)
768{
769 struct k3_r5f_cluster *cluster = core->cluster;
770
771 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
772 return;
773
774 if (!core->ipdata->tcm_is_double)
775 return;
776
777 if (core == cluster->cores[0]) {
778 core->mem[0].size /= 2;
779 core->mem[1].size /= 2;
780
781 dev_dbg(core->dev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
782 core->mem[0].size, core->mem[1].size);
783 }
784}
785
/**
 * k3_r5f_probe() - Basic probe
 * @dev:	corresponding k3 remote processor device
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int k3_r5f_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
	struct k3_r5f_core *core = dev_get_priv(dev);
	bool r_state;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	core->dev = dev;
	ret = k3_r5f_of_to_priv(core);
	if (ret)
		return ret;

	core->cluster = cluster;
	/* Assume Primary core gets probed first */
	if (!cluster->cores[0])
		cluster->cores[0] = core;
	else
		cluster->cores[1] = core;

	ret = k3_r5f_core_of_get_memories(core);
	if (ret) {
		dev_err(dev, "Rproc getting internal memories failed\n");
		return ret;
	}

	/*
	 * The PM functionality is not supported by the firmware during
	 * SPL execution with the separated DM firmware image. The following
	 * piece of code is not compiled in that case.
	 */
	if (!IS_ENABLED(CONFIG_K3_DM_FW)) {
		/* query power state; also tells us if firmware started the core */
		ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci,
						       core->tsp.dev_id,
						       &r_state, &core->in_use);
		if (ret)
			return ret;

		if (core->in_use) {
			dev_info(dev, "Core %d is already in use. No rproc commands work\n",
				 core->tsp.proc_id);
			return 0;
		}

		/* Make sure Local reset is asserted. Redundant? */
		reset_assert(&core->reset);
	}

	ret = k3_r5f_rproc_configure(core);
	if (ret) {
		dev_err(dev, "rproc configure failed %d\n", ret);
		return ret;
	}

	/* halve Core0 TCM sizes in Split mode on double-TCM IP variants */
	k3_r5f_core_adjust_tcm_sizes(core);

	dev_dbg(dev, "Remoteproc successfully probed\n");

	return 0;
}
853
854static int k3_r5f_remove(struct udevice *dev)
855{
856 struct k3_r5f_core *core = dev_get_priv(dev);
857
858 free(core->mem);
859
860 ti_sci_proc_release(&core->tsp);
861
862 return 0;
863}
864
/* AM65x/J721E-class R5FSS IP: plain 32 KB TCM banks, no ECC auto-init */
static const struct k3_r5f_ip_data k3_data = {
	.tcm_is_double = false,
	.tcm_ecc_autoinit = false,
	.is_single_core = false,
};

/* J7200/J721S2-class IP: double-sized Core0 TCMs, hardware ECC auto-init */
static const struct k3_r5f_ip_data j7200_j721s2_data = {
	.tcm_is_double = true,
	.tcm_ecc_autoinit = true,
	.is_single_core = false,
};

/* AM62x-class IP: cluster contains a single R5F core */
static const struct k3_r5f_ip_data am62_data = {
	.tcm_is_double = false,
	.tcm_ecc_autoinit = false,
	.is_single_core = true,
};
882
/* Per-core compatibles, each carrying its IP-variant data */
static const struct udevice_id k3_r5f_rproc_ids[] = {
	{ .compatible = "ti,am654-r5f", .data = (ulong)&k3_data, },
	{ .compatible = "ti,j721e-r5f", .data = (ulong)&k3_data, },
	{ .compatible = "ti,j7200-r5f", .data = (ulong)&j7200_j721s2_data, },
	{ .compatible = "ti,j721s2-r5f", .data = (ulong)&j7200_j721s2_data, },
	{ .compatible = "ti,am62-r5f", .data = (ulong)&am62_data, },
	{}
};
891
/* Driver binding for the individual R5F core devices (REMOTEPROC uclass) */
U_BOOT_DRIVER(k3_r5f_rproc) = {
	.name = "k3_r5f_rproc",
	.of_match = k3_r5f_rproc_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_r5f_rproc_ops,
	.probe = k3_r5f_probe,
	.remove = k3_r5f_remove,
	.priv_auto = sizeof(struct k3_r5f_core),
};
901
/* Probe the R5F sub-system node: determine cluster mode and validate cores */
static int k3_r5f_cluster_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev);

	dev_dbg(dev, "%s\n", __func__);

	cluster->mode = dev_read_u32_default(dev, "ti,cluster-mode",
					     CLUSTER_MODE_LOCKSTEP);

	/* AM62x has a lone core; mode is forced and no core-count check applies */
	if (device_is_compatible(dev, "ti,am62-r5fss")) {
		cluster->mode = CLUSTER_MODE_SINGLECORE;
		return 0;
	}

	if (device_get_child_count(dev) != 2) {
		dev_err(dev, "Invalid number of R5 cores");
		return -EINVAL;
	}

	/* NOTE(review): the ternary only distinguishes lockstep vs split; a DT
	 * value >= 2 would be reported as "lockstep" here — confirm intent */
	dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
		__func__, cluster->mode ? "lockstep" : "split");

	return 0;
}
926
/* Cluster (sub-system) node compatibles */
static const struct udevice_id k3_r5fss_ids[] = {
	{ .compatible = "ti,am654-r5fss"},
	{ .compatible = "ti,j721e-r5fss"},
	{ .compatible = "ti,j7200-r5fss"},
	{ .compatible = "ti,j721s2-r5fss"},
	{ .compatible = "ti,am62-r5fss"},
	{}
};
935
/* Driver binding for the R5F cluster container node (MISC uclass) */
U_BOOT_DRIVER(k3_r5fss) = {
	.name = "k3_r5fss",
	.of_match = k3_r5fss_ids,
	.id = UCLASS_MISC,
	.probe = k3_r5f_cluster_probe,
	.priv_auto = sizeof(struct k3_r5f_cluster),
	/* keep the cluster powered-domain control off by default */
	.flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,
};
Lokesh Vutla58633f12019-09-04 16:01:34 +0530943};