blob: 74bf0433e12ac765f191950e1f0911794376b68c [file] [log] [blame]
Lokesh Vutla58633f12019-09-04 16:01:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments' K3 R5 Remoteproc driver
4 *
Suman Anna5d56d252020-08-17 18:15:08 -05005 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
Lokesh Vutla58633f12019-09-04 16:01:34 +05306 * Lokesh Vutla <lokeshvutla@ti.com>
Suman Anna5d56d252020-08-17 18:15:08 -05007 * Suman Anna <s-anna@ti.com>
Lokesh Vutla58633f12019-09-04 16:01:34 +05308 */
9
Lokesh Vutla58633f12019-09-04 16:01:34 +053010#include <dm.h>
Simon Glass0f2af882020-05-10 11:40:05 -060011#include <log.h>
Simon Glass9bc15642020-02-03 07:36:16 -070012#include <malloc.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053013#include <remoteproc.h>
14#include <errno.h>
15#include <clk.h>
16#include <reset.h>
17#include <asm/io.h>
Simon Glass9bc15642020-02-03 07:36:16 -070018#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070019#include <linux/err.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053020#include <linux/kernel.h>
21#include <linux/soc/ti/ti_sci_protocol.h>
22#include "ti_sci_proc.h"
23
24/*
25 * R5F's view of this address can either be for ATCM or BTCM with the other
26 * at address 0x0 based on loczrama signal.
27 */
28#define K3_R5_TCM_DEV_ADDR 0x41010000
29
30/* R5 TI-SCI Processor Configuration Flags */
31#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
32#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
33#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
34#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
35#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
36#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
37#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
38#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
39#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR 0x10000000
Suman Anna5d56d252020-08-17 18:15:08 -050040/* Available from J7200 SoCs onwards */
41#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
Hari Nagalla89322b82024-05-09 09:20:35 -050042#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000
43
Lokesh Vutla58633f12019-09-04 16:01:34 +053044
45/* R5 TI-SCI Processor Control Flags */
46#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
47
48/* R5 TI-SCI Processor Status Flags */
49#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
50#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
51#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
52#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
53
54#define NR_CORES 2
55
/* Cluster configuration modes supported by the R5F subsystem */
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,		/* both cores run independent applications */
	CLUSTER_MODE_LOCKSTEP,		/* both cores execute in lockstep */
	CLUSTER_MODE_SINGLECPU,		/* presumably Core0-only operation on dual-core IP — confirm against TRM */
	CLUSTER_MODE_SINGLECORE,	/* SoCs with a single-core R5F subsystem (e.g. AM62, see cluster probe) */
};
62
/**
 * struct k3_r5f_ip_data - internal data structure used for IP variations
 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
 * @is_single_core: flag to denote the SoC uses a single-core R5F subsystem
 */
struct k3_r5f_ip_data {
	bool tcm_is_double;
	bool tcm_ecc_autoinit;
	bool is_single_core;
};
73
/**
 * struct k3_r5f_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5f_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
87
/**
 * struct k3_r5f_core - K3 R5 core structure
 * @dev: cached device pointer
 * @cluster: pointer to the parent cluster.
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @ipdata: cached pointer to R5F IP specific feature data
 * @mem: Array of available internal memories
 * @num_mems: Number of available memories
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 * @in_use: flag to tell if the core is already in use.
 */
struct k3_r5f_core {
	struct udevice *dev;
	struct k3_r5f_cluster *cluster;
	struct reset_ctl reset;
	struct ti_sci_proc tsp;
	struct k3_r5f_ip_data *ipdata;
	struct k3_r5f_mem *mem;
	int num_mems;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
	bool in_use;
};
115
/**
 * struct k3_r5f_cluster - K3 R5F Cluster structure
 * @mode: Mode to configure the Cluster - Split or LockStep
 * @cores: Array of pointers to R5 cores within the cluster
 */
struct k3_r5f_cluster {
	enum cluster_mode mode;
	struct k3_r5f_core *cores[NR_CORES];
};
125
126static bool is_primary_core(struct k3_r5f_core *core)
127{
128 return core == core->cluster->cores[0];
129}
130
131static int k3_r5f_proc_request(struct k3_r5f_core *core)
132{
133 struct k3_r5f_cluster *cluster = core->cluster;
134 int i, ret;
135
136 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
137 for (i = 0; i < NR_CORES; i++) {
138 ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
139 if (ret)
140 goto proc_release;
141 }
142 } else {
143 ret = ti_sci_proc_request(&core->tsp);
144 }
145
146 return 0;
147
148proc_release:
149 while (i >= 0) {
150 ti_sci_proc_release(&cluster->cores[i]->tsp);
151 i--;
152 }
153 return ret;
154}
155
156static void k3_r5f_proc_release(struct k3_r5f_core *core)
157{
158 struct k3_r5f_cluster *cluster = core->cluster;
159 int i;
160
161 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
162 for (i = 0; i < NR_CORES; i++)
163 ti_sci_proc_release(&cluster->cores[i]->tsp);
164 else
165 ti_sci_proc_release(&core->tsp);
166}
167
/**
 * k3_r5f_lockstep_release() - Power up and release resets on the whole cluster
 * @cluster: cluster whose cores are to be released
 *
 * Powers on the PSC module of every core and then deasserts the local resets,
 * iterating from the highest core index down to Core0 in both passes. On
 * failure, any steps already performed are undone in reverse.
 *
 * Return: 0 on success, else the first TI-SCI/reset error encountered.
 */
static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
{
	int ret, c;

	debug("%s\n", __func__);

	/* power on the PSC module of each core, highest index first */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
		if (ret)
			goto unroll_module_reset;
	}

	/* deassert local reset on all applicable cores */
	for (c = NR_CORES - 1; c >= 0; c--) {
		ret = reset_deassert(&cluster->cores[c]->reset);
		if (ret)
			goto unroll_local_reset;
	}

	return 0;

unroll_local_reset:
	/* re-assert the local resets handled so far (indices c..NR_CORES-1) */
	while (c < NR_CORES) {
		reset_assert(&cluster->cores[c]->reset);
		c++;
	}
	/* all cores were powered on at this point, so power all of them off */
	c = 0;
unroll_module_reset:
	/* power off the modules turned on so far (indices c..NR_CORES-1) */
	while (c < NR_CORES) {
		ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
		c++;
	}

	return ret;
}
203
204static int k3_r5f_split_release(struct k3_r5f_core *core)
205{
206 int ret;
207
Sean Andersonf13dc372020-09-15 10:45:08 -0400208 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530209
210 ret = ti_sci_proc_power_domain_on(&core->tsp);
211 if (ret) {
212 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
213 ret);
214 return ret;
215 }
216
217 ret = reset_deassert(&core->reset);
218 if (ret) {
219 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
220 ret);
221 if (ti_sci_proc_power_domain_off(&core->tsp))
222 dev_warn(core->dev, "module-reset assert back failed\n");
223 }
224
225 return ret;
226}
227
228static int k3_r5f_prepare(struct udevice *dev)
229{
230 struct k3_r5f_core *core = dev_get_priv(dev);
231 struct k3_r5f_cluster *cluster = core->cluster;
232 int ret = 0;
233
234 dev_dbg(dev, "%s\n", __func__);
235
236 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
237 ret = k3_r5f_lockstep_release(cluster);
238 else
239 ret = k3_r5f_split_release(core);
240
241 if (ret)
242 dev_err(dev, "Unable to enable cores for TCM loading %d\n",
243 ret);
244
245 return ret;
246}
247
248static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
249{
250 struct k3_r5f_cluster *cluster = core->cluster;
251
252 if (core->in_use) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400253 dev_err(core->dev,
254 "Invalid op: Trying to load/start on already running core %d\n",
Lokesh Vutla58633f12019-09-04 16:01:34 +0530255 core->tsp.proc_id);
256 return -EINVAL;
257 }
258
259 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400260 dev_err(core->dev,
261 "Secondary core is not probed in this cluster\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530262 return -EAGAIN;
263 }
264
265 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400266 dev_err(core->dev,
267 "Invalid op: Trying to start secondary core %d in lockstep mode\n",
Lokesh Vutla58633f12019-09-04 16:01:34 +0530268 core->tsp.proc_id);
269 return -EINVAL;
270 }
271
272 if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
273 if (!core->cluster->cores[0]->in_use) {
Sean Andersonf13dc372020-09-15 10:45:08 -0400274 dev_err(core->dev,
275 "Invalid seq: Enable primary core before loading secondary core\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530276 return -EINVAL;
277 }
278 }
279
280 return 0;
281}
282
Suman Anna5d56d252020-08-17 18:15:08 -0500283/* Zero out TCMs so that ECC can be effective on all TCM addresses */
284void k3_r5f_init_tcm_memories(struct k3_r5f_core *core, bool auto_inited)
285{
286 if (core->ipdata->tcm_ecc_autoinit && auto_inited)
287 return;
288
289 if (core->atcm_enable)
290 memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
291 if (core->btcm_enable)
292 memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
293}
294
Lokesh Vutla58633f12019-09-04 16:01:34 +0530295/**
296 * k3_r5f_load() - Load up the Remote processor image
297 * @dev: rproc device pointer
298 * @addr: Address at which image is available
299 * @size: size of the image
300 *
301 * Return: 0 if all goes good, else appropriate error message.
302 */
303static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
304{
305 struct k3_r5f_core *core = dev_get_priv(dev);
Suman Anna5d56d252020-08-17 18:15:08 -0500306 u64 boot_vector;
307 u32 ctrl, sts, cfg = 0;
308 bool mem_auto_init;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530309 int ret;
310
311 dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
312
313 ret = k3_r5f_core_sanity_check(core);
314 if (ret)
315 return ret;
316
317 ret = k3_r5f_proc_request(core);
318 if (ret)
319 return ret;
Suman Anna5d56d252020-08-17 18:15:08 -0500320
321 ret = ti_sci_proc_get_status(&core->tsp, &boot_vector, &cfg, &ctrl,
322 &sts);
323 if (ret)
324 return ret;
325 mem_auto_init = !(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530326
327 ret = k3_r5f_prepare(dev);
328 if (ret) {
329 dev_err(dev, "R5f prepare failed for core %d\n",
330 core->tsp.proc_id);
331 goto proc_release;
332 }
333
Suman Anna5d56d252020-08-17 18:15:08 -0500334 k3_r5f_init_tcm_memories(core, mem_auto_init);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530335
336 ret = rproc_elf_load_image(dev, addr, size);
337 if (ret < 0) {
338 dev_err(dev, "Loading elf failedi %d\n", ret);
339 goto proc_release;
340 }
341
342 boot_vector = rproc_elf_get_boot_addr(dev, addr);
343
Suman Anna5d56d252020-08-17 18:15:08 -0500344 dev_dbg(dev, "%s: Boot vector = 0x%llx\n", __func__, boot_vector);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530345
346 ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);
347
348proc_release:
349 k3_r5f_proc_release(core);
350
351 return ret;
352}
353
354static int k3_r5f_core_halt(struct k3_r5f_core *core)
355{
356 int ret;
357
358 ret = ti_sci_proc_set_control(&core->tsp,
359 PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
360 if (ret)
361 dev_err(core->dev, "Core %d failed to stop\n",
362 core->tsp.proc_id);
363
364 return ret;
365}
366
367static int k3_r5f_core_run(struct k3_r5f_core *core)
368{
369 int ret;
370
371 ret = ti_sci_proc_set_control(&core->tsp,
372 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
373 if (ret) {
374 dev_err(core->dev, "Core %d failed to start\n",
375 core->tsp.proc_id);
376 return ret;
377 }
378
379 return 0;
380}
381
/**
 * k3_r5f_start() - Start the remote processor
 * @dev:	rproc device pointer
 *
 * In lockstep mode (primary core only) both cores are un-halted, highest
 * core index first; in the other modes just the given core is run. Cores
 * already running are halted again if a later core fails to start.
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_r5f_start(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret, c;

	dev_dbg(dev, "%s\n", __func__);

	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			/* un-halt Core1 before Core0 */
			for (c = NR_CORES - 1; c >= 0; c--) {
				ret = k3_r5f_core_run(cluster->cores[c]);
				if (ret)
					goto unroll_core_run;
			}
		} else {
			dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
				core->tsp.proc_id);
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		ret = k3_r5f_core_run(core);
		if (ret)
			goto proc_release;
	}

	core->in_use = true;

	k3_r5f_proc_release(core);
	return 0;

unroll_core_run:
	/* halt the cores handled so far (indices c..NR_CORES-1) */
	while (c < NR_CORES) {
		k3_r5f_core_halt(cluster->cores[c]);
		c++;
	}
proc_release:
	k3_r5f_proc_release(core);

	return ret;
}
438
439static int k3_r5f_split_reset(struct k3_r5f_core *core)
440{
441 int ret;
442
Sean Andersonf13dc372020-09-15 10:45:08 -0400443 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530444
445 if (reset_assert(&core->reset))
446 ret = -EINVAL;
447
448 if (ti_sci_proc_power_domain_off(&core->tsp))
449 ret = -EINVAL;
450
451 return ret;
452}
453
454static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
455{
456 int ret = 0, c;
457
Sean Andersonf13dc372020-09-15 10:45:08 -0400458 debug("%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530459
460 for (c = 0; c < NR_CORES; c++)
461 if (reset_assert(&cluster->cores[c]->reset))
462 ret = -EINVAL;
463
464 /* disable PSC modules on all applicable cores */
465 for (c = 0; c < NR_CORES; c++)
466 if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
467 ret = -EINVAL;
468
469 return ret;
470}
471
472static int k3_r5f_unprepare(struct udevice *dev)
473{
474 struct k3_r5f_core *core = dev_get_priv(dev);
475 struct k3_r5f_cluster *cluster = core->cluster;
476 int ret;
477
478 dev_dbg(dev, "%s\n", __func__);
479
480 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
481 if (is_primary_core(core))
482 ret = k3_r5f_lockstep_reset(cluster);
483 } else {
484 ret = k3_r5f_split_reset(core);
485 }
486
487 if (ret)
488 dev_warn(dev, "Unable to enable cores for TCM loading %d\n",
489 ret);
490
491 return 0;
492}
493
494static int k3_r5f_stop(struct udevice *dev)
495{
496 struct k3_r5f_core *core = dev_get_priv(dev);
497 struct k3_r5f_cluster *cluster = core->cluster;
498 int c, ret;
499
500 dev_dbg(dev, "%s\n", __func__);
501
502 ret = k3_r5f_proc_request(core);
503 if (ret)
504 return ret;
505
506 core->in_use = false;
507
508 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
509 if (is_primary_core(core)) {
510 for (c = 0; c < NR_CORES; c++)
511 k3_r5f_core_halt(cluster->cores[c]);
512 } else {
513 dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
514 ret = -EINVAL;
515 goto proc_release;
516 }
517 } else {
518 k3_r5f_core_halt(core);
519 }
520
521 ret = k3_r5f_unprepare(dev);
522proc_release:
523 k3_r5f_proc_release(core);
524 return ret;
525}
526
527static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
528{
529 struct k3_r5f_core *core = dev_get_priv(dev);
530 void __iomem *va = NULL;
531 phys_addr_t bus_addr;
532 u32 dev_addr, offset;
533 ulong mem_size;
534 int i;
535
536 dev_dbg(dev, "%s\n", __func__);
537
538 if (size <= 0)
539 return NULL;
540
541 for (i = 0; i < core->num_mems; i++) {
542 bus_addr = core->mem[i].bus_addr;
543 dev_addr = core->mem[i].dev_addr;
544 mem_size = core->mem[i].size;
545
546 if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
547 offset = da - bus_addr;
548 va = core->mem[i].cpu_addr + offset;
549 return (__force void *)va;
550 }
551
552 if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
553 offset = da - dev_addr;
554 va = core->mem[i].cpu_addr + offset;
555 return (__force void *)va;
556 }
557 }
558
559 /* Assume it is DDR region and return da */
560 return map_physmem(da, size, MAP_NOCACHE);
561}
562
/* No dedicated init step needed; probe already configures the core */
static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}
567
/* Reset is handled through stop/unprepare; nothing to do here */
static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}
572
/* Remoteproc uclass operations for the K3 R5F cores */
static const struct dm_rproc_ops k3_r5f_rproc_ops = {
	.init = k3_r5f_init,
	.reset = k3_r5f_reset,
	.start = k3_r5f_start,
	.stop = k3_r5f_stop,
	.load = k3_r5f_load,
	.device_to_virt = k3_r5f_da_to_va,
};
581
582static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
583{
584 struct k3_r5f_cluster *cluster = core->cluster;
585 u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
Suman Anna9ff29302020-03-10 20:24:29 -0500586 bool lockstep_permitted;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530587 u64 boot_vec = 0;
588 int ret;
589
Sean Andersonf13dc372020-09-15 10:45:08 -0400590 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530591
592 ret = ti_sci_proc_request(&core->tsp);
593 if (ret < 0)
594 return ret;
595
596 /* Do not touch boot vector now. Load will take care of it. */
597 clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;
598
599 ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
600 if (ret)
601 goto out;
602
603 /* Sanity check for Lockstep mode */
Suman Anna9ff29302020-03-10 20:24:29 -0500604 lockstep_permitted = !!(sts &
605 PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
Hari Nagalla89322b82024-05-09 09:20:35 -0500606 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && is_primary_core(core) &&
607 !lockstep_permitted) {
608 dev_err(core->dev, "LockStep mode not permitted on this \
609 device\n");
Lokesh Vutla58633f12019-09-04 16:01:34 +0530610 ret = -EINVAL;
611 goto out;
612 }
613
614 /* Primary core only configuration */
615 if (is_primary_core(core)) {
616 /* always enable ARM mode */
617 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
618 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
619 set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
Suman Anna9ff29302020-03-10 20:24:29 -0500620 else if (lockstep_permitted)
Lokesh Vutla58633f12019-09-04 16:01:34 +0530621 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
622 }
623
Hari Nagalla89322b82024-05-09 09:20:35 -0500624 if (core->ipdata->is_single_core)
625 set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
626
Lokesh Vutla58633f12019-09-04 16:01:34 +0530627 if (core->atcm_enable)
628 set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
629 else
630 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
631
632 if (core->btcm_enable)
633 set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
634 else
635 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
636
637 if (core->loczrama)
638 set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
639 else
640 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
641
642 ret = k3_r5f_core_halt(core);
643 if (ret)
644 goto out;
645
646 ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
647out:
648 ti_sci_proc_release(&core->tsp);
649 return ret;
650}
651
652static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
653{
654 u32 ids[2];
655 int ret;
656
657 dev_dbg(dev, "%s\n", __func__);
658
659 tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
660 if (IS_ERR(tsp->sci)) {
661 dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
662 return PTR_ERR(tsp->sci);
663 }
664
665 ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
666 if (ret) {
667 dev_err(dev, "Proc IDs not populated %d\n", ret);
668 return ret;
669 }
670
671 tsp->ops = &tsp->sci->ops.proc_ops;
672 tsp->proc_id = ids[0];
673 tsp->host_id = ids[1];
674 tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
675 TI_SCI_RESOURCE_NULL);
676 if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
677 dev_err(dev, "Device ID not populated %d\n", ret);
678 return -ENODEV;
679 }
680
681 return 0;
682}
683
684static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
685{
686 int ret;
687
Sean Andersonf13dc372020-09-15 10:45:08 -0400688 dev_dbg(core->dev, "%s\n", __func__);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530689
Suman Annaa45e6db2021-01-26 18:20:56 -0600690 core->atcm_enable = dev_read_u32_default(core->dev, "ti,atcm-enable", 0);
691 core->btcm_enable = dev_read_u32_default(core->dev, "ti,btcm-enable", 1);
692 core->loczrama = dev_read_u32_default(core->dev, "ti,loczrama", 1);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530693
694 ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
695 if (ret)
696 return ret;
697
698 ret = reset_get_by_index(core->dev, 0, &core->reset);
699 if (ret) {
700 dev_err(core->dev, "Reset lines not available: %d\n", ret);
701 return ret;
702 }
703
Suman Anna5d56d252020-08-17 18:15:08 -0500704 core->ipdata = (struct k3_r5f_ip_data *)dev_get_driver_data(core->dev);
705
Lokesh Vutla58633f12019-09-04 16:01:34 +0530706 return 0;
707}
708
/**
 * k3_r5f_core_of_get_memories() - Discover the core's TCM regions from the DT
 * @core: core whose internal memories are parsed
 *
 * Allocates and fills core->mem with the ATCM and BTCM bus addresses,
 * sizes, CPU mappings and R5F-view device addresses; which TCM appears at
 * device address 0x0 depends on the loczrama setting.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL if a region
 * is missing from the DT.
 */
static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
{
	static const char * const mem_names[] = {"atcm", "btcm"};
	struct udevice *dev = core->dev;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	core->num_mems = ARRAY_SIZE(mem_names);
	core->mem = calloc(core->num_mems, sizeof(*core->mem));
	if (!core->mem)
		return -ENOMEM;

	for (i = 0; i < core->num_mems; i++) {
		/* reads reg entry by name; also yields the region size */
		core->mem[i].bus_addr = dev_read_addr_size_name(dev,
								mem_names[i],
					(fdt_addr_t *)&core->mem[i].size);
		if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
			dev_err(dev, "%s bus address not found\n",
				mem_names[i]);
			return -EINVAL;
		}
		core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
						    core->mem[i].size,
						    MAP_NOCACHE);
		/* loczrama selects whether ATCM or BTCM sits at R5F address 0 */
		if (!strcmp(mem_names[i], "atcm")) {
			core->mem[i].dev_addr = core->loczrama ?
							0 : K3_R5_TCM_DEV_ADDR;
		} else {
			core->mem[i].dev_addr = core->loczrama ?
							K3_R5_TCM_DEV_ADDR : 0;
		}

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
			mem_names[i], &core->mem[i].bus_addr,
			core->mem[i].size, core->mem[i].cpu_addr,
			core->mem[i].dev_addr);
	}

	return 0;
}
750
Suman Anna5d56d252020-08-17 18:15:08 -0500751/*
752 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
753 * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
754 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
755 * LockStep-mode. The newer revisions of the R5FSS IP maximizes these TCMs by
756 * leveraging the Core1 TCMs as well in certain modes where they would have
757 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
758 * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
759 * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
760 * dts representation reflects this increased size on supported SoCs. The Core0
761 * TCM sizes therefore have to be adjusted to only half the original size in
762 * Split mode.
763 */
764static void k3_r5f_core_adjust_tcm_sizes(struct k3_r5f_core *core)
765{
766 struct k3_r5f_cluster *cluster = core->cluster;
767
768 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
769 return;
770
771 if (!core->ipdata->tcm_is_double)
772 return;
773
774 if (core == cluster->cores[0]) {
775 core->mem[0].size /= 2;
776 core->mem[1].size /= 2;
777
778 dev_dbg(core->dev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
779 core->mem[0].size, core->mem[1].size);
780 }
781}
782
/**
 * k3_r5f_probe() - Basic probe
 * @dev:	corresponding k3 remote processor device
 *
 * Parses the DT configuration, registers the core in its cluster slot
 * (primary core is assumed to probe first), maps the TCMs and applies the
 * initial TI-SCI boot configuration.
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int k3_r5f_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
	struct k3_r5f_core *core = dev_get_priv(dev);
	bool r_state;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	core->dev = dev;
	ret = k3_r5f_of_to_priv(core);
	if (ret)
		return ret;

	core->cluster = cluster;
	/* Assume Primary core gets probed first */
	if (!cluster->cores[0])
		cluster->cores[0] = core;
	else
		cluster->cores[1] = core;

	ret = k3_r5f_core_of_get_memories(core);
	if (ret) {
		dev_err(dev, "Rproc getting internal memories failed\n");
		return ret;
	}

	/*
	 * The PM functionality is not supported by the firmware during
	 * SPL execution with the separated DM firmware image. The following
	 * piece of code is not compiled in that case.
	 */
	if (!IS_ENABLED(CONFIG_K3_DM_FW)) {
		/* query device state; in_use reflects another host's hold */
		ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci,
						       core->tsp.dev_id,
						       &r_state, &core->in_use);
		if (ret)
			return ret;

		if (core->in_use) {
			dev_info(dev, "Core %d is already in use. No rproc commands work\n",
				 core->tsp.proc_id);
			return 0;
		}

		/* Make sure Local reset is asserted. Redundant? */
		reset_assert(&core->reset);
	}

	ret = k3_r5f_rproc_configure(core);
	if (ret) {
		dev_err(dev, "rproc configure failed %d\n", ret);
		return ret;
	}

	k3_r5f_core_adjust_tcm_sizes(core);

	dev_dbg(dev, "Remoteproc successfully probed\n");

	return 0;
}
850
851static int k3_r5f_remove(struct udevice *dev)
852{
853 struct k3_r5f_core *core = dev_get_priv(dev);
854
855 free(core->mem);
856
857 ti_sci_proc_release(&core->tsp);
858
859 return 0;
860}
861
/* AM65x/J721E: single-bank TCMs, no ECC auto-init, dual-core cluster */
static const struct k3_r5f_ip_data k3_data = {
	.tcm_is_double = false,
	.tcm_ecc_autoinit = false,
	.is_single_core = false,
};

/* J7200/J721S2: doubled Core0 TCMs and hardware ECC auto-init */
static const struct k3_r5f_ip_data j7200_j721s2_data = {
	.tcm_is_double = true,
	.tcm_ecc_autoinit = true,
	.is_single_core = false,
};

/* AM62: single-core R5F subsystem */
static const struct k3_r5f_ip_data am62_data = {
	.tcm_is_double = false,
	.tcm_ecc_autoinit = false,
	.is_single_core = true,
};
879
/* Per-core compatibles, each tied to its IP feature table */
static const struct udevice_id k3_r5f_rproc_ids[] = {
	{ .compatible = "ti,am654-r5f", .data = (ulong)&k3_data, },
	{ .compatible = "ti,j721e-r5f", .data = (ulong)&k3_data, },
	{ .compatible = "ti,j7200-r5f", .data = (ulong)&j7200_j721s2_data, },
	{ .compatible = "ti,j721s2-r5f", .data = (ulong)&j7200_j721s2_data, },
	{ .compatible = "ti,am62-r5f", .data = (ulong)&am62_data, },
	{}
};
888
/* Per-core remoteproc driver */
U_BOOT_DRIVER(k3_r5f_rproc) = {
	.name = "k3_r5f_rproc",
	.of_match = k3_r5f_rproc_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_r5f_rproc_ops,
	.probe = k3_r5f_probe,
	.remove = k3_r5f_remove,
	.priv_auto = sizeof(struct k3_r5f_core),
};
898
899static int k3_r5f_cluster_probe(struct udevice *dev)
900{
901 struct k3_r5f_cluster *cluster = dev_get_priv(dev);
902
903 dev_dbg(dev, "%s\n", __func__);
904
Suman Annaa45e6db2021-01-26 18:20:56 -0600905 cluster->mode = dev_read_u32_default(dev, "ti,cluster-mode",
Lokesh Vutla58633f12019-09-04 16:01:34 +0530906 CLUSTER_MODE_LOCKSTEP);
907
Hari Nagalla89322b82024-05-09 09:20:35 -0500908 if (device_is_compatible(dev, "ti,am62-r5fss")) {
909 cluster->mode = CLUSTER_MODE_SINGLECORE;
910 return 0;
911 }
912
Lokesh Vutla58633f12019-09-04 16:01:34 +0530913 if (device_get_child_count(dev) != 2) {
914 dev_err(dev, "Invalid number of R5 cores");
915 return -EINVAL;
916 }
917
918 dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
919 __func__, cluster->mode ? "lockstep" : "split");
920
921 return 0;
922}
923
/* Cluster (subsystem) compatibles */
static const struct udevice_id k3_r5fss_ids[] = {
	{ .compatible = "ti,am654-r5fss"},
	{ .compatible = "ti,j721e-r5fss"},
	{ .compatible = "ti,j7200-r5fss"},
	{ .compatible = "ti,j721s2-r5fss"},
	{ .compatible = "ti,am62-r5fss"},
	{}
};
932
/* Cluster driver: parent MISC device of the per-core rproc devices */
U_BOOT_DRIVER(k3_r5fss) = {
	.name = "k3_r5fss",
	.of_match = k3_r5fss_ids,
	.id = UCLASS_MISC,
	.probe = k3_r5f_cluster_probe,
	.priv_auto = sizeof(struct k3_r5f_cluster),
	/* power-domain handling is done explicitly via TI-SCI, not by DM */
	.flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,
};