blob: 8e21a38be7f11396f84ee69dc5bd8a8ca82cc9aa [file] [log] [blame]
Lokesh Vutla58633f12019-09-04 16:01:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments' K3 R5 Remoteproc driver
4 *
Suman Anna5d56d252020-08-17 18:15:08 -05005 * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
Lokesh Vutla58633f12019-09-04 16:01:34 +05306 * Lokesh Vutla <lokeshvutla@ti.com>
Suman Anna5d56d252020-08-17 18:15:08 -05007 * Suman Anna <s-anna@ti.com>
Lokesh Vutla58633f12019-09-04 16:01:34 +05308 */
9
10#include <common.h>
11#include <dm.h>
Simon Glass0f2af882020-05-10 11:40:05 -060012#include <log.h>
Simon Glass9bc15642020-02-03 07:36:16 -070013#include <malloc.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053014#include <remoteproc.h>
15#include <errno.h>
16#include <clk.h>
17#include <reset.h>
18#include <asm/io.h>
Simon Glass9bc15642020-02-03 07:36:16 -070019#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070020#include <linux/err.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053021#include <linux/kernel.h>
22#include <linux/soc/ti/ti_sci_protocol.h>
23#include "ti_sci_proc.h"
24
25/*
26 * R5F's view of this address can either be for ATCM or BTCM with the other
27 * at address 0x0 based on loczrama signal.
28 */
29#define K3_R5_TCM_DEV_ADDR 0x41010000
30
31/* R5 TI-SCI Processor Configuration Flags */
32#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
33#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
34#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
35#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
36#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
37#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
38#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
39#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
40#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR 0x10000000
Suman Anna5d56d252020-08-17 18:15:08 -050041/* Available from J7200 SoCs onwards */
42#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
Lokesh Vutla58633f12019-09-04 16:01:34 +053043
44/* R5 TI-SCI Processor Control Flags */
45#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
46
47/* R5 TI-SCI Processor Status Flags */
48#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
49#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
50#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
51#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
52
53#define NR_CORES 2
54
/* Operational modes of an R5FSS cluster */
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,		/* both cores run independently */
	CLUSTER_MODE_LOCKSTEP,		/* both cores execute in lockstep */
};
59
60/**
Suman Anna5d56d252020-08-17 18:15:08 -050061 * struct k3_r5f_ip_data - internal data structure used for IP variations
62 * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
63 * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
64 */
65struct k3_r5f_ip_data {
66 bool tcm_is_double;
67 bool tcm_ecc_autoinit;
68};
69
70/**
Lokesh Vutla58633f12019-09-04 16:01:34 +053071 * struct k3_r5_mem - internal memory structure
72 * @cpu_addr: MPU virtual address of the memory region
73 * @bus_addr: Bus address used to access the memory region
74 * @dev_addr: Device address from remoteproc view
75 * @size: Size of the memory region
76 */
77struct k3_r5f_mem {
78 void __iomem *cpu_addr;
79 phys_addr_t bus_addr;
80 u32 dev_addr;
81 size_t size;
82};
83
84/**
85 * struct k3_r5f_core - K3 R5 core structure
86 * @dev: cached device pointer
87 * @cluster: pointer to the parent cluster.
88 * @reset: reset control handle
89 * @tsp: TI-SCI processor control handle
Suman Anna5d56d252020-08-17 18:15:08 -050090 * @ipdata: cached pointer to R5F IP specific feature data
Lokesh Vutla58633f12019-09-04 16:01:34 +053091 * @mem: Array of available internal memories
92 * @num_mem: Number of available memories
93 * @atcm_enable: flag to control ATCM enablement
94 * @btcm_enable: flag to control BTCM enablement
95 * @loczrama: flag to dictate which TCM is at device address 0x0
96 * @in_use: flag to tell if the core is already in use.
97 */
98struct k3_r5f_core {
99 struct udevice *dev;
100 struct k3_r5f_cluster *cluster;
101 struct reset_ctl reset;
102 struct ti_sci_proc tsp;
Suman Anna5d56d252020-08-17 18:15:08 -0500103 struct k3_r5f_ip_data *ipdata;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530104 struct k3_r5f_mem *mem;
105 int num_mems;
106 u32 atcm_enable;
107 u32 btcm_enable;
108 u32 loczrama;
109 bool in_use;
110};
111
112/**
113 * struct k3_r5f_cluster - K3 R5F Cluster structure
114 * @mode: Mode to configure the Cluster - Split or LockStep
115 * @cores: Array of pointers to R5 cores within the cluster
116 */
117struct k3_r5f_cluster {
118 enum cluster_mode mode;
119 struct k3_r5f_core *cores[NR_CORES];
120};
121
122static bool is_primary_core(struct k3_r5f_core *core)
123{
124 return core == core->cluster->cores[0];
125}
126
127static int k3_r5f_proc_request(struct k3_r5f_core *core)
128{
129 struct k3_r5f_cluster *cluster = core->cluster;
130 int i, ret;
131
132 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
133 for (i = 0; i < NR_CORES; i++) {
134 ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
135 if (ret)
136 goto proc_release;
137 }
138 } else {
139 ret = ti_sci_proc_request(&core->tsp);
140 }
141
142 return 0;
143
144proc_release:
145 while (i >= 0) {
146 ti_sci_proc_release(&cluster->cores[i]->tsp);
147 i--;
148 }
149 return ret;
150}
151
152static void k3_r5f_proc_release(struct k3_r5f_core *core)
153{
154 struct k3_r5f_cluster *cluster = core->cluster;
155 int i;
156
157 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
158 for (i = 0; i < NR_CORES; i++)
159 ti_sci_proc_release(&cluster->cores[i]->tsp);
160 else
161 ti_sci_proc_release(&core->tsp);
162}
163
164static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
165{
166 int ret, c;
167
168 dev_dbg(dev, "%s\n", __func__);
169
170 for (c = NR_CORES - 1; c >= 0; c--) {
171 ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
172 if (ret)
173 goto unroll_module_reset;
174 }
175
176 /* deassert local reset on all applicable cores */
177 for (c = NR_CORES - 1; c >= 0; c--) {
178 ret = reset_deassert(&cluster->cores[c]->reset);
179 if (ret)
180 goto unroll_local_reset;
181 }
182
183 return 0;
184
185unroll_local_reset:
186 while (c < NR_CORES) {
187 reset_assert(&cluster->cores[c]->reset);
188 c++;
189 }
190 c = 0;
191unroll_module_reset:
192 while (c < NR_CORES) {
193 ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
194 c++;
195 }
196
197 return ret;
198}
199
200static int k3_r5f_split_release(struct k3_r5f_core *core)
201{
202 int ret;
203
204 dev_dbg(dev, "%s\n", __func__);
205
206 ret = ti_sci_proc_power_domain_on(&core->tsp);
207 if (ret) {
208 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
209 ret);
210 return ret;
211 }
212
213 ret = reset_deassert(&core->reset);
214 if (ret) {
215 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
216 ret);
217 if (ti_sci_proc_power_domain_off(&core->tsp))
218 dev_warn(core->dev, "module-reset assert back failed\n");
219 }
220
221 return ret;
222}
223
224static int k3_r5f_prepare(struct udevice *dev)
225{
226 struct k3_r5f_core *core = dev_get_priv(dev);
227 struct k3_r5f_cluster *cluster = core->cluster;
228 int ret = 0;
229
230 dev_dbg(dev, "%s\n", __func__);
231
232 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
233 ret = k3_r5f_lockstep_release(cluster);
234 else
235 ret = k3_r5f_split_release(core);
236
237 if (ret)
238 dev_err(dev, "Unable to enable cores for TCM loading %d\n",
239 ret);
240
241 return ret;
242}
243
244static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
245{
246 struct k3_r5f_cluster *cluster = core->cluster;
247
248 if (core->in_use) {
249 dev_err(dev, "Invalid op: Trying to load/start on already running core %d\n",
250 core->tsp.proc_id);
251 return -EINVAL;
252 }
253
254 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
255 printf("Secondary core is not probed in this cluster\n");
256 return -EAGAIN;
257 }
258
259 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
260 dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
261 core->tsp.proc_id);
262 return -EINVAL;
263 }
264
265 if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
266 if (!core->cluster->cores[0]->in_use) {
267 dev_err(dev, "Invalid seq: Enable primary core before loading secondary core\n");
268 return -EINVAL;
269 }
270 }
271
272 return 0;
273}
274
Suman Anna5d56d252020-08-17 18:15:08 -0500275/* Zero out TCMs so that ECC can be effective on all TCM addresses */
276void k3_r5f_init_tcm_memories(struct k3_r5f_core *core, bool auto_inited)
277{
278 if (core->ipdata->tcm_ecc_autoinit && auto_inited)
279 return;
280
281 if (core->atcm_enable)
282 memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
283 if (core->btcm_enable)
284 memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
285}
286
Lokesh Vutla58633f12019-09-04 16:01:34 +0530287/**
288 * k3_r5f_load() - Load up the Remote processor image
289 * @dev: rproc device pointer
290 * @addr: Address at which image is available
291 * @size: size of the image
292 *
293 * Return: 0 if all goes good, else appropriate error message.
294 */
295static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
296{
297 struct k3_r5f_core *core = dev_get_priv(dev);
Suman Anna5d56d252020-08-17 18:15:08 -0500298 u64 boot_vector;
299 u32 ctrl, sts, cfg = 0;
300 bool mem_auto_init;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530301 int ret;
302
303 dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
304
305 ret = k3_r5f_core_sanity_check(core);
306 if (ret)
307 return ret;
308
309 ret = k3_r5f_proc_request(core);
310 if (ret)
311 return ret;
Suman Anna5d56d252020-08-17 18:15:08 -0500312
313 ret = ti_sci_proc_get_status(&core->tsp, &boot_vector, &cfg, &ctrl,
314 &sts);
315 if (ret)
316 return ret;
317 mem_auto_init = !(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530318
319 ret = k3_r5f_prepare(dev);
320 if (ret) {
321 dev_err(dev, "R5f prepare failed for core %d\n",
322 core->tsp.proc_id);
323 goto proc_release;
324 }
325
Suman Anna5d56d252020-08-17 18:15:08 -0500326 k3_r5f_init_tcm_memories(core, mem_auto_init);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530327
328 ret = rproc_elf_load_image(dev, addr, size);
329 if (ret < 0) {
330 dev_err(dev, "Loading elf failedi %d\n", ret);
331 goto proc_release;
332 }
333
334 boot_vector = rproc_elf_get_boot_addr(dev, addr);
335
Suman Anna5d56d252020-08-17 18:15:08 -0500336 dev_dbg(dev, "%s: Boot vector = 0x%llx\n", __func__, boot_vector);
Lokesh Vutla58633f12019-09-04 16:01:34 +0530337
338 ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);
339
340proc_release:
341 k3_r5f_proc_release(core);
342
343 return ret;
344}
345
346static int k3_r5f_core_halt(struct k3_r5f_core *core)
347{
348 int ret;
349
350 ret = ti_sci_proc_set_control(&core->tsp,
351 PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
352 if (ret)
353 dev_err(core->dev, "Core %d failed to stop\n",
354 core->tsp.proc_id);
355
356 return ret;
357}
358
359static int k3_r5f_core_run(struct k3_r5f_core *core)
360{
361 int ret;
362
363 ret = ti_sci_proc_set_control(&core->tsp,
364 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
365 if (ret) {
366 dev_err(core->dev, "Core %d failed to start\n",
367 core->tsp.proc_id);
368 return ret;
369 }
370
371 return 0;
372}
373
374/**
375 * k3_r5f_start() - Start the remote processor
376 * @dev: rproc device pointer
377 *
378 * Return: 0 if all went ok, else return appropriate error
379 */
380static int k3_r5f_start(struct udevice *dev)
381{
382 struct k3_r5f_core *core = dev_get_priv(dev);
383 struct k3_r5f_cluster *cluster = core->cluster;
384 int ret, c;
385
386 dev_dbg(dev, "%s\n", __func__);
387
388 ret = k3_r5f_core_sanity_check(core);
389 if (ret)
390 return ret;
391
392 ret = k3_r5f_proc_request(core);
393 if (ret)
394 return ret;
395
396 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
397 if (is_primary_core(core)) {
398 for (c = NR_CORES - 1; c >= 0; c--) {
399 ret = k3_r5f_core_run(cluster->cores[c]);
400 if (ret)
401 goto unroll_core_run;
402 }
403 } else {
404 dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
405 core->tsp.proc_id);
406 ret = -EINVAL;
407 goto proc_release;
408 }
409 } else {
410 ret = k3_r5f_core_run(core);
411 if (ret)
412 goto proc_release;
413 }
414
415 core->in_use = true;
416
417 k3_r5f_proc_release(core);
418 return 0;
419
420unroll_core_run:
421 while (c < NR_CORES) {
422 k3_r5f_core_halt(cluster->cores[c]);
423 c++;
424 }
425proc_release:
426 k3_r5f_proc_release(core);
427
428 return ret;
429}
430
431static int k3_r5f_split_reset(struct k3_r5f_core *core)
432{
433 int ret;
434
435 dev_dbg(dev, "%s\n", __func__);
436
437 if (reset_assert(&core->reset))
438 ret = -EINVAL;
439
440 if (ti_sci_proc_power_domain_off(&core->tsp))
441 ret = -EINVAL;
442
443 return ret;
444}
445
446static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
447{
448 int ret = 0, c;
449
450 dev_dbg(dev, "%s\n", __func__);
451
452 for (c = 0; c < NR_CORES; c++)
453 if (reset_assert(&cluster->cores[c]->reset))
454 ret = -EINVAL;
455
456 /* disable PSC modules on all applicable cores */
457 for (c = 0; c < NR_CORES; c++)
458 if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
459 ret = -EINVAL;
460
461 return ret;
462}
463
464static int k3_r5f_unprepare(struct udevice *dev)
465{
466 struct k3_r5f_core *core = dev_get_priv(dev);
467 struct k3_r5f_cluster *cluster = core->cluster;
468 int ret;
469
470 dev_dbg(dev, "%s\n", __func__);
471
472 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
473 if (is_primary_core(core))
474 ret = k3_r5f_lockstep_reset(cluster);
475 } else {
476 ret = k3_r5f_split_reset(core);
477 }
478
479 if (ret)
480 dev_warn(dev, "Unable to enable cores for TCM loading %d\n",
481 ret);
482
483 return 0;
484}
485
486static int k3_r5f_stop(struct udevice *dev)
487{
488 struct k3_r5f_core *core = dev_get_priv(dev);
489 struct k3_r5f_cluster *cluster = core->cluster;
490 int c, ret;
491
492 dev_dbg(dev, "%s\n", __func__);
493
494 ret = k3_r5f_proc_request(core);
495 if (ret)
496 return ret;
497
498 core->in_use = false;
499
500 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
501 if (is_primary_core(core)) {
502 for (c = 0; c < NR_CORES; c++)
503 k3_r5f_core_halt(cluster->cores[c]);
504 } else {
505 dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
506 ret = -EINVAL;
507 goto proc_release;
508 }
509 } else {
510 k3_r5f_core_halt(core);
511 }
512
513 ret = k3_r5f_unprepare(dev);
514proc_release:
515 k3_r5f_proc_release(core);
516 return ret;
517}
518
519static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
520{
521 struct k3_r5f_core *core = dev_get_priv(dev);
522 void __iomem *va = NULL;
523 phys_addr_t bus_addr;
524 u32 dev_addr, offset;
525 ulong mem_size;
526 int i;
527
528 dev_dbg(dev, "%s\n", __func__);
529
530 if (size <= 0)
531 return NULL;
532
533 for (i = 0; i < core->num_mems; i++) {
534 bus_addr = core->mem[i].bus_addr;
535 dev_addr = core->mem[i].dev_addr;
536 mem_size = core->mem[i].size;
537
538 if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
539 offset = da - bus_addr;
540 va = core->mem[i].cpu_addr + offset;
541 return (__force void *)va;
542 }
543
544 if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
545 offset = da - dev_addr;
546 va = core->mem[i].cpu_addr + offset;
547 return (__force void *)va;
548 }
549 }
550
551 /* Assume it is DDR region and return da */
552 return map_physmem(da, size, MAP_NOCACHE);
553}
554
/* No device-specific init needed; all setup happens in probe/load/start */
static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}
559
/* No-op; resets are managed through the start/stop paths instead */
static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}
564
/* Remoteproc uclass operations for an individual R5F core */
static const struct dm_rproc_ops k3_r5f_rproc_ops = {
	.init = k3_r5f_init,
	.reset = k3_r5f_reset,
	.start = k3_r5f_start,
	.stop = k3_r5f_stop,
	.load = k3_r5f_load,
	.device_to_virt = k3_r5f_da_to_va,
};
573
574static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
575{
576 struct k3_r5f_cluster *cluster = core->cluster;
577 u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
Suman Anna9ff29302020-03-10 20:24:29 -0500578 bool lockstep_permitted;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530579 u64 boot_vec = 0;
580 int ret;
581
582 dev_dbg(dev, "%s\n", __func__);
583
584 ret = ti_sci_proc_request(&core->tsp);
585 if (ret < 0)
586 return ret;
587
588 /* Do not touch boot vector now. Load will take care of it. */
589 clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;
590
591 ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
592 if (ret)
593 goto out;
594
595 /* Sanity check for Lockstep mode */
Suman Anna9ff29302020-03-10 20:24:29 -0500596 lockstep_permitted = !!(sts &
597 PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
598 if (cluster->mode && is_primary_core(core) && !lockstep_permitted) {
Lokesh Vutla58633f12019-09-04 16:01:34 +0530599 dev_err(core->dev, "LockStep mode not permitted on this device\n");
600 ret = -EINVAL;
601 goto out;
602 }
603
604 /* Primary core only configuration */
605 if (is_primary_core(core)) {
606 /* always enable ARM mode */
607 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
608 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
609 set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
Suman Anna9ff29302020-03-10 20:24:29 -0500610 else if (lockstep_permitted)
Lokesh Vutla58633f12019-09-04 16:01:34 +0530611 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
612 }
613
614 if (core->atcm_enable)
615 set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
616 else
617 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
618
619 if (core->btcm_enable)
620 set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
621 else
622 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
623
624 if (core->loczrama)
625 set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
626 else
627 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
628
629 ret = k3_r5f_core_halt(core);
630 if (ret)
631 goto out;
632
633 ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
634out:
635 ti_sci_proc_release(&core->tsp);
636 return ret;
637}
638
639static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
640{
641 u32 ids[2];
642 int ret;
643
644 dev_dbg(dev, "%s\n", __func__);
645
646 tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
647 if (IS_ERR(tsp->sci)) {
648 dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
649 return PTR_ERR(tsp->sci);
650 }
651
652 ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
653 if (ret) {
654 dev_err(dev, "Proc IDs not populated %d\n", ret);
655 return ret;
656 }
657
658 tsp->ops = &tsp->sci->ops.proc_ops;
659 tsp->proc_id = ids[0];
660 tsp->host_id = ids[1];
661 tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
662 TI_SCI_RESOURCE_NULL);
663 if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
664 dev_err(dev, "Device ID not populated %d\n", ret);
665 return -ENODEV;
666 }
667
668 return 0;
669}
670
671static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
672{
673 int ret;
674
675 dev_dbg(dev, "%s\n", __func__);
676
677 core->atcm_enable = dev_read_u32_default(core->dev, "atcm-enable", 0);
678 core->btcm_enable = dev_read_u32_default(core->dev, "btcm-enable", 1);
679 core->loczrama = dev_read_u32_default(core->dev, "loczrama", 1);
680
681 ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
682 if (ret)
683 return ret;
684
685 ret = reset_get_by_index(core->dev, 0, &core->reset);
686 if (ret) {
687 dev_err(core->dev, "Reset lines not available: %d\n", ret);
688 return ret;
689 }
690
Suman Anna5d56d252020-08-17 18:15:08 -0500691 core->ipdata = (struct k3_r5f_ip_data *)dev_get_driver_data(core->dev);
692
Lokesh Vutla58633f12019-09-04 16:01:34 +0530693 return 0;
694}
695
696static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
697{
698 static const char * const mem_names[] = {"atcm", "btcm"};
699 struct udevice *dev = core->dev;
700 int i;
701
702 dev_dbg(dev, "%s\n", __func__);
703
704 core->num_mems = ARRAY_SIZE(mem_names);
705 core->mem = calloc(core->num_mems, sizeof(*core->mem));
706 if (!core->mem)
707 return -ENOMEM;
708
709 for (i = 0; i < core->num_mems; i++) {
710 core->mem[i].bus_addr = dev_read_addr_size_name(dev,
711 mem_names[i],
712 (fdt_addr_t *)&core->mem[i].size);
713 if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
714 dev_err(dev, "%s bus address not found\n",
715 mem_names[i]);
716 return -EINVAL;
717 }
718 core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
719 core->mem[i].size,
720 MAP_NOCACHE);
721 if (!strcmp(mem_names[i], "atcm")) {
722 core->mem[i].dev_addr = core->loczrama ?
723 0 : K3_R5_TCM_DEV_ADDR;
724 } else {
725 core->mem[i].dev_addr = core->loczrama ?
726 K3_R5_TCM_DEV_ADDR : 0;
727 }
728
729 dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
730 mem_names[i], &core->mem[i].bus_addr,
731 core->mem[i].size, core->mem[i].cpu_addr,
732 core->mem[i].dev_addr);
733 }
734
735 return 0;
736}
737
Suman Anna5d56d252020-08-17 18:15:08 -0500738/*
739 * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
740 * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
741 * cores are usable in Split-mode, but only the Core0 TCMs can be used in
742 * LockStep-mode. The newer revisions of the R5FSS IP maximizes these TCMs by
743 * leveraging the Core1 TCMs as well in certain modes where they would have
744 * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs). This is done by
745 * making a Core1 TCM visible immediately after the corresponding Core0 TCM.
746 * The SoC memory map uses the larger 64 KB sizes for the Core0 TCMs, and the
747 * dts representation reflects this increased size on supported SoCs. The Core0
748 * TCM sizes therefore have to be adjusted to only half the original size in
749 * Split mode.
750 */
751static void k3_r5f_core_adjust_tcm_sizes(struct k3_r5f_core *core)
752{
753 struct k3_r5f_cluster *cluster = core->cluster;
754
755 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
756 return;
757
758 if (!core->ipdata->tcm_is_double)
759 return;
760
761 if (core == cluster->cores[0]) {
762 core->mem[0].size /= 2;
763 core->mem[1].size /= 2;
764
765 dev_dbg(core->dev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
766 core->mem[0].size, core->mem[1].size);
767 }
768}
769
Lokesh Vutla58633f12019-09-04 16:01:34 +0530770/**
771 * k3_r5f_probe() - Basic probe
772 * @dev: corresponding k3 remote processor device
773 *
774 * Return: 0 if all goes good, else appropriate error message.
775 */
776static int k3_r5f_probe(struct udevice *dev)
777{
778 struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
779 struct k3_r5f_core *core = dev_get_priv(dev);
780 bool r_state;
781 int ret;
782
783 dev_dbg(dev, "%s\n", __func__);
784
785 core->dev = dev;
786 ret = k3_r5f_of_to_priv(core);
787 if (ret)
788 return ret;
789
790 core->cluster = cluster;
791 /* Assume Primary core gets probed first */
792 if (!cluster->cores[0])
793 cluster->cores[0] = core;
794 else
795 cluster->cores[1] = core;
796
797 ret = k3_r5f_core_of_get_memories(core);
798 if (ret) {
799 dev_err(dev, "Rproc getting internal memories failed\n");
800 return ret;
801 }
802
803 ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci, core->tsp.dev_id,
804 &r_state, &core->in_use);
805 if (ret)
806 return ret;
807
808 if (core->in_use) {
809 dev_info(dev, "Core %d is already in use. No rproc commands work\n",
810 core->tsp.proc_id);
811 return 0;
812 }
813
814 /* Make sure Local reset is asserted. Redundant? */
815 reset_assert(&core->reset);
816
817 ret = k3_r5f_rproc_configure(core);
818 if (ret) {
819 dev_err(dev, "rproc configure failed %d\n", ret);
820 return ret;
821 }
822
Suman Anna5d56d252020-08-17 18:15:08 -0500823 k3_r5f_core_adjust_tcm_sizes(core);
824
Lokesh Vutla58633f12019-09-04 16:01:34 +0530825 dev_dbg(dev, "Remoteproc successfully probed\n");
826
827 return 0;
828}
829
830static int k3_r5f_remove(struct udevice *dev)
831{
832 struct k3_r5f_core *core = dev_get_priv(dev);
833
834 free(core->mem);
835
836 ti_sci_proc_release(&core->tsp);
837
838 return 0;
839}
840
Suman Anna5d56d252020-08-17 18:15:08 -0500841static const struct k3_r5f_ip_data k3_data = {
842 .tcm_is_double = false,
843 .tcm_ecc_autoinit = false,
844};
845
846static const struct k3_r5f_ip_data j7200_data = {
847 .tcm_is_double = true,
848 .tcm_ecc_autoinit = true,
849};
850
Lokesh Vutla58633f12019-09-04 16:01:34 +0530851static const struct udevice_id k3_r5f_rproc_ids[] = {
Suman Anna5d56d252020-08-17 18:15:08 -0500852 { .compatible = "ti,am654-r5f", .data = (ulong)&k3_data, },
853 { .compatible = "ti,j721e-r5f", .data = (ulong)&k3_data, },
854 { .compatible = "ti,j7200-r5f", .data = (ulong)&j7200_data, },
Lokesh Vutla58633f12019-09-04 16:01:34 +0530855 {}
856};
857
/* Driver for an individual R5F core, bound under the R5FSS cluster driver */
U_BOOT_DRIVER(k3_r5f_rproc) = {
	.name = "k3_r5f_rproc",
	.of_match = k3_r5f_rproc_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_r5f_rproc_ops,
	.probe = k3_r5f_probe,
	.remove = k3_r5f_remove,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_core),
};
867
868static int k3_r5f_cluster_probe(struct udevice *dev)
869{
870 struct k3_r5f_cluster *cluster = dev_get_priv(dev);
871
872 dev_dbg(dev, "%s\n", __func__);
873
874 cluster->mode = dev_read_u32_default(dev, "lockstep-mode",
875 CLUSTER_MODE_LOCKSTEP);
876
877 if (device_get_child_count(dev) != 2) {
878 dev_err(dev, "Invalid number of R5 cores");
879 return -EINVAL;
880 }
881
882 dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
883 __func__, cluster->mode ? "lockstep" : "split");
884
885 return 0;
886}
887
/* Cluster-level compatibles; no per-SoC data needed at this level */
static const struct udevice_id k3_r5fss_ids[] = {
	{ .compatible = "ti,am654-r5fss"},
	{ .compatible = "ti,j721e-r5fss"},
	{ .compatible = "ti,j7200-r5fss"},
	{}
};
894
/* Cluster container driver; core devices are bound from its child nodes */
U_BOOT_DRIVER(k3_r5fss) = {
	.name = "k3_r5fss",
	.of_match = k3_r5fss_ids,
	.id = UCLASS_MISC,
	.probe = k3_r5f_cluster_probe,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_cluster),
	/* Power-domain handling is done explicitly via TI-SCI, not by DM core */
	.flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,
};