blob: c01b29d90f1b07b9dce8ed73e1c771efa86e31e5 [file] [log] [blame]
Lokesh Vutla58633f12019-09-04 16:01:34 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments' K3 R5 Remoteproc driver
4 *
5 * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
6 * Lokesh Vutla <lokeshvutla@ti.com>
7 */
8
9#include <common.h>
10#include <dm.h>
Simon Glass9bc15642020-02-03 07:36:16 -070011#include <malloc.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053012#include <remoteproc.h>
13#include <errno.h>
14#include <clk.h>
15#include <reset.h>
16#include <asm/io.h>
Simon Glass9bc15642020-02-03 07:36:16 -070017#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070018#include <linux/err.h>
Lokesh Vutla58633f12019-09-04 16:01:34 +053019#include <linux/kernel.h>
20#include <linux/soc/ti/ti_sci_protocol.h>
21#include "ti_sci_proc.h"
22
/*
 * R5F's view of this address can either be for ATCM or BTCM with the other
 * at address 0x0 based on loczrama signal.
 */
#define K3_R5_TCM_DEV_ADDR	0x41010000

/* R5 TI-SCI Processor Configuration Flags */
#define PROC_BOOT_CFG_FLAG_R5_DBG_EN			0x00000001
#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN			0x00000002
#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP			0x00000100
#define PROC_BOOT_CFG_FLAG_R5_TEINIT			0x00000200
#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN			0x00000400
#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE		0x00000800
#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN			0x00001000
#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN			0x00002000
#define PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR		0x10000000

/* R5 TI-SCI Processor Control Flags */
#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT		0x00000001

/* R5 TI-SCI Processor Status Flags */
#define PROC_BOOT_STATUS_FLAG_R5_WFE			0x00000001
#define PROC_BOOT_STATUS_FLAG_R5_WFI			0x00000002
#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED		0x00000004
#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED	0x00000100

/* Number of R5F cores within a single cluster */
#define NR_CORES	2
50
/**
 * enum cluster_mode - R5F cluster operational modes
 * @CLUSTER_MODE_SPLIT: both cores run independent applications
 * @CLUSTER_MODE_LOCKSTEP: both cores run the same application in lockstep
 */
enum cluster_mode {
	CLUSTER_MODE_SPLIT = 0,
	CLUSTER_MODE_LOCKSTEP,
};
55
/**
 * struct k3_r5f_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address from remoteproc view
 * @size: Size of the memory region
 */
struct k3_r5f_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
69
/**
 * struct k3_r5f_core - K3 R5 core structure
 * @dev: cached device pointer
 * @cluster: pointer to the parent cluster.
 * @reset: reset control handle
 * @tsp: TI-SCI processor control handle
 * @mem: Array of available internal memories
 * @num_mems: Number of available memories
 * @atcm_enable: flag to control ATCM enablement
 * @btcm_enable: flag to control BTCM enablement
 * @loczrama: flag to dictate which TCM is at device address 0x0
 * @in_use: flag to tell if the core is already in use.
 */
struct k3_r5f_core {
	struct udevice *dev;
	struct k3_r5f_cluster *cluster;
	struct reset_ctl reset;
	struct ti_sci_proc tsp;
	struct k3_r5f_mem *mem;
	int num_mems;
	u32 atcm_enable;
	u32 btcm_enable;
	u32 loczrama;
	bool in_use;
};
95
/**
 * struct k3_r5f_cluster - K3 R5F Cluster structure
 * @mode: Mode to configure the Cluster - Split or LockStep
 * @cores: Array of pointers to R5 cores within the cluster
 *         (cores[0] is the primary core, assumed to probe first)
 */
struct k3_r5f_cluster {
	enum cluster_mode mode;
	struct k3_r5f_core *cores[NR_CORES];
};
105
106static bool is_primary_core(struct k3_r5f_core *core)
107{
108 return core == core->cluster->cores[0];
109}
110
111static int k3_r5f_proc_request(struct k3_r5f_core *core)
112{
113 struct k3_r5f_cluster *cluster = core->cluster;
114 int i, ret;
115
116 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
117 for (i = 0; i < NR_CORES; i++) {
118 ret = ti_sci_proc_request(&cluster->cores[i]->tsp);
119 if (ret)
120 goto proc_release;
121 }
122 } else {
123 ret = ti_sci_proc_request(&core->tsp);
124 }
125
126 return 0;
127
128proc_release:
129 while (i >= 0) {
130 ti_sci_proc_release(&cluster->cores[i]->tsp);
131 i--;
132 }
133 return ret;
134}
135
136static void k3_r5f_proc_release(struct k3_r5f_core *core)
137{
138 struct k3_r5f_cluster *cluster = core->cluster;
139 int i;
140
141 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
142 for (i = 0; i < NR_CORES; i++)
143 ti_sci_proc_release(&cluster->cores[i]->tsp);
144 else
145 ti_sci_proc_release(&core->tsp);
146}
147
148static int k3_r5f_lockstep_release(struct k3_r5f_cluster *cluster)
149{
150 int ret, c;
151
152 dev_dbg(dev, "%s\n", __func__);
153
154 for (c = NR_CORES - 1; c >= 0; c--) {
155 ret = ti_sci_proc_power_domain_on(&cluster->cores[c]->tsp);
156 if (ret)
157 goto unroll_module_reset;
158 }
159
160 /* deassert local reset on all applicable cores */
161 for (c = NR_CORES - 1; c >= 0; c--) {
162 ret = reset_deassert(&cluster->cores[c]->reset);
163 if (ret)
164 goto unroll_local_reset;
165 }
166
167 return 0;
168
169unroll_local_reset:
170 while (c < NR_CORES) {
171 reset_assert(&cluster->cores[c]->reset);
172 c++;
173 }
174 c = 0;
175unroll_module_reset:
176 while (c < NR_CORES) {
177 ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp);
178 c++;
179 }
180
181 return ret;
182}
183
184static int k3_r5f_split_release(struct k3_r5f_core *core)
185{
186 int ret;
187
188 dev_dbg(dev, "%s\n", __func__);
189
190 ret = ti_sci_proc_power_domain_on(&core->tsp);
191 if (ret) {
192 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
193 ret);
194 return ret;
195 }
196
197 ret = reset_deassert(&core->reset);
198 if (ret) {
199 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
200 ret);
201 if (ti_sci_proc_power_domain_off(&core->tsp))
202 dev_warn(core->dev, "module-reset assert back failed\n");
203 }
204
205 return ret;
206}
207
208static int k3_r5f_prepare(struct udevice *dev)
209{
210 struct k3_r5f_core *core = dev_get_priv(dev);
211 struct k3_r5f_cluster *cluster = core->cluster;
212 int ret = 0;
213
214 dev_dbg(dev, "%s\n", __func__);
215
216 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
217 ret = k3_r5f_lockstep_release(cluster);
218 else
219 ret = k3_r5f_split_release(core);
220
221 if (ret)
222 dev_err(dev, "Unable to enable cores for TCM loading %d\n",
223 ret);
224
225 return ret;
226}
227
228static int k3_r5f_core_sanity_check(struct k3_r5f_core *core)
229{
230 struct k3_r5f_cluster *cluster = core->cluster;
231
232 if (core->in_use) {
233 dev_err(dev, "Invalid op: Trying to load/start on already running core %d\n",
234 core->tsp.proc_id);
235 return -EINVAL;
236 }
237
238 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !cluster->cores[1]) {
239 printf("Secondary core is not probed in this cluster\n");
240 return -EAGAIN;
241 }
242
243 if (cluster->mode == CLUSTER_MODE_LOCKSTEP && !is_primary_core(core)) {
244 dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
245 core->tsp.proc_id);
246 return -EINVAL;
247 }
248
249 if (cluster->mode == CLUSTER_MODE_SPLIT && !is_primary_core(core)) {
250 if (!core->cluster->cores[0]->in_use) {
251 dev_err(dev, "Invalid seq: Enable primary core before loading secondary core\n");
252 return -EINVAL;
253 }
254 }
255
256 return 0;
257}
258
259/**
260 * k3_r5f_load() - Load up the Remote processor image
261 * @dev: rproc device pointer
262 * @addr: Address at which image is available
263 * @size: size of the image
264 *
265 * Return: 0 if all goes good, else appropriate error message.
266 */
267static int k3_r5f_load(struct udevice *dev, ulong addr, ulong size)
268{
269 struct k3_r5f_core *core = dev_get_priv(dev);
270 u32 boot_vector;
271 int ret;
272
273 dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size);
274
275 ret = k3_r5f_core_sanity_check(core);
276 if (ret)
277 return ret;
278
279 ret = k3_r5f_proc_request(core);
280 if (ret)
281 return ret;
282
283 ret = k3_r5f_prepare(dev);
284 if (ret) {
285 dev_err(dev, "R5f prepare failed for core %d\n",
286 core->tsp.proc_id);
287 goto proc_release;
288 }
289
290 /* Zero out TCMs so that ECC can be effective on all TCM addresses */
291 if (core->atcm_enable)
292 memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
293 if (core->btcm_enable)
294 memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
295
296 ret = rproc_elf_load_image(dev, addr, size);
297 if (ret < 0) {
298 dev_err(dev, "Loading elf failedi %d\n", ret);
299 goto proc_release;
300 }
301
302 boot_vector = rproc_elf_get_boot_addr(dev, addr);
303
304 dev_dbg(dev, "%s: Boot vector = 0x%x\n", __func__, boot_vector);
305
306 ret = ti_sci_proc_set_config(&core->tsp, boot_vector, 0, 0);
307
308proc_release:
309 k3_r5f_proc_release(core);
310
311 return ret;
312}
313
314static int k3_r5f_core_halt(struct k3_r5f_core *core)
315{
316 int ret;
317
318 ret = ti_sci_proc_set_control(&core->tsp,
319 PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
320 if (ret)
321 dev_err(core->dev, "Core %d failed to stop\n",
322 core->tsp.proc_id);
323
324 return ret;
325}
326
327static int k3_r5f_core_run(struct k3_r5f_core *core)
328{
329 int ret;
330
331 ret = ti_sci_proc_set_control(&core->tsp,
332 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
333 if (ret) {
334 dev_err(core->dev, "Core %d failed to start\n",
335 core->tsp.proc_id);
336 return ret;
337 }
338
339 return 0;
340}
341
/**
 * k3_r5f_start() - Start the remote processor
 * @dev: rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int k3_r5f_start(struct udevice *dev)
{
	struct k3_r5f_core *core = dev_get_priv(dev);
	struct k3_r5f_cluster *cluster = core->cluster;
	int ret, c;

	dev_dbg(dev, "%s\n", __func__);

	/* Reject starts on already-running cores / invalid mode combos */
	ret = k3_r5f_core_sanity_check(core);
	if (ret)
		return ret;

	/* Take TI-SCI processor control over the affected core(s) */
	ret = k3_r5f_proc_request(core);
	if (ret)
		return ret;

	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
		if (is_primary_core(core)) {
			/*
			 * Release cores from halt in descending order
			 * (core1 before core0) so the pair leaves halt
			 * together in lockstep
			 */
			for (c = NR_CORES - 1; c >= 0; c--) {
				ret = k3_r5f_core_run(cluster->cores[c]);
				if (ret)
					goto unroll_core_run;
			}
		} else {
			dev_err(dev, "Invalid op: Trying to start secondary core %d in lockstep mode\n",
				core->tsp.proc_id);
			ret = -EINVAL;
			goto proc_release;
		}
	} else {
		ret = k3_r5f_core_run(core);
		if (ret)
			goto proc_release;
	}

	core->in_use = true;

	k3_r5f_proc_release(core);
	return 0;

unroll_core_run:
	/*
	 * Halt from the failing core upward; this re-halts the cores
	 * started so far (halting the failed core itself is harmless)
	 */
	while (c < NR_CORES) {
		k3_r5f_core_halt(cluster->cores[c]);
		c++;
	}
proc_release:
	k3_r5f_proc_release(core);

	return ret;
}
398
399static int k3_r5f_split_reset(struct k3_r5f_core *core)
400{
401 int ret;
402
403 dev_dbg(dev, "%s\n", __func__);
404
405 if (reset_assert(&core->reset))
406 ret = -EINVAL;
407
408 if (ti_sci_proc_power_domain_off(&core->tsp))
409 ret = -EINVAL;
410
411 return ret;
412}
413
414static int k3_r5f_lockstep_reset(struct k3_r5f_cluster *cluster)
415{
416 int ret = 0, c;
417
418 dev_dbg(dev, "%s\n", __func__);
419
420 for (c = 0; c < NR_CORES; c++)
421 if (reset_assert(&cluster->cores[c]->reset))
422 ret = -EINVAL;
423
424 /* disable PSC modules on all applicable cores */
425 for (c = 0; c < NR_CORES; c++)
426 if (ti_sci_proc_power_domain_off(&cluster->cores[c]->tsp))
427 ret = -EINVAL;
428
429 return ret;
430}
431
432static int k3_r5f_unprepare(struct udevice *dev)
433{
434 struct k3_r5f_core *core = dev_get_priv(dev);
435 struct k3_r5f_cluster *cluster = core->cluster;
436 int ret;
437
438 dev_dbg(dev, "%s\n", __func__);
439
440 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
441 if (is_primary_core(core))
442 ret = k3_r5f_lockstep_reset(cluster);
443 } else {
444 ret = k3_r5f_split_reset(core);
445 }
446
447 if (ret)
448 dev_warn(dev, "Unable to enable cores for TCM loading %d\n",
449 ret);
450
451 return 0;
452}
453
454static int k3_r5f_stop(struct udevice *dev)
455{
456 struct k3_r5f_core *core = dev_get_priv(dev);
457 struct k3_r5f_cluster *cluster = core->cluster;
458 int c, ret;
459
460 dev_dbg(dev, "%s\n", __func__);
461
462 ret = k3_r5f_proc_request(core);
463 if (ret)
464 return ret;
465
466 core->in_use = false;
467
468 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
469 if (is_primary_core(core)) {
470 for (c = 0; c < NR_CORES; c++)
471 k3_r5f_core_halt(cluster->cores[c]);
472 } else {
473 dev_err(dev, "Invalid op: Trying to stop secondary core in lockstep mode\n");
474 ret = -EINVAL;
475 goto proc_release;
476 }
477 } else {
478 k3_r5f_core_halt(core);
479 }
480
481 ret = k3_r5f_unprepare(dev);
482proc_release:
483 k3_r5f_proc_release(core);
484 return ret;
485}
486
487static void *k3_r5f_da_to_va(struct udevice *dev, ulong da, ulong size)
488{
489 struct k3_r5f_core *core = dev_get_priv(dev);
490 void __iomem *va = NULL;
491 phys_addr_t bus_addr;
492 u32 dev_addr, offset;
493 ulong mem_size;
494 int i;
495
496 dev_dbg(dev, "%s\n", __func__);
497
498 if (size <= 0)
499 return NULL;
500
501 for (i = 0; i < core->num_mems; i++) {
502 bus_addr = core->mem[i].bus_addr;
503 dev_addr = core->mem[i].dev_addr;
504 mem_size = core->mem[i].size;
505
506 if (da >= bus_addr && (da + size) <= (bus_addr + mem_size)) {
507 offset = da - bus_addr;
508 va = core->mem[i].cpu_addr + offset;
509 return (__force void *)va;
510 }
511
512 if (da >= dev_addr && (da + size) <= (dev_addr + mem_size)) {
513 offset = da - dev_addr;
514 va = core->mem[i].cpu_addr + offset;
515 return (__force void *)va;
516 }
517 }
518
519 /* Assume it is DDR region and return da */
520 return map_physmem(da, size, MAP_NOCACHE);
521}
522
/* .init rproc op: nothing to do; all setup happens at probe time */
static int k3_r5f_init(struct udevice *dev)
{
	return 0;
}
527
/* .reset rproc op: no-op; resets are handled via prepare/unprepare paths */
static int k3_r5f_reset(struct udevice *dev)
{
	return 0;
}
532
/* remoteproc uclass operations for a single R5F core */
static const struct dm_rproc_ops k3_r5f_rproc_ops = {
	.init = k3_r5f_init,
	.reset = k3_r5f_reset,
	.start = k3_r5f_start,
	.stop = k3_r5f_stop,
	.load = k3_r5f_load,
	.device_to_virt = k3_r5f_da_to_va,
};
541
542static int k3_r5f_rproc_configure(struct k3_r5f_core *core)
543{
544 struct k3_r5f_cluster *cluster = core->cluster;
545 u32 set_cfg = 0, clr_cfg = 0, cfg, ctrl, sts;
Suman Anna9ff29302020-03-10 20:24:29 -0500546 bool lockstep_permitted;
Lokesh Vutla58633f12019-09-04 16:01:34 +0530547 u64 boot_vec = 0;
548 int ret;
549
550 dev_dbg(dev, "%s\n", __func__);
551
552 ret = ti_sci_proc_request(&core->tsp);
553 if (ret < 0)
554 return ret;
555
556 /* Do not touch boot vector now. Load will take care of it. */
557 clr_cfg |= PROC_BOOT_CFG_FLAG_GEN_IGN_BOOTVECTOR;
558
559 ret = ti_sci_proc_get_status(&core->tsp, &boot_vec, &cfg, &ctrl, &sts);
560 if (ret)
561 goto out;
562
563 /* Sanity check for Lockstep mode */
Suman Anna9ff29302020-03-10 20:24:29 -0500564 lockstep_permitted = !!(sts &
565 PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
566 if (cluster->mode && is_primary_core(core) && !lockstep_permitted) {
Lokesh Vutla58633f12019-09-04 16:01:34 +0530567 dev_err(core->dev, "LockStep mode not permitted on this device\n");
568 ret = -EINVAL;
569 goto out;
570 }
571
572 /* Primary core only configuration */
573 if (is_primary_core(core)) {
574 /* always enable ARM mode */
575 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TEINIT;
576 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
577 set_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
Suman Anna9ff29302020-03-10 20:24:29 -0500578 else if (lockstep_permitted)
Lokesh Vutla58633f12019-09-04 16:01:34 +0530579 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
580 }
581
582 if (core->atcm_enable)
583 set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
584 else
585 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
586
587 if (core->btcm_enable)
588 set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
589 else
590 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
591
592 if (core->loczrama)
593 set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
594 else
595 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
596
597 ret = k3_r5f_core_halt(core);
598 if (ret)
599 goto out;
600
601 ret = ti_sci_proc_set_config(&core->tsp, boot_vec, set_cfg, clr_cfg);
602out:
603 ti_sci_proc_release(&core->tsp);
604 return ret;
605}
606
607static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp)
608{
609 u32 ids[2];
610 int ret;
611
612 dev_dbg(dev, "%s\n", __func__);
613
614 tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci");
615 if (IS_ERR(tsp->sci)) {
616 dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci));
617 return PTR_ERR(tsp->sci);
618 }
619
620 ret = dev_read_u32_array(dev, "ti,sci-proc-ids", ids, 2);
621 if (ret) {
622 dev_err(dev, "Proc IDs not populated %d\n", ret);
623 return ret;
624 }
625
626 tsp->ops = &tsp->sci->ops.proc_ops;
627 tsp->proc_id = ids[0];
628 tsp->host_id = ids[1];
629 tsp->dev_id = dev_read_u32_default(dev, "ti,sci-dev-id",
630 TI_SCI_RESOURCE_NULL);
631 if (tsp->dev_id == TI_SCI_RESOURCE_NULL) {
632 dev_err(dev, "Device ID not populated %d\n", ret);
633 return -ENODEV;
634 }
635
636 return 0;
637}
638
639static int k3_r5f_of_to_priv(struct k3_r5f_core *core)
640{
641 int ret;
642
643 dev_dbg(dev, "%s\n", __func__);
644
645 core->atcm_enable = dev_read_u32_default(core->dev, "atcm-enable", 0);
646 core->btcm_enable = dev_read_u32_default(core->dev, "btcm-enable", 1);
647 core->loczrama = dev_read_u32_default(core->dev, "loczrama", 1);
648
649 ret = ti_sci_proc_of_to_priv(core->dev, &core->tsp);
650 if (ret)
651 return ret;
652
653 ret = reset_get_by_index(core->dev, 0, &core->reset);
654 if (ret) {
655 dev_err(core->dev, "Reset lines not available: %d\n", ret);
656 return ret;
657 }
658
659 return 0;
660}
661
/**
 * k3_r5f_core_of_get_memories() - Parse and map the core's TCM regions
 * @core: K3 R5F core
 *
 * Reads the "atcm"/"btcm" reg entries, maps each region uncached for CPU
 * access, and derives the R5-side device address from the loczrama
 * setting (whichever TCM is at device address 0x0, the other is at
 * K3_R5_TCM_DEV_ADDR).
 *
 * Return: 0 on success, -ENOMEM or -EINVAL on failure
 */
static int k3_r5f_core_of_get_memories(struct k3_r5f_core *core)
{
	static const char * const mem_names[] = {"atcm", "btcm"};
	struct udevice *dev = core->dev;
	int i;

	dev_dbg(dev, "%s\n", __func__);

	core->num_mems = ARRAY_SIZE(mem_names);
	core->mem = calloc(core->num_mems, sizeof(*core->mem));
	if (!core->mem)
		return -ENOMEM;

	for (i = 0; i < core->num_mems; i++) {
		/*
		 * NOTE(review): casting &size (a size_t) to fdt_addr_t *
		 * assumes both types have the same width on this platform
		 * -- confirm for 32-bit builds
		 */
		core->mem[i].bus_addr = dev_read_addr_size_name(dev,
						mem_names[i],
						(fdt_addr_t *)&core->mem[i].size);
		if (core->mem[i].bus_addr == FDT_ADDR_T_NONE) {
			dev_err(dev, "%s bus address not found\n",
				mem_names[i]);
			return -EINVAL;
		}
		core->mem[i].cpu_addr = map_physmem(core->mem[i].bus_addr,
						    core->mem[i].size,
						    MAP_NOCACHE);
		if (!strcmp(mem_names[i], "atcm")) {
			core->mem[i].dev_addr = core->loczrama ?
						0 : K3_R5_TCM_DEV_ADDR;
		} else {
			core->mem[i].dev_addr = core->loczrama ?
						K3_R5_TCM_DEV_ADDR : 0;
		}

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
			mem_names[i], &core->mem[i].bus_addr,
			core->mem[i].size, core->mem[i].cpu_addr,
			core->mem[i].dev_addr);
	}

	return 0;
}
703
/**
 * k3_r5f_probe() - Basic probe
 * @dev: corresponding k3 remote processor device
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int k3_r5f_probe(struct udevice *dev)
{
	struct k3_r5f_cluster *cluster = dev_get_priv(dev->parent);
	struct k3_r5f_core *core = dev_get_priv(dev);
	bool r_state;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	core->dev = dev;
	/* Parse DT: TCM enables, loczrama, TI-SCI ids, reset handle */
	ret = k3_r5f_of_to_priv(core);
	if (ret)
		return ret;

	core->cluster = cluster;
	/* Assume Primary core gets probed first */
	if (!cluster->cores[0])
		cluster->cores[0] = core;
	else
		cluster->cores[1] = core;

	ret = k3_r5f_core_of_get_memories(core);
	if (ret) {
		dev_err(dev, "Rproc getting internal memories failed\n");
		return ret;
	}

	/*
	 * Query core state via TI-SCI; the second flag is taken as
	 * "currently running" -- NOTE(review): confirm against the
	 * is_on() op's curr_state semantics
	 */
	ret = core->tsp.sci->ops.dev_ops.is_on(core->tsp.sci, core->tsp.dev_id,
					       &r_state, &core->in_use);
	if (ret)
		return ret;

	/* A core started by an earlier boot stage is left untouched */
	if (core->in_use) {
		dev_info(dev, "Core %d is already in use. No rproc commands work\n",
			 core->tsp.proc_id);
		return 0;
	}

	/* Make sure Local reset is asserted. Redundant? */
	reset_assert(&core->reset);

	ret = k3_r5f_rproc_configure(core);
	if (ret) {
		dev_err(dev, "rproc configure failed %d\n", ret);
		return ret;
	}

	dev_dbg(dev, "Remoteproc successfully probed\n");

	return 0;
}
761
762static int k3_r5f_remove(struct udevice *dev)
763{
764 struct k3_r5f_core *core = dev_get_priv(dev);
765
766 free(core->mem);
767
768 ti_sci_proc_release(&core->tsp);
769
770 return 0;
771}
772
/* Compatibles for individual R5F core nodes */
static const struct udevice_id k3_r5f_rproc_ids[] = {
	{ .compatible = "ti,am654-r5f"},
	{ .compatible = "ti,j721e-r5f"},
	{}
};
778
/* Per-core remoteproc driver; one instance per R5F core child node */
U_BOOT_DRIVER(k3_r5f_rproc) = {
	.name = "k3_r5f_rproc",
	.of_match = k3_r5f_rproc_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &k3_r5f_rproc_ops,
	.probe = k3_r5f_probe,
	.remove = k3_r5f_remove,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_core),
};
788
789static int k3_r5f_cluster_probe(struct udevice *dev)
790{
791 struct k3_r5f_cluster *cluster = dev_get_priv(dev);
792
793 dev_dbg(dev, "%s\n", __func__);
794
795 cluster->mode = dev_read_u32_default(dev, "lockstep-mode",
796 CLUSTER_MODE_LOCKSTEP);
797
798 if (device_get_child_count(dev) != 2) {
799 dev_err(dev, "Invalid number of R5 cores");
800 return -EINVAL;
801 }
802
803 dev_dbg(dev, "%s: Cluster successfully probed in %s mode\n",
804 __func__, cluster->mode ? "lockstep" : "split");
805
806 return 0;
807}
808
/* Compatibles for the R5F subsystem (cluster) node */
static const struct udevice_id k3_r5fss_ids[] = {
	{ .compatible = "ti,am654-r5fss"},
	{ .compatible = "ti,j721e-r5fss"},
	{}
};
814
/* Cluster (r5fss) driver; parent of the two R5F core devices */
U_BOOT_DRIVER(k3_r5fss) = {
	.name = "k3_r5fss",
	.of_match = k3_r5fss_ids,
	.id = UCLASS_MISC,
	.probe = k3_r5f_cluster_probe,
	.priv_auto_alloc_size = sizeof(struct k3_r5f_cluster),
	.flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,
};