// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2015
 * Texas Instruments Incorporated - http://www.ti.com/
 */

#define LOG_CATEGORY UCLASS_REMOTEPROC

#define pr_fmt(fmt) "%s: " fmt, __func__
#include <common.h>
#include <elf.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <virtio_ring.h>
#include <remoteproc.h>
#include <asm/io.h>
#include <dm/device-internal.h>
#include <dm.h>
#include <dm/uclass.h>
#include <dm/uclass-internal.h>
#include <linux/compat.h>

DECLARE_GLOBAL_DATA_PTR;

struct resource_table {
	u32 ver;
	u32 num;
	u32 reserved[2];
	u32 offset[0];
} __packed;

typedef int (*handle_resource_t) (struct udevice *, void *, int offset, int avail);

static struct resource_table *rsc_table;
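
/*
 * Illustrative memory layout sketch (inferred only from the parsing code in
 * this file, not quoted from a firmware specification): the table is the
 * header above, followed by 'num' byte offsets, each pointing at a
 * struct fw_rsc_hdr whose 'type' selects a handler and whose payload
 * immediately follows the header:
 *
 *	struct resource_table	(ver = 1, num = N)
 *	u32 offset[N]		(byte offsets from the start of the table)
 *	  -> struct fw_rsc_hdr	(type = RSC_CARVEOUT, RSC_DEVMEM, ...)
 *	     payload		(e.g. struct fw_rsc_carveout)
 */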

/**
 * for_each_remoteproc_device() - iterate through the list of rproc devices
 * @fn: check function to call per match; if this function returns a failure,
 *	iteration is aborted with that error value
 * @skip_dev: Device to skip when invoking the callback
 * @data: Data to pass to the callback function
 *
 * Return: 0 if none of the callbacks returned a non-zero result, else the
 * result from the failing callback
 */
static int for_each_remoteproc_device(int (*fn) (struct udevice *dev,
					struct dm_rproc_uclass_pdata *uc_pdata,
					const void *data),
				      struct udevice *skip_dev,
				      const void *data)
{
	struct udevice *dev;
	struct dm_rproc_uclass_pdata *uc_pdata;
	int ret;

	for (ret = uclass_find_first_device(UCLASS_REMOTEPROC, &dev); dev;
	     ret = uclass_find_next_device(&dev)) {
		if (ret || dev == skip_dev)
			continue;
		uc_pdata = dev_get_uclass_plat(dev);
		ret = fn(dev, uc_pdata, data);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * _rproc_name_is_unique() - iteration helper to check if rproc name is unique
 * @dev: device whose name we are checking
 * @uc_pdata: uclass platform data
 * @data: compare data (this is the name we want to ensure is unique)
 *
 * Return: 0 if there is no match (the name is unique); -EINVAL if there is
 * a match (the name is not unique).
 */
static int _rproc_name_is_unique(struct udevice *dev,
				 struct dm_rproc_uclass_pdata *uc_pdata,
				 const void *data)
{
	const char *check_name = data;

	/* devices not yet populated with data - so skip them */
	if (!uc_pdata->name || !check_name)
		return 0;

	/* Return 0 to search further if we don't match */
	if (strlen(uc_pdata->name) != strlen(check_name))
		return 0;

	if (!strcmp(uc_pdata->name, check_name))
		return -EINVAL;

	return 0;
}

/**
 * rproc_name_is_unique() - Check if the rproc name is unique
 * @check_dev: Device whose name we are checking (skipped during the scan)
 * @check_name: Name we are trying to ensure is unique.
 *
 * Return: true if we have a unique name, false if the name is not unique.
 */
static bool rproc_name_is_unique(struct udevice *check_dev,
				 const char *check_name)
{
	int ret;

	ret = for_each_remoteproc_device(_rproc_name_is_unique,
					 check_dev, check_name);
	return ret ? false : true;
}

/**
 * rproc_pre_probe() - Pre probe accessor for the uclass
 * @dev: device for which we are pre-probing
 *
 * Parses and fills up the uclass pdata for use as needed by core and
 * remote proc drivers.
 *
 * Return: 0 if all went well, else appropriate error value.
 */
static int rproc_pre_probe(struct udevice *dev)
{
	struct dm_rproc_uclass_pdata *uc_pdata;
	const struct dm_rproc_ops *ops;

	uc_pdata = dev_get_uclass_plat(dev);

	/* See if we need to populate via fdt */

	if (!dev_get_plat(dev)) {
#if CONFIG_IS_ENABLED(OF_CONTROL)
		bool tmp;
		debug("'%s': using fdt\n", dev->name);
		uc_pdata->name = dev_read_string(dev, "remoteproc-name");

		/* Default is internal memory mapped */
		uc_pdata->mem_type = RPROC_INTERNAL_MEMORY_MAPPED;
		tmp = dev_read_bool(dev, "remoteproc-internal-memory-mapped");
		if (tmp)
			uc_pdata->mem_type = RPROC_INTERNAL_MEMORY_MAPPED;
#else
		/* Nothing much we can do about this, can we? */
		return -EINVAL;
#endif

	} else {
		struct dm_rproc_uclass_pdata *pdata = dev_get_plat(dev);

		debug("'%s': using legacy data\n", dev->name);
		if (pdata->name)
			uc_pdata->name = pdata->name;
		uc_pdata->mem_type = pdata->mem_type;
		uc_pdata->driver_plat_data = pdata->driver_plat_data;
	}

	/* Else try using the device name */
	if (!uc_pdata->name)
		uc_pdata->name = dev->name;
	if (!uc_pdata->name) {
		debug("Unnamed device!");
		return -EINVAL;
	}

	if (!rproc_name_is_unique(dev, uc_pdata->name)) {
		debug("%s duplicate name '%s'\n", dev->name, uc_pdata->name);
		return -EINVAL;
	}

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	if (!ops->load || !ops->start) {
		debug("%s driver has missing mandatory ops?\n", dev->name);
		return -EINVAL;
	}

	return 0;
}
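
/*
 * Illustrative devicetree fragment (a hedged sketch, not a binding document):
 * the property names are exactly the ones parsed in rproc_pre_probe() above;
 * the node name, unit address and compatible string are made up for the
 * example.
 *
 *	dsp0: remoteproc@48000000 {
 *		compatible = "vendor,example-rproc";
 *		remoteproc-name = "dsp-core0";
 *		remoteproc-internal-memory-mapped;
 *	};
 */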

/**
 * rproc_post_probe() - post probe accessor for the uclass
 * @dev: device we finished probing
 *
 * Initiate the init function after the probe is completed. This allows
 * the remote processor drivers to split up the initializations between
 * probe and init as needed.
 *
 * Return: if the remote proc driver has an init routine, invokes it and
 * hands over the return value. Overall, 0 if all went well, else appropriate
 * error value.
 */
static int rproc_post_probe(struct udevice *dev)
{
	const struct dm_rproc_ops *ops;

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	if (ops->init)
		return ops->init(dev);

	return 0;
}

/**
 * rproc_add_res() - After parsing the resource table, add the mappings
 * @dev: device we finished probing
 * @mapping: rproc_mem_entry for the resource
 *
 * Return: if the remote proc driver has an add_res routine, invokes it and
 * hands over the return value; -ENOSYS if the routine is not implemented.
 */
static int rproc_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
{
	const struct dm_rproc_ops *ops = rproc_get_ops(dev);

	if (!ops->add_res)
		return -ENOSYS;

	return ops->add_res(dev, mapping);
}

/**
 * rproc_alloc_mem() - After parsing the resource table, allocate memory
 * @dev: device we finished probing
 * @len: length of the memory to be allocated
 * @align: alignment for the allocation
 *
 * Return: if the remote proc driver has an alloc_mem routine, invokes it and
 * hands over the returned pointer; NULL otherwise.
 */
static void *rproc_alloc_mem(struct udevice *dev, unsigned long len,
			     unsigned long align)
{
	const struct dm_rproc_ops *ops;

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return NULL;
	}

	if (ops->alloc_mem)
		return ops->alloc_mem(dev, len, align);

	return NULL;
}

/**
 * rproc_config_pagetable() - Configure page table for remote processor
 * @dev: device we finished probing
 * @virt: virtual address of the resource
 * @phys: physical address of the resource
 * @len: length of the resource
 *
 * Return: if the remote proc driver has a config_pagetable routine, invokes
 * it and hands over the return value; 0 otherwise.
 */
static int rproc_config_pagetable(struct udevice *dev, unsigned int virt,
				  unsigned int phys, unsigned int len)
{
	const struct dm_rproc_ops *ops;

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	if (ops->config_pagetable)
		return ops->config_pagetable(dev, virt, phys, len);

	return 0;
}

UCLASS_DRIVER(rproc) = {
	.id = UCLASS_REMOTEPROC,
	.name = "remoteproc",
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	.pre_probe = rproc_pre_probe,
	.post_probe = rproc_post_probe,
	.per_device_plat_auto = sizeof(struct dm_rproc_uclass_pdata),
};
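
/*
 * Minimal sketch of what a backing driver provides (hypothetical names; the
 * only facts taken from this file are that UCLASS_REMOTEPROC drivers supply
 * a struct dm_rproc_ops and that .load and .start are mandatory):
 *
 *	static const struct dm_rproc_ops example_rproc_ops = {
 *		.load = example_rproc_load,
 *		.start = example_rproc_start,
 *	};
 *
 *	U_BOOT_DRIVER(example_rproc) = {
 *		.name = "example_rproc",
 *		.id = UCLASS_REMOTEPROC,
 *		.ops = &example_rproc_ops,
 *	};
 */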

/* Remoteproc subsystem access functions */
/**
 * _rproc_probe_dev() - iteration helper to probe a rproc device
 * @dev: device to probe
 * @uc_pdata: uclass data allocated for the device
 * @data: unused
 *
 * Return: 0 if all ok, else appropriate error value.
 */
static int _rproc_probe_dev(struct udevice *dev,
			    struct dm_rproc_uclass_pdata *uc_pdata,
			    const void *data)
{
	int ret;

	ret = device_probe(dev);

	if (ret)
		debug("%s: Failed to initialize - %d\n", dev->name, ret);
	return ret;
}

/**
 * _rproc_dev_is_probed() - check if the device has been probed
 * @dev: device to check
 * @uc_pdata: unused
 * @data: unused
 *
 * Return: 0 if probed, -EAGAIN otherwise
 */
static int _rproc_dev_is_probed(struct udevice *dev,
				struct dm_rproc_uclass_pdata *uc_pdata,
				const void *data)
{
	if (dev_get_flags(dev) & DM_FLAG_ACTIVATED)
		return 0;

	return -EAGAIN;
}

bool rproc_is_initialized(void)
{
	int ret = for_each_remoteproc_device(_rproc_dev_is_probed, NULL, NULL);
	return ret ? false : true;
}

int rproc_init(void)
{
	int ret;

	if (rproc_is_initialized()) {
		debug("Already initialized\n");
		return -EINVAL;
	}

	ret = for_each_remoteproc_device(_rproc_probe_dev, NULL, NULL);
	return ret;
}

int rproc_dev_init(int id)
{
	struct udevice *dev = NULL;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
	if (ret) {
		debug("Unknown remote processor id '%d' requested(%d)\n",
		      id, ret);
		return ret;
	}

	ret = device_probe(dev);
	if (ret)
		debug("%s: Failed to initialize - %d\n", dev->name, ret);

	return ret;
}

int rproc_load(int id, ulong addr, ulong size)
{
	struct udevice *dev = NULL;
	struct dm_rproc_uclass_pdata *uc_pdata;
	const struct dm_rproc_ops *ops;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
	if (ret) {
		debug("Unknown remote processor id '%d' requested(%d)\n",
		      id, ret);
		return ret;
	}

	uc_pdata = dev_get_uclass_plat(dev);

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	debug("Loading to '%s' from address 0x%08lX size of %lu bytes\n",
	      uc_pdata->name, addr, size);
	if (ops->load)
		return ops->load(dev, addr, size);

	debug("%s: data corruption?? mandatory function is missing!\n",
	      dev->name);

	return -EINVAL;
};

/*
 * Completely internal helper enums..
 * Keeping this isolated helps this code evolve independent of other
 * parts..
 */
enum rproc_ops {
	RPROC_START,
	RPROC_STOP,
	RPROC_RESET,
	RPROC_PING,
	RPROC_RUNNING,
};

/**
 * _rproc_ops_wrapper() - wrapper for invoking remote proc driver callback
 * @id: id of the remote processor
 * @op: one of rproc_ops that indicates which operation to invoke
 *
 * Most of the checks and verification for remoteproc operations are more
 * or less the same for almost all operations. This allows us to put a wrapper
 * and use the common checks to allow the driver to function appropriately.
 *
 * Return: 0 if all ok, else appropriate error value.
 */
static int _rproc_ops_wrapper(int id, enum rproc_ops op)
{
	struct udevice *dev = NULL;
	struct dm_rproc_uclass_pdata *uc_pdata;
	const struct dm_rproc_ops *ops;
	int (*fn)(struct udevice *dev);
	bool mandatory = false;
	char *op_str;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
	if (ret) {
		debug("Unknown remote processor id '%d' requested(%d)\n",
		      id, ret);
		return ret;
	}

	uc_pdata = dev_get_uclass_plat(dev);

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}
	switch (op) {
	case RPROC_START:
		fn = ops->start;
		mandatory = true;
		op_str = "Starting";
		break;
	case RPROC_STOP:
		fn = ops->stop;
		op_str = "Stopping";
		break;
	case RPROC_RESET:
		fn = ops->reset;
		op_str = "Resetting";
		break;
	case RPROC_RUNNING:
		fn = ops->is_running;
		op_str = "Checking if running:";
		break;
	case RPROC_PING:
		fn = ops->ping;
		op_str = "Pinging";
		break;
	default:
		debug("what is '%d' operation??\n", op);
		return -EINVAL;
	}

	debug("%s %s...\n", op_str, uc_pdata->name);
	if (fn)
		return fn(dev);

	if (mandatory)
		debug("%s: data corruption?? mandatory function is missing!\n",
		      dev->name);

	return -ENOSYS;
}

int rproc_start(int id)
{
	return _rproc_ops_wrapper(id, RPROC_START);
};

int rproc_stop(int id)
{
	return _rproc_ops_wrapper(id, RPROC_STOP);
};

int rproc_reset(int id)
{
	return _rproc_ops_wrapper(id, RPROC_RESET);
};

int rproc_ping(int id)
{
	return _rproc_ops_wrapper(id, RPROC_PING);
};

int rproc_is_running(int id)
{
	return _rproc_ops_wrapper(id, RPROC_RUNNING);
};
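
/*
 * Typical calling sequence (illustrative sketch only; 'id', 'addr' and 'size'
 * are assumed to come from the caller, e.g. a board file or the remoteproc
 * command, and error handling is trimmed):
 *
 *	int ret = 0;
 *
 *	if (!rproc_is_initialized())
 *		ret = rproc_init();
 *	if (!ret)
 *		ret = rproc_load(id, addr, size);
 *	if (!ret)
 *		ret = rproc_start(id);
 */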

static int handle_trace(struct udevice *dev, struct fw_rsc_trace *rsc,
			int offset, int avail)
{
	if (sizeof(*rsc) > avail) {
		debug("trace rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("trace rsc: da 0x%x, len 0x%x\n", rsc->da, rsc->len);

	return 0;
}

static int handle_devmem(struct udevice *dev, struct fw_rsc_devmem *rsc,
			 int offset, int avail)
{
	struct rproc_mem_entry *mapping;

	if (sizeof(*rsc) > avail) {
		debug("devmem rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("devmem rsc: pa 0x%x, da 0x%x, len 0x%x\n",
	      rsc->pa, rsc->da, rsc->len);

	rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->dma = rsc->pa;
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	rproc_add_res(dev, mapping);

	debug("mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
	      rsc->pa, rsc->da, rsc->len);

	return 0;
}

static int handle_carveout(struct udevice *dev, struct fw_rsc_carveout *rsc,
			   int offset, int avail)
{
	struct rproc_mem_entry *mapping;

	if (sizeof(*rsc) > avail) {
		debug("carveout rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("carveout rsc: da %x, pa %x, len %x, flags %x\n",
	      rsc->da, rsc->pa, rsc->len, rsc->flags);

	rsc->pa = (uintptr_t)rproc_alloc_mem(dev, rsc->len, 8);
	if (!rsc->pa) {
		debug
		    ("failed to allocate carveout rsc: da %x, pa %x, len %x, flags %x\n",
		     rsc->da, rsc->pa, rsc->len, rsc->flags);
		return -ENOMEM;
	}
	rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	/*
	 * We'll need this info later when we'll want to unmap
	 * everything (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the
	 * resource table, so we must maintain this info independently.
	 */
	mapping->dma = rsc->pa;
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	rproc_add_res(dev, mapping);

	debug("carveout mapped 0x%x to 0x%x\n", rsc->da, rsc->pa);

	return 0;
}

#define RPROC_PAGE_SHIFT 12
#define RPROC_PAGE_SIZE BIT(RPROC_PAGE_SHIFT)
#define RPROC_PAGE_ALIGN(x) (((x) + (RPROC_PAGE_SIZE - 1)) & ~(RPROC_PAGE_SIZE - 1))
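
/*
 * Quick worked example of the macro above (values chosen only for
 * illustration): with RPROC_PAGE_SIZE = 0x1000,
 * RPROC_PAGE_ALIGN(0x1234) = (0x1234 + 0xfff) & ~0xfff = 0x2233 & ~0xfff = 0x2000,
 * i.e. sizes are rounded up to the next 4 KiB page boundary.
 */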

static int alloc_vring(struct udevice *dev, struct fw_rsc_vdev *rsc, int i)
{
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	int size;
	int order;
	void *pa;

	debug("vdev rsc: vring%d: da %x, qsz %d, align %d\n",
	      i, vring->da, vring->num, vring->align);

	/*
	 * verify queue size and vring alignment are sane
	 */
	if (!vring->num || !vring->align) {
		debug("invalid qsz (%d) or alignment (%d)\n", vring->num,
		      vring->align);
		return -EINVAL;
	}

	/*
	 * actual size of vring (in bytes)
	 */
	size = RPROC_PAGE_ALIGN(vring_size(vring->num, vring->align));
	order = vring->align >> RPROC_PAGE_SHIFT;

	pa = rproc_alloc_mem(dev, size, order);
	if (!pa) {
		debug("failed to allocate vring rsc\n");
		return -ENOMEM;
	}
	debug("alloc_mem(%#x, %d): %p\n", size, order, pa);
	vring->da = (uintptr_t)pa;

	return !pa;
}

static int handle_vdev(struct udevice *dev, struct fw_rsc_vdev *rsc,
		       int offset, int avail)
{
	int i, ret;
	void *pa;

	/*
	 * make sure resource isn't truncated
	 */
	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
	    + rsc->config_len > avail) {
		debug("vdev rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		debug("vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
	      rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/*
	 * we currently support only two vrings per rvdev
	 */
	if (rsc->num_of_vrings > 2) {
		debug("too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	/*
	 * allocate the vrings
	 */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = alloc_vring(dev, rsc, i);
		if (ret)
			goto alloc_error;
	}

	pa = rproc_alloc_mem(dev, RPMSG_TOTAL_BUF_SPACE, 6);
	if (!pa) {
		debug("failed to allocate vdev rsc\n");
		return -ENOMEM;
	}
	debug("vring buffer alloc_mem(%#x, 6): %p\n", RPMSG_TOTAL_BUF_SPACE,
	      pa);

	return 0;

 alloc_error:
	return ret;
}

/*
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 */
static handle_resource_t loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = (handle_resource_t)handle_carveout,
	[RSC_DEVMEM] = (handle_resource_t)handle_devmem,
	[RSC_TRACE] = (handle_resource_t)handle_trace,
	[RSC_VDEV] = (handle_resource_t)handle_vdev,
};

/*
 * handle firmware resource entries before booting the remote processor
 */
static int handle_resources(struct udevice *dev, int len,
			    handle_resource_t handlers[RSC_LAST])
{
	handle_resource_t handler;
	int ret = 0, i;

	for (i = 0; i < rsc_table->num; i++) {
		int offset = rsc_table->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rsc_table + offset;
		int avail = len - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/*
		 * make sure table isn't truncated
		 */
		if (avail < 0) {
			debug("rsc table is truncated\n");
			return -EINVAL;
		}

		debug("rsc: type %d\n", hdr->type);

		if (hdr->type >= RSC_LAST) {
			debug("unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(dev, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}

static int
handle_intmem_to_l3_mapping(struct udevice *dev,
			    struct rproc_intmem_to_l3_mapping *l3_mapping)
{
	u32 i = 0;

	for (i = 0; i < l3_mapping->num_entries; i++) {
		struct l3_map *curr_map = &l3_mapping->mappings[i];
		struct rproc_mem_entry *mapping;

		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping)
			return -ENOMEM;

		mapping->dma = curr_map->l3_addr;
		mapping->da = curr_map->priv_addr;
		mapping->len = curr_map->len;
		rproc_add_res(dev, mapping);
	}

	return 0;
}

static Elf32_Shdr *rproc_find_table(unsigned int addr)
{
	Elf32_Ehdr *ehdr;	/* Elf header structure pointer */
	Elf32_Shdr *shdr;	/* Section header structure pointer */
	Elf32_Shdr sectionheader;
	int i;
	u8 *elf_data;
	char *name_table;
	struct resource_table *ptable;

	ehdr = (Elf32_Ehdr *)(uintptr_t)addr;
	elf_data = (u8 *)ehdr;
	shdr = (Elf32_Shdr *)(elf_data + ehdr->e_shoff);
	memcpy(&sectionheader, &shdr[ehdr->e_shstrndx], sizeof(sectionheader));
	name_table = (char *)(elf_data + sectionheader.sh_offset);

	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		memcpy(&sectionheader, shdr, sizeof(sectionheader));
		u32 size = sectionheader.sh_size;
		u32 offset = sectionheader.sh_offset;

		if (strcmp
		    (name_table + sectionheader.sh_name, ".resource_table"))
			continue;

		ptable = (struct resource_table *)(elf_data + offset);

		/*
		 * make sure table has at least the header
		 */
		if (sizeof(struct resource_table) > size) {
			debug("header-less resource table\n");
			return NULL;
		}

		/*
		 * we don't support any version beyond the first
		 */
		if (ptable->ver != 1) {
			debug("unsupported fw ver: %d\n", ptable->ver);
			return NULL;
		}

		/*
		 * make sure reserved bytes are zeroes
		 */
		if (ptable->reserved[0] || ptable->reserved[1]) {
			debug("non zero reserved bytes\n");
			return NULL;
		}

		/*
		 * make sure the offsets array isn't truncated
		 */
		if (ptable->num * sizeof(ptable->offset[0]) +
		    sizeof(struct resource_table) > size) {
			debug("resource table incomplete\n");
			return NULL;
		}

		return shdr;
	}

	return NULL;
}

struct resource_table *rproc_find_resource_table(struct udevice *dev,
						 unsigned int addr,
						 int *tablesz)
{
	Elf32_Shdr *shdr;
	Elf32_Shdr sectionheader;
	struct resource_table *ptable;
	u8 *elf_data = (u8 *)(uintptr_t)addr;

	shdr = rproc_find_table(addr);
	if (!shdr) {
		debug("%s: failed to get resource section header\n", __func__);
		return NULL;
	}

	memcpy(&sectionheader, shdr, sizeof(sectionheader));
	ptable = (struct resource_table *)(elf_data + sectionheader.sh_offset);
	if (tablesz)
		*tablesz = sectionheader.sh_size;

	return ptable;
}

unsigned long rproc_parse_resource_table(struct udevice *dev, struct rproc *cfg)
{
	struct resource_table *ptable = NULL;
	int tablesz;
	int ret;
	unsigned long addr;

	addr = cfg->load_addr;

	ptable = rproc_find_resource_table(dev, addr, &tablesz);
	if (!ptable) {
		debug("%s : failed to find resource table\n", __func__);
		return 0;
	}

	debug("%s : found resource table\n", __func__);
	rsc_table = kzalloc(tablesz, GFP_KERNEL);
	if (!rsc_table) {
		debug("resource table alloc failed!\n");
		return 0;
	}

	/*
	 * Copy the resource table into a local buffer before handling the
	 * resource table.
	 */
	memcpy(rsc_table, ptable, tablesz);
	if (cfg->intmem_to_l3_mapping)
		handle_intmem_to_l3_mapping(dev, cfg->intmem_to_l3_mapping);
	ret = handle_resources(dev, tablesz, loading_handlers);
	if (ret) {
		debug("handle_resources failed: %d\n", ret);
		return 0;
	}

	/*
	 * Instead of mimicking the kernel flow of copying the processed
	 * resource table into its post-ELF-load location in DDR, copy it
	 * back into its original location.
	 */
	memcpy(ptable, rsc_table, tablesz);
	free(rsc_table);
	rsc_table = NULL;

	return 1;
}
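
/*
 * Usage sketch for the function above (illustrative only; 'fw_addr' and
 * 'dev' are assumed to come from the calling driver, and struct rproc has
 * more fields in <remoteproc.h> than the two this file actually touches,
 * load_addr and intmem_to_l3_mapping):
 *
 *	struct rproc cfg = { .load_addr = fw_addr };
 *
 *	if (!rproc_parse_resource_table(dev, &cfg))
 *		return -EINVAL;
 */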