// SPDX-License-Identifier: GPL-2.0
/*
 * IPU remoteproc driver for various SoCs
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
 * Angela Stegmaier <angelabaker@ti.com>
 * Venkateswara Rao Mandela <venkat.mandela@ti.com>
 * Keerthy <j-keerthy@ti.com>
 */

#include <hang.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <elf.h>
#include <env.h>
#include <dm/of_access.h>
#include <fs_loader.h>
#include <remoteproc.h>
#include <errno.h>
#include <clk.h>
#include <reset.h>
#include <regmap.h>
#include <syscon.h>
#include <asm/io.h>
#include <misc.h>
#include <power-domain.h>
#include <timer.h>
#include <fs.h>
#include <spl.h>
#include <linux/bitmap.h>

#define IPU1_LOAD_ADDR		(0xa17ff000)
#define MAX_REMOTECORE_BIN_SIZE	(8 * 0x100000)

enum ipu_num {
	IPU1 = 0,
	IPU2,
	RPROC_END_ENUMS,
};

#define IPU2_LOAD_ADDR		(IPU1_LOAD_ADDR + MAX_REMOTECORE_BIN_SIZE)

#define PAGE_SHIFT		12
#define PAGESIZE_1M		0x0
#define PAGESIZE_64K		0x1
#define PAGESIZE_4K		0x2
#define PAGESIZE_16M		0x3
#define LE			0
#define BE			1
#define ELEMSIZE_8		0x0
#define ELEMSIZE_16		0x1
#define ELEMSIZE_32		0x2
#define MIXED_TLB		0x0
#define MIXED_CPU		0x1

#define PGT_SMALLPAGE_SIZE	0x00001000
#define PGT_LARGEPAGE_SIZE	0x00010000
#define PGT_SECTION_SIZE	0x00100000
#define PGT_SUPERSECTION_SIZE	0x01000000

#define PGT_L1_DESC_PAGE		0x00001
#define PGT_L1_DESC_SECTION		0x00002
#define PGT_L1_DESC_SUPERSECTION	0x40002

#define PGT_L1_DESC_PAGE_MASK		0xfffffC00
#define PGT_L1_DESC_SECTION_MASK	0xfff00000
#define PGT_L1_DESC_SUPERSECTION_MASK	0xff000000

#define PGT_L1_DESC_SMALLPAGE_INDEX_SHIFT	12
#define PGT_L1_DESC_LARGEPAGE_INDEX_SHIFT	16
#define PGT_L1_DESC_SECTION_INDEX_SHIFT		20
#define PGT_L1_DESC_SUPERSECTION_INDEX_SHIFT	24

#define PGT_L2_DESC_SMALLPAGE		0x02
#define PGT_L2_DESC_LARGEPAGE		0x01

#define PGT_L2_DESC_SMALLPAGE_MASK	0xfffff000
#define PGT_L2_DESC_LARGEPAGE_MASK	0xffff0000

/*
 * The memory for the page tables (256 KB per IPU) is placed just before
 * the carveout memories for the remote processors. 16 KB of memory is
 * needed for the L1 page table (4096 entries * 4 bytes per 1 MB section).
 * Any smaller page (64 KB or 4 KB) entries are supported through L2 page
 * tables (1 KB per table). The remaining 240 KB can hold 240 L2 page
 * tables. Any remoteproc firmware image requiring more than 240 L2 page
 * tables would need more memory to be reserved.
 */
#define PAGE_TABLE_SIZE_L1 (0x00004000)
#define PAGE_TABLE_SIZE_L2 (0x400)
#define MAX_NUM_L2_PAGE_TABLES (240)
#define PAGE_TABLE_SIZE_L2_TOTAL (MAX_NUM_L2_PAGE_TABLES * PAGE_TABLE_SIZE_L2)
#define PAGE_TABLE_SIZE (PAGE_TABLE_SIZE_L1 + (PAGE_TABLE_SIZE_L2_TOTAL))

/**
 * struct omap_rproc_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region
 * @bus_addr: bus address used to access the memory region
 * @dev_addr: device address of the memory region as seen by the remote core
 * @size: size of the memory region
 */
struct omap_rproc_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};

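/**
 * struct ipu_privdata - private data for one IPU remote processor core
 * @mem: internal memory region (L2 RAM) of the remote core
 * @mappings: list of rproc_mem_entry mappings registered via ipu_add_res(),
 *            used by da_to_pa() for device-to-physical address translation
 * @fw_name: name of the firmware image to load
 * @bootaddr: boot address of the remote core firmware
 * @id: core index (0 for IPU1, 1 for IPU2)
 * @rdev: associated remote processor device
 */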
struct ipu_privdata {
	struct omap_rproc_mem mem;
	struct list_head mappings;
	const char *fw_name;
	u32 bootaddr;
	int id;
	struct udevice *rdev;
};

typedef int (*handle_resource_t) (void *, int offset, int avail);

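/*
 * L1 and L2 page table pointers for the core currently being loaded.
 * They are pointed at cfg->page_table_addr in spl_pre_boot_core() before
 * the firmware image is parsed and loaded.
 */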
unsigned int *page_table_l1 = (unsigned int *)0x0;
unsigned int *page_table_l2 = (unsigned int *)0x0;

/*
 * Set maximum carveout size to 96 MB
 */
#define DRA7_RPROC_MAX_CO_SIZE (96 * 0x100000)

/*
 * These global variables are used for deriving the MMU page tables. They
 * are initialized for each core with the appropriate values. The length
 * of the array mem_bitmap is set as per a 96 MB carveout, which is the
 * maximum set aside in the current memory map.
 */
unsigned long mem_base;
unsigned long mem_size;
unsigned long mem_bitmap[BITS_TO_LONGS(DRA7_RPROC_MAX_CO_SIZE >> PAGE_SHIFT)];
unsigned long mem_count;

unsigned int pgtable_l2_map[MAX_NUM_L2_PAGE_TABLES];
unsigned int pgtable_l2_cnt;
void *ipu_alloc_mem(struct udevice *dev, unsigned long len, unsigned long align)
{
	unsigned long mask;
	unsigned long pageno;
	int count;

	count = ((len + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
	mask = (1 << align) - 1;
	pageno = bitmap_find_next_zero_area(mem_bitmap, mem_count, 0, count,
					    mask);
	debug("%s: count %d mask %#lx pageno %#lx\n", __func__, count, mask,
	      pageno);

	if (pageno >= mem_count) {
		debug("%s: %s Error allocating memory; please check carveout size\n",
		      __FILE__, __func__);
		return NULL;
	}

	bitmap_set(mem_bitmap, pageno, count);
	return (void *)(mem_base + (pageno << PAGE_SHIFT));
}

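/**
 * find_pagesz() - pick the largest MMU page size usable for a mapping
 * @virt: device (virtual) address of the mapping
 * @phys: physical address of the mapping
 * @len: remaining length of the mapping in bytes
 *
 * Chooses the largest page size (16 MB, 1 MB, 64 KB or 4 KB) for which both
 * addresses are sufficiently aligned and enough length remains.
 *
 * Return: one of the PAGESIZE_* values, or -1 if no page size fits.
 */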
int find_pagesz(unsigned int virt, unsigned int phys, unsigned int len)
{
	int pg_sz_ind = -1;
	unsigned int min_align = __ffs(virt);

	if (min_align > __ffs(phys))
		min_align = __ffs(phys);

	if (min_align >= PGT_L1_DESC_SUPERSECTION_INDEX_SHIFT &&
	    len >= 0x1000000) {
		pg_sz_ind = PAGESIZE_16M;
		goto ret_block;
	}
	if (min_align >= PGT_L1_DESC_SECTION_INDEX_SHIFT &&
	    len >= 0x100000) {
		pg_sz_ind = PAGESIZE_1M;
		goto ret_block;
	}
	if (min_align >= PGT_L1_DESC_LARGEPAGE_INDEX_SHIFT &&
	    len >= 0x10000) {
		pg_sz_ind = PAGESIZE_64K;
		goto ret_block;
	}
	if (min_align >= PGT_L1_DESC_SMALLPAGE_INDEX_SHIFT &&
	    len >= 0x1000) {
		pg_sz_ind = PAGESIZE_4K;
		goto ret_block;
	}

ret_block:
	return pg_sz_ind;
}

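/**
 * get_l2_pg_tbl_addr() - find or allocate the L2 page table for an address
 * @virt: device (virtual) address being mapped
 * @pg_tbl_addr: returns the address of the L2 page table to use
 *
 * Looks up the 1 MB section containing @virt in pgtable_l2_map. If that
 * section has no L2 table yet, the next free table from the preallocated
 * pool is assigned to it.
 *
 * Return: 0 on success, -1 if all MAX_NUM_L2_PAGE_TABLES tables are in use.
 */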
int get_l2_pg_tbl_addr(unsigned int virt, unsigned int *pg_tbl_addr)
{
	int ret = -1;
	int i = 0;
	int match_found = 0;
	unsigned int tag = (virt & PGT_L1_DESC_SECTION_MASK);

	*pg_tbl_addr = 0;
	for (i = 0; (i < pgtable_l2_cnt) && (match_found == 0); i++) {
		if (tag == pgtable_l2_map[i]) {
			*pg_tbl_addr = ((unsigned int)page_table_l2) +
				       (i * PAGE_TABLE_SIZE_L2);
			match_found = 1;
			ret = 0;
		}
	}

	if (match_found == 0 && i < MAX_NUM_L2_PAGE_TABLES) {
		pgtable_l2_map[i] = tag;
		pgtable_l2_cnt++;
		*pg_tbl_addr = ((unsigned int)page_table_l2) +
			       (i * PAGE_TABLE_SIZE_L2);
		ret = 0;
	}

	return ret;
}

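/**
 * config_l2_pagetable() - write L2 descriptors for a 64 KB or 4 KB mapping
 * @virt: device (virtual) address of the mapping
 * @phys: physical address of the mapping
 * @pg_sz: PAGESIZE_64K or PAGESIZE_4K
 * @pg_tbl_addr: address of the L2 page table to fill
 *
 * A 64 KB large-page descriptor is replicated across 16 consecutive L2
 * entries, as required by the ARM short-descriptor page table format.
 *
 * Return: 0 on success, -1 for an unsupported page size.
 */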
int config_l2_pagetable(unsigned int virt, unsigned int phys,
			unsigned int pg_sz, unsigned int pg_tbl_addr)
{
	int ret = -1;
	unsigned int desc = 0;
	int i = 0;
	unsigned int *pg_tbl = (unsigned int *)pg_tbl_addr;

	/*
	 * Pick bits 19:12 of the virtual address as the index
	 */
	unsigned int index = (virt & (~PGT_L1_DESC_SECTION_MASK)) >> PAGE_SHIFT;

	switch (pg_sz) {
	case PAGESIZE_64K:
		desc = (phys & PGT_L2_DESC_LARGEPAGE_MASK) | PGT_L2_DESC_LARGEPAGE;
		for (i = 0; i < 16; i++)
			pg_tbl[index + i] = desc;
		ret = 0;
		break;
	case PAGESIZE_4K:
		desc = (phys & PGT_L2_DESC_SMALLPAGE_MASK) | PGT_L2_DESC_SMALLPAGE;
		pg_tbl[index] = desc;
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

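/**
 * ipu_config_pagetable() - map a device address range in the IPU MMU
 * @dev: remote processor device
 * @virt: device (virtual) address of the region
 * @phys: physical address backing the region
 * @len: length of the region in bytes (must be a multiple of 4 KB)
 *
 * Walks the region and programs L1 (and, where needed, L2) descriptors,
 * always using the largest page size that the current alignment and the
 * remaining length allow.
 *
 * Return: @len on success, 0 on failure.
 */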
unsigned int ipu_config_pagetable(struct udevice *dev, unsigned int virt,
				  unsigned int phys, unsigned int len)
{
	unsigned int index;
	unsigned int l = len;
	unsigned int desc;
	int pg_sz = 0;
	int i = 0, err = 0;
	unsigned int pg_tbl_l2_addr = 0;
	unsigned int tmp_pgsz;

	if ((len & 0x0FFF) != 0)
		return 0;

	while (l > 0) {
		pg_sz = find_pagesz(virt, phys, l);
		index = virt >> PGT_L1_DESC_SECTION_INDEX_SHIFT;
		switch (pg_sz) {
		/*
		 * 16 MB supersection
		 */
		case PAGESIZE_16M:
			/*
			 * Program the next 16 descriptors
			 */
			desc = (phys & PGT_L1_DESC_SUPERSECTION_MASK) |
			       PGT_L1_DESC_SUPERSECTION;
			for (i = 0; i < 16; i++)
				page_table_l1[index + i] = desc;
			l -= PGT_SUPERSECTION_SIZE;
			phys += PGT_SUPERSECTION_SIZE;
			virt += PGT_SUPERSECTION_SIZE;
			break;
		/*
		 * 1 MB section
		 */
		case PAGESIZE_1M:
			desc = (phys & PGT_L1_DESC_SECTION_MASK) |
			       PGT_L1_DESC_SECTION;
			page_table_l1[index] = desc;
			l -= PGT_SECTION_SIZE;
			phys += PGT_SECTION_SIZE;
			virt += PGT_SECTION_SIZE;
			break;
		/*
		 * 64 KB large page or 4 KB small page
		 */
		case PAGESIZE_64K:
		case PAGESIZE_4K:
			if (pg_sz == PAGESIZE_64K)
				tmp_pgsz = 0x10000;
			else
				tmp_pgsz = 0x1000;

			err = get_l2_pg_tbl_addr(virt, &pg_tbl_l2_addr);
			if (err != 0) {
				debug("Unable to get level 2 PT address\n");
				hang();
			}
			err = config_l2_pagetable(virt, phys, pg_sz,
						  pg_tbl_l2_addr);
			desc = (pg_tbl_l2_addr & PGT_L1_DESC_PAGE_MASK) |
			       PGT_L1_DESC_PAGE;
			page_table_l1[index] = desc;
			l -= tmp_pgsz;
			phys += tmp_pgsz;
			virt += tmp_pgsz;
			break;
		case -1:
		default:
			return 0;
		}
	}

	return len;
}

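/**
 * da_to_pa() - translate a device address to a physical address
 * @dev: remote processor device
 * @da: device address used by the remote core
 *
 * Searches the mappings registered via ipu_add_res() for one covering @da.
 *
 * Return: the corresponding physical address, or 0 if no mapping covers @da.
 */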
int da_to_pa(struct udevice *dev, int da)
{
	struct rproc_mem_entry *maps = NULL;
	struct ipu_privdata *priv = dev_get_priv(dev);

	list_for_each_entry(maps, &priv->mappings, node) {
		if (da >= maps->da && da < (maps->da + maps->len))
			return maps->dma + (da - maps->da);
	}

	return 0;
}

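/**
 * ipu_config_mmu() - program and enable the MMU(s) of an IPU subsystem
 * @core_id: remote core identifier (IPU1 or IPU2)
 * @cfg: rproc configuration for the core
 *
 * Clears the page table area, points each IOMMU instance of the subsystem
 * at it, enables the fault interrupts and turns the MMU on with hardware
 * table walking enabled.
 *
 * Return: always 0.
 */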
u32 ipu_config_mmu(u32 core_id, struct rproc *cfg)
{
	u32 i = 0;
	u32 reg = 0;

	/*
	 * Clear the entire page table area before programming its
	 * address into the MMU
	 */
	memset((void *)cfg->page_table_addr, 0x00, PAGE_TABLE_SIZE);

	for (i = 0; i < cfg->num_iommus; i++) {
		u32 mmu_base = cfg->mmu_base_addr[i];

		__raw_writel((int)cfg->page_table_addr, mmu_base + 0x4c);
		reg = __raw_readl(mmu_base + 0x88);

		/*
		 * Enable bus-error back
		 */
		__raw_writel(reg | 0x1, mmu_base + 0x88);

		/*
		 * Enable the MMU IRQs during MMU programming for the
		 * late attach case. This is to allow the MMU fault to be
		 * detected by the kernel.
		 *
		 * MULTIHITFAULT | EMUMISS | TRANSLATIONFAULT | TABLEWALKFAULT
		 */
		__raw_writel(0x1E, mmu_base + 0x1c);

		/*
		 * TWLENABLE | MMUENABLE
		 */
		__raw_writel(0x6, mmu_base + 0x44);
	}

	return 0;
}

/**
 * enum ipu_mem - PRU core memory range identifiers
 */
enum ipu_mem {
	PRU_MEM_IRAM = 0,
	PRU_MEM_CTRL,
	PRU_MEM_DEBUG,
	PRU_MEM_MAX,
};

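/**
 * ipu_start() - start the remote processor
 * @dev: rproc device pointer
 *
 * Runs the optional peripheral configuration hook for the core and then
 * releases the module resets so that the remote core starts executing.
 *
 * Return: 0 if all went ok, else appropriate error value.
 */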
static int ipu_start(struct udevice *dev)
{
	struct ipu_privdata *priv;
	struct reset_ctl reset;
	struct rproc *cfg = NULL;
	int ret;

	priv = dev_get_priv(dev);

	cfg = rproc_cfg_arr[priv->id];
	if (cfg->config_peripherals)
		cfg->config_peripherals(priv->id, cfg);

	/*
	 * Start running the remote core
	 */
	ret = reset_get_by_index(dev, 0, &reset);
	if (ret < 0) {
		dev_err(dev, "%s: error getting reset index %d\n", __func__, 0);
		return ret;
	}

	ret = reset_deassert(&reset);
	if (ret < 0) {
		dev_err(dev, "%s: error deasserting reset %d\n", __func__, 0);
		return ret;
	}

	ret = reset_get_by_index(dev, 1, &reset);
	if (ret < 0) {
		dev_err(dev, "%s: error getting reset index %d\n", __func__, 1);
		return ret;
	}

	ret = reset_deassert(&reset);
	if (ret < 0) {
		dev_err(dev, "%s: error deasserting reset %d\n", __func__, 1);
		return ret;
	}

	return 0;
}

static int ipu_stop(struct udevice *dev)
{
	return 0;
}

/**
 * ipu_init() - Initialize the remote processor
 * @dev:	rproc device pointer
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int ipu_init(struct udevice *dev)
{
	return 0;
}

static int ipu_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
{
	struct ipu_privdata *priv = dev_get_priv(dev);

	list_add_tail(&mapping->node, &priv->mappings);
	return 0;
}

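/**
 * ipu_load() - load an ELF firmware image into the remote core's memory
 * @dev: rproc device pointer
 * @addr: address of the ELF image in memory
 * @size: size of the ELF image
 *
 * Copies every PT_LOAD program header to its load address, translating
 * device addresses to physical addresses through the registered mappings,
 * and flushes the written ranges from the cache.
 *
 * Return: always 0.
 */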
static int ipu_load(struct udevice *dev, ulong addr, ulong size)
{
	Elf32_Ehdr *ehdr; /* ELF header structure pointer */
	Elf32_Phdr *phdr; /* Program header structure pointer */
	Elf32_Phdr proghdr;
	int va;
	int pa;
	int i;

	ehdr = (Elf32_Ehdr *)addr;
	phdr = (Elf32_Phdr *)(addr + ehdr->e_phoff);
	/*
	 * Load each program header
	 */
	for (i = 0; i < ehdr->e_phnum; ++i) {
		memcpy(&proghdr, phdr, sizeof(Elf32_Phdr));

		if (proghdr.p_type != PT_LOAD) {
			++phdr;
			continue;
		}

		va = proghdr.p_paddr;
		pa = da_to_pa(dev, va);
		if (pa)
			proghdr.p_paddr = pa;

		void *dst = (void *)(uintptr_t)proghdr.p_paddr;
		void *src = (void *)addr + proghdr.p_offset;

		debug("Loading phdr %i to 0x%p (%i bytes)\n", i, dst,
		      proghdr.p_filesz);
		if (proghdr.p_filesz)
			memcpy(dst, src, proghdr.p_filesz);

		flush_cache((unsigned long)dst, proghdr.p_memsz);

		++phdr;
	}

	return 0;
}

static const struct dm_rproc_ops ipu_ops = {
	.init = ipu_init,
	.start = ipu_start,
	.stop = ipu_stop,
	.load = ipu_load,
	.add_res = ipu_add_res,
	.config_pagetable = ipu_config_pagetable,
	.alloc_mem = ipu_alloc_mem,
};

/*
 * If the remotecore binary expects any peripherals to be set up before it
 * has booted, configure them here.
 *
 * These functions are left empty by default as their operation is use-case
 * specific.
 */

u32 ipu1_config_peripherals(u32 core_id, struct rproc *cfg)
{
	return 0;
}

u32 ipu2_config_peripherals(u32 core_id, struct rproc *cfg)
{
	return 0;
}

struct rproc_intmem_to_l3_mapping ipu1_intmem_to_l3_mapping = {
	.num_entries = 1,
	.mappings = {
		/*
		 * L2 SRAM
		 */
		{
			.priv_addr = 0x55020000,
			.l3_addr = 0x58820000,
			.len = (64 * 1024)
		},
	}
};

struct rproc_intmem_to_l3_mapping ipu2_intmem_to_l3_mapping = {
	.num_entries = 1,
	.mappings = {
		/*
		 * L2 SRAM
		 */
		{
			.priv_addr = 0x55020000,
			.l3_addr = 0x55020000,
			.len = (64 * 1024)
		},
	}
};

struct rproc ipu1_config = {
	.num_iommus = 1,
	.mmu_base_addr = {0x58882000, 0},
	.load_addr = IPU1_LOAD_ADDR,
	.core_name = "IPU1",
	.firmware_name = "dra7-ipu1-fw.xem4",
	.config_mmu = ipu_config_mmu,
	.config_peripherals = ipu1_config_peripherals,
	.intmem_to_l3_mapping = &ipu1_intmem_to_l3_mapping
};

struct rproc ipu2_config = {
	.num_iommus = 1,
	.mmu_base_addr = {0x55082000, 0},
	.load_addr = IPU2_LOAD_ADDR,
	.core_name = "IPU2",
	.firmware_name = "dra7-ipu2-fw.xem4",
	.config_mmu = ipu_config_mmu,
	.config_peripherals = ipu2_config_peripherals,
	.intmem_to_l3_mapping = &ipu2_intmem_to_l3_mapping
};

struct rproc *rproc_cfg_arr[2] = {
	[IPU2] = &ipu2_config,
	[IPU1] = &ipu1_config,
};

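/**
 * spl_pre_boot_core() - load and prepare a remote core before releasing it
 * @dev: rproc device pointer
 * @core_id: index into rproc_cfg_arr (IPU1 or IPU2)
 *
 * Validates the ELF image at the configured load address, programs the MMU
 * when a resource table is present, initializes the carveout allocator and
 * the page table bookkeeping, and then processes the firmware's resource
 * table.
 *
 * Return: 0 on success, 1 on failure.
 */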
u32 spl_pre_boot_core(struct udevice *dev, u32 core_id)
{
	struct rproc *cfg = NULL;
	unsigned long load_elf_status = 0;
	int tablesz;

	cfg = rproc_cfg_arr[core_id];
	/*
	 * Check for a valid ELF image
	 */
	if (!valid_elf_image(cfg->load_addr))
		return 1;

	if (rproc_find_resource_table(dev, cfg->load_addr, &tablesz))
		cfg->has_rsc_table = 1;
	else
		cfg->has_rsc_table = 0;

	/*
	 * Configure the MMU
	 */
	if (cfg->config_mmu && cfg->has_rsc_table)
		cfg->config_mmu(core_id, cfg);

	/*
	 * Load the remote core. Fill the page table of the first (possibly
	 * only) IOMMU during ELF loading. Copy the page table to the second
	 * IOMMU before running the remote core.
	 */

	page_table_l1 = (unsigned int *)cfg->page_table_addr;
	page_table_l2 =
	    (unsigned int *)(cfg->page_table_addr + PAGE_TABLE_SIZE_L1);
	mem_base = cfg->cma_base;
	mem_size = cfg->cma_size;
	memset(mem_bitmap, 0x00, sizeof(mem_bitmap));
	mem_count = (cfg->cma_size >> PAGE_SHIFT);

	/*
	 * Clear variables used for level 2 page table allocation
	 */
	memset(pgtable_l2_map, 0x00, sizeof(pgtable_l2_map));
	pgtable_l2_cnt = 0;

	load_elf_status = rproc_parse_resource_table(dev, cfg);
	if (load_elf_status == 0) {
		debug("rproc_parse_resource_table returned error for core %s\n",
		      cfg->core_name);
		return 1;
	}

	flush_cache(cfg->page_table_addr, PAGE_TABLE_SIZE);

	return 0;
}

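/**
 * ipu_parse_mem_nodes() - look up a reserved-memory region by phandle
 * @dev: rproc device pointer
 * @name: property on the IPU node that holds the phandle
 * @privid: core index (unused here)
 * @sizep: returns the size of the referenced region
 *
 * Return: base address of the referenced memory region, or an error value
 * if the property or the node cannot be found.
 */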
static fdt_addr_t ipu_parse_mem_nodes(struct udevice *dev, char *name,
				      int privid, fdt_size_t *sizep)
{
	int ret;
	u32 sp;
	ofnode mem_node;

	ret = ofnode_read_u32(dev_ofnode(dev), name, &sp);
	if (ret) {
		dev_err(dev, "memory-region node fetch failed %d\n", ret);
		return ret;
	}

	mem_node = ofnode_get_by_phandle(sp);
	if (!ofnode_valid(mem_node))
		return -EINVAL;

	return ofnode_get_addr_size_index(mem_node, 0, sizep);
}

/**
 * ipu_probe() - Basic probe
 * @dev:	corresponding IPU remote processor device
 *
 * Return: 0 if all goes well, else appropriate error value.
 */
static int ipu_probe(struct udevice *dev)
{
	struct ipu_privdata *priv;
	struct rproc *cfg = NULL;
	struct reset_ctl reset;
	static const char *const ipu_mem_names[] = { "l2ram" };
	int ret;
	fdt_size_t sizep;

	priv = dev_get_priv(dev);

	priv->mem.bus_addr =
		devfdt_get_addr_size_name(dev, ipu_mem_names[0],
					  (fdt_addr_t *)&priv->mem.size);

	ret = reset_get_by_index(dev, 2, &reset);
	if (ret < 0) {
		dev_err(dev, "%s: error getting reset index %d\n", __func__, 2);
		return ret;
	}

	ret = reset_deassert(&reset);
	if (ret < 0) {
		dev_err(dev, "%s: error deasserting reset %d\n", __func__, 2);
		return ret;
	}

	if (priv->mem.bus_addr == FDT_ADDR_T_NONE) {
		dev_err(dev, "%s bus address not found\n", ipu_mem_names[0]);
		return -EINVAL;
	}
	priv->mem.cpu_addr = map_physmem(priv->mem.bus_addr,
					 priv->mem.size, MAP_NOCACHE);

	if (devfdt_get_addr(dev) == 0x58820000)
		priv->id = 0;
	else
		priv->id = 1;

	cfg = rproc_cfg_arr[priv->id];
	cfg->cma_base = ipu_parse_mem_nodes(dev, "memory-region", priv->id,
					    &sizep);
	cfg->cma_size = sizep;

	cfg->page_table_addr = ipu_parse_mem_nodes(dev, "pg-tbl", priv->id,
						   &sizep);

	dev_info(dev,
		 "ID %d memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
		 priv->id, ipu_mem_names[0], &priv->mem.bus_addr,
		 priv->mem.size, priv->mem.cpu_addr, priv->mem.dev_addr);

	INIT_LIST_HEAD(&priv->mappings);
	if (spl_pre_boot_core(dev, priv->id))
		return -EINVAL;

	return 0;
}

static const struct udevice_id ipu_ids[] = {
	{ .compatible = "ti,dra7-ipu" },
	{}
};

U_BOOT_DRIVER(ipu) = {
	.name = "ipu",
	.of_match = ipu_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &ipu_ops,
	.probe = ipu_probe,
	.priv_auto = sizeof(struct ipu_privdata),
};