// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

/* for now: just dummy functions to satisfy the linker */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Flush range from all levels of d-cache/unified-cache.
 * Affects the range [start, start + size - 1].
 */
__weak void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

/*
 * Default implementation:
 * do a range flush for the entire range
 */
__weak void flush_dcache_all(void)
{
	flush_cache(0, ~0);
}
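
/*
 * Note: flush_cache() affects [start, start + size - 1], so the (0, ~0)
 * arguments above request a flush of the entire 32-bit address space.
 * Platforms with set/way cache maintenance typically override this weak
 * default with a cheaper whole-cache flush.
 */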

/*
 * Default implementation of enable_caches()
 * Real implementation should be in platform code
 */
__weak void enable_caches(void)
{
	puts("WARNING: Caches not enabled\n");
}

__weak void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}

__weak void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}
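
/*
 * A platform or CPU core overrides the weak stubs above simply by
 * providing strong definitions with the same prototypes, typically
 * cleaning and/or invalidating each cache line in [start, stop) by MVA.
 */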

int check_cache_range(unsigned long start, unsigned long stop)
{
	int ok = 1;

	if (start & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (!ok) {
		warn_non_spl("CACHE: Misaligned operation at range [%08lx, %08lx]\n",
			     start, stop);
	}

	return ok;
}
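
/*
 * Rationale: cache maintenance operates on whole lines, so a buffer that
 * does not begin and end on CONFIG_SYS_CACHELINE_SIZE boundaries shares
 * its first or last line with unrelated data, and invalidating such a
 * range could discard that data.  Callers can use the return value to
 * refuse the operation.
 */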

#ifdef CONFIG_SYS_NONCACHED_MEMORY
/*
 * Reserve one MMU section worth of address space below the malloc() area that
 * will be mapped uncached.
 */
static unsigned long noncached_start;
static unsigned long noncached_end;
static unsigned long noncached_next;

void noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
	mmu_set_region_dcache_behaviour(noncached_start, size, DCACHE_OFF);
#endif
}
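
/*
 * Resulting layout (higher addresses first):
 *
 *	mem_malloc_start	start of the malloc() arena
 *	noncached_end		section-aligned end of the uncached window
 *	noncached_start		uncached allocations grow upward from here
 */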

phys_addr_t noncached_alloc(size_t size, size_t align)
{
	phys_addr_t next = ALIGN(noncached_next, align);

	if (next >= noncached_end || (noncached_end - next) < size)
		return 0;

	debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
	noncached_next = next + size;

	return next;
}
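
/*
 * Typical (hypothetical) use from a driver that needs a coherent buffer,
 * e.g. for DMA descriptors:
 *
 *	struct desc *ring = (void *)noncached_alloc(sizeof(*ring) * n, 32);
 *	if (!ring)
 *		return -ENOMEM;
 *
 * This is a simple bump allocator; allocations are never freed.
 */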
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
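/*
 * CP15 c15 operations are implementation-defined.  On cores that provide
 * it, the MCR below invalidates the entire L2 cache; the isb() that
 * follows stalls until the operation takes effect.
 */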
void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		: : "r" (val) : "cc");
	isb();
}
#endif

int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}

__weak int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);
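	/*
	 * The 64 KiB rounding comfortably exceeds the translation-table
	 * alignment the architecture requires (16 KiB for the ARMv7
	 * short-descriptor first-level table), so the table placed at
	 * gd->relocaddr is always suitably aligned.
	 */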

	gd->arch.tlb_addr = gd->relocaddr;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/*
	 * Record the allocated tlb_addr in case gd->arch.tlb_addr is later
	 * overwritten with a location within secure RAM.
	 */
	gd->arch.tlb_allocated = gd->arch.tlb_addr;
#endif
#endif

	return 0;
}