/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H

#include <asm/system.h>

#ifndef CONFIG_ARM64

/*
 * Invalidate the L2 cache using a co-processor instruction
 */
#ifdef CONFIG_SYS_THUMB_BUILD
void invalidate_l2_cache(void);
#else
static inline void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		: : "r" (val) : "cc");
	isb();
}
#endif

int check_cache_range(unsigned long start, unsigned long stop);

void l2_cache_enable(void);
void l2_cache_disable(void);
void set_section_dcache(int section, enum dcache_option option);

void arm_init_before_mmu(void);
void arm_init_domains(void);
void cpu_cache_initialization(void);
void dram_bank_mmu_setup(int bank);

#endif

/*
 * CONFIG_SYS_CACHELINE_SIZE is expected to hold the line size of the largest
 * data cache relevant to DMA operations. In some cases this is larger than
 * the L1 line size, but the larger value is still safe to use for alignment.
 */
#define ARCH_DMA_MINALIGN	CONFIG_SYS_CACHELINE_SIZE
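
/*
 * Usage sketch (hypothetical example): buffers handed to DMA engines are
 * typically both aligned and padded to ARCH_DMA_MINALIGN so that cache
 * flush/invalidate operations on the buffer never touch data that shares a
 * cache line with it. A driver might declare a receive buffer along these
 * lines:
 *
 *	static unsigned char rx_buf[1024]
 *		__attribute__((aligned(ARCH_DMA_MINALIGN)));
 *
 * The name rx_buf and the 1024-byte size are illustrative assumptions, not
 * something defined by this header.
 */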

#endif /* _ASM_CACHE_H */