/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 */

#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H

#include <asm/system.h>

#ifndef CONFIG_ARM64

/*
 * Invalidate L2 Cache using co-proc instruction
 */
#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
void invalidate_l2_cache(void);
#else
static inline void invalidate_l2_cache(void)
{
	unsigned int val = 0;

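	/*
	 * CP15 c15 operations are implementation defined; this encoding is
	 * the Marvell-specific operation that invalidates the L2 cache, as
	 * the comment above notes (consult the SoC TRM for the exact
	 * semantics).
	 */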
	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		: : "r" (val) : "cc");
	isb();
}
#endif

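/*
 * Descriptive note: check_cache_range() is expected to verify that a
 * [start, stop) maintenance range is aligned to the data cache line size
 * before an operation is attempted (see arch/arm/lib/cache.c).
 */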
int check_cache_range(unsigned long start, unsigned long stop);

void l2_cache_enable(void);
void l2_cache_disable(void);
void set_section_dcache(int section, enum dcache_option option);

void arm_init_before_mmu(void);
void arm_init_domains(void);
void cpu_cache_initialization(void);
void dram_bank_mmu_setup(int bank);

#endif /* !CONFIG_ARM64 */

/*
 * The value of the largest data cache relevant to DMA operations shall be set
 * for us in CONFIG_SYS_CACHELINE_SIZE. In some cases this may be a larger
 * value than found in the L1 cache but this is OK to use in terms of
 * alignment.
 */
#define ARCH_DMA_MINALIGN	CONFIG_SYS_CACHELINE_SIZE
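
/*
 * Illustrative example, assuming the common ALIGN() and __aligned() helpers
 * are available: a driver owning a DMA buffer would typically align both
 * the buffer address and its length to ARCH_DMA_MINALIGN, so that cache
 * maintenance on the buffer never clips or overlaps neighbouring data:
 *
 *	static u8 dma_buf[ALIGN(BUF_SIZE, ARCH_DMA_MINALIGN)]
 *		__aligned(ARCH_DMA_MINALIGN);
 *
 * where BUF_SIZE is a hypothetical payload size.
 */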

#endif /* _ASM_CACHE_H */