armv8: move low-level assembly functions into function-sections
TPL builds today don't need to call into firmware or set up the MMU
(if this ever changes, it should be made selectable through a config
option), but they currently include the support code for both anyway.
By moving these unused low-level functions into separate
function-sections, the linker can garbage-collect the unused
sections.
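
For C code the compiler already provides this granularity when built
with -ffunction-sections; hand-written assembly only gets it through
explicit .pushsection/.popsection directives, which is what the diff
below adds. As a minimal stand-alone sketch of the mechanism (not
U-Boot code; the file and function names are made up), assuming the
final link runs with --gc-sections as U-Boot's ARM builds already do:

/* build: gcc -ffunction-sections -Wl,--gc-sections demo.c */
#include <stdio.h>

/* emitted into its own .text.used_helper input section */
void used_helper(void)
{
	puts("kept: referenced from main()");
}

/* emitted into .text.unused_helper; nothing references it, so the
 * linker discards the whole section during --gc-sections */
void unused_helper(void)
{
	puts("dropped at link time");
}

int main(void)
{
	used_helper();
	return 0;
}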
Note that, if DM support is enabled, there will be a call to the
cache-flushing code from alloc_priv(...) in drivers/core/device.c.
This then adds 52 bytes of binary size (an increase from 20589 to
20641 bytes) compared to removing this code completely.
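
The path that keeps this code alive looks roughly like the following
(a paraphrased sketch of alloc_priv(), not the verbatim source; the
header list and flag handling are approximations). On armv8,
flush_dcache_range() is backed by __asm_flush_dcache_range, so that
one section remains referenced and survives the garbage collection:

#include <common.h>
#include <malloc.h>
#include <dm/device.h>

/* paraphrased from drivers/core/device.c */
static void *alloc_priv(int size, uint flags)
{
	void *priv;

	if (flags & DM_FLAG_ALLOC_PRIV_DMA) {
		/* DMA-capable private data must be cache-line aligned
		 * and flushed so the device sees consistent memory */
		priv = memalign(ARCH_DMA_MINALIGN, size);
		if (priv) {
			memset(priv, '\0', size);
			/* resolves to __asm_flush_dcache_range on armv8,
			 * which accounts for the 52 bytes noted above */
			flush_dcache_range((ulong)priv, (ulong)priv + size);
		}
	} else {
		priv = calloc(1, size);
	}

	return priv;
}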
Even for a feature-rich TPL (including DM support, as on the RK3368),
this saves significantly more than 10% of the TPL binary size.
Signed-off-by: Philipp Tomsich <philipp.tomsich@theobroma-systems.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
diff --git a/arch/arm/cpu/armv8/cache.S b/arch/arm/cpu/armv8/cache.S
index 7cba308..ea845d1 100644
--- a/arch/arm/cpu/armv8/cache.S
+++ b/arch/arm/cpu/armv8/cache.S
@@ -22,6 +22,7 @@
* x1: 0 clean & invalidate, 1 invalidate only
* x2~x9: clobbered
*/
+.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
lsl x12, x0, #1
msr csselr_el1, x12 /* select cache level */
@@ -58,6 +59,7 @@
ret
ENDPROC(__asm_dcache_level)
+.popsection
/*
* void __asm_flush_dcache_all(int invalidate_only)
@@ -66,6 +68,7 @@
*
* flush or invalidate all data cache by SET/WAY.
*/
+.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
mov x1, x0
dsb sy
@@ -102,16 +105,21 @@
finished:
ret
ENDPROC(__asm_dcache_all)
+.popsection
+.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
mov x0, #0
b __asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
+.popsection
+.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
mov x0, #0x1
b __asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
+.popsection
/*
* void __asm_flush_dcache_range(start, end)
@@ -121,6 +129,7 @@
* x0: start address
* x1: end address
*/
+.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
mrs x3, ctr_el0
lsr x3, x3, #16
@@ -138,6 +147,7 @@
dsb sy
ret
ENDPROC(__asm_flush_dcache_range)
+.popsection
/*
* void __asm_invalidate_dcache_range(start, end)
*
@@ -146,6 +156,7 @@
* x0: start address
* x1: end address
*/
+.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
mrs x3, ctr_el0
ubfm x3, x3, #16, #19
@@ -162,41 +173,51 @@
dsb sy
ret
ENDPROC(__asm_invalidate_dcache_range)
+.popsection
/*
* void __asm_invalidate_icache_all(void)
*
* invalidate all tlb entries.
*/
+.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
ic ialluis
isb sy
ret
ENDPROC(__asm_invalidate_icache_all)
+.popsection
+.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
mov x0, #0 /* return status as success */
ret
ENDPROC(__asm_invalidate_l3_dcache)
.weak __asm_invalidate_l3_dcache
+.popsection
+.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
mov x0, #0 /* return status as success */
ret
ENDPROC(__asm_flush_l3_dcache)
.weak __asm_flush_l3_dcache
+.popsection
+.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
mov x0, #0 /* return status as success */
ret
ENDPROC(__asm_invalidate_l3_icache)
.weak __asm_invalidate_l3_icache
+.popsection
/*
* void __asm_switch_ttbr(ulong new_ttbr)
*
* Safely switches to a new page table.
*/
+.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
/* x2 = SCTLR (alive throghout the function) */
switch_el x4, 3f, 2f, 1f
@@ -244,3 +265,4 @@
ret x3
ENDPROC(__asm_switch_ttbr)
+.popsection