Add support to indicate size and end of assembly functions

In order for the symbol table in the ELF file to contain the size of
functions written in assembly, it is necessary to report it to the
assembler using the .size directive.

To fulfil the above requirements, this patch introduces an 'endfunc'
macro which contains the .endfunc and .size directives. It also adds
a .func directive to the 'func' assembler macro.

The .func/.endfunc directives are used so that the assembler will fail
if endfunc is omitted.

Fixes ARM-Software/tf-issues#295

Change-Id: If8cb331b03d7f38fe7e3694d4de26f1075b278fc
Signed-off-by: Kévin Petit <kevin.petit@arm.com>
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index dc60102..0dbab1b 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -56,6 +56,7 @@
 	b.lo    flush_loop
 	dsb	sy
 	ret
+endfunc flush_dcache_range
 
 
 	/* ------------------------------------------
@@ -75,6 +76,7 @@
 	b.lo    inv_loop
 	dsb	sy
 	ret
+endfunc inv_dcache_range
 
 
 	/* ---------------------------------------------------------------
@@ -154,6 +156,7 @@
 	isb
 exit:
 	ret
+endfunc do_dcsw_op
 
 dcsw_loop_table:
 	dcsw_loop isw
@@ -163,10 +166,12 @@
 
 func dcsw_op_louis
 	dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
 
 
 func dcsw_op_all
 	dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
 
 	/* ---------------------------------------------------------------
 	 *  Helper macro for data cache operations by set/way for the
@@ -189,6 +194,7 @@
 	 */
 func dcsw_op_level1
 	dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
 
 	/* ---------------------------------------------------------------
 	 * Data cache operations by set/way for level 2 cache
@@ -199,6 +205,7 @@
 	 */
 func dcsw_op_level2
 	dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
 
 	/* ---------------------------------------------------------------
 	 * Data cache operations by set/way for level 3 cache
@@ -209,3 +216,4 @@
 	 */
 func dcsw_op_level3
 	dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index f605bf4..5f80b59 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -53,6 +53,7 @@
 	mov	x1, #MPIDR_AFFLVL_SHIFT
 	lsl	x0, x0, x1
 	ret
+endfunc get_afflvl_shift
 
 func mpidr_mask_lower_afflvls
 	cmp	x1, #3
@@ -62,14 +63,17 @@
 	lsr	x0, x0, x2
 	lsl	x0, x0, x2
 	ret
+endfunc mpidr_mask_lower_afflvls
 
 
 func eret
 	eret
+endfunc eret
 
 
 func smc
 	smc	#0
+endfunc smc
 
 /* -----------------------------------------------------------------------
  * void zeromem16(void *mem, unsigned int length);
@@ -97,7 +101,9 @@
 	b.eq	z_end
 	strb	wzr, [x0], #1
 	b	z_loop1
-z_end:	ret
+z_end:
+	ret
+endfunc zeromem16
 
 
 /* --------------------------------------------------------------------------
@@ -129,7 +135,9 @@
 	strb	w3, [x0], #1
 	subs	x2, x2, #1
 	b.ne	m_loop1
-m_end:	ret
+m_end:
+	ret
+endfunc memcpy16
 
 /* ---------------------------------------------------------------------------
  * Disable the MMU at EL3
@@ -148,11 +156,13 @@
 	isb				// ensure MMU is off
 	mov	x0, #DCCISW		// DCache clean and invalidate
 	b	dcsw_op_all
+endfunc disable_mmu_el3
 
 
 func disable_mmu_icache_el3
 	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
 	b	do_disable_mmu
+endfunc disable_mmu_icache_el3
 
 /* ---------------------------------------------------------------------------
  * Enable the use of VFP at EL3
@@ -169,4 +179,5 @@
 	msr	cptr_el3, x0
 	isb
 	ret
+endfunc enable_vfp
 #endif
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index 58a64a6..ee53058 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -49,6 +49,7 @@
 	 * ---------------------------------------------
 	 */
 	b	dcsw_op_louis
+endfunc aem_generic_core_pwr_dwn
 
 
 func aem_generic_cluster_pwr_dwn
@@ -67,6 +68,7 @@
 	 */
 	mov	x0, #DCCISW
 	b	dcsw_op_all
+endfunc aem_generic_cluster_pwr_dwn
 
 	/* ---------------------------------------------
 	 * This function provides cpu specific
@@ -80,6 +82,7 @@
 func aem_generic_cpu_reg_dump
 	mov	x6, #0 /* no registers to report */
 	ret
+endfunc aem_generic_cpu_reg_dump
 
 
 /* cpu_ops for Base AEM FVP */
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index 188f3c1..e149e6e 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -44,6 +44,7 @@
 	msr	sctlr_el3, x1
 	isb
 	ret
+endfunc cortex_a53_disable_dcache
 
 	/* ---------------------------------------------
 	 * Disable intra-cluster coherency
@@ -56,6 +57,7 @@
 	isb
 	dsb	sy
 	ret
+endfunc cortex_a53_disable_smp
 
 func cortex_a53_reset_func
 	/* ---------------------------------------------
@@ -72,6 +74,7 @@
 	isb
 skip_smp_setup:
 	ret
+endfunc cortex_a53_reset_func
 
 func cortex_a53_core_pwr_dwn
 	mov	x18, x30
@@ -95,6 +98,7 @@
 	 */
 	mov	x30, x18
 	b	cortex_a53_disable_smp
+endfunc cortex_a53_core_pwr_dwn
 
 func cortex_a53_cluster_pwr_dwn
 	mov	x18, x30
@@ -131,6 +135,7 @@
 	 */
 	mov	x30, x18
 	b	cortex_a53_disable_smp
+endfunc cortex_a53_cluster_pwr_dwn
 
 	/* ---------------------------------------------
 	 * This function provides cortex_a53 specific
@@ -149,5 +154,6 @@
 	adr	x6, cortex_a53_regs
 	mrs	x8, CPUECTLR_EL1
 	ret
+endfunc cortex_a53_cpu_reg_dump
 
 declare_cpu_ops cortex_a53, CORTEX_A53_MIDR
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index eb6c736..05799d6 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -45,6 +45,7 @@
 	msr	sctlr_el3, x1
 	isb
 	ret
+endfunc cortex_a57_disable_dcache
 
 	/* ---------------------------------------------
 	 * Disable all types of L2 prefetches.
@@ -60,6 +61,7 @@
 	isb
 	dsb	ish
 	ret
+endfunc cortex_a57_disable_l2_prefetch
 
 	/* ---------------------------------------------
 	 * Disable intra-cluster coherency
@@ -70,6 +72,7 @@
 	bic	x0, x0, #CPUECTLR_SMP_BIT
 	msr	CPUECTLR_EL1, x0
 	ret
+endfunc cortex_a57_disable_smp
 
 	/* ---------------------------------------------
 	 * Disable debug interfaces
@@ -81,6 +84,7 @@
 	isb
 	dsb	sy
 	ret
+endfunc cortex_a57_disable_ext_debug
 
 	/* --------------------------------------------------
 	 * Errata Workaround for Cortex A57 Errata #806969.
@@ -113,6 +117,7 @@
 	msr	CPUACTLR_EL1, x1
 skip_806969:
 	ret
+endfunc errata_a57_806969_wa
 
 
 	/* ---------------------------------------------------
@@ -146,6 +151,7 @@
 	msr	CPUACTLR_EL1, x1
 skip_813420:
 	ret
+endfunc errata_a57_813420_wa
 
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
@@ -188,6 +194,7 @@
 skip_smp_setup:
 	isb
 	ret	x19
+endfunc cortex_a57_reset_func
 
 	/* ----------------------------------------------------
 	 * The CPU Ops core power down function for Cortex-A57.
@@ -227,6 +234,7 @@
 	 */
 	mov	x30, x18
 	b	cortex_a57_disable_ext_debug
+endfunc cortex_a57_core_pwr_dwn
 
 	/* -------------------------------------------------------
 	 * The CPU Ops cluster power down function for Cortex-A57.
@@ -280,6 +288,7 @@
 	 */
 	mov	x30, x18
 	b	cortex_a57_disable_ext_debug
+endfunc cortex_a57_cluster_pwr_dwn
 
 	/* ---------------------------------------------
 	 * This function provides cortex_a57 specific
@@ -298,6 +307,7 @@
 	adr	x6, cortex_a57_regs
 	mrs	x8, CPUECTLR_EL1
 	ret
+endfunc cortex_a57_cpu_reg_dump
 
 
 declare_cpu_ops cortex_a57, CORTEX_A57_MIDR
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 2d054fc..eb37f2c 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -44,6 +44,7 @@
 	msr	sctlr_el3, x1
 	isb
 	ret
+endfunc cortex_a72_disable_dcache
 
 	/* ---------------------------------------------
 	 * Disable all types of L2 prefetches.
@@ -58,6 +59,7 @@
 	msr	CPUECTLR_EL1, x0
 	isb
 	ret
+endfunc cortex_a72_disable_l2_prefetch
 
 	/* ---------------------------------------------
 	 * Disable the load-store hardware prefetcher.
@@ -70,6 +72,7 @@
 	isb
 	dsb	ish
 	ret
+endfunc cortex_a72_disable_hw_prefetcher
 
 	/* ---------------------------------------------
 	 * Disable intra-cluster coherency
@@ -80,6 +83,7 @@
 	bic	x0, x0, #CPUECTLR_SMP_BIT
 	msr	CPUECTLR_EL1, x0
 	ret
+endfunc cortex_a72_disable_smp
 
 	/* ---------------------------------------------
 	 * Disable debug interfaces
@@ -91,6 +95,7 @@
 	isb
 	dsb	sy
 	ret
+endfunc cortex_a72_disable_ext_debug
 
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
@@ -106,6 +111,7 @@
 	msr	CPUECTLR_EL1, x0
 	isb
 	ret
+endfunc cortex_a72_reset_func
 
 	/* ----------------------------------------------------
 	 * The CPU Ops core power down function for Cortex-A72.
@@ -151,6 +157,7 @@
 	 */
 	mov	x30, x18
 	b	cortex_a72_disable_ext_debug
+endfunc cortex_a72_core_pwr_dwn
 
 	/* -------------------------------------------------------
 	 * The CPU Ops cluster power down function for Cortex-A72.
@@ -211,6 +218,7 @@
 	 */
 	mov	x30, x18
 	b	cortex_a72_disable_ext_debug
+endfunc cortex_a72_cluster_pwr_dwn
 
 	/* ---------------------------------------------
 	 * This function provides cortex_a72 specific
@@ -229,6 +237,7 @@
 	adr	x6, cortex_a72_regs
 	mrs	x8, CPUECTLR_EL1
 	ret
+endfunc cortex_a72_cpu_reg_dump
 
 
 declare_cpu_ops cortex_a72, CORTEX_A72_MIDR
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 24c283a..e8a1392 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -67,6 +67,7 @@
 	br	x2
 1:
 	ret
+endfunc reset_handler
 
 #endif /* IMAGE_BL1 || IMAGE_BL31 */
 
@@ -88,6 +89,7 @@
 	/* Get the cpu_ops core_pwr_dwn handler */
 	ldr	x1, [x0, #CPU_PWR_DWN_CORE]
 	br	x1
+endfunc prepare_core_pwr_dwn
 
 	/*
 	 * The prepare cluster power down function for all platforms.  After
@@ -106,6 +108,7 @@
 	/* Get the cpu_ops cluster_pwr_dwn handler */
 	ldr	x1, [x0, #CPU_PWR_DWN_CLUSTER]
 	br	x1
+endfunc prepare_cluster_pwr_dwn
 
 
 	/*
@@ -129,6 +132,7 @@
 	mov x30, x10
 1:
 	ret
+endfunc init_cpu_ops
 #endif /* IMAGE_BL31 */
 
 #if IMAGE_BL31 && CRASH_REPORTING
@@ -153,6 +157,7 @@
 1:
 	mov	x30, x16
 	ret
+endfunc do_cpu_reg_dump
 #endif
 
 	/*
@@ -197,6 +202,7 @@
 	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
 error_exit:
 	ret
+endfunc get_cpu_ops_ptr
 
 #if DEBUG
 	/*
@@ -221,5 +227,6 @@
 	bl	asm_print_str
 1:
 	ret	x5
+endfunc print_revision_warning
 #endif
 
diff --git a/lib/locks/exclusive/spinlock.S b/lib/locks/exclusive/spinlock.S
index 5eae2b0..772f14e 100644
--- a/lib/locks/exclusive/spinlock.S
+++ b/lib/locks/exclusive/spinlock.S
@@ -43,8 +43,10 @@
 	stxr	w1, w2, [x0]
 	cbnz	w1, l2
 	ret
+endfunc spin_lock
 
 
 func spin_unlock
 	stlr	wzr, [x0]
 	ret
+endfunc spin_unlock
diff --git a/lib/semihosting/aarch64/semihosting_call.S b/lib/semihosting/aarch64/semihosting_call.S
index e6a9675..9fa8141 100644
--- a/lib/semihosting/aarch64/semihosting_call.S
+++ b/lib/semihosting/aarch64/semihosting_call.S
@@ -35,3 +35,4 @@
 func semihosting_call
 	hlt	#0xf000
 	ret
+endfunc semihosting_call