Add support to indicate size and end of assembly functions

In order for the symbol table in the ELF file to contain the size of
functions written in assembly, it is necessary to report it to the
assembler using the .size directive.

To fulfil the above requirements, this patch introduces an 'endfunc'
macro which contains the .endfunc and .size directives. It also adds
a .func directive to the 'func' assembler macro.

The .func and .endfunc directives have been used so that the assembler
can fail if endfunc is omitted.

Fixes ARM-Software/tf-issues#295

Change-Id: If8cb331b03d7f38fe7e3694d4de26f1075b278fc
Signed-off-by: Kévin Petit <kevin.petit@arm.com>
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index dc60102..0dbab1b 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -56,6 +56,7 @@
 	b.lo    flush_loop
 	dsb	sy
 	ret
+endfunc flush_dcache_range
 
 
 	/* ------------------------------------------
@@ -75,6 +76,7 @@
 	b.lo    inv_loop
 	dsb	sy
 	ret
+endfunc inv_dcache_range
 
 
 	/* ---------------------------------------------------------------
@@ -154,6 +156,7 @@
 	isb
 exit:
 	ret
+endfunc do_dcsw_op
 
 dcsw_loop_table:
 	dcsw_loop isw
@@ -163,10 +166,12 @@
 
 func dcsw_op_louis
 	dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_louis
 
 
 func dcsw_op_all
 	dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
+endfunc dcsw_op_all
 
 	/* ---------------------------------------------------------------
 	 *  Helper macro for data cache operations by set/way for the
@@ -189,6 +194,7 @@
 	 */
 func dcsw_op_level1
 	dcsw_op_level #(1 << LEVEL_SHIFT)
+endfunc dcsw_op_level1
 
 	/* ---------------------------------------------------------------
 	 * Data cache operations by set/way for level 2 cache
@@ -199,6 +205,7 @@
 	 */
 func dcsw_op_level2
 	dcsw_op_level #(2 << LEVEL_SHIFT)
+endfunc dcsw_op_level2
 
 	/* ---------------------------------------------------------------
 	 * Data cache operations by set/way for level 3 cache
@@ -209,3 +216,4 @@
 	 */
 func dcsw_op_level3
 	dcsw_op_level #(3 << LEVEL_SHIFT)
+endfunc dcsw_op_level3
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index f605bf4..5f80b59 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -53,6 +53,7 @@
 	mov	x1, #MPIDR_AFFLVL_SHIFT
 	lsl	x0, x0, x1
 	ret
+endfunc get_afflvl_shift
 
 func mpidr_mask_lower_afflvls
 	cmp	x1, #3
@@ -62,14 +63,17 @@
 	lsr	x0, x0, x2
 	lsl	x0, x0, x2
 	ret
+endfunc mpidr_mask_lower_afflvls
 
 
 func eret
 	eret
+endfunc eret
 
 
 func smc
 	smc	#0
+endfunc smc
 
 /* -----------------------------------------------------------------------
  * void zeromem16(void *mem, unsigned int length);
@@ -97,7 +101,9 @@
 	b.eq	z_end
 	strb	wzr, [x0], #1
 	b	z_loop1
-z_end:	ret
+z_end:
+	ret
+endfunc zeromem16
 
 
 /* --------------------------------------------------------------------------
@@ -129,7 +135,9 @@
 	strb	w3, [x0], #1
 	subs	x2, x2, #1
 	b.ne	m_loop1
-m_end:	ret
+m_end:
+	ret
+endfunc memcpy16
 
 /* ---------------------------------------------------------------------------
  * Disable the MMU at EL3
@@ -148,11 +156,13 @@
 	isb				// ensure MMU is off
 	mov	x0, #DCCISW		// DCache clean and invalidate
 	b	dcsw_op_all
+endfunc disable_mmu_el3
 
 
 func disable_mmu_icache_el3
 	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
 	b	do_disable_mmu
+endfunc disable_mmu_icache_el3
 
 /* ---------------------------------------------------------------------------
  * Enable the use of VFP at EL3
@@ -169,4 +179,5 @@
 	msr	cptr_el3, x0
 	isb
 	ret
+endfunc enable_vfp
 #endif