AMU: Add assembler helper functions for aarch32
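
Add AArch32 assembler helpers for reading and writing the group 0 and
group 1 Activity Monitor counters and for programming the group 1 event
type registers, for use by the AArch32 AMU driver when ENABLE_AMU=1.

As an illustration only (assuming the ldcopr16/stcopr16 helper macros
from include/lib/aarch32/asm_macros.S), a table entry such as
"ldcopr16 r0, r1, AMEVCNTR10" assembles to roughly:

	mrrc	p15, 0, r0, r1, c4	/* AMEVCNTR10: low word -> r0, high word -> r1 */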

Change-Id: Id6dfe885a63561b1d2649521bd020367b96ae1af
Signed-off-by: Joel Hutton <joel.hutton@arm.com>
diff --git a/bl32/sp_min/sp_min.mk b/bl32/sp_min/sp_min.mk
index 67a1981..145820e 100644
--- a/bl32/sp_min/sp_min.mk
+++ b/bl32/sp_min/sp_min.mk
@@ -23,7 +23,8 @@
 endif
 
 ifeq (${ENABLE_AMU}, 1)
-BL32_SOURCES		+=	lib/extensions/amu/aarch32/amu.c
+BL32_SOURCES		+=	lib/extensions/amu/aarch32/amu.c\
+				lib/extensions/amu/aarch32/amu_helpers.S
 endif
 
 ifeq (${WORKAROUND_CVE_2017_5715},1)
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 134d534..630d3f2 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -558,4 +558,40 @@
 #define AMEVTYPER02	p15, 0, c13, c6, 2
 #define AMEVTYPER03	p15, 0, c13, c6, 3
 
+/* Activity Monitor Group 1 Event Counter Registers */
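+/*
+ * These counters are 64-bit and accessed through MCRR/MRRC, so each is
+ * encoded as (coproc, opc1, CRm). The 32-bit event type registers further
+ * below are accessed through MCR/MRC and are encoded as
+ * (coproc, opc1, CRn, CRm, opc2).
+ */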
+#define AMEVCNTR10	p15, 0, c4
+#define AMEVCNTR11	p15, 1, c4
+#define AMEVCNTR12	p15, 2, c4
+#define AMEVCNTR13	p15, 3, c4
+#define AMEVCNTR14	p15, 4, c4
+#define AMEVCNTR15	p15, 5, c4
+#define AMEVCNTR16	p15, 6, c4
+#define AMEVCNTR17	p15, 7, c4
+#define AMEVCNTR18	p15, 0, c5
+#define AMEVCNTR19	p15, 1, c5
+#define AMEVCNTR1A	p15, 2, c5
+#define AMEVCNTR1B	p15, 3, c5
+#define AMEVCNTR1C	p15, 4, c5
+#define AMEVCNTR1D	p15, 5, c5
+#define AMEVCNTR1E	p15, 6, c5
+#define AMEVCNTR1F	p15, 7, c5
+
+/* Activity Monitor Group 1 Event Type Registers */
+#define AMEVTYPER10	p15, 0, c13, c14, 0
+#define AMEVTYPER11	p15, 0, c13, c14, 1
+#define AMEVTYPER12	p15, 0, c13, c14, 2
+#define AMEVTYPER13	p15, 0, c13, c14, 3
+#define AMEVTYPER14	p15, 0, c13, c14, 4
+#define AMEVTYPER15	p15, 0, c13, c14, 5
+#define AMEVTYPER16	p15, 0, c13, c14, 6
+#define AMEVTYPER17	p15, 0, c13, c14, 7
+#define AMEVTYPER18	p15, 0, c13, c15, 0
+#define AMEVTYPER19	p15, 0, c13, c15, 1
+#define AMEVTYPER1A	p15, 0, c13, c15, 2
+#define AMEVTYPER1B	p15, 0, c13, c15, 3
+#define AMEVTYPER1C	p15, 0, c13, c15, 4
+#define AMEVTYPER1D	p15, 0, c13, c15, 5
+#define AMEVTYPER1E	p15, 0, c13, c15, 6
+#define AMEVTYPER1F	p15, 0, c13, c15, 7
+
 #endif /* __ARCH_H__ */
diff --git a/lib/extensions/amu/aarch32/amu_helpers.S b/lib/extensions/amu/aarch32/amu_helpers.S
new file mode 100644
index 0000000..84dca04
--- /dev/null
+++ b/lib/extensions/amu/aarch32/amu_helpers.S
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert_macros.S>
+#include <asm_macros.S>
+
+	.globl	amu_group0_cnt_read_internal
+	.globl	amu_group0_cnt_write_internal
+	.globl	amu_group1_cnt_read_internal
+	.globl	amu_group1_cnt_write_internal
+	.globl	amu_group1_set_evtype_internal
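+
+/*
+ * The helpers below are intended to be called from the AArch32 AMU driver,
+ * lib/extensions/amu/aarch32/amu.c. Coprocessor register names cannot be
+ * selected at run time, so each helper branches into a table of
+ * access/return instruction pairs indexed by the counter number.
+ */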
+
+/*
+ * uint64_t amu_group0_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `r0` and `r1`.
+ */
+func amu_group0_cnt_read_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be between [0, 3] */
+	mov	r1, r0
+	lsr	r1, r1, #2
+	cmp	r1, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r1, 1f
+	lsl	r0, r0, #3	/* each ldcopr16/bx lr sequence is 8 bytes */
+	add	r1, r1, r0
+	bx	r1
+1:
+	ldcopr16	r0, r1, AMEVCNTR00	/* index 0 */
+	bx		lr
+	ldcopr16	r0, r1, AMEVCNTR01	/* index 1 */
+	bx		lr
+	ldcopr16	r0, r1, AMEVCNTR02	/* index 2 */
+	bx		lr
+	ldcopr16	r0, r1, AMEVCNTR03	/* index 3 */
+	bx		lr
+endfunc amu_group0_cnt_read_internal
+
+/*
+ * void amu_group0_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ * Per the AAPCS, `idx` is passed in `r0` and the 64-bit `val` in
+ * `r2` and `r3`, leaving `r1` free as a scratch register.
+ */
+func amu_group0_cnt_write_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be between [0, 3] */
+	mov	r1, r0
+	lsr	r1, r1, #2
+	cmp	r1, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of stcopr16/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r1, 1f
+	lsl	r0, r0, #3	/* each stcopr16/bx lr sequence is 8 bytes */
+	add	r1, r1, r0
+	bx	r1
+
+1:
+	stcopr16	r2, r3, AMEVCNTR00	/* index 0 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR01	/* index 1 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR02	/* index 2 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR03	/* index 3 */
+	bx		lr
+endfunc amu_group0_cnt_write_internal
+
+/*
+ * uint64_t amu_group1_cnt_read_internal(int idx);
+ *
+ * Given `idx`, read the corresponding AMU counter
+ * and return it in `r0` and `r1`.
+ */
+func amu_group1_cnt_read_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be between [0, 15] */
+	mov	r2, r0
+	lsr	r2, r2, #4
+	cmp	r2, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of ldcopr16/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r1, 1f
+	lsl	r0, r0, #3	/* each ldcopr16/bx lr sequence is 8 bytes */
+	add	r1, r1, r0
+	bx	r1
+
+1:
+	ldcopr16	r0, r1, AMEVCNTR10	/* index 0 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR11	/* index 1 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR12	/* index 2 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR13	/* index 3 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR14	/* index 4 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR15	/* index 5 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR16	/* index 6 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR17	/* index 7 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR18	/* index 8 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR19	/* index 9 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR1A	/* index 10 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR1B	/* index 11 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR1C	/* index 12 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR1D	/* index 13 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR1E	/* index 14 */
+	bx	lr
+	ldcopr16	r0, r1, AMEVCNTR1F	/* index 15 */
+	bx	lr
+endfunc amu_group1_cnt_read_internal
+
+/*
+ * void amu_group1_cnt_write_internal(int idx, uint64_t val);
+ *
+ * Given `idx`, write `val` to the corresponding AMU counter.
+ * Per the AAPCS, `idx` is passed in `r0` and the 64-bit `val` in
+ * `r2` and `r3`, leaving `r1` free as a scratch register.
+ */
+func amu_group1_cnt_write_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be between [0, 15] */
+	mov	r1, r0
+	lsr	r1, r1, #4
+	cmp	r1, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of stcopr16/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r1, 1f
+	lsl	r0, r0, #3	/* each stcopr16/bx lr sequence is 8 bytes */
+	add	r1, r1, r0
+	bx	r1
+
+1:
+	stcopr16	r2, r3, AMEVCNTR10	/* index 0 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR11	/* index 1 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR12	/* index 2 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR13	/* index 3 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR14	/* index 4 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR15	/* index 5 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR16	/* index 6 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR17	/* index 7 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR18	/* index 8 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR19	/* index 9 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR1A	/* index 10 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR1B	/* index 11 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR1C	/* index 12 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR1D	/* index 13 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR1E	/* index 14 */
+	bx		lr
+	stcopr16	r2, r3, AMEVCNTR1F	/* index 15 */
+	bx		lr
+endfunc amu_group1_cnt_write_internal
+
+/*
+ * void amu_group1_set_evtype_internal(int idx, unsigned int val);
+ *
+ * Program the AMU event type register indexed by `idx`
+ * with the value `val`. `idx` is passed in `r0` and `val` in `r1`.
+ */
+func amu_group1_set_evtype_internal
+#if ENABLE_ASSERTIONS
+	/* `idx` should be between [0, 15] */
+	mov	r2, r0
+	lsr	r2, r2, #4
+	cmp	r2, #0
+	ASM_ASSERT(eq)
+
+	/* val should be between [0, 65535] */
+	mov	r2, r1
+	lsr	r2, r2, #16
+	cmp	r2, #0
+	ASM_ASSERT(eq)
+#endif
+
+	/*
+	 * Given `idx` calculate address of stcopr/bx lr instruction pair
+	 * in the table below.
+	 */
+	adr	r2, 1f
+	lsl	r0, r0, #3	/* each stcopr/bx lr sequence is 8 bytes */
+	add	r2, r2, r0
+	bx	r2
+
+1:
+	stcopr	r1, AMEVTYPER10 /* index 0 */
+	bx	lr
+	stcopr	r1, AMEVTYPER11 /* index 1 */
+	bx	lr
+	stcopr	r1, AMEVTYPER12 /* index 2 */
+	bx	lr
+	stcopr	r1, AMEVTYPER13 /* index 3 */
+	bx	lr
+	stcopr	r1, AMEVTYPER14 /* index 4 */
+	bx	lr
+	stcopr	r1, AMEVTYPER15 /* index 5 */
+	bx	lr
+	stcopr	r1, AMEVTYPER16 /* index 6 */
+	bx	lr
+	stcopr	r1, AMEVTYPER17 /* index 7 */
+	bx	lr
+	stcopr	r1, AMEVTYPER18 /* index 8 */
+	bx	lr
+	stcopr	r1, AMEVTYPER19 /* index 9 */
+	bx	lr
+	stcopr	r1, AMEVTYPER1A /* index 10 */
+	bx	lr
+	stcopr	r1, AMEVTYPER1B /* index 11 */
+	bx	lr
+	stcopr	r1, AMEVTYPER1C /* index 12 */
+	bx	lr
+	stcopr	r1, AMEVTYPER1D /* index 13 */
+	bx	lr
+	stcopr	r1, AMEVTYPER1E /* index 14 */
+	bx	lr
+	stcopr	r1, AMEVTYPER1F /* index 15 */
+	bx	lr
+endfunc amu_group1_set_evtype_internal