Increase type widths to satisfy width requirements

Usually, C has no problem implicitly converting types to larger bit
widths. MISRA rule 10.7, however, forbids relying on this: when a
composite expression is combined with an operand of a wider essential
type, the composite expression must first be explicitly cast to the
wider type. This resolves the following required rule:

    bl1/aarch64/bl1_context_mgmt.c:81:[MISRA C-2012 Rule 10.7 (required)]<None>
    The width of the composite expression "0U | ((mode & 3U) << 2U) | 1U |
    0x3c0U" (32 bits) is less that the right hand operand
    "18446744073709547519ULL" (64 bits).

This also resolves MISRA defects such as:

    bl2/aarch64/bl2arch_setup.c:18:[MISRA C-2012 Rule 12.2 (required)]
    In the expression "3U << 20", shifting more than 7 bits, the number
    of bits in the essential type of the left expression, "3U", is
    not allowed.
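
As a hedged sketch (illustrative values only): the essential type of
the constant 3U is 8 bits, so under rule 12.2 it may be shifted by at
most 7 bits, even though plain C would treat 3U as a 32-bit unsigned
int. Widening the constant first is one way to comply:

    #include <stdint.h>

    /* Violation: 20 exceeds 7, the largest shift allowed for the
     * 8-bit essential type of 3U. */
    static const uint32_t bad  = 3U << 20;

    /* Fix: give the constant a 32-bit essential type before shifting. */
    static const uint32_t good = (uint32_t)3U << 20;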

Further, MISRA requires that no shift exceed the width of the
essential type of its left operand. The definition of PAGE_SIZE was
(1U << 12), and the essential type of 1U is only 8 bits wide, so
shifting it by 12 is a violation. This caused about 50 issues. This
patch fixes the violation by changing the definition to (1UL << 12).
Since unsigned long is 32 bits on aarch32, this should not create any
issues there.
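
Sketched out of context, the change amounts to the following
(PAGE_SIZE_MASK is the symbol used by the misc_helpers.S hunk below;
its exact definition in the header may differ):

    #define PAGE_SIZE       (1UL << 12)        /* was (1U << 12) */
    #define PAGE_SIZE_MASK  (PAGE_SIZE - 1UL)  /* assumed definition */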

This patch also contains a fix for a build failure on the sun50i_a64
platform. Specifically, these MISRA fixes removed a single 'and'
instruction,

    92407e73        and     x19, x19, #0xffffffff

from the cm_setup_context function. This caused a relocation in
psci_cpus_on_start to require a linker-generated stub, which increased
the size of the .text section and caused a later alignment to cross a
page boundary, rounding up to the end of RAM before placing the .data
section. This section is of non-zero size and therefore causes a link
error.

The fix included in this patch reorders the functions at link time
without changing their ordering with respect to alignment.
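
For illustration only (hypothetical names; the real fix merely moves
existing definitions), the reordering amounts to swapping whole
function bodies in the source so the linker emits them in a different
order:

    /* Before: */
    void psci_helper_a(void) { /* ... */ }
    void psci_helper_b(void) { /* ... */ }

    /* After: bodies untouched; only their order in .text changes,
     * which changes which branches need linker-generated stubs. */
    void psci_helper_b(void) { /* ... */ }
    void psci_helper_a(void) { /* ... */ }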

Change-Id: I76b4b662c3d262296728a8b9aab7a33b02087f16
Signed-off-by: Jimmy Brisson <jimmy.brisson@arm.com>
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index d298f2b..0528916 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -496,7 +496,7 @@
 	/* Test if the limits are 4K aligned */
 #if ENABLE_ASSERTIONS
 	orr	x0, x0, x1
-	tst	x0, #(PAGE_SIZE - 1)
+	tst	x0, #(PAGE_SIZE_MASK)
 	ASM_ASSERT(eq)
 #endif
 	/*
@@ -504,7 +504,7 @@
 	 * Assume that this function is called within a page at the start of
 	 * fixup region.
 	 */
-	and	x2, x30, #~(PAGE_SIZE - 1)
+	and	x2, x30, #~(PAGE_SIZE_MASK)
 	sub	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */
 
 	adrp	x1, __GOT_START__
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index e5434eb..b460731 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -710,7 +710,7 @@
 	assert(ctx != NULL);
 
 	/* Ensure that the bit position is a valid one */
-	assert(((1U << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
+	assert(((1UL << bit_pos) & SCR_VALID_BIT_MASK) != 0U);
 
 	/* Ensure that the 'value' is only a bit wide */
 	assert(value <= 1U);
@@ -721,7 +721,7 @@
 	 */
 	state = get_el3state_ctx(ctx);
 	scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
-	scr_el3 &= ~(1U << bit_pos);
+	scr_el3 &= ~(1UL << bit_pos);
 	scr_el3 |= (u_register_t)value << bit_pos;
 	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
 }
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 6d81377..9f8a08a 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -663,7 +663,8 @@
 
 		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;
 
-		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+		ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
+				   DISABLE_ALL_EXCEPTIONS);
 	} else {
 
 		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
@@ -675,7 +676,8 @@
 		 */
 		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
 
-		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
+		ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
+				       daif);
 	}
 
 	return PSCI_E_SUCCESS;
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 30babc6..9fae7e9 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -472,7 +472,7 @@
 	/*
 	 * Sanity checks.
 	 */
-	for (size_t i = 0U; i < pages_count; ++i) {
+	for (unsigned int i = 0U; i < pages_count; ++i) {
 		const uint64_t *entry;
 		uint64_t desc, attr_index;
 		unsigned int level;
@@ -497,8 +497,8 @@
 			(level != XLAT_TABLE_LEVEL_MAX)) {
 			WARN("Address 0x%lx is not mapped at the right granularity.\n",
 			     base_va);
-			WARN("Granularity is 0x%llx, should be 0x%x.\n",
-			     (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+			WARN("Granularity is 0x%lx, should be 0x%lx.\n",
+			     XLAT_BLOCK_SIZE(level), PAGE_SIZE);
 			return -EINVAL;
 		}