Replace ASM signed tests with unsigned

The ge, lt, gt and le condition codes in assembly encode signed tests,
whereas hs, lo, hi and ls are their unsigned counterparts. Signed tests
should only be used when strictly necessary: applied to a logically
unsigned value, they invert the result of the comparison as soon as an
operand is large enough to have its most significant bit set, since the
value is then interpreted as negative. Offsets, addresses and usually
counters are unsigned values, and should be tested as such.
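
As an illustration (the values are hypothetical, not taken from the
code below), consider r0 = 0x80000000, i.e. an unsigned quantity with
its most significant bit set, where real code would use only one of
the two branches:

    cmp     r0, #4
    blt     skip    // signed: taken, r0 reads as a negative value
    blo     skip    // unsigned: not taken, 0x80000000 >= 4
skip:

The signed blt wrongly treats r0 as negative and branches, while the
unsigned blo correctly sees a large positive value.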

Replace the signed condition codes with their unsigned counterparts
wherever the signed semantics were unnecessary, so that the tests stay
correct over the full range of unsigned values instead of inverting
their result for large operands.
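
The signed/unsigned pairs are:

    ge (signed >=)  ->  hs (unsigned >=)
    lt (signed <)   ->  lo (unsigned <)
    gt (signed >)   ->  hi (unsigned >)
    le (signed <=)  ->  ls (unsigned <=)

In the decrement loops below, subs followed by bhs keeps looping while
the subtraction produced no borrow, so the loop still terminates on the
0 -> 0xffffffff wrap exactly where bge terminated on the sign change,
and additionally stays correct for counts above 0x7fffffff.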

Change-Id: I58b7e98d03e3a4476dfb45230311f296d224980a
Signed-off-by: Douglas Raillard <douglas.raillard@arm.com>
diff --git a/lib/aarch32/cache_helpers.S b/lib/aarch32/cache_helpers.S
index d0e5cd0..b17b903 100644
--- a/lib/aarch32/cache_helpers.S
+++ b/lib/aarch32/cache_helpers.S
@@ -118,7 +118,7 @@
 	mov	r12, r2, LSR r10	// extract cache type bits from clidr
 	and	r12, r12, #7   		// mask the bits for current cache only
 	cmp	r12, #2			// see what cache we have at this level
-	blt	level_done      	// no cache or only instruction cache at this level
+	blo	level_done      	// no cache or only instruction cache at this level
 
 	stcopr	r1, CSSELR		// select current cache level in csselr
 	isb				// isb to sych the new cssr&csidr
@@ -138,14 +138,14 @@
 
 	blx	r6
 	subs	r7, r7, #1              // decrement the set number
-	bge	loop3
+	bhs	loop3
 	subs	r9, r9, #1              // decrement the way number
-	bge	loop2
+	bhs	loop2
 level_done:
 	add	r1, r1, #2		// increment the cache number
 	cmp	r3, r1
 	dsb	sy			// ensure completion of previous cache maintenance instruction
-	bgt	loop1
+	bhi	loop1
 
 	mov	r6, #0
 	stcopr	r6, CSSELR		//select cache level 0 in csselr
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
index dc84799..5b17c21 100644
--- a/lib/aarch32/misc_helpers.S
+++ b/lib/aarch32/misc_helpers.S
@@ -170,7 +170,7 @@
 /* copy 4 bytes at a time */
 m_loop4:
 	cmp	r2, #4
-	blt	m_loop1
+	blo	m_loop1
 	ldr	r3, [r1], #4
 	str	r3, [r0], #4
 	sub	r2, r2, #4
diff --git a/lib/aarch64/cache_helpers.S b/lib/aarch64/cache_helpers.S
index 476b906..acafea7 100644
--- a/lib/aarch64/cache_helpers.S
+++ b/lib/aarch64/cache_helpers.S
@@ -119,7 +119,7 @@
 	lsr	x1, x0, x2		// extract cache type bits from clidr
 	and	x1, x1, #7		// mask the bits for current cache only
 	cmp	x1, #2			// see what cache we have at this level
-	b.lt	level_done		// nothing to do if no cache or icache
+	b.lo	level_done		// nothing to do if no cache or icache
 
 	msr	csselr_el1, x10		// select current cache level in csselr
 	isb				// isb to sych the new cssr&csidr
@@ -144,10 +144,10 @@
 	orr	w11, w9, w7		// combine cache, way and set number
 	dc	\_op, x11
 	subs	w7, w7, w17		// decrement set number
-	b.ge	loop3_\_op
+	b.hs	loop3_\_op
 
 	subs	x9, x9, x16		// decrement way number
-	b.ge	loop2_\_op
+	b.hs	loop2_\_op
 
 	b	level_done
 	.endm
@@ -155,7 +155,7 @@
 level_done:
 	add	x10, x10, #2		// increment cache number
 	cmp	x3, x10
-	b.gt    loop1
+	b.hi    loop1
 	msr	csselr_el1, xzr		// select cache level 0 in csselr
 	dsb	sy			// barrier to complete final cache operation
 	isb
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index c41978e..dc1b6e6 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -157,7 +157,7 @@
 1:
 	/* Check if we have reached end of list */
 	cmp	r4, r5
-	bge	error_exit
+	bhs	error_exit
 
 	/* load the midr from the cpu_ops */
 	ldr	r1, [r4], #CPU_OPS_SIZE