xlat v2: Remove unused tlbi helper

The single-argument variant of xlat_arch_tlbi_va() isn't used, so its
implementation has been removed and xlat_arch_tlbi_va_regime() has been
renamed to xlat_arch_tlbi_va(). Callers now pass the translation regime
explicitly.
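
For reference, a caller now selects the regime explicitly instead of
relying on a wrapper that picked the current one. A minimal sketch of
the resulting pattern, based on the updated call site in
xlat_tables_utils.c below:

	/*
	 * Remove the mapping, then invalidate any cached copy of it in
	 * the TLBs for the regime owned by this translation context.
	 */
	*entry = INVALID_DESC;
	xlat_arch_tlbi_va(base_va, ctx->xlat_regime);

	/* Ensure completion of the invalidation. */
	xlat_arch_tlbi_va_sync();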

Change-Id: Ic118bed3fb68234748d86b2e9e95b25650289276
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 2eae79c..1e0a91d 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -55,18 +55,7 @@
 	return UPPER_ATTRS(XN);
 }
 
-void xlat_arch_tlbi_va(uintptr_t va)
-{
-	/*
-	 * Ensure the translation table write has drained into memory before
-	 * invalidating the TLB entry.
-	 */
-	dsbishst();
-
-	tlbimvaais(TLBI_ADDR(va));
-}
-
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime __unused)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime __unused)
 {
 	/*
 	 * Ensure the translation table write has drained into memory before
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 40bf08a..e427625 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -123,18 +123,7 @@
 	}
 }
 
-void xlat_arch_tlbi_va(uintptr_t va)
-{
-#if IMAGE_EL == 1
-	assert(IS_IN_EL(1));
-	xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
-#elif IMAGE_EL == 3
-	assert(IS_IN_EL(3));
-	xlat_arch_tlbi_va_regime(va, EL3_REGIME);
-#endif
-}
-
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime)
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
 {
 	/*
 	 * Ensure the translation table write has drained into memory before
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 30d7db8..104303a 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -310,7 +310,7 @@
 		if (action == ACTION_WRITE_BLOCK_ENTRY) {
 
 			table_base[table_idx] = INVALID_DESC;
-			xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
+			xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
 
 		} else if (action == ACTION_RECURSE_INTO_TABLE) {
 
@@ -326,8 +326,8 @@
 			 */
 			if (xlat_table_is_empty(ctx, subtable)) {
 				table_base[table_idx] = INVALID_DESC;
-				xlat_arch_tlbi_va_regime(table_idx_va,
-						ctx->xlat_regime);
+				xlat_arch_tlbi_va(table_idx_va,
+						  ctx->xlat_regime);
 			}
 
 		} else {
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 1c3a729..4661bb6 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -45,18 +45,14 @@
  * Invalidate all TLB entries that match the given virtual address. This
  * operation applies to all PEs in the same Inner Shareable domain as the PE
 * that executes this function. This function must be called for every
- * translation table entry that is modified.
- *
- * xlat_arch_tlbi_va() applies the invalidation to the exception level of the
- * current translation regime, whereas xlat_arch_tlbi_va_regime() applies it to
- * the given translation regime.
+ * translation table entry that is modified. It only affects the specified
+ * translation regime.
  *
  * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
  * pertaining to a higher exception level, e.g. invalidating EL3 entries from
  * S-EL1.
  */
-void xlat_arch_tlbi_va(uintptr_t va);
-void xlat_arch_tlbi_va_regime(uintptr_t va, int xlat_regime);
+void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime);
 
 /*
  * This function has to be called at the end of any code that uses the function
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index fa375e6..90a0a86 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -543,7 +543,7 @@
 		*entry = INVALID_DESC;
 
 		/* Invalidate any cached copy of this mapping in the TLBs. */
-		xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
 
 		/* Ensure completion of the invalidation. */
 		xlat_arch_tlbi_va_sync();