feat(drtm): add a few DRTM DMA protection APIs

Added DRTM DMA protection APIs, and called them during
the DLME launch and DRTM SMC handling.

Change-Id: I29e7238c04e2ca9f26600276c5c05bff5387789e
Signed-off-by: Manish V Badarkhe <Manish.Badarkhe@arm.com>
diff --git a/services/std_svc/drtm/drtm_dma_prot.c b/services/std_svc/drtm/drtm_dma_prot.c
index 9d014a0..48317fd 100644
--- a/services/std_svc/drtm/drtm_dma_prot.c
+++ b/services/std_svc/drtm/drtm_dma_prot.c
@@ -14,11 +14,32 @@
 #include <string.h>
 
 #include <common/debug.h>
-
+#include <drivers/arm/smmu_v3.h>
 #include "drtm_dma_prot.h"
+#include "drtm_main.h"
+#include "drtm_remediation.h"
 #include <plat/common/platform.h>
+#include <smccc_helpers.h>
 
/*
 * DMA protection state machine:
 *
 *  ________________________  LAUNCH success        ________________________
 * |        Initial         | -------------------> |      Prot engaged      |
 * |````````````````````````|                      |````````````````````````|
 * |  request.type == NONE  |                      |  request.type != NONE  |
 * |                        | <------------------- |                        |
 * `________________________'        UNPROTECT_MEM `________________________'
 *
 * Transitions that are not shown correspond to ABI calls that do not change
 * state and result in an error being returned to the caller.
 */
/* Currently-engaged DMA protection; PROTECT_NONE means none is engaged. */
static struct dma_prot active_prot = {
	.type = PROTECT_NONE,
};

/*
 * Version-independent type.  Only v1 of the dynamic-launch DMA protection
 * arguments exists at present, so alias it directly.
 */
typedef struct drtm_dl_dma_prot_args_v1 struct_drtm_dl_dma_prot_args;
+
+/*
  * This function checks that platform supports complete DMA protection.
  * and returns false - if the platform supports complete DMA protection.
  * and returns true - if the platform does not support complete DMA protection.
@@ -59,3 +80,184 @@
 
 	return must_init_fail;
 }
+
+/*
+ * Checks that the DMA protection arguments are valid and that the given
+ * protected regions are covered by DMA protection.
+ */
+enum drtm_retc drtm_dma_prot_check_args(const struct_drtm_dl_dma_prot_args *a,
+					int a_dma_prot_type,
+					drtm_mem_region_t p)
+{
+	switch ((enum dma_prot_type)a_dma_prot_type) {
+	case PROTECT_MEM_ALL:
+		if (a->dma_prot_table_paddr || a->dma_prot_table_size) {
+			ERROR("DRTM: invalid launch due to inconsistent"
+			      " DMA protection arguments\n");
+			return MEM_PROTECT_INVALID;
+		}
+		/*
+		 * Full DMA protection ought to ensure that the DLME and NWd
+		 * DCE regions are protected, no further checks required.
+		 */
+		return SUCCESS;
+
+	default:
+		ERROR("DRTM: invalid launch due to unsupported DMA protection type\n");
+		return MEM_PROTECT_INVALID;
+	}
+}
+
+enum drtm_retc drtm_dma_prot_engage(const struct_drtm_dl_dma_prot_args *a,
+				    int a_dma_prot_type)
+{
+	const uintptr_t *smmus;
+	size_t num_smmus = 0;
+
+	if (active_prot.type != PROTECT_NONE) {
+		ERROR("DRTM: launch denied as previous DMA protection"
+		      " is still engaged\n");
+		return DENIED;
+	}
+
+	if (a_dma_prot_type == PROTECT_NONE) {
+		return SUCCESS;
+		/* Only PROTECT_MEM_ALL is supported currently. */
+	} else if (a_dma_prot_type != PROTECT_MEM_ALL) {
+		ERROR("%s(): unimplemented DMA protection type\n", __func__);
+		panic();
+	}
+
+	/*
+	 * Engage SMMUs in accordance with the request we have previously received.
+	 * Only PROTECT_MEM_ALL is implemented currently.
+	 */
+	plat_enumerate_smmus(&smmus, &num_smmus);
+	for (const uintptr_t *smmu = smmus; smmu < smmus+num_smmus; smmu++) {
+		/*
+		 * TODO: Invalidate SMMU's Stage-1 and Stage-2 TLB entries.  This ensures
+		 * that any outstanding device transactions are completed, see Section
+		 * 3.21.1, specification IHI_0070_C_a for an approximate reference.
+		 */
+		int rc = smmuv3_ns_set_abort_all(*smmu);
+		if (rc != 0) {
+			ERROR("DRTM: SMMU at PA 0x%lx failed to engage DMA protection"
+			      " rc=%d\n", *smmu, rc);
+			return INTERNAL_ERROR;
+		}
+	}
+
+	/*
+	 * TODO: Restrict DMA from the GIC.
+	 *
+	 * Full DMA protection may be achieved as follows:
+	 *
+	 * With a GICv3:
+	 * - Set GICR_CTLR.EnableLPIs to 0, for each GICR;
+	 *   GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
+	 * - Set GITS_CTLR.Enabled to 0;
+	 *   GITS_CTLR.Quiescent == 1 must be the case before finishing.
+	 *
+	 * In addition, with a GICv4:
+	 * - Set GICR_VPENDBASER.Valid to 0, for each GICR;
+	 *   GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
+	 *
+	 * Alternatively, e.g. if some bit values cannot be changed at runtime,
+	 * this procedure should return an error if the LPI Pending and
+	 * Configuration tables overlap the regions being protected.
+	 */
+
+	active_prot.type = a_dma_prot_type;
+
+	return SUCCESS;
+}
+
+/*
+ * Undo what has previously been done in drtm_dma_prot_engage(), or enter
+ * remediation if it is not possible.
+ */
+enum drtm_retc drtm_dma_prot_disengage(void)
+{
+	const uintptr_t *smmus;
+	size_t num_smmus = 0;
+	const char *err_str = "cannot undo PROTECT_MEM_ALL SMMU config";
+
+	if (active_prot.type == PROTECT_NONE) {
+		return SUCCESS;
+		/* Only PROTECT_MEM_ALL is supported currently. */
+	} else if (active_prot.type != PROTECT_MEM_ALL) {
+		ERROR("%s(): unimplemented DMA protection type\n", __func__);
+		panic();
+	}
+
+	/*
+	 * For PROTECT_MEM_ALL, undo the SMMU configuration for "abort all" mode
+	 * done during engage().
+	 */
+	/* Simply enter remediation for now. */
+	(void)smmus;
+	(void)num_smmus;
+	drtm_enter_remediation(1ULL, err_str);
+
+	/* TODO: Undo GIC DMA restrictions. */
+
+	active_prot.type = PROTECT_NONE;
+
+	return SUCCESS;
+}
+
+uint64_t drtm_unprotect_mem(void *ctx)
+{
+	enum drtm_retc ret;
+
+	switch (active_prot.type) {
+	case PROTECT_NONE:
+		ERROR("DRTM: invalid UNPROTECT_MEM, no DMA protection has"
+		      " previously been engaged\n");
+		ret = DENIED;
+		break;
+
+	case PROTECT_MEM_ALL:
+		/*
+		 * UNPROTECT_MEM is a no-op for PROTECT_MEM_ALL:  DRTM must not touch
+		 * the NS SMMU as it is expected that the DLME has configured it.
+		 */
+		active_prot.type = PROTECT_NONE;
+
+		ret = SUCCESS;
+		break;
+
+	default:
+		ret = drtm_dma_prot_disengage();
+		break;
+	}
+
+	SMC_RET1(ctx, ret);
+}
+
/*
 * Serialise a memory-region descriptor table describing the currently
 * engaged DMA protection into 'dst', reporting the bytes written through
 * 'size_out'.
 *
 * NOTE(review): when no protection is engaged this returns without writing
 * to 'dst' or '*size_out' — callers must pre-initialise *size_out; confirm
 * against the call sites.
 */
void drtm_dma_prot_serialise_table(uint8_t *dst, size_t *size_out)
{
	if (active_prot.type == PROTECT_NONE) {
		return;
	} else if (active_prot.type != PROTECT_MEM_ALL) {
		/* Only PROTECT_MEM_ALL is supported currently. */
		ERROR("%s(): unimplemented DMA protection type\n", __func__);
		panic();
	}

	/*
	 * PROTECT_MEM_ALL is described by a single region starting at address
	 * 0 spanning UINT64_MAX pages; 0x3 is the region type code — see
	 * PAGES_AND_TYPE in the DRTM headers for its meaning.
	 */
	struct __packed descr_table_1 {
		drtm_memory_region_descriptor_table_t header;
		drtm_mem_region_t regions[1];
	} prot_table = {
		.header = {
			.revision = 1,
			/* Element count of 'regions', computed from its type. */
			.num_regions = sizeof(((struct descr_table_1 *)NULL)->regions) /
				sizeof(((struct descr_table_1 *)NULL)->regions[0])
		},
		.regions = {
			{.region_address = 0, PAGES_AND_TYPE(UINT64_MAX, 0x3)},
		}
	};

	memcpy(dst, &prot_table, sizeof(prot_table));
	*size_out = sizeof(prot_table);
}