spd: trusty: fix defects flagged by MISRA scan

Main Fixes:

Use int32_t to replace int [Rule 4.6]

Added explicit unsigned suffixes (e.g. 0U) to integer constants so that
  they are compatible with the operations in which they are used [Rule 10.1]

Force operands of an operator to the same type category [Rule 10.4]

Fixed if statement conditionals to be essentially Boolean [Rule 14.4]

Voided non-C-library functions whose return values are not used
[Rule 17.7]

Change-Id: I98caa330c371757eb2dfb9438448cb99115ed907
Signed-off-by: Anthony Zhou <anzhou@nvidia.com>
diff --git a/services/spd/trusty/smcall.h b/services/spd/trusty/smcall.h
index 742c8c4..9c1c38c 100644
--- a/services/spd/trusty/smcall.h
+++ b/services/spd/trusty/smcall.h
@@ -7,69 +7,68 @@
 #ifndef SMCALL_H
 #define SMCALL_H
 
-#define SMC_NUM_ENTITIES	64
-#define SMC_NUM_ARGS		4
-#define SMC_NUM_PARAMS		(SMC_NUM_ARGS - 1)
+#define SMC_NUM_ENTITIES	64U
+#define SMC_NUM_ARGS		4U
+#define SMC_NUM_PARAMS		(SMC_NUM_ARGS - 1U)
 
-#define SMC_IS_FASTCALL(smc_nr)	((smc_nr) & 0x80000000)
-#define SMC_IS_SMC64(smc_nr)	((smc_nr) & 0x40000000)
-#define SMC_ENTITY(smc_nr)	(((smc_nr) & 0x3F000000) >> 24)
-#define SMC_FUNCTION(smc_nr)	((smc_nr) & 0x0000FFFF)
+#define SMC_IS_FASTCALL(smc_nr)	((smc_nr) & 0x80000000U)
+#define SMC_IS_SMC64(smc_nr)	((smc_nr) & 0x40000000U)
+#define SMC_ENTITY(smc_nr)	(((smc_nr) & 0x3F000000U) >> 24U)
+#define SMC_FUNCTION(smc_nr)	((smc_nr) & 0x0000FFFFU)
 
 #define SMC_NR(entity, fn, fastcall, smc64)			\
-		(((((unsigned int) (fastcall)) & 0x1) << 31) |	\
-		(((smc64) & 0x1) << 30) |			\
-		(((entity) & 0x3F) << 24) |			\
-		((fn) & 0xFFFF)					\
-		)
+		(((((uint32_t)(fastcall)) & 0x1U) << 31U) |	\
+		(((smc64) & 0x1U) << 30U) |			\
+		(((entity) & 0x3FU) << 24U) |			\
+		((fn) & 0xFFFFU))
 
-#define SMC_FASTCALL_NR(entity, fn)	SMC_NR((entity), (fn), 1, 0)
-#define SMC_FASTCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 1, 1)
-#define SMC_YIELDCALL_NR(entity, fn)	SMC_NR((entity), (fn), 0, 0)
-#define SMC_YIELDCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 0, 1)
+#define SMC_FASTCALL_NR(entity, fn)	SMC_NR((entity), (fn), 1U, 0U)
+#define SMC_FASTCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 1U, 1U)
+#define SMC_YIELDCALL_NR(entity, fn)	SMC_NR((entity), (fn), 0U, 0U)
+#define SMC_YIELDCALL64_NR(entity, fn)	SMC_NR((entity), (fn), 0U, 1U)
 
-#define	SMC_ENTITY_ARCH			0	/* ARM Architecture calls */
-#define	SMC_ENTITY_CPU			1	/* CPU Service calls */
-#define	SMC_ENTITY_SIP			2	/* SIP Service calls */
-#define	SMC_ENTITY_OEM			3	/* OEM Service calls */
-#define	SMC_ENTITY_STD			4	/* Standard Service calls */
-#define	SMC_ENTITY_RESERVED		5	/* Reserved for future use */
-#define	SMC_ENTITY_TRUSTED_APP		48	/* Trusted Application calls */
-#define	SMC_ENTITY_TRUSTED_OS		50	/* Trusted OS calls */
-#define SMC_ENTITY_LOGGING              51	/* Used for secure -> nonsecure logging */
-#define	SMC_ENTITY_SECURE_MONITOR	60	/* Trusted OS calls internal to secure monitor */
+#define	SMC_ENTITY_ARCH			0U	/* ARM Architecture calls */
+#define	SMC_ENTITY_CPU			1U	/* CPU Service calls */
+#define	SMC_ENTITY_SIP			2U	/* SIP Service calls */
+#define	SMC_ENTITY_OEM			3U	/* OEM Service calls */
+#define	SMC_ENTITY_STD			4U	/* Standard Service calls */
+#define	SMC_ENTITY_RESERVED		5U	/* Reserved for future use */
+#define	SMC_ENTITY_TRUSTED_APP		48U	/* Trusted Application calls */
+#define	SMC_ENTITY_TRUSTED_OS		50U	/* Trusted OS calls */
+#define SMC_ENTITY_LOGGING              51U	/* Used for secure -> nonsecure logging */
+#define	SMC_ENTITY_SECURE_MONITOR	60U	/* Trusted OS calls internal to secure monitor */
 
 /* FC = Fast call, YC = Yielding call */
-#define SMC_YC_RESTART_LAST	SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 0)
-#define SMC_YC_NOP		SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 1)
+#define SMC_YC_RESTART_LAST	SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 0U)
+#define SMC_YC_NOP		SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 1U)
 
 /*
  * Return from secure os to non-secure os with return value in r1
  */
-#define SMC_YC_NS_RETURN	SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 0)
+#define SMC_YC_NS_RETURN	SMC_YIELDCALL_NR  (SMC_ENTITY_SECURE_MONITOR, 0U)
 
-#define SMC_FC_RESERVED		SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0)
-#define SMC_FC_FIQ_EXIT		SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1)
-#define SMC_FC_REQUEST_FIQ	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2)
-#define SMC_FC_GET_NEXT_IRQ	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3)
-#define SMC_FC_FIQ_ENTER	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4)
+#define SMC_FC_RESERVED		SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 0U)
+#define SMC_FC_FIQ_EXIT		SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 1U)
+#define SMC_FC_REQUEST_FIQ	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 2U)
+#define SMC_FC_GET_NEXT_IRQ	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 3U)
+#define SMC_FC_FIQ_ENTER	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 4U)
 
-#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5)
-#define SMC_FC64_GET_FIQ_REGS	SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6)
+#define SMC_FC64_SET_FIQ_HANDLER SMC_FASTCALL64_NR(SMC_ENTITY_SECURE_MONITOR, 5U)
+#define SMC_FC64_GET_FIQ_REGS	SMC_FASTCALL64_NR (SMC_ENTITY_SECURE_MONITOR, 6U)
 
-#define SMC_FC_CPU_SUSPEND	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7)
-#define SMC_FC_CPU_RESUME	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8)
+#define SMC_FC_CPU_SUSPEND	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 7U)
+#define SMC_FC_CPU_RESUME	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 8U)
 
-#define SMC_FC_AARCH_SWITCH	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9)
-#define SMC_FC_GET_VERSION_STR	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10)
+#define SMC_FC_AARCH_SWITCH	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 9U)
+#define SMC_FC_GET_VERSION_STR	SMC_FASTCALL_NR (SMC_ENTITY_SECURE_MONITOR, 10U)
 
 /* Trusted OS entity calls */
-#define SMC_YC_VIRTIO_GET_DESCR	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20)
-#define SMC_YC_VIRTIO_START	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21)
-#define SMC_YC_VIRTIO_STOP	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22)
+#define SMC_YC_VIRTIO_GET_DESCR	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 20U)
+#define SMC_YC_VIRTIO_START	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 21U)
+#define SMC_YC_VIRTIO_STOP	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 22U)
 
-#define SMC_YC_VDEV_RESET	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23)
-#define SMC_YC_VDEV_KICK_VQ	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24)
-#define SMC_YC_SET_ROT_PARAMS	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 65535)
+#define SMC_YC_VDEV_RESET	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 23U)
+#define SMC_YC_VDEV_KICK_VQ	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 24U)
+#define SMC_YC_SET_ROT_PARAMS	  SMC_YIELDCALL_NR(SMC_ENTITY_TRUSTED_OS, 65535U)
 
 #endif /* SMCALL_H */
diff --git a/services/spd/trusty/trusty.c b/services/spd/trusty/trusty.c
index c9d73f0..0305143 100644
--- a/services/spd/trusty/trusty.c
+++ b/services/spd/trusty/trusty.c
@@ -21,7 +21,10 @@
 #include "smcall.h"
 
 /* macro to check if Hypervisor is enabled in the HCR_EL2 register */
-#define HYP_ENABLE_FLAG		0x286001
+#define HYP_ENABLE_FLAG		0x286001U
+
+/* length of Trusty's input parameters (in bytes) */
+#define TRUSTY_PARAMS_LEN_BYTES	(4096U * 2U)
 
 struct trusty_stack {
 	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
@@ -32,7 +35,7 @@
 	cpu_context_t	cpu_ctx;
 	void		*saved_sp;
 	uint32_t	saved_security_state;
-	int		fiq_handler_active;
+	int32_t		fiq_handler_active;
 	uint64_t	fiq_handler_pc;
 	uint64_t	fiq_handler_cpsr;
 	uint64_t	fiq_handler_sp;
@@ -43,7 +46,7 @@
 	struct trusty_stack	secure_stack;
 };
 
-struct args {
+struct smc_args {
 	uint64_t	r0;
 	uint64_t	r1;
 	uint64_t	r2;
@@ -56,8 +59,8 @@
 
 static struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];
 
-struct args trusty_init_context_stack(void **sp, void *new_stack);
-struct args trusty_context_switch_helper(void **sp, void *smc_params);
+struct smc_args trusty_init_context_stack(void **sp, void *new_stack);
+struct smc_args trusty_context_switch_helper(void **sp, void *smc_params);
 
 static uint32_t current_vmid;
 
@@ -66,37 +69,37 @@
 	return &trusty_cpu_ctx[plat_my_core_pos()];
 }
 
-static uint32_t is_hypervisor_mode(void)
+static bool is_hypervisor_mode(void)
 {
 	uint64_t hcr = read_hcr();
 
-	return !!(hcr & HYP_ENABLE_FLAG);
+	return ((hcr & HYP_ENABLE_FLAG) != 0U) ? true : false;
 }
 
-static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
+static struct smc_args trusty_context_switch(uint32_t security_state, uint64_t r0,
 					 uint64_t r1, uint64_t r2, uint64_t r3)
 {
-	struct args ret;
+	struct smc_args args, ret_args;
 	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
 	struct trusty_cpu_ctx *ctx_smc;
 
 	assert(ctx->saved_security_state != security_state);
 
-	ret.r7 = 0;
+	args.r7 = 0;
 	if (is_hypervisor_mode()) {
 		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
 		ctx_smc = cm_get_context(NON_SECURE);
-		assert(ctx_smc);
-		ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
+		assert(ctx_smc != NULL);
+		args.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
 	}
 	/* r4, r5, r6 reserved for future use. */
-	ret.r6 = 0;
-	ret.r5 = 0;
-	ret.r4 = 0;
-	ret.r3 = r3;
-	ret.r2 = r2;
-	ret.r1 = r1;
-	ret.r0 = r0;
+	args.r6 = 0;
+	args.r5 = 0;
+	args.r4 = 0;
+	args.r3 = r3;
+	args.r2 = r2;
+	args.r1 = r1;
+	args.r0 = r0;
 
 	/*
 	 * To avoid the additional overhead in PSCI flow, skip FP context
@@ -109,9 +112,9 @@
 	cm_el1_sysregs_context_save(security_state);
 
 	ctx->saved_security_state = security_state;
-	ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);
+	ret_args = trusty_context_switch_helper(&ctx->saved_sp, &args);
 
-	assert(ctx->saved_security_state == !security_state);
+	assert(ctx->saved_security_state == ((security_state == 0U) ? 1U : 0U));
 
 	cm_el1_sysregs_context_restore(security_state);
 	if (r0 != SMC_FC_CPU_SUSPEND && r0 != SMC_FC_CPU_RESUME)
@@ -119,7 +122,7 @@
 
 	cm_set_next_eret_context(security_state);
 
-	return ret;
+	return ret_args;
 }
 
 static uint64_t trusty_fiq_handler(uint32_t id,
@@ -127,29 +130,29 @@
 				   void *handle,
 				   void *cookie)
 {
-	struct args ret;
+	struct smc_args ret;
 	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
 
 	assert(!is_caller_secure(flags));
 
 	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
-	if (ret.r0) {
+	if (ret.r0 != 0U) {
 		SMC_RET0(handle);
 	}
 
-	if (ctx->fiq_handler_active) {
+	if (ctx->fiq_handler_active != 0) {
 		INFO("%s: fiq handler already active\n", __func__);
 		SMC_RET0(handle);
 	}
 
 	ctx->fiq_handler_active = 1;
-	memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
+	(void)memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
 	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
 	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
 	ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);
 
 	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
-	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);
+	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, (uint32_t)ctx->fiq_handler_cpsr);
 
 	SMC_RET0(handle);
 }
@@ -159,9 +162,9 @@
 {
 	struct trusty_cpu_ctx *ctx;
 
-	if (cpu >= PLATFORM_CORE_COUNT) {
+	if (cpu >= (uint64_t)PLATFORM_CORE_COUNT) {
 		ERROR("%s: cpu %lld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
-		return SM_ERR_INVALID_PARAMETERS;
+		return (uint64_t)SM_ERR_INVALID_PARAMETERS;
 	}
 
 	ctx = &trusty_cpu_ctx[cpu];
@@ -182,16 +185,16 @@
 
 static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
 {
-	struct args ret;
+	struct smc_args ret;
 	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
 
-	if (!ctx->fiq_handler_active) {
+	if (ctx->fiq_handler_active == 0) {
 		NOTICE("%s: fiq handler not active\n", __func__);
-		SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
+		SMC_RET1(handle, (uint64_t)SM_ERR_INVALID_PARAMETERS);
 	}
 
 	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
-	if (ret.r0 != 1) {
+	if (ret.r0 != 1U) {
 		INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %lld\n",
 		       __func__, handle, ret.r0);
 	}
@@ -205,10 +208,10 @@
 	 * x1-x4 and x8-x17 need to be restored here because smc_handler64
 	 * corrupts them (el1 code also restored them).
 	 */
-	memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
+	(void)memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
 	ctx->fiq_handler_active = 0;
 	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
-	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);
+	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, (uint32_t)ctx->fiq_cpsr);
 
 	SMC_RET0(handle);
 }
@@ -222,8 +225,8 @@
 			 void *handle,
 			 u_register_t flags)
 {
-	struct args ret;
-	uint32_t vmid = 0;
+	struct smc_args ret;
+	uint32_t vmid = 0U;
 	entry_point_info_t *ep_info = bl31_plat_get_next_image_ep_info(SECURE);
 
 	/*
@@ -231,10 +234,12 @@
 	 * Verified Boot is not even supported and returning success here
 	 * would not compromise the boot process.
 	 */
-	if (!ep_info && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
+	if ((ep_info == NULL) && (smc_fid == SMC_YC_SET_ROT_PARAMS)) {
 		SMC_RET1(handle, 0);
-	} else if (!ep_info) {
+	} else if (ep_info == NULL) {
 		SMC_RET1(handle, SMC_UNK);
+	} else {
+		; /* do nothing */
 	}
 
 	if (is_caller_secure(flags)) {
@@ -279,12 +284,11 @@
 
 static int32_t trusty_init(void)
 {
-	void el3_exit(void);
 	entry_point_info_t *ep_info;
-	struct args zero_args = {0};
+	struct smc_args zero_args = {0};
 	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
 	uint32_t cpu = plat_my_core_pos();
-	int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
+	uint64_t reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
 			       CTX_SPSR_EL3));
 
 	/*
@@ -292,7 +296,7 @@
 	 * failure.
 	 */
 	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
-	assert(ep_info);
+	assert(ep_info != NULL);
 
 	fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE)));
 	cm_el1_sysregs_context_save(NON_SECURE);
@@ -304,7 +308,7 @@
 	 * Adjust secondary cpu entry point for 32 bit images to the
 	 * end of exception vectors
 	 */
-	if ((cpu != 0) && (reg_width == MODE_RW_32)) {
+	if ((cpu != 0U) && (reg_width == MODE_RW_32)) {
 		INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
 		     cpu, ep_info->pc + (1U << 5));
 		cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
@@ -314,10 +318,10 @@
 	fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE)));
 	cm_set_next_eret_context(SECURE);
 
-	ctx->saved_security_state = ~0; /* initial saved state is invalid */
-	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);
+	ctx->saved_security_state = ~0U; /* initial saved state is invalid */
+	(void)trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack.end);
 
-	trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
+	(void)trusty_context_switch_helper(&ctx->saved_sp, &zero_args);
 
 	cm_el1_sysregs_context_restore(NON_SECURE);
 	fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE)));
@@ -328,10 +332,10 @@
 
 static void trusty_cpu_suspend(uint32_t off)
 {
-	struct args ret;
+	struct smc_args ret;
 
 	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, off, 0, 0);
-	if (ret.r0 != 0) {
+	if (ret.r0 != 0U) {
 		INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %lld\n",
 		     __func__, plat_my_core_pos(), ret.r0);
 	}
@@ -339,10 +343,10 @@
 
 static void trusty_cpu_resume(uint32_t on)
 {
-	struct args ret;
+	struct smc_args ret;
 
 	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, on, 0, 0);
-	if (ret.r0 != 0) {
+	if (ret.r0 != 0U) {
 		INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %lld\n",
 		     __func__, plat_my_core_pos(), ret.r0);
 	}
@@ -359,8 +363,8 @@
 {
 	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
 
-	if (!ctx->saved_sp) {
-		trusty_init();
+	if (ctx->saved_sp == NULL) {
+		(void)trusty_init();
 	} else {
 		trusty_cpu_resume(1);
 	}
@@ -398,12 +402,12 @@
 	entry_point_info_t *ep_info;
 	uint32_t instr;
 	uint32_t flags;
-	int ret;
+	int32_t ret;
 	bool aarch32 = false;
 
 	/* Get trusty's entry point info */
 	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
-	if (!ep_info) {
+	if (ep_info == NULL) {
 		INFO("Trusty image missing.\n");
 		return -1;
 	}
@@ -444,8 +448,9 @@
 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
 					      trusty_fiq_handler,
 					      flags);
-	if (ret)
+	if (ret != 0) {
 		ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);
+	}
 
 	if (aarch32) {
 		entry_point_info_t *ns_ep_info;