xen: Port Xen hypervisor related code from mini-os

Port hypervisor related code from Mini-OS. This is based on the
ARM64 Mini-OS code [1] by Huang Shijie and Volodymyr Babchuk.
Update essential arch code to support the required bit operations,
memory barriers etc.

Copyright for the ported bits belongs to at least the following
authors; please see the related files for details:

Copyright (c) 2002-2003, K A Fraser
Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk,Intel Research Cambridge
Copyright (c) 2014, Karim Allah Ahmed <karim.allah.ahmed@gmail.com>

[1] - https://github.com/zyzii/mini-os.git
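
For reference, below is a minimal usage sketch of the ported
interfaces (a hypothetical caller, not part of this patch; it only
uses hvm_get_parameter() and the event channel helpers added here):

  #include <xen/hvm.h>

  /* Query the console event channel advertised by Xen and re-enable
   * delivery on it.
   */
  static void example_unmask_console_evtchn(void)
  {
          uint64_t v = 0;

          /* hvm_get_parameter() returns 0 on success. */
          if (!hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v))
                  unmask_evtchn((uint32_t)v);
  }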

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Signed-off-by: Anastasiia Lukianenko <anastasiia_lukianenko@epam.com>
[trini: Drop wmb() from musb-new/linux-compat.h now]
Signed-off-by: Tom Rini <trini@konsulko.com>
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 8959749..ade1401 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -110,9 +110,13 @@
  * have some advantages to use them instead of the simple one here.
  */
 #define mb()		dsb()
+#define rmb()		dsb()
+#define wmb()		dsb()
 #define __iormb()	dmb()
 #define __iowmb()	dmb()
 
+#define smp_processor_id()	0
+
 #define writeb(v,c)	({ u8  __v = v; __iowmb(); __arch_putb(__v,c); __v; })
 #define writew(v,c)	({ u16 __v = v; __iowmb(); __arch_putw(__v,c); __v; })
 #define writel(v,c)	({ u32 __v = v; __iowmb(); __arch_putl(__v,c); __v; })
diff --git a/arch/arm/include/asm/xen/system.h b/arch/arm/include/asm/xen/system.h
new file mode 100644
index 0000000..0fc8a79
--- /dev/null
+++ b/arch/arm/include/asm/xen/system.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * (C) 2014 Karim Allah Ahmed <karim.allah.ahmed@gmail.com>
+ * (C) 2020, EPAM Systems Inc.
+ */
+#ifndef _ASM_ARM_XEN_SYSTEM_H
+#define _ASM_ARM_XEN_SYSTEM_H
+
+#include <compiler.h>
+#include <asm/bitops.h>
+
+/* If *ptr == old, then store new there (and return new).
+ * Otherwise, return the old value.
+ * Atomic.
+ */
+#define synch_cmpxchg(ptr, old, new) \
+({ __typeof__(*ptr) stored = old; \
+	__atomic_compare_exchange_n(ptr, &stored, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ? new : old; \
+})
+
+/* As test_and_clear_bit, but using __ATOMIC_SEQ_CST */
+static inline int synch_test_and_clear_bit(int nr, volatile void *addr)
+{
+	u8 *byte = ((u8 *)addr) + (nr >> 3);
+	u8 bit = 1 << (nr & 7);
+	u8 orig;
+
+	orig = __atomic_fetch_and(byte, ~bit, __ATOMIC_SEQ_CST);
+
+	return (orig & bit) != 0;
+}
+
+/* As test_and_set_bit, but using __ATOMIC_SEQ_CST */
+static inline int synch_test_and_set_bit(int nr, volatile void *base)
+{
+	u8 *byte = ((u8 *)base) + (nr >> 3);
+	u8 bit = 1 << (nr & 7);
+	u8 orig;
+
+	orig = __atomic_fetch_or(byte, bit, __ATOMIC_SEQ_CST);
+
+	return (orig & bit) != 0;
+}
+
+/* As set_bit, but using __ATOMIC_SEQ_CST */
+static inline void synch_set_bit(int nr, volatile void *addr)
+{
+	synch_test_and_set_bit(nr, addr);
+}
+
+/* As clear_bit, but using __ATOMIC_SEQ_CST */
+static inline void synch_clear_bit(int nr, volatile void *addr)
+{
+	synch_test_and_clear_bit(nr, addr);
+}
+
+/* As test_bit, but with a following memory barrier. */
+static inline int synch_test_bit(int nr, const void *addr)
+{
+	int result;
+
+	result = test_bit(nr, addr);
+	barrier();
+	return result;
+}
+
+#define xchg(ptr, v)	__atomic_exchange_n(ptr, v, __ATOMIC_SEQ_CST)
+
+#define xen_mb()	mb()
+#define xen_rmb()	rmb()
+#define xen_wmb()	wmb()
+
+#define to_phys(x)		((unsigned long)(x))
+#define to_virt(x)		((void *)(x))
+
+#define PFN_UP(x)		(unsigned long)(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#define PFN_DOWN(x)		(unsigned long)((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x)		((unsigned long)(x) << PAGE_SHIFT)
+#define PHYS_PFN(x)		(unsigned long)((x) >> PAGE_SHIFT)
+
+#define virt_to_pfn(_virt)	(PFN_DOWN(to_phys(_virt)))
+#define virt_to_mfn(_virt)	(PFN_DOWN(to_phys(_virt)))
+#define mfn_to_virt(_mfn)	(to_virt(PFN_PHYS(_mfn)))
+#define pfn_to_virt(_pfn)	(to_virt(PFN_PHYS(_pfn)))
+
+#endif
diff --git a/common/board_r.c b/common/board_r.c
index d48d2bb..add544d 100644
--- a/common/board_r.c
+++ b/common/board_r.c
@@ -56,6 +56,9 @@
 #include <timer.h>
 #include <trace.h>
 #include <watchdog.h>
+#ifdef CONFIG_XEN
+#include <xen.h>
+#endif
 #ifdef CONFIG_ADDR_MAP
 #include <asm/mmu.h>
 #endif
@@ -465,6 +468,13 @@
 }
 #endif
 
+#ifdef CONFIG_XEN
+static int initr_xen(void)
+{
+	xen_init();
+	return 0;
+}
+#endif
 /*
  * Tell if it's OK to load the environment early in boot.
  *
@@ -762,6 +772,9 @@
 #ifdef CONFIG_MMC
 	initr_mmc,
 #endif
+#ifdef CONFIG_XEN
+	initr_xen,
+#endif
 	initr_env,
 #ifdef CONFIG_SYS_BOOTPARAMS_LEN
 	initr_malloc_bootparams,
diff --git a/drivers/Makefile b/drivers/Makefile
index 2178871..33126b2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_$(SPL_TPL_)TPM) += tpm/
 obj-$(CONFIG_$(SPL_TPL_)ACPI_PMC) += power/acpi_pmc/
 obj-$(CONFIG_$(SPL_)BOARD) += board/
+obj-$(CONFIG_XEN) += xen/
 
 ifndef CONFIG_TPL_BUILD
 ifdef CONFIG_SPL_BUILD
diff --git a/drivers/usb/musb-new/linux-compat.h b/drivers/usb/musb-new/linux-compat.h
index 733b197..6d9f19d 100644
--- a/drivers/usb/musb-new/linux-compat.h
+++ b/drivers/usb/musb-new/linux-compat.h
@@ -10,10 +10,6 @@
 
 #define platform_data device_data
 
-#ifndef wmb
-#define wmb()			asm volatile (""   : : : "memory")
-#endif
-
 #define msleep(a)	udelay(a * 1000)
 
 /*
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
new file mode 100644
index 0000000..1211bf2
--- /dev/null
+++ b/drivers/xen/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier:	GPL-2.0+
+#
+# (C) Copyright 2020 EPAM Systems Inc.
+
+obj-y += hypervisor.o
diff --git a/drivers/xen/hypervisor.c b/drivers/xen/hypervisor.c
new file mode 100644
index 0000000..108e970
--- /dev/null
+++ b/drivers/xen/hypervisor.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: MIT License
+/*
+ * hypervisor.c
+ *
+ * Communication to/from hypervisor.
+ *
+ * Copyright (c) 2002-2003, K A Fraser
+ * Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk,Intel Research Cambridge
+ * Copyright (c) 2020, EPAM Systems Inc.
+ */
+#include <common.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <memalign.h>
+
+#include <asm/io.h>
+#include <asm/armv8/mmu.h>
+#include <asm/xen/system.h>
+
+#include <linux/bug.h>
+
+#include <xen/hvm.h>
+#include <xen/interface/memory.h>
+
+#define active_evtchns(cpu, sh, idx)	\
+	((sh)->evtchn_pending[idx] &	\
+	 ~(sh)->evtchn_mask[idx])
+
+int in_callback;
+
+/*
+ * Shared page for communicating with the hypervisor.
+ * Event flags go here, for example.
+ */
+struct shared_info *HYPERVISOR_shared_info;
+
+static const char *param_name(int op)
+{
+#define PARAM(x)[HVM_PARAM_##x] = #x
+	static const char *const names[] = {
+		PARAM(CALLBACK_IRQ),
+		PARAM(STORE_PFN),
+		PARAM(STORE_EVTCHN),
+		PARAM(PAE_ENABLED),
+		PARAM(IOREQ_PFN),
+		PARAM(VPT_ALIGN),
+		PARAM(CONSOLE_PFN),
+		PARAM(CONSOLE_EVTCHN),
+	};
+#undef PARAM
+
+	if (op >= ARRAY_SIZE(names))
+		return "unknown";
+
+	if (!names[op])
+		return "reserved";
+
+	return names[op];
+}
+
+/**
+ * hvm_get_parameter_maintain_dcache - function to obtain an HVM
+ * parameter value.
+ * @idx: HVM parameter index
+ * @value: Value to fill in
+ *
+ * According to the Xen on ARM ABI (xen/include/public/arch-arm.h),
+ * all memory which is shared with other entities in the system
+ * (including the hypervisor and other guests) must reside in memory
+ * which is mapped as Normal Inner Write-Back Outer Write-Back
+ * Inner-Shareable.
+ *
+ * Thus, the page attributes must be set identically for all the
+ * entities working with that page.
+ *
+ * Before the MMU is set up the data cache is turned off, which means
+ * that manual data cache maintenance is required because of the
+ * difference in page attributes.
+ */
+int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value)
+{
+	struct xen_hvm_param xhv;
+	int ret;
+
+	invalidate_dcache_range((unsigned long)&xhv,
+				(unsigned long)&xhv + sizeof(xhv));
+	xhv.domid = DOMID_SELF;
+	xhv.index = idx;
+	invalidate_dcache_range((unsigned long)&xhv,
+				(unsigned long)&xhv + sizeof(xhv));
+
+	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
+	if (ret < 0) {
+		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
+			   param_name(idx), idx, ret);
+		BUG();
+	}
+	invalidate_dcache_range((unsigned long)&xhv,
+				(unsigned long)&xhv + sizeof(xhv));
+
+	*value = xhv.value;
+
+	return ret;
+}
+
+int hvm_get_parameter(int idx, uint64_t *value)
+{
+	struct xen_hvm_param xhv;
+	int ret;
+
+	xhv.domid = DOMID_SELF;
+	xhv.index = idx;
+	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
+	if (ret < 0) {
+		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
+			   param_name(idx), idx, ret);
+		BUG();
+	}
+
+	*value = xhv.value;
+
+	return ret;
+}
+
+struct shared_info *map_shared_info(void *p)
+{
+	struct xen_add_to_physmap xatp;
+
+	HYPERVISOR_shared_info = (struct shared_info *)memalign(PAGE_SIZE,
+								PAGE_SIZE);
+	if (!HYPERVISOR_shared_info)
+		BUG();
+
+	xatp.domid = DOMID_SELF;
+	xatp.idx = 0;
+	xatp.space = XENMAPSPACE_shared_info;
+	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
+		BUG();
+
+	return HYPERVISOR_shared_info;
+}
+
+void do_hypervisor_callback(struct pt_regs *regs)
+{
+	unsigned long l1, l2, l1i, l2i;
+	unsigned int port;
+	int cpu = 0;
+	struct shared_info *s = HYPERVISOR_shared_info;
+	struct vcpu_info *vcpu_info = &s->vcpu_info[cpu];
+
+	in_callback = 1;
+
+	vcpu_info->evtchn_upcall_pending = 0;
+	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+
+	while (l1 != 0) {
+		l1i = __ffs(l1);
+		l1 &= ~(1UL << l1i);
+
+		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
+			l2i = __ffs(l2);
+			l2 &= ~(1UL << l2i);
+
+			port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
+			/* TODO: handle new event: do_event(port, regs); */
+			/* Suppress -Wunused-but-set-variable */
+			(void)(port);
+		}
+	}
+
+	in_callback = 0;
+}
+
+void force_evtchn_callback(void)
+{
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+	int save;
+#endif
+	struct vcpu_info *vcpu;
+
+	vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+	save = vcpu->evtchn_upcall_mask;
+#endif
+
+	while (vcpu->evtchn_upcall_pending) {
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+		vcpu->evtchn_upcall_mask = 1;
+#endif
+		do_hypervisor_callback(NULL);
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+		vcpu->evtchn_upcall_mask = save;
+#endif
+	}
+}
+
+void mask_evtchn(uint32_t port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+
+	synch_set_bit(port, &s->evtchn_mask[0]);
+}
+
+void unmask_evtchn(uint32_t port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+	struct vcpu_info *vcpu_info = &s->vcpu_info[smp_processor_id()];
+
+	synch_clear_bit(port, &s->evtchn_mask[0]);
+
+	/*
+	 * Just like a real IO-APIC we 'lose the interrupt edge' if the
+	 * channel is masked.
+	 */
+	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
+	    !synch_test_and_set_bit(port / (sizeof(unsigned long) * 8),
+				    &vcpu_info->evtchn_pending_sel)) {
+		vcpu_info->evtchn_upcall_pending = 1;
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+		if (!vcpu_info->evtchn_upcall_mask)
+#endif
+			force_evtchn_callback();
+	}
+}
+
+void clear_evtchn(uint32_t port)
+{
+	struct shared_info *s = HYPERVISOR_shared_info;
+
+	synch_clear_bit(port, &s->evtchn_pending[0]);
+}
+
+void xen_init(void)
+{
+	debug("%s\n", __func__);
+
+	map_shared_info(NULL);
+}
+
diff --git a/include/xen.h b/include/xen.h
new file mode 100644
index 0000000..abc3546
--- /dev/null
+++ b/include/xen.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * (C) 2020, EPAM Systems Inc.
+ */
+#ifndef __XEN_H__
+#define __XEN_H__
+
+/**
+ * xen_init() - Xen initialization
+ *
+ * Map Xen memory pages.
+ */
+void xen_init(void);
+
+#endif /* __XEN_H__ */
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
new file mode 100644
index 0000000..f02c079
--- /dev/null
+++ b/include/xen/hvm.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Simple wrappers around HVM functions
+ *
+ * Copyright (c) 2002-2003, K A Fraser
+ * Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk,Intel Research Cambridge
+ * Copyright (c) 2020, EPAM Systems Inc.
+ */
+#ifndef XEN_HVM_H__
+#define XEN_HVM_H__
+
+#include <asm/xen/hypercall.h>
+#include <xen/interface/hvm/params.h>
+#include <xen/interface/xen.h>
+
+extern struct shared_info *HYPERVISOR_shared_info;
+
+int hvm_get_parameter(int idx, uint64_t *value);
+int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value);
+
+struct shared_info *map_shared_info(void *p);
+void do_hypervisor_callback(struct pt_regs *regs);
+void mask_evtchn(uint32_t port);
+void unmask_evtchn(uint32_t port);
+void clear_evtchn(uint32_t port);
+
+#endif /* XEN_HVM_H__ */