// SPDX-License-Identifier: MIT
/*
 * hypervisor.c
 *
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2003, K A Fraser
 * Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk, Intel Research Cambridge
 * Copyright (c) 2020, EPAM Systems Inc.
 */
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>

#include <asm/io.h>
#include <asm/armv8/mmu.h>
#include <asm/xen/system.h>

#include <linux/bug.h>

#include <xen/hvm.h>
#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/xenbus.h>
#include <xen/interface/memory.h>

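/* Event channels that are pending and not masked in word idx of the bitmaps */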
#define active_evtchns(cpu, sh, idx) \
	((sh)->evtchn_pending[idx] & \
	 ~(sh)->evtchn_mask[idx])

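/* Non-zero while do_hypervisor_callback() is executing */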
int in_callback;

/*
 * Shared page for communicating with the hypervisor.
 * Event flags go here, for example.
 */
struct shared_info *HYPERVISOR_shared_info;

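/* Map an HVM parameter index to its name; gaps in the table read as "reserved" */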
static const char *param_name(int op)
{
#define PARAM(x) [HVM_PARAM_##x] = #x
	static const char *const names[] = {
		PARAM(CALLBACK_IRQ),
		PARAM(STORE_PFN),
		PARAM(STORE_EVTCHN),
		PARAM(PAE_ENABLED),
		PARAM(IOREQ_PFN),
		PARAM(VPT_ALIGN),
		PARAM(CONSOLE_PFN),
		PARAM(CONSOLE_EVTCHN),
	};
#undef PARAM

	if (op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}

/**
 * hvm_get_parameter_maintain_dcache - obtain an HVM parameter value
 * @idx: HVM parameter index
 * @value: Value to fill in
 *
 * According to the Xen on ARM ABI (xen/include/public/arch-arm.h),
 * all memory which is shared with other entities in the system
 * (including the hypervisor and other guests) must reside in memory
 * which is mapped as Normal Inner Write-Back Outer Write-Back
 * Inner-Shareable.
 *
 * Thus, page attributes must be set identically by all the entities
 * working with that page.
 *
 * Before MMU setup the data cache is turned off, so manual data cache
 * maintenance is required, because the page attributes differ until
 * the MMU is configured.
 */
int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value)
{
	struct xen_hvm_param xhv;
	int ret;

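	/*
	 * Invalidate the lines covering xhv so that no stale or dirty
	 * cache line can shadow the uncached writes below from Xen's
	 * view of memory.
	 */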
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));
	xhv.domid = DOMID_SELF;
	xhv.index = idx;
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));

	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
	if (ret < 0) {
		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
		       param_name(idx), idx, ret);
		BUG();
	}
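	/* Discard any cached copy so we read the value Xen wrote to RAM */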
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));

	*value = xhv.value;

	return ret;
}

int hvm_get_parameter(int idx, uint64_t *value)
{
	struct xen_hvm_param xhv;
	int ret;

	xhv.domid = DOMID_SELF;
	xhv.index = idx;
	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
	if (ret < 0) {
		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
		       param_name(idx), idx, ret);
		BUG();
	}

	*value = xhv.value;

	return ret;
}

struct shared_info *map_shared_info(void *p)
{
	struct xen_add_to_physmap xatp;

	HYPERVISOR_shared_info = (struct shared_info *)memalign(PAGE_SIZE,
								PAGE_SIZE);
	if (!HYPERVISOR_shared_info)
		BUG();

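	/*
	 * Ask Xen to place its shared_info frame at the guest frame
	 * backing the page just allocated.
	 */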
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
		BUG();

	return HYPERVISOR_shared_info;
}

void unmap_shared_info(void)
{
	xen_pfn_t shared_info_pfn = virt_to_pfn(HYPERVISOR_shared_info);
	struct xen_remove_from_physmap xrfp = {0};
	struct xen_memory_reservation reservation = {0};
	xen_ulong_t nr_exts = 1;

	xrfp.domid = DOMID_SELF;
	xrfp.gpfn = shared_info_pfn;
	if (HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrfp) != 0)
		panic("Failed to unmap HYPERVISOR_shared_info\n");

	/*
	 * Removing the page from the physmap leaves a hole in the address
	 * space at HYPERVISOR_shared_info. Refill this 4KiB hole with
	 * XENMEM_populate_physmap before jumping to Linux, so the page can
	 * be accessed without faulting and returned to the allocator.
	 */
	reservation.domid = DOMID_SELF;
	reservation.extent_order = 0;
	reservation.address_bits = 0;
	set_xen_guest_handle(reservation.extent_start, &shared_info_pfn);
	reservation.nr_extents = nr_exts;
	if (HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation) != nr_exts)
		panic("Failed to populate memory on HYPERVISOR_shared_info addr\n");

	/* Now we can return this page to the memory allocator */
	free(HYPERVISOR_shared_info);
}

void do_hypervisor_callback(struct pt_regs *regs)
{
	unsigned long l1, l2, l1i, l2i;
	unsigned int port;
	int cpu = 0;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = &s->vcpu_info[cpu];

	in_callback = 1;

	vcpu_info->evtchn_upcall_pending = 0;
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

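	/*
	 * Two-level scan: each set bit in the selector l1 names a word of
	 * evtchn_pending[]; each set bit in that word is a pending,
	 * unmasked port.
	 */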
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);
			l2 &= ~(1UL << l2i);

			port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
			do_event(port, regs);
		}
	}

	in_callback = 0;
}

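/*
 * Drain pending upcalls synchronously by invoking the callback handler
 * until no upcall remains pending for this vCPU.
 */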
void force_evtchn_callback(void)
{
#ifdef XEN_HAVE_PV_UPCALL_MASK
	int save;
#endif
	struct vcpu_info *vcpu;

	vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
#ifdef XEN_HAVE_PV_UPCALL_MASK
	save = vcpu->evtchn_upcall_mask;
#endif

	while (vcpu->evtchn_upcall_pending) {
#ifdef XEN_HAVE_PV_UPCALL_MASK
		vcpu->evtchn_upcall_mask = 1;
#endif
		do_hypervisor_callback(NULL);
#ifdef XEN_HAVE_PV_UPCALL_MASK
		vcpu->evtchn_upcall_mask = save;
#endif
	}
}

void mask_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;

	synch_set_bit(port, &s->evtchn_mask[0]);
}

void unmask_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = &s->vcpu_info[smp_processor_id()];

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * Just like a real IO-APIC we 'lose the interrupt edge' if the
	 * channel is masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / (sizeof(unsigned long) * 8),
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
#ifdef XEN_HAVE_PV_UPCALL_MASK
		if (!vcpu_info->evtchn_upcall_mask)
#endif
			force_evtchn_callback();
	}
}

void clear_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;

	synch_clear_bit(port, &s->evtchn_pending[0]);
}

int xen_init(void)
{
	int el = current_el();

	debug("%s\n", __func__);

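	/* Xen on Arm runs its guests at EL1; at any other EL we are not under Xen */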
	if (el != 1) {
		puts("XEN:\tnot running from EL1\n");
		return 0;
	}

	map_shared_info(NULL);
	init_events();
	init_xenbus();
	init_gnttab();

	return 0;
}

void xen_fini(void)
{
	debug("%s\n", __func__);

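	/* Tear down subsystems in the reverse order of xen_init() */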
	fini_gnttab();
	fini_xenbus();
	fini_events();
	unmap_shared_info();
}