/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
7#include <stdbool.h>
8#include <stddef.h>
9#include <stdint.h>
10#include <string.h>
11
12#include <common/debug.h>
13#include <common/runtime_svc.h>
14#include <context.h>
15#include <lib/coreboot.h>
16#include <lib/utils_def.h>
17#include <lib/xlat_tables/xlat_tables_v2.h>
18#include <smccc_helpers.h>
19#include <tools_share/uuid.h>
20
21#include <qti_plat.h>
22#include <qti_secure_io_cfg.h>
23#include <qtiseclib_interface.h>
24/*
25 * SIP service - SMC function IDs for SiP Service queries
26 *
27 */
28#define QTI_SIP_SVC_CALL_COUNT_ID U(0x0200ff00)
29#define QTI_SIP_SVC_UID_ID U(0x0200ff01)
30/* 0x8200ff02 is reserved */
31#define QTI_SIP_SVC_VERSION_ID U(0x0200ff03)
32
33/*
34 * Syscall's to allow Non Secure world accessing peripheral/IO memory
35 * those are secure/proteced BUT not required to be secure.
36 */
37#define QTI_SIP_SVC_SECURE_IO_READ_ID U(0x02000501)
38#define QTI_SIP_SVC_SECURE_IO_WRITE_ID U(0x02000502)
39
40/*
41 * Syscall's to assigns a list of intermediate PAs from a
42 * source Virtual Machine (VM) to a destination VM.
43 */
44#define QTI_SIP_SVC_MEM_ASSIGN_ID U(0x02000C16)
45
46#define QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID U(0x1)
47#define QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID U(0x2)
48#define QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID U(0x1117)
49
50#define QTI_SIP_SVC_CALL_COUNT U(0x3)
51#define QTI_SIP_SVC_VERSION_MAJOR U(0x0)
52#define QTI_SIP_SVC_VERSION_MINOR U(0x0)
53
54#define QTI_VM_LAST U(44)
55#define SIZE4K U(0x1000)
56#define QTI_VM_MAX_LIST_SIZE U(0x20)
57
58#define FUNCID_OEN_NUM_MASK ((FUNCID_OEN_MASK << FUNCID_OEN_SHIFT)\
59 |(FUNCID_NUM_MASK << FUNCID_NUM_SHIFT))
60
61enum {
62 QTI_SIP_SUCCESS = 0,
63 QTI_SIP_NOT_SUPPORTED = -1,
64 QTI_SIP_PREEMPTED = -2,
65 QTI_SIP_INVALID_PARAM = -3,
66};
67
68/* QTI SiP Service UUID */
69DEFINE_SVC_UUID2(qti_sip_svc_uid,
70 0x43864748, 0x217f, 0x41ad, 0xaa, 0x5a,
71 0xba, 0xe7, 0x0f, 0xa5, 0x52, 0xaf);
72
73static bool qti_is_secure_io_access_allowed(u_register_t addr)
74{
75 int i = 0;
76
77 for (i = 0; i < ARRAY_SIZE(qti_secure_io_allowed_regs); i++) {
78 if ((uintptr_t) addr == qti_secure_io_allowed_regs[i]) {
79 return true;
80 }
81 }
82
83 return false;
84}
85
86bool qti_mem_assign_validate_param(memprot_info_t *mem_info,
87 u_register_t u_num_mappings,
88 uint32_t *source_vm_list,
89 u_register_t src_vm_list_cnt,
90 memprot_dst_vm_perm_info_t *dest_vm_list,
91 u_register_t dst_vm_list_cnt)
92{
93 int i;
94
95 if (!source_vm_list || !dest_vm_list || !mem_info
96 || (src_vm_list_cnt == 0)
97 || (src_vm_list_cnt >= QTI_VM_LAST) || (dst_vm_list_cnt == 0)
98 || (dst_vm_list_cnt >= QTI_VM_LAST) || (u_num_mappings == 0)
99 || u_num_mappings > QTI_VM_MAX_LIST_SIZE) {
100 return false;
101 }
102 for (i = 0; i < u_num_mappings; i++) {
103 if ((mem_info[i].mem_addr & (SIZE4K - 1))
104 || (mem_info[i].mem_size & (SIZE4K - 1))) {
105 return false;
106 }
107
108 if ((mem_info[i].mem_addr + mem_info[i].mem_size) <
109 mem_info[i].mem_addr) {
110 return false;
111 }
112 if (coreboot_get_memory_type(mem_info[i].mem_addr) !=
113 CB_MEM_RAM) {
114 return false;
115 }
116
117 if (coreboot_get_memory_type
118 (mem_info[i].mem_addr + mem_info[i].mem_size) !=
119 CB_MEM_RAM) {
120 return false;
121 }
122
123 }
124 for (i = 0; i < src_vm_list_cnt; i++) {
125 if (source_vm_list[i] >= QTI_VM_LAST) {
126 return false;
127 }
128 }
129 for (i = 0; i < dst_vm_list_cnt; i++) {
130 if (dest_vm_list[i].dst_vm >= QTI_VM_LAST) {
131 return false;
132 }
133 }
134 return true;
135}
136
137static uintptr_t qti_sip_mem_assign(void *handle, uint32_t smc_cc,
138 u_register_t x1,
139 u_register_t x2,
140 u_register_t x3, u_register_t x4)
141{
142 uintptr_t dyn_map_start = 0, dyn_map_end = 0;
143 size_t dyn_map_size = 0;
144 u_register_t x6, x7;
145 int ret = QTI_SIP_NOT_SUPPORTED;
146 u_register_t x5 = read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X5);
147
148 if (smc_cc == SMC_32) {
149 x5 = (uint32_t) x5;
150 }
151 /* Validate input arg count & retrieve arg3-6 from NS Buffer. */
152 if ((x1 != QTI_SIP_SVC_MEM_ASSIGN_PARAM_ID) || (x5 == 0x0)) {
153 goto unmap_return;
154 }
155
156 /* Map NS Buffer. */
157 dyn_map_start = x5;
158 dyn_map_size =
159 (smc_cc ==
160 SMC_32) ? (sizeof(uint32_t) * 4) : (sizeof(uint64_t) * 4);
161 if (qti_mmap_add_dynamic_region(dyn_map_start, dyn_map_size,
162 (MT_NS | MT_RO_DATA)) != 0) {
163 goto unmap_return;
164 }
165 /* Retrieve indirect args. */
166 if (smc_cc == SMC_32) {
167 x6 = *((uint32_t *) x5 + 1);
168 x7 = *((uint32_t *) x5 + 2);
169 x5 = *(uint32_t *) x5;
170 } else {
171 x6 = *((uint64_t *) x5 + 1);
172 x7 = *((uint64_t *) x5 + 2);
173 x5 = *(uint64_t *) x5;
174 }
175 /* Un-Map NS Buffer. */
176 if (qti_mmap_remove_dynamic_region(dyn_map_start, dyn_map_size) != 0) {
177 goto unmap_return;
178 }
179
180 /*
181 * Map NS Buffers.
182 * arg0,2,4 points to buffers & arg1,3,5 hold sizes.
183 * MAP api's fail to map if it's already mapped. Let's
184 * find lowest start & highest end address, then map once.
185 */
186 dyn_map_start = MIN(x2, x4);
187 dyn_map_start = MIN(dyn_map_start, x6);
188 dyn_map_end = MAX((x2 + x3), (x4 + x5));
189 dyn_map_end = MAX(dyn_map_end, (x6 + x7));
190 dyn_map_size = dyn_map_end - dyn_map_start;
191
192 if (qti_mmap_add_dynamic_region(dyn_map_start, dyn_map_size,
193 (MT_NS | MT_RO_DATA)) != 0) {
194 goto unmap_return;
195 }
196 memprot_info_t *mem_info_p = (memprot_info_t *) x2;
197 uint32_t u_num_mappings = x3 / sizeof(memprot_info_t);
198 uint32_t *source_vm_list_p = (uint32_t *) x4;
199 uint32_t src_vm_list_cnt = x5 / sizeof(uint32_t);
200 memprot_dst_vm_perm_info_t *dest_vm_list_p =
201 (memprot_dst_vm_perm_info_t *) x6;
202 uint32_t dst_vm_list_cnt =
203 x7 / sizeof(memprot_dst_vm_perm_info_t);
204 if (qti_mem_assign_validate_param(mem_info_p, u_num_mappings,
205 source_vm_list_p, src_vm_list_cnt,
206 dest_vm_list_p,
207 dst_vm_list_cnt) != true) {
208 goto unmap_return;
209 }
210
211 memprot_info_t mem_info[QTI_VM_MAX_LIST_SIZE];
212 /* Populating the arguments */
213 for (int i = 0; i < u_num_mappings; i++) {
214 mem_info[i].mem_addr = mem_info_p[i].mem_addr;
215 mem_info[i].mem_size = mem_info_p[i].mem_size;
216 }
217
218 memprot_dst_vm_perm_info_t dest_vm_list[QTI_VM_LAST];
219
220 for (int i = 0; i < dst_vm_list_cnt; i++) {
221 dest_vm_list[i].dst_vm = dest_vm_list_p[i].dst_vm;
222 dest_vm_list[i].dst_vm_perm =
223 dest_vm_list_p[i].dst_vm_perm;
224 dest_vm_list[i].ctx = dest_vm_list_p[i].ctx;
225 dest_vm_list[i].ctx_size = dest_vm_list_p[i].ctx_size;
226 }
227
228 uint32_t source_vm_list[QTI_VM_LAST];
229
230 for (int i = 0; i < src_vm_list_cnt; i++) {
231 source_vm_list[i] = source_vm_list_p[i];
232 }
233 /* Un-Map NS Buffers. */
234 if (qti_mmap_remove_dynamic_region(dyn_map_start,
235 dyn_map_size) != 0) {
236 goto unmap_return;
237 }
238 /* Invoke API lib api. */
239 ret = qtiseclib_mem_assign(mem_info, u_num_mappings,
240 source_vm_list, src_vm_list_cnt,
241 dest_vm_list, dst_vm_list_cnt);
242
243 if (ret == 0) {
244 SMC_RET2(handle, QTI_SIP_SUCCESS, ret);
245 }
246unmap_return:
247 /* Un-Map NS Buffers if mapped */
248 if (dyn_map_start && dyn_map_size) {
249 qti_mmap_remove_dynamic_region(dyn_map_start, dyn_map_size);
250 }
251
252 SMC_RET2(handle, QTI_SIP_INVALID_PARAM, ret);
253}
254
255/*
256 * This function handles QTI specific syscalls. Currently only SiP calls are present.
257 * Both FAST & YIELD type call land here.
258 */
259static uintptr_t qti_sip_handler(uint32_t smc_fid,
260 u_register_t x1,
261 u_register_t x2,
262 u_register_t x3,
263 u_register_t x4,
264 void *cookie, void *handle, u_register_t flags)
265{
266 uint32_t l_smc_fid = smc_fid & FUNCID_OEN_NUM_MASK;
267
268 if (GET_SMC_CC(smc_fid) == SMC_32) {
269 x1 = (uint32_t) x1;
270 x2 = (uint32_t) x2;
271 x3 = (uint32_t) x3;
272 x4 = (uint32_t) x4;
273 }
274
275 switch (l_smc_fid) {
276 case QTI_SIP_SVC_CALL_COUNT_ID:
277 {
278 SMC_RET1(handle, QTI_SIP_SVC_CALL_COUNT);
279 break;
280 }
281 case QTI_SIP_SVC_UID_ID:
282 {
283 /* Return UID to the caller */
284 SMC_UUID_RET(handle, qti_sip_svc_uid);
285 break;
286 }
287 case QTI_SIP_SVC_VERSION_ID:
288 {
289 /* Return the version of current implementation */
290 SMC_RET2(handle, QTI_SIP_SVC_VERSION_MAJOR,
291 QTI_SIP_SVC_VERSION_MINOR);
292 break;
293 }
294 case QTI_SIP_SVC_SECURE_IO_READ_ID:
295 {
296 if ((x1 == QTI_SIP_SVC_SECURE_IO_READ_PARAM_ID) &&
297 qti_is_secure_io_access_allowed(x2)) {
298 SMC_RET2(handle, QTI_SIP_SUCCESS,
299 *((volatile uint32_t *)x2));
300 }
301 SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
302 break;
303 }
304 case QTI_SIP_SVC_SECURE_IO_WRITE_ID:
305 {
306 if ((x1 == QTI_SIP_SVC_SECURE_IO_WRITE_PARAM_ID) &&
307 qti_is_secure_io_access_allowed(x2)) {
308 *((volatile uint32_t *)x2) = x3;
309 SMC_RET1(handle, QTI_SIP_SUCCESS);
310 }
311 SMC_RET1(handle, QTI_SIP_INVALID_PARAM);
312 break;
313 }
314 case QTI_SIP_SVC_MEM_ASSIGN_ID:
315 {
316 return qti_sip_mem_assign(handle, GET_SMC_CC(smc_fid),
317 x1, x2, x3, x4);
318 break;
319 }
320 default:
321 {
322 SMC_RET1(handle, QTI_SIP_NOT_SUPPORTED);
323 }
324 }
325 return (uintptr_t) handle;
326}
327
/* Register runtime service descriptors for both fast & yield SiP calls. */
DECLARE_RT_SVC(qti_sip_fast_svc, OEN_SIP_START, OEN_SIP_END,
	       SMC_TYPE_FAST, NULL, qti_sip_handler);

DECLARE_RT_SVC(qti_sip_yield_svc, OEN_SIP_START, OEN_SIP_END,
	       SMC_TYPE_YIELD, NULL, qti_sip_handler);