/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
7
8#include <arch_helpers.h>
9#include <assert.h>
10#include <common/debug.h>
11#include <delay_timer.h>
12#include <errno.h>
13#include <mmio.h>
14#include <psci.h>
15#include <se_private.h>
16#include <security_engine.h>
17#include <tegra_platform.h>
18
19/*******************************************************************************
20 * Constants and Macros
21 ******************************************************************************/
22
23#define TIMEOUT_100MS 100UL // Timeout in 100ms
24
25/*******************************************************************************
26 * Data structure and global variables
27 ******************************************************************************/
28
29/* The security engine contexts are formatted as follows:
30 *
31 * SE1 CONTEXT:
32 * #--------------------------------#
33 * | Random Data 1 Block |
34 * #--------------------------------#
35 * | Sticky Bits 2 Blocks |
36 * #--------------------------------#
37 * | Key Table 64 Blocks |
38 * | For each Key (x16): |
39 * | Key: 2 Blocks |
40 * | Original-IV: 1 Block |
41 * | Updated-IV: 1 Block |
42 * #--------------------------------#
43 * | RSA Keys 64 Blocks |
44 * #--------------------------------#
45 * | Known Pattern 1 Block |
46 * #--------------------------------#
47 *
48 * SE2/PKA1 CONTEXT:
49 * #--------------------------------#
50 * | Random Data 1 Block |
51 * #--------------------------------#
52 * | Sticky Bits 2 Blocks |
53 * #--------------------------------#
54 * | Key Table 64 Blocks |
55 * | For each Key (x16): |
56 * | Key: 2 Blocks |
57 * | Original-IV: 1 Block |
58 * | Updated-IV: 1 Block |
59 * #--------------------------------#
60 * | RSA Keys 64 Blocks |
61 * #--------------------------------#
62 * | PKA sticky bits 1 Block |
63 * #--------------------------------#
64 * | PKA keys 512 Blocks |
65 * #--------------------------------#
66 * | Known Pattern 1 Block |
67 * #--------------------------------#
68 */
69
/* SE1 input and output linked list buffers; flushed to memory before
 * each operation (see tegra_se_make_data_coherent), presumably because
 * the engine fetches them by DMA — confirm against the TRM.
 */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;

/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;

/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
	.se_num = 1,
	/* setup base address for se */
	.se_base = TEGRA_SE1_BASE,
	/* Setup context size in AES blocks (SE1 layout diagram above) */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se1_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se1_dst_ll_buf,
};

/* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = {
	.se_num = 2,
	/* setup base address for se */
	.se_base = TEGRA_SE2_BASE,
	/* Setup context size in AES blocks (SE2/PKA1 layout diagram above) */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se2_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se2_dst_ll_buf,
};
103
104/*******************************************************************************
105 * Functions Definition
106 ******************************************************************************/
107
108static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
109{
110 flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
111 sizeof(tegra_se_io_lst_t));
112 flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
113 sizeof(tegra_se_io_lst_t));
114}
115
116/*
117 * Check for context save operation complete
118 * This function is invoked after the context save operation,
119 * and it checks the following conditions:
120 * 1. SE_INT_STATUS = SE_OP_DONE
121 * 2. SE_STATUS = IDLE
122 * 3. AHB bus data transfer complete.
123 * 4. SE_ERR_STATUS is clean.
124 */
125static int32_t tegra_se_context_save_complete(const tegra_se_dev_t *se_dev)
126{
127 uint32_t val = 0;
128 int32_t ret = 0;
129 uint32_t timeout;
130
131 /* Poll the SE interrupt register to ensure H/W operation complete */
132 val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
133 for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
134 (timeout < TIMEOUT_100MS); timeout++) {
135 mdelay(1);
136 val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
137 }
138
139 if (timeout == TIMEOUT_100MS) {
140 ERROR("%s: ERR: Atomic context save operation timeout!\n",
141 __func__);
142 ret = -ETIMEDOUT;
143 }
144
145 /* Poll the SE status idle to ensure H/W operation complete */
146 if (ret == 0) {
147 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
148 for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
149 timeout++) {
150 mdelay(1);
151 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
152 }
153
154 if (timeout == TIMEOUT_100MS) {
155 ERROR("%s: ERR: MEM_INTERFACE and SE state "
156 "idle state timeout.\n", __func__);
157 ret = -ETIMEDOUT;
158 }
159 }
160
161 /* Check AHB bus transfer complete */
162 if (ret == 0) {
163 val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
164 for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
165 (timeout < TIMEOUT_100MS); timeout++) {
166 mdelay(1);
167 val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
168 }
169
170 if (timeout == TIMEOUT_100MS) {
171 ERROR("%s: SE write over AHB timeout.\n", __func__);
172 ret = -ETIMEDOUT;
173 }
174 }
175
176 /* Ensure that no errors are thrown during operation */
177 if (ret == 0) {
178 val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
179 if (val != 0U) {
180 ERROR("%s: error during SE operation! 0x%x", __func__, val);
181 ret = -ENOTSUP;
182 }
183 }
184
185 return ret;
186}
187
188/*
189 * Verify the SE context save auto has been enabled.
190 * SE_CTX_SAVE_AUTO.ENABLE == ENABLE
191 * If the SE context save auto is not enabled, then set
192 * the context save auto enable and lock the setting.
193 * If the SE context save auto is not enabled and the
194 * enable setting is locked, then return an error.
195 */
196static inline int32_t tegra_se_ctx_save_auto_enable(const tegra_se_dev_t *se_dev)
197{
198 uint32_t val;
199 int32_t ret = 0;
200
201 val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
202 if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_DIS) {
203 if (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN) {
204 ERROR("%s: ERR: Cannot enable atomic. Write locked!\n",
205 __func__);
206 ret = -EACCES;
207 }
208
209 /* Program SE_CTX_SAVE_AUTO */
210 if (ret == 0) {
211 tegra_se_write_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET,
212 SE_CTX_SAVE_AUTO_LOCK_EN |
213 SE_CTX_SAVE_AUTO_EN);
214 }
215 }
216
217 return ret;
218}
219
220/*
221 * SE atomic context save. At SC7 entry, SE driver triggers the
222 * hardware automatically performs the context save operation.
223 */
224static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
225{
226 int32_t ret = 0;
227 uint32_t val = 0;
228 uint32_t blk_count_limit = 0;
229 uint32_t block_count, timeout;
230
231 /* Wait for previous operation to finish */
232 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
233 for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
234 mdelay(1);
235 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
236 }
237
238 if (timeout == TIMEOUT_100MS) {
239 ERROR("%s: ERR: SE status is not idle!\n", __func__);
240 ret = -ETIMEDOUT;
241 }
242
243 /* Clear any pending interrupts */
244 if (ret == 0) {
245 val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
246 tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
247
248 /* Ensure HW atomic context save has been enabled
249 * This should have been done at boot time.
250 * SE_CTX_SAVE_AUTO.ENABLE == ENABLE
251 */
252 ret = tegra_se_ctx_save_auto_enable(se_dev);
253 }
254
255 /* Read the context save progress counter: block_count
256 * Ensure no previous context save has been triggered
257 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
258 */
259 if (ret == 0) {
260 val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
261 block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
262 if (block_count != 0U) {
263 ERROR("%s: ctx_save triggered multiple times\n",
264 __func__);
265 ret = -EALREADY;
266 }
267 }
268
269 /* Set the destination block count when the context save complete */
270 if (ret == 0) {
271 blk_count_limit = block_count + se_dev->ctx_size_blks;
272 }
273
274 /* Program SE_CONFIG register as for RNG operation
275 * SE_CONFIG.ENC_ALG = RNG
276 * SE_CONFIG.DEC_ALG = NOP
277 * SE_CONFIG.ENC_MODE is ignored
278 * SE_CONFIG.DEC_MODE is ignored
279 * SE_CONFIG.DST = MEMORY
280 */
281 if (ret == 0) {
282 val = (SE_CONFIG_ENC_ALG_RNG |
283 SE_CONFIG_DEC_ALG_NOP |
284 SE_CONFIG_DST_MEMORY);
285 tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
286
287 tegra_se_make_data_coherent(se_dev);
288
289 /* SE_CTX_SAVE operation */
290 tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
291 SE_OP_CTX_SAVE);
292
293 ret = tegra_se_context_save_complete(se_dev);
294 }
295
296 /* Check that context has written the correct number of blocks */
297 if (ret == 0) {
298 val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
299 if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
300 ERROR("%s: expected %d blocks but %d were written\n",
301 __func__, blk_count_limit, val);
302 ret = -ECANCELED;
303 }
304 }
305
306 return ret;
307}
308
309/*
310 * Initialize the SE engine handle
311 */
/*
 * Initialize the SE engine handle.
 *
 * Currently only logs entry/exit; SRK generation is still pending
 * (see the TODO below). Kept as the public init entry point so
 * callers need not change when that work lands.
 */
void tegra_se_init(void)
{
	INFO("%s: start SE init\n", __func__);

	/* TODO: Bug 1854340. Generate random SRK */

	INFO("%s: SE init done\n", __func__);
}
320
321/*
322 * Security engine power suspend entry point.
323 * This function is invoked from PSCI power domain suspend handler.
324 */
325int32_t tegra_se_suspend(void)
326{
327 int32_t ret = 0;
328
329 /* Atomic context save se2 and pka1 */
330 INFO("%s: SE2/PKA1 atomic context save\n", __func__);
331 ret = tegra_se_context_save_atomic(&se_dev_2);
332
333 /* Atomic context save se */
334 if (ret == 0) {
335 INFO("%s: SE1 atomic context save\n", __func__);
336 ret = tegra_se_context_save_atomic(&se_dev_1);
337 }
338
339 if (ret == 0) {
340 INFO("%s: SE atomic context save done\n", __func__);
341 }
342
343 return ret;
344}
345
346/*
347 * Save TZRAM to shadow TZRAM in AON
348 */
/*
 * Save TZRAM to shadow TZRAM in AON.
 *
 * Issues a TZRAM save request via SE1 and polls the busy flag in
 * 1ms steps for at most TIMEOUT_100MS iterations.
 *
 * Returns 0 on success or -ETIMEDOUT if the operation does not
 * complete in time.
 */
int32_t tegra_se_save_tzram(void)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	INFO("%s: SE TZRAM save start\n", __func__);

	/* Kick off the TZRAM save operation */
	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);

	/* Poll until the operation's busy flag clears or we time out */
	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
		ret = -ETIMEDOUT;
	}

	if (ret == 0) {
		INFO("%s: SE TZRAM save done!\n", __func__);
	}

	return ret;
}
378
379/*
380 * The function is invoked by SE resume
381 */
/*
 * The function is invoked by SE resume (SC7 exit).
 *
 * Re-applies per-engine settings after warm boot:
 * - Programs and locks the RNG entropy source configuration.
 * - Re-enables and locks the SE atomic context save setting.
 */
static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev);

	/* Lock RNG source to ENTROPY on resume */
	val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
		DRBG_RO_ENT_SRC_LOCK_ENABLE |
		DRBG_RO_ENT_SRC_ENABLE;
	tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);

	/* Enable and lock the SE atomic context save setting */
	if (tegra_se_ctx_save_auto_enable(se_dev) != 0) {
		/* Best-effort: log the failure, resume continues regardless */
		ERROR("%s: ERR: enable SE%d context save auto failed!\n",
			__func__, se_dev->se_num);
	}

	/* TODO: Bug 1854340. Set a random value to SRK */
}
402
403/*
404 * The function is invoked on SC7 resume
405 */
406void tegra_se_resume(void)
407{
408 tegra_se_warm_boot_resume(&se_dev_1);
409 tegra_se_warm_boot_resume(&se_dev_2);
410}