/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <errno.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <se_private.h>
#include <security_engine.h>
#include <tegra_platform.h>

/*******************************************************************************
 * Constants and Macros
 ******************************************************************************/

#define TIMEOUT_100MS		100U	/* Timeout in 100ms */
#define RNG_AES_KEY_INDEX	1

/*******************************************************************************
 * Data structure and global variables
 ******************************************************************************/

/* The security engine contexts are formatted as follows:
 *
 * SE1 CONTEXT:
 * #--------------------------------#
 * |  Random Data        1 Block    |
 * #--------------------------------#
 * |  Sticky Bits        2 Blocks   |
 * #--------------------------------#
 * |  Key Table         64 Blocks   |
 * |    For each Key (x16):         |
 * |      Key:           2 Blocks   |
 * |      Original-IV:   1 Block    |
 * |      Updated-IV:    1 Block    |
 * #--------------------------------#
 * |  RSA Keys          64 Blocks   |
 * #--------------------------------#
 * |  Known Pattern      1 Block    |
 * #--------------------------------#
 *
 * SE2/PKA1 CONTEXT:
 * #--------------------------------#
 * |  Random Data        1 Block    |
 * #--------------------------------#
 * |  Sticky Bits        2 Blocks   |
 * #--------------------------------#
 * |  Key Table         64 Blocks   |
 * |    For each Key (x16):         |
 * |      Key:           2 Blocks   |
 * |      Original-IV:   1 Block    |
 * |      Updated-IV:    1 Block    |
 * #--------------------------------#
 * |  RSA Keys          64 Blocks   |
 * #--------------------------------#
 * |  PKA sticky bits    1 Block    |
 * #--------------------------------#
 * |  PKA keys         512 Blocks   |
 * #--------------------------------#
 * |  Known Pattern      1 Block    |
 * #--------------------------------#
 */

/* Known pattern data */
static const uint32_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE_WORDS] = {
	/* 128 bit AES block */
	0x0C0D0E0F,
	0x08090A0B,
	0x04050607,
	0x00010203,
};

/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;

/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;

/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
	.se_num = 1,
	/* Setup base address for se */
	.se_base = TEGRA_SE1_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se1_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se1_dst_ll_buf,
	/* Setup context save destination */
	.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE),
};

/* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = {
	.se_num = 2,
	/* Setup base address for se */
	.se_base = TEGRA_SE2_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se2_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se2_dst_ll_buf,
	/* Setup context save destination */
	.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000),
};

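/*
 * Set during tegra_se_init() when the JTAG SECUREID (ECID) fuse reads as
 * valid; used to select the DRBG mode (force-instantiation vs force-reseed)
 * when generating the SRK and random data.
 */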
static bool ecid_valid;

/*******************************************************************************
 * Function Definitions
 ******************************************************************************/

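/*
 * Flush the SE source and destination linked list buffers from the CPU
 * data caches, so the security engine sees the latest descriptor contents.
 */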
static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
{
	flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
			sizeof(tegra_se_io_lst_t));
	flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
			sizeof(tegra_se_io_lst_t));
}

/*
 * Check that the SE operation has completed after kickoff.
 * This function is invoked after an SE operation has been started,
 * and it checks the following conditions:
 * 1. SE_INT_STATUS = SE_OP_DONE
 * 2. SE_STATUS = IDLE
 * 3. AHB bus data transfer is complete.
 * 4. SE_ERR_STATUS is clean.
 */
static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	/* Poll the SE interrupt register to ensure the H/W operation completed */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: Atomic context save operation timeout!\n",
				__func__);
		ret = -ETIMEDOUT;
	}

	/* Poll the SE status register until the engine returns to idle */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
				timeout++) {
			mdelay(1);
			val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: ERR: MEM_INTERFACE and SE state "
					"idle state timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Check that the AHB bus transfer is complete */
	if (ret == 0) {
		val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
				(timeout < TIMEOUT_100MS); timeout++) {
			mdelay(1);
			val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: SE write over AHB timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Ensure that no errors were raised during the operation */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
		if (val != 0U) {
			ERROR("%s: error during SE operation! 0x%x\n", __func__, val);
			ret = -ENOTSUP;
		}
	}

	return ret;
}

/*
 * Returns true if the SE engine is configured to perform SE context save in
 * hardware.
 */
static inline bool tegra_se_atomic_save_enabled(const tegra_se_dev_t *se_dev)
{
	uint32_t val;

	val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
	return (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_EN);
}

/*
 * Wait for SE engine to be idle and clear pending interrupts before
 * starting the next SE operation.
 */
static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t timeout;

	/* Wait for previous operation to finish */
	val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: SE status is not idle!\n", __func__);
		ret = -ETIMEDOUT;
	}

	/* Clear any pending interrupts from previous operation */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
	return ret;
}

/*
 * SE atomic context save. At SC7 entry, the SE driver triggers the
 * hardware to perform the context save operation automatically.
 */
static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t blk_count_limit = 0;
	uint32_t block_count;

	/* Check that the previous operation is finalized */
	ret = tegra_se_operation_prepare(se_dev);

	/* Read the context save progress counter: block_count
	 * Ensure no previous context save has been triggered:
	 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
	 */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
		if (block_count != 0U) {
			ERROR("%s: ctx_save triggered multiple times\n",
					__func__);
			ret = -EALREADY;
		}
	}

	/* Set the destination block count expected when the context save completes */
	if (ret == 0) {
		blk_count_limit = block_count + se_dev->ctx_size_blks;
	}

	/* Program the SE_CONFIG register for the RNG operation
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_MODE is ignored
	 * SE_CONFIG.DEC_MODE is ignored
	 * SE_CONFIG.DST = MEMORY
	 */
	if (ret == 0) {
		val = (SE_CONFIG_ENC_ALG_RNG |
			SE_CONFIG_DEC_ALG_NOP |
			SE_CONFIG_DST_MEMORY);
		tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

		tegra_se_make_data_coherent(se_dev);

		/* SE_CTX_SAVE operation */
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
				SE_OP_CTX_SAVE);

		ret = tegra_se_operation_complete(se_dev);
	}

	/* Check that the context save has written the correct number of blocks */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
			ERROR("%s: expected %d blocks but %d were written\n",
					__func__, blk_count_limit,
					SE_CTX_SAVE_GET_BLK_COUNT(val));
			ret = -ECANCELED;
		}
	}

	return ret;
}

/*
 * Security engine primitive operations, including normal operation
 * and the context save operation.
 */
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
					bool context_save)
{
	uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
	int ret = 0;

	assert(se_dev);

	/* Use device buffers for in and out */
	tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
	tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));

	/* Check that previous operation is finalized */
	ret = tegra_se_operation_prepare(se_dev);
	if (ret != 0) {
		goto op_error;
	}

	/* Program SE operation size */
	if (nblocks) {
		tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
	}

	/* Make SE LL data coherent before the SE operation */
	tegra_se_make_data_coherent(se_dev);

	/* Start hardware operation */
	if (context_save)
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
	else
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);

	/* Wait for operation to finish */
	ret = tegra_se_operation_complete(se_dev);

op_error:
	return ret;
}

/*
 * Normal security engine operations other than the context save
 */
int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	return tegra_se_perform_operation(se_dev, nbytes, false);
}

/*
 * Security engine context save operation
 */
int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	return tegra_se_perform_operation(se_dev, nbytes, true);
}

/*
 * Security Engine sequence to generate the SRK.
 * SE1 and SE2 generate different SRKs from different
 * entropy seeds.
 */
static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DST = SRK
	 * SE_OPERATION.OP = START
	 * SE_CRYPTO_LAST_BLOCK = 0
	 */
	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;

	/* Configure random number generator */
	if (ecid_valid)
		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_ENTROPY);
	else
		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* Configure output destination = SRK */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_DST_SRK);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Perform hardware operation */
	ret = tegra_se_start_normal_operation(se_dev, 0);

	return ret;
}

/*
 * Generate plain text random data to some memory location using
 * SE/SE2's SP800-90 random number generator. The random data size
 * must be some multiple of the AES block size (16 bytes).
 */
static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
{
	int ret = 0;
	uint32_t val;

	/* Set some arbitrary memory location to store the random data */
	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->rand_data)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.ENC_MODE = KEY192
	 * SE_CONFIG.DST = MEMORY
	 */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_ENC_MODE_KEY192 |
		SE_CONFIG_DST_MEMORY);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Program the RNG options in SE_CRYPTO_CONFIG as follows:
	 * XOR_POS = BYPASS
	 * INPUT_SEL = RANDOM (Entropy or LFSR)
	 * HASH_ENB = DISABLE
	 */
	val = (SE_CRYPTO_INPUT_RANDOM |
		SE_CRYPTO_XOR_BYPASS |
		SE_CRYPTO_CORE_ENCRYPT |
		SE_CRYPTO_HASH_DISABLE |
		SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
		SE_CRYPTO_IV_ORIGINAL);
	tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);

	/* Configure RNG */
	if (ecid_valid)
		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
	else
		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_LFSR);
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* SE normal operation */
	ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);

	return ret;
}

/*
 * Encrypt memory blocks with SRK as part of the security engine context.
 * The data blocks include: random data and the known pattern data, where
 * the random data is the first block and known pattern is the last block.
 */
static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
		uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
{
	int ret = 0;

	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->src_ll_buf->buffer[0].addr = src_addr;
	se_dev->src_ll_buf->buffer[0].data_len = data_size;
	se_dev->dst_ll_buf->buffer[0].addr = dst_addr;
	se_dev->dst_ll_buf->buffer[0].data_len = data_size;

	/* By setting the context source from memory and calling the context save
	 * operation, the SE encrypts the memory data with SRK.
	 */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);

	ret = tegra_se_start_ctx_save_operation(se_dev, data_size);

	return ret;
}

/*
 * Context save the key table access control sticky bits and
 * security status of each key-slot. The encrypted sticky-bits are
 * 32 bytes (2 AES blocks) and formatted as the following structure:
 * {	bit in registers			bit in context save
 *	SECURITY_0[4]				158
 *	SE_RSA_KEYTABLE_ACCESS_1[2:0]		157:155
 *	SE_RSA_KEYTABLE_ACCESS_0[2:0]		154:152
 *	SE_RSA_SECURITY_PERKEY_0[1:0]		151:150
 *	SE_CRYPTO_KEYTABLE_ACCESS_15[7:0]	149:142
 *	...,
 *	SE_CRYPTO_KEYTABLE_ACCESS_0[7:0]	29:22
 *	SE_CRYPTO_SECURITY_PERKEY_0[15:0]	21:6
 *	SE_TZRAM_SECURITY_0[1:0]		5:4
 *	SE_SECURITY_0[16]			3:3
 *	SE_SECURITY_0[2:0] }			2:0
 */
static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;

	/*
	 * The 1st AES block saves sticky-bit context bytes 1 - 16 (words 0 - 3).
	 * The 2nd AES block saves sticky-bit context bytes 17 - 32 (words 4 - 7).
	 */
	for (int i = 0; i < 2; i++) {
		val = SE_CTX_SAVE_SRC_STICKY_BITS |
			SE_CTX_SAVE_STICKY_WORD_QUAD(i);
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev,
				SE_CTX_SAVE_STICKY_BITS_SIZE);
		if (ret)
			break;
		se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
	}

	return ret;
}

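/*
 * Encrypt and save the key, original IV and updated IV of every AES key
 * slot into the context save buffer, one word-quad at a time.
 */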
static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		ret = -EINVAL;
		goto aes_keytable_save_err;
	}

	/* AES key context save */
	for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						ctx_save_buf)->key_slots[slot].key)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
		for (int i = 0; i < 2; i++) {
			val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
				SE_CTX_SAVE_KEY_INDEX(slot) |
				SE_CTX_SAVE_WORD_QUAD(i);
			tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
						"slot=%d, word_quad=%d.\n",
						__func__, slot, i);
				goto aes_keytable_save_err;
			}
			se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
		}

		/* OIV context save */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						ctx_save_buf)->key_slots[slot].oiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}

		/* UIV context save */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						ctx_save_buf)->key_slots[slot].uiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_UPD_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}
	}

aes_keytable_save_err:
	return ret;
}

static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;
	/* First the modulus and then the exponent must be
	 * encrypted and saved. This is repeated for SLOT 0
	 * and SLOT 1. Hence the order:
	 * SLOT 0 exponent : RSA_KEY_INDEX : 0
	 * SLOT 0 modulus : RSA_KEY_INDEX : 1
	 * SLOT 1 exponent : RSA_KEY_INDEX : 2
	 * SLOT 1 modulus : RSA_KEY_INDEX : 3
	 */
	const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
		/* RSA key slot 0 */
		{SE_RSA_KEY_INDEX_SLOT0_EXP, SE_RSA_KEY_INDEX_SLOT0_MOD},
		/* RSA key slot 1 */
		{SE_RSA_KEY_INDEX_SLOT1_EXP, SE_RSA_KEY_INDEX_SLOT1_MOD},
	};

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rsa_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) {
		/* loop for modulus and exponent */
		for (int index = 0; index < 2; index++) {
			for (int word_quad = 0; word_quad < 16; word_quad++) {
				val = SE_CTX_SAVE_SRC_RSA_KEYTABLE |
					SE_CTX_SAVE_RSA_KEY_INDEX(
						key_index_mod[slot][index]) |
					SE_CTX_RSA_WORD_QUAD(word_quad);
				tegra_se_write_32(se_dev,
					SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

				/* SE context save operation */
				ret = tegra_se_start_ctx_save_operation(se_dev,
						TEGRA_SE_KEY_128_SIZE);
				if (ret) {
					ERROR("%s: ERR: RSA key CTX_SAVE OP failed, slot=%d.\n",
							__func__, slot);
					goto rsa_keytable_save_err;
				}

				/* Update the pointer to the next word quad */
				se_dev->dst_ll_buf->buffer[0].addr +=
					TEGRA_SE_KEY_128_SIZE;
			}
		}
	}

rsa_keytable_save_err:
	return ret;
}

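/*
 * Encrypt and save the PKA1 sticky bits (1 AES block) into the SE2/PKA1
 * context save buffer.
 */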
static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev)
{
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					ctx_save_buf)->pka_ctx.sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;

	/* PKA1 sticky bits are 1 AES block (16 bytes) */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
			SE_CTX_SAVE_SRC_PKA1_STICKY_BITS |
			SE_CTX_STICKY_WORD_QUAD_WORDS_0_3);

	/* SE context save operation */
	ret = tegra_se_start_ctx_save_operation(se_dev, 0);
	if (ret) {
		ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
				__func__);
		goto pka_sticky_bits_save_err;
	}

pka_sticky_bits_save_err:
	return ret;
}

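/*
 * Encrypt and save the PKA1 key table into the SE2/PKA1 context save
 * buffer, one word-quad (16 bytes) at a time.
 */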
static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					ctx_save_buf)->pka_ctx.pka_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	/* for each slot, save word quad 0-127 */
	for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) {
		for (int word_quad = 0; word_quad < 512/4; word_quad++) {
			val = SE_CTX_SAVE_SRC_PKA1_KEYTABLE |
				SE_CTX_PKA1_WORD_QUAD_L((slot * 128) +
							word_quad) |
				SE_CTX_PKA1_WORD_QUAD_H((slot * 128) +
							word_quad);
			tegra_se_write_32(se_dev,
				SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: pka1 keytable ctx save error\n",
						__func__);
				goto pka_keytable_save_err;
			}

			/* Update the pointer to the next word quad */
			se_dev->dst_ll_buf->buffer[0].addr +=
				TEGRA_SE_KEY_128_SIZE;
		}
	}

pka_keytable_save_err:
	return ret;
}

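/*
 * Trigger a context save operation with the SRK as the source; this saves
 * the SRK to the PMC secure scratch registers for the BootROM.
 */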
static int tegra_se_save_SRK(tegra_se_dev_t *se_dev)
{
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
			SE_CTX_SAVE_SRC_SRK);

	/* SE context save operation */
	return tegra_se_start_ctx_save_operation(se_dev, 0);
}

/*
 * Lock the SE so that it cannot be accessed by non-TZ clients.
 */
static inline void tegra_se_lock(tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev);
	val = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET);
	val |= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE);
	tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET, val);
}

/*
 * Use SRK to encrypt SE state and save to TZRAM carveout
 */
static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
{
	int err = 0;

	assert(se_dev);

	/* Lock entire SE/SE2 as TZ protected */
	tegra_se_lock(se_dev);

	INFO("%s: generate SRK\n", __func__);
	/* Generate SRK */
	err = tegra_se_generate_srk(se_dev);
	if (err) {
		ERROR("%s: ERR: SRK generation failed\n", __func__);
		return err;
	}

	INFO("%s: generate random data\n", __func__);
	/* Generate random data */
	err = tegra_se_lp_generate_random_data(se_dev);
	if (err) {
		ERROR("%s: ERR: LP random pattern generation failed\n", __func__);
		return err;
	}

	INFO("%s: encrypt random data\n", __func__);
	/* Encrypt the random data block */
	err = tegra_se_lp_data_context_save(se_dev,
		((uint64_t)(&(((tegra_se_context_t *)se_dev->
		ctx_save_buf)->rand_data))),
		((uint64_t)(&(((tegra_se_context_t *)se_dev->
		ctx_save_buf)->rand_data))),
		SE_CTX_SAVE_RANDOM_DATA_SIZE);
	if (err) {
		ERROR("%s: ERR: random pattern encryption failed\n", __func__);
		return err;
	}

	INFO("%s: save SE sticky bits\n", __func__);
	/* Save AES sticky bits context */
	err = tegra_se_lp_sticky_bits_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: sticky bits context save failed\n", __func__);
		return err;
	}

	INFO("%s: save AES keytables\n", __func__);
	/* Save AES key table context */
	err = tegra_se_aeskeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: LP keytable save failed\n", __func__);
		return err;
	}

	/* RSA key slot table context save */
	INFO("%s: save RSA keytables\n", __func__);
	err = tegra_se_lp_rsakeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: rsa key table context save failed\n", __func__);
		return err;
	}

	/* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
	 * via SE2.
	 */
	if (se_dev->se_num == 2) {
		/* Encrypt PKA1 sticky bits on SE2 only */
		INFO("%s: save PKA sticky bits\n", __func__);
		err = tegra_se_pkakeytable_sticky_bits_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__);
			return err;
		}

		/* Encrypt PKA1 keyslots on SE2 only */
		INFO("%s: save PKA keytables\n", __func__);
		err = tegra_se_pkakeytable_context_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA key table context save failed\n", __func__);
			return err;
		}
	}

	/* Encrypt known pattern */
	if (se_dev->se_num == 1) {
		err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&se_ctx_known_pattern_data)),
			((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
			SE_CTX_KNOWN_PATTERN_SIZE);
	} else if (se_dev->se_num == 2) {
		err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&se_ctx_known_pattern_data)),
			((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
			SE_CTX_KNOWN_PATTERN_SIZE);
	}
	if (err) {
		ERROR("%s: ERR: save LP known pattern failure\n", __func__);
		return err;
	}

	/* Write lp context buffer address into PMC scratch register */
	if (se_dev->se_num == 1) {
		/* SE context address */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH117_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	} else if (se_dev->se_num == 2) {
		/* SE2 & PKA1 context address */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	}

	/* Save the SRK to the PMC secure scratch registers for the BootROM,
	 * which verifies and restores the security engine context on warm boot.
	 */
	err = tegra_se_save_SRK(se_dev);
	if (err < 0) {
		ERROR("%s: ERR: LP SRK save failure\n", __func__);
		return err;
	}

	INFO("%s: SE context save done\n", __func__);

	return err;
}

/*
 * Initialize the SE engine handle
 */
void tegra_se_init(void)
{
	uint32_t val = 0;

	INFO("%s: start SE init\n", __func__);

	/* Generate random SRK to initialize DRBG */
	tegra_se_generate_srk(&se_dev_1);
	tegra_se_generate_srk(&se_dev_2);

	/* determine if ECID is valid */
	val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);
	ecid_valid = (val == ECID_VALID);

	INFO("%s: SE init done\n", __func__);
}

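/*
 * Enable the entropy and SE clocks and take both blocks out of reset.
 */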
static void tegra_se_enable_clocks(void)
{
	uint32_t val = 0;

	/* Enable entropy clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
	val |= ENTROPY_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);

	/* De-Assert Entropy Reset */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W);
	val &= ~ENTROPY_RESET_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);

	/* Enable SE clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
	val |= SE_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);

	/* De-Assert SE Reset */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V);
	val &= ~SE_RESET_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val);
}

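/*
 * Gate the entropy and SE clocks once the engines are no longer needed.
 */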
static void tegra_se_disable_clocks(void)
{
	uint32_t val = 0;

	/* Disable entropy clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
	val &= ~ENTROPY_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);

	/* Disable SE clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
	val &= ~SE_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
}

/*
 * Security engine power suspend entry point.
 * This function is invoked from the PSCI power domain suspend handler.
 */
int32_t tegra_se_suspend(void)
{
	int32_t ret = 0;
	uint32_t val = 0;

	/* SE does not use SMMU in EL3, disable SMMU.
	 * This will be re-enabled by the kernel on resume */
	val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0);
	val &= ~PPCS_SMMU_ENABLE;
	mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val);

	tegra_se_enable_clocks();

	if (tegra_se_atomic_save_enabled(&se_dev_2) &&
			tegra_se_atomic_save_enabled(&se_dev_1)) {
		/* Atomic context save se2 and pka1 */
		INFO("%s: SE2/PKA1 atomic context save\n", __func__);
		if (ret == 0) {
			ret = tegra_se_context_save_atomic(&se_dev_2);
		}

		/* Atomic context save se */
		if (ret == 0) {
			INFO("%s: SE1 atomic context save\n", __func__);
			ret = tegra_se_context_save_atomic(&se_dev_1);
		}

		if (ret == 0) {
			INFO("%s: SE atomic context save done\n", __func__);
		}
	} else if (!tegra_se_atomic_save_enabled(&se_dev_2) &&
			!tegra_se_atomic_save_enabled(&se_dev_1)) {
		/* SW context save se2 and pka1 */
		INFO("%s: SE2/PKA1 legacy(SW) context save\n", __func__);
		if (ret == 0) {
			ret = tegra_se_context_save_sw(&se_dev_2);
		}

		/* SW context save se */
		if (ret == 0) {
			INFO("%s: SE1 legacy(SW) context save\n", __func__);
			ret = tegra_se_context_save_sw(&se_dev_1);
		}

		if (ret == 0) {
			INFO("%s: SE SW context save done\n", __func__);
		}
	} else {
		ERROR("%s: One SE is set for atomic CTX save, the other is not\n",
				__func__);
	}

	tegra_se_disable_clocks();

	return ret;
}

/*
 * Save TZRAM to shadow TZRAM in AON
 */
int32_t tegra_se_save_tzram(void)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	INFO("%s: SE TZRAM save start\n", __func__);
	tegra_se_enable_clocks();

	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);

	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
		ret = -ETIMEDOUT;
	}

	if (ret == 0) {
		INFO("%s: SE TZRAM save done!\n", __func__);
	}

	tegra_se_disable_clocks();

	return ret;
}

/*
 * This function is invoked on SE resume (warm boot) to reconfigure the RNG
 * source and re-initialize the SRK.
 */
static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev);

	/* Lock RNG source to ENTROPY on resume */
	val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
		DRBG_RO_ENT_SRC_LOCK_ENABLE |
		DRBG_RO_ENT_SRC_ENABLE;
	tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);

	/* Set a random value to SRK to initialize DRBG */
	tegra_se_generate_srk(se_dev);
}

/*
 * This function is invoked on SC7 resume
 */
void tegra_se_resume(void)
{
	tegra_se_warm_boot_resume(&se_dev_1);
	tegra_se_warm_boot_resume(&se_dev_2);
}