blob: 9650896fb75315afbddef44d327c019f83419c1b [file] [log] [blame]
Marvin Hsu21eea972017-04-11 11:00:48 +08001/*
2 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
3 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8#include <arch_helpers.h>
9#include <assert.h>
10#include <common/debug.h>
11#include <delay_timer.h>
12#include <errno.h>
13#include <mmio.h>
14#include <psci.h>
15#include <se_private.h>
16#include <security_engine.h>
17#include <tegra_platform.h>
18
19/*******************************************************************************
20 * Constants and Macros
21 ******************************************************************************/
22
Anthony Zhou0e07e452017-07-26 17:16:54 +080023#define TIMEOUT_100MS 100U // Timeout in 100ms
Marvin Hsu40d3a672017-04-11 11:00:48 +080024#define RNG_AES_KEY_INDEX 1
Marvin Hsu21eea972017-04-11 11:00:48 +080025
26/*******************************************************************************
27 * Data structure and global variables
28 ******************************************************************************/
29
30/* The security engine contexts are formatted as follows:
31 *
32 * SE1 CONTEXT:
33 * #--------------------------------#
34 * | Random Data 1 Block |
35 * #--------------------------------#
36 * | Sticky Bits 2 Blocks |
37 * #--------------------------------#
38 * | Key Table 64 Blocks |
39 * | For each Key (x16): |
40 * | Key: 2 Blocks |
41 * | Original-IV: 1 Block |
42 * | Updated-IV: 1 Block |
43 * #--------------------------------#
44 * | RSA Keys 64 Blocks |
45 * #--------------------------------#
46 * | Known Pattern 1 Block |
47 * #--------------------------------#
48 *
49 * SE2/PKA1 CONTEXT:
50 * #--------------------------------#
51 * | Random Data 1 Block |
52 * #--------------------------------#
53 * | Sticky Bits 2 Blocks |
54 * #--------------------------------#
55 * | Key Table 64 Blocks |
56 * | For each Key (x16): |
57 * | Key: 2 Blocks |
58 * | Original-IV: 1 Block |
59 * | Updated-IV: 1 Block |
60 * #--------------------------------#
61 * | RSA Keys 64 Blocks |
62 * #--------------------------------#
63 * | PKA sticky bits 1 Block |
64 * #--------------------------------#
65 * | PKA keys 512 Blocks |
66 * #--------------------------------#
67 * | Known Pattern 1 Block |
68 * #--------------------------------#
69 */
70
/* Known pattern data, encrypted last so BootROM can verify the context */
static const uint32_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE_WORDS] = {
	/* 128 bit AES block */
	0x0C0D0E0F,
	0x08090A0B,
	0x04050607,
	0x00010203,
};

/* SE1 input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;

/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;

/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
	.se_num = 1,
	/* Setup base address for se */
	.se_base = TEGRA_SE1_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se1_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se1_dst_ll_buf,
	/* Setup context save destination (start of the TZRAM carveout) */
	.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE),
};

/* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = {
	.se_num = 2,
	/* Setup base address for se */
	.se_base = TEGRA_SE2_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se2_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se2_dst_ll_buf,
	/* Setup context save destination (0x1000 bytes above the SE1 context) */
	.ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000),
};
117
118/*******************************************************************************
119 * Functions Definition
120 ******************************************************************************/
121
122static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
123{
124 flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
125 sizeof(tegra_se_io_lst_t));
126 flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
127 sizeof(tegra_se_io_lst_t));
128}
129
130/*
Sam Payne809c7732017-05-15 11:10:37 -0700131 * Check that SE operation has completed after kickoff
132 * This function is invoked after an SE operation has been started,
Marvin Hsu21eea972017-04-11 11:00:48 +0800133 * and it checks the following conditions:
134 * 1. SE_INT_STATUS = SE_OP_DONE
135 * 2. SE_STATUS = IDLE
136 * 3. AHB bus data transfer complete.
137 * 4. SE_ERR_STATUS is clean.
138 */
Sam Payne809c7732017-05-15 11:10:37 -0700139static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
Marvin Hsu21eea972017-04-11 11:00:48 +0800140{
141 uint32_t val = 0;
142 int32_t ret = 0;
143 uint32_t timeout;
144
145 /* Poll the SE interrupt register to ensure H/W operation complete */
146 val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
147 for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
148 (timeout < TIMEOUT_100MS); timeout++) {
149 mdelay(1);
150 val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
151 }
152
153 if (timeout == TIMEOUT_100MS) {
154 ERROR("%s: ERR: Atomic context save operation timeout!\n",
155 __func__);
156 ret = -ETIMEDOUT;
157 }
158
159 /* Poll the SE status idle to ensure H/W operation complete */
160 if (ret == 0) {
161 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
162 for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
163 timeout++) {
164 mdelay(1);
165 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
166 }
167
168 if (timeout == TIMEOUT_100MS) {
169 ERROR("%s: ERR: MEM_INTERFACE and SE state "
170 "idle state timeout.\n", __func__);
171 ret = -ETIMEDOUT;
172 }
173 }
174
175 /* Check AHB bus transfer complete */
176 if (ret == 0) {
177 val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
178 for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
179 (timeout < TIMEOUT_100MS); timeout++) {
180 mdelay(1);
181 val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
182 }
183
184 if (timeout == TIMEOUT_100MS) {
185 ERROR("%s: SE write over AHB timeout.\n", __func__);
186 ret = -ETIMEDOUT;
187 }
188 }
189
190 /* Ensure that no errors are thrown during operation */
191 if (ret == 0) {
192 val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
193 if (val != 0U) {
194 ERROR("%s: error during SE operation! 0x%x", __func__, val);
195 ret = -ENOTSUP;
196 }
197 }
198
199 return ret;
200}
201
202/*
Samuel Payne25fdca22017-06-15 13:57:47 -0700203 * Returns true if the SE engine is configured to perform SE context save in
204 * hardware.
Marvin Hsu21eea972017-04-11 11:00:48 +0800205 */
Marvin Hsu40d3a672017-04-11 11:00:48 +0800206static inline bool tegra_se_atomic_save_enabled(const tegra_se_dev_t *se_dev)
Marvin Hsu21eea972017-04-11 11:00:48 +0800207{
208 uint32_t val;
Marvin Hsu21eea972017-04-11 11:00:48 +0800209
210 val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
Marvin Hsu40d3a672017-04-11 11:00:48 +0800211 return (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_EN);
Marvin Hsu21eea972017-04-11 11:00:48 +0800212}
213
214/*
Sam Payne809c7732017-05-15 11:10:37 -0700215 * Wait for SE engine to be idle and clear pending interrupts before
216 * starting the next SE operation.
Marvin Hsu21eea972017-04-11 11:00:48 +0800217 */
Sam Payne809c7732017-05-15 11:10:37 -0700218static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
Marvin Hsu21eea972017-04-11 11:00:48 +0800219{
220 int32_t ret = 0;
221 uint32_t val = 0;
Sam Payne809c7732017-05-15 11:10:37 -0700222 uint32_t timeout;
Marvin Hsu21eea972017-04-11 11:00:48 +0800223
224 /* Wait for previous operation to finish */
225 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
226 for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
227 mdelay(1);
228 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
229 }
230
231 if (timeout == TIMEOUT_100MS) {
232 ERROR("%s: ERR: SE status is not idle!\n", __func__);
233 ret = -ETIMEDOUT;
234 }
235
Sam Payne809c7732017-05-15 11:10:37 -0700236 /* Clear any pending interrupts from previous operation */
237 val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
238 tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
239 return ret;
240}
241
/*
 * SE atomic context save. At SC7 entry, the SE driver triggers the
 * hardware to automatically perform the context save operation.
 */
246static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
247{
248 int32_t ret = 0;
249 uint32_t val = 0;
250 uint32_t blk_count_limit = 0;
251 uint32_t block_count;
252
253 /* Check that previous operation is finalized */
254 ret = tegra_se_operation_prepare(se_dev);
Marvin Hsu21eea972017-04-11 11:00:48 +0800255
Marvin Hsu21eea972017-04-11 11:00:48 +0800256 /* Read the context save progress counter: block_count
257 * Ensure no previous context save has been triggered
258 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
259 */
260 if (ret == 0) {
261 val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
262 block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
263 if (block_count != 0U) {
264 ERROR("%s: ctx_save triggered multiple times\n",
265 __func__);
266 ret = -EALREADY;
267 }
268 }
269
270 /* Set the destination block count when the context save complete */
271 if (ret == 0) {
272 blk_count_limit = block_count + se_dev->ctx_size_blks;
273 }
274
275 /* Program SE_CONFIG register as for RNG operation
276 * SE_CONFIG.ENC_ALG = RNG
277 * SE_CONFIG.DEC_ALG = NOP
278 * SE_CONFIG.ENC_MODE is ignored
279 * SE_CONFIG.DEC_MODE is ignored
280 * SE_CONFIG.DST = MEMORY
281 */
282 if (ret == 0) {
283 val = (SE_CONFIG_ENC_ALG_RNG |
284 SE_CONFIG_DEC_ALG_NOP |
285 SE_CONFIG_DST_MEMORY);
286 tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
287
288 tegra_se_make_data_coherent(se_dev);
289
290 /* SE_CTX_SAVE operation */
291 tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
292 SE_OP_CTX_SAVE);
293
Sam Payne809c7732017-05-15 11:10:37 -0700294 ret = tegra_se_operation_complete(se_dev);
Marvin Hsu21eea972017-04-11 11:00:48 +0800295 }
296
297 /* Check that context has written the correct number of blocks */
298 if (ret == 0) {
299 val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
300 if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
301 ERROR("%s: expected %d blocks but %d were written\n",
302 __func__, blk_count_limit, val);
303 ret = -ECANCELED;
304 }
305 }
306
307 return ret;
308}
309
310/*
Sam Payne809c7732017-05-15 11:10:37 -0700311 * Security engine primitive operations, including normal operation
312 * and the context save operation.
313 */
Marvin Hsu40d3a672017-04-11 11:00:48 +0800314static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
315 bool context_save)
Sam Payne809c7732017-05-15 11:10:37 -0700316{
317 uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
318 int ret = 0;
319
320 assert(se_dev);
321
322 /* Use device buffers for in and out */
323 tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
324 tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));
325
326 /* Check that previous operation is finalized */
327 ret = tegra_se_operation_prepare(se_dev);
328 if (ret != 0) {
329 goto op_error;
330 }
331
332 /* Program SE operation size */
333 if (nblocks) {
334 tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
335 }
336
337 /* Make SE LL data coherent before the SE operation */
338 tegra_se_make_data_coherent(se_dev);
339
340 /* Start hardware operation */
Marvin Hsu40d3a672017-04-11 11:00:48 +0800341 if (context_save)
342 tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
343 else
344 tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
Sam Payne809c7732017-05-15 11:10:37 -0700345
346 /* Wait for operation to finish */
347 ret = tegra_se_operation_complete(se_dev);
348
349op_error:
350 return ret;
351}
352
353/*
Marvin Hsu40d3a672017-04-11 11:00:48 +0800354 * Normal security engine operations other than the context save
355 */
356int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
357{
358 return tegra_se_perform_operation(se_dev, nbytes, false);
359}
360
361/*
362 * Security engine context save operation
363 */
364int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
365{
366 return tegra_se_perform_operation(se_dev, nbytes, true);
367}
368
369/*
 * Security Engine sequence to generate SRK
371 * SE and SE2 will generate different SRK by different
372 * entropy seeds.
373 */
374static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
375{
376 int ret = PSCI_E_INTERN_FAIL;
377 uint32_t val;
378
379 /* Confgure the following hardware register settings:
380 * SE_CONFIG.DEC_ALG = NOP
381 * SE_CONFIG.ENC_ALG = RNG
382 * SE_CONFIG.DST = SRK
383 * SE_OPERATION.OP = START
384 * SE_CRYPTO_LAST_BLOCK = 0
385 */
386 se_dev->src_ll_buf->last_buff_num = 0;
387 se_dev->dst_ll_buf->last_buff_num = 0;
388
389 /* Configure random number generator */
390 val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
391 tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
392
393 /* Configure output destination = SRK */
394 val = (SE_CONFIG_ENC_ALG_RNG |
395 SE_CONFIG_DEC_ALG_NOP |
396 SE_CONFIG_DST_SRK);
397 tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
398
399 /* Perform hardware operation */
Marvin Hsu40d3a672017-04-11 11:00:48 +0800400 ret = tegra_se_start_normal_operation(se_dev, 0);
401
402 return ret;
403}
404
405/*
406 * Generate plain text random data to some memory location using
407 * SE/SE2's SP800-90 random number generator. The random data size
408 * must be some multiple of the AES block size (16 bytes).
409 */
static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
{
	int ret = 0;
	uint32_t val;

	/* Set some arbitrary memory location to store the random data */
	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	/* Destination is the rand_data field of the SE context-save blob */
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->rand_data)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;


	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.ENC_MODE = KEY192
	 * SE_CONFIG.DST = MEMORY
	 */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_ENC_MODE_KEY192 |
		SE_CONFIG_DST_MEMORY);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Program the RNG options in SE_CRYPTO_CONFIG as follows:
	 * XOR_POS = BYPASS
	 * INPUT_SEL = RANDOM (Entropy or LFSR)
	 * HASH_ENB = DISABLE
	 */
	val = (SE_CRYPTO_INPUT_RANDOM |
		SE_CRYPTO_XOR_BYPASS |
		SE_CRYPTO_CORE_ENCRYPT |
		SE_CRYPTO_HASH_DISABLE |
		SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
		SE_CRYPTO_IV_ORIGINAL);
	tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);

	/* Configure RNG: force instantiation, LFSR entropy source */
	val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* SE normal operation */
	ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);

	return ret;
}
460
461/*
462 * Encrypt memory blocks with SRK as part of the security engine context.
463 * The data blocks include: random data and the known pattern data, where
464 * the random data is the first block and known pattern is the last block.
465 */
466static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
467 uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
468{
469 int ret = 0;
470
471 se_dev->src_ll_buf->last_buff_num = 0;
472 se_dev->dst_ll_buf->last_buff_num = 0;
473 se_dev->src_ll_buf->buffer[0].addr = src_addr;
474 se_dev->src_ll_buf->buffer[0].data_len = data_size;
475 se_dev->dst_ll_buf->buffer[0].addr = dst_addr;
476 se_dev->dst_ll_buf->buffer[0].data_len = data_size;
477
478 /* By setting the context source from memory and calling the context save
479 * operation, the SE encrypts the memory data with SRK.
480 */
481 tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);
482
483 ret = tegra_se_start_ctx_save_operation(se_dev, data_size);
484
485 return ret;
486}
487
488/*
489 * Context save the key table access control sticky bits and
490 * security status of each key-slot. The encrypted sticky-bits are
491 * 32 bytes (2 AES blocks) and formatted as the following structure:
492 * { bit in registers bit in context save
493 * SECURITY_0[4] 158
494 * SE_RSA_KEYTABLE_ACCE4SS_1[2:0] 157:155
495 * SE_RSA_KEYTABLE_ACCE4SS_0[2:0] 154:152
496 * SE_RSA_SECURITY_PERKEY_0[1:0] 151:150
497 * SE_CRYPTO_KEYTABLE_ACCESS_15[7:0] 149:142
498 * ...,
499 * SE_CRYPTO_KEYTABLE_ACCESS_0[7:0] 29:22
500 * SE_CRYPTO_SECURITY_PERKEY_0[15:0] 21:6
501 * SE_TZRAM_SECURITY_0[1:0] 5:4
502 * SE_SECURITY_0[16] 3:3
503 * SE_SECURITY_0[2:0] } 2:0
504 */
static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	/* Destination is the sticky_bits field of the SE context-save blob */
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;

	/*
	 * The 1st AES block saves the sticky-bits context 1 - 16 bytes (0 - 3 words).
	 * The 2nd AES block saves the sticky-bits context 17 - 32 bytes (4 - 7 words).
	 */
	for (int i = 0; i < 2; i++) {
		/* Select the sticky-bits word quad for this iteration */
		val = SE_CTX_SAVE_SRC_STICKY_BITS |
			SE_CTX_SAVE_STICKY_WORD_QUAD(i);
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev,
				SE_CTX_SAVE_STICKY_BITS_SIZE);
		if (ret)
			break;
		/* Advance the destination to the next encrypted block */
		se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
	}

	return ret;
}
538
/*
 * Save all AES key slots (key material plus original and updated IVs)
 * into the SE context-save buffer, one slot at a time. Each 128-bit
 * key half, OIV and UIV is encrypted by a separate context-save
 * operation; the order (key word-quads, then OIV, then UIV) follows
 * the context layout described at the top of this file.
 */
static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		ret = -EINVAL;
		goto aes_keytable_save_err;
	}

	/* AES key context save */
	for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].key)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
		/* A 256-bit key occupies two 128-bit word quads */
		for (int i = 0; i < 2; i++) {
			val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
				SE_CTX_SAVE_KEY_INDEX(slot) |
				SE_CTX_SAVE_WORD_QUAD(i);
			tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
						"slot=%d, word_quad=%d.\n",
						__func__, slot, i);
				goto aes_keytable_save_err;
			}
			se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
		}

		/* OIV (original IV) context save */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].oiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}

		/* UIV (updated IV) context save */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						 ctx_save_buf)->key_slots[slot].uiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_UPD_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}
	}

aes_keytable_save_err:
	return ret;
}
619
static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;
	/* First the modulus and then the exponent must be
	 * encrypted and saved. This is repeated for SLOT 0
	 * and SLOT 1. Hence the order:
	 * SLOT 0 exponent : RSA_KEY_INDEX : 0
	 * SLOT 0 modulus : RSA_KEY_INDEX : 1
	 * SLOT 1 exponent : RSA_KEY_INDEX : 2
	 * SLOT 1 modulus : RSA_KEY_INDEX : 3
	 */
	const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
		/* RSA key slot 0 */
		{SE_RSA_KEY_INDEX_SLOT0_EXP, SE_RSA_KEY_INDEX_SLOT0_MOD},
		/* RSA key slot 1 */
		{SE_RSA_KEY_INDEX_SLOT1_EXP, SE_RSA_KEY_INDEX_SLOT1_MOD},
	};

	se_dev->dst_ll_buf->last_buff_num = 0;
	/* Destination is the rsa_keys area of the SE context-save blob */
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se_context_t *)se_dev->
					 ctx_save_buf)->rsa_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) {
		/* loop for modulus and exponent */
		for (int index = 0; index < 2; index++) {
			/* each RSA component is saved as 16 word quads */
			for (int word_quad = 0; word_quad < 16; word_quad++) {
				val = SE_CTX_SAVE_SRC_RSA_KEYTABLE |
					SE_CTX_SAVE_RSA_KEY_INDEX(
						key_index_mod[slot][index]) |
					SE_CTX_RSA_WORD_QUAD(word_quad);
				tegra_se_write_32(se_dev,
						SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

				/* SE context save operation */
				ret = tegra_se_start_ctx_save_operation(se_dev,
						TEGRA_SE_KEY_128_SIZE);
				if (ret) {
					ERROR("%s: ERR: slot=%d.\n",
							__func__, slot);
					goto rsa_keytable_save_err;
				}

				/* Update the pointer to the next word quad */
				se_dev->dst_ll_buf->buffer[0].addr +=
					TEGRA_SE_KEY_128_SIZE;
			}
		}
	}

rsa_keytable_save_err:
	return ret;
}
675
676static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev)
677{
678 int ret = 0;
679
680 se_dev->dst_ll_buf->last_buff_num = 0;
681 se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
682 ((tegra_se2_context_blob_t *)se_dev->
683 ctx_save_buf)->pka_ctx.sticky_bits)));
684 se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;
685
686 /* PKA1 sticky bits are 1 AES block (16 bytes) */
687 tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
688 SE_CTX_SAVE_SRC_PKA1_STICKY_BITS |
689 SE_CTX_STICKY_WORD_QUAD_WORDS_0_3);
690
691 /* SE context save operation */
692 ret = tegra_se_start_ctx_save_operation(se_dev, 0);
693 if (ret) {
694 ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
695 __func__);
696 goto pka_sticky_bits_save_err;
697 }
Sam Payne809c7732017-05-15 11:10:37 -0700698
Marvin Hsu40d3a672017-04-11 11:00:48 +0800699pka_sticky_bits_save_err:
Sam Payne809c7732017-05-15 11:10:37 -0700700 return ret;
701}
702
/*
 * Encrypt and save all PKA1 key slots into the SE2 context-save blob.
 * Each slot is 512 words, saved 4 words (one 128-bit quad) at a time.
 */
static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	/* Destination is the pka_keys area of the SE2 context-save blob */
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					 ctx_save_buf)->pka_ctx.pka_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	/* for each slot, save word quad 0-127 */
	for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) {
		for (int word_quad = 0; word_quad < 512/4; word_quad++) {
			/* program the absolute word-quad index (slot * 128 + quad)
			 * into both the low and high index fields
			 */
			val = SE_CTX_SAVE_SRC_PKA1_KEYTABLE |
				SE_CTX_PKA1_WORD_QUAD_L((slot * 128) +
						word_quad) |
				SE_CTX_PKA1_WORD_QUAD_H((slot * 128) +
						word_quad);
			tegra_se_write_32(se_dev,
					SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: pka1 keytable ctx save error\n",
						__func__);
				goto pka_keytable_save_err;
			}

			/* Update the pointer to the next word quad */
			se_dev->dst_ll_buf->buffer[0].addr +=
				TEGRA_SE_KEY_128_SIZE;
		}
	}

pka_keytable_save_err:
	return ret;
}
743
744static int tegra_se_save_SRK(tegra_se_dev_t *se_dev)
745{
746 tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
747 SE_CTX_SAVE_SRC_SRK);
748
749 /* SE context save operation */
750 return tegra_se_start_ctx_save_operation(se_dev, 0);
751}
752
Sam Payne809c7732017-05-15 11:10:37 -0700753/*
Marvin Hsu40d3a672017-04-11 11:00:48 +0800754 * Lock both SE from non-TZ clients.
755 */
756static inline void tegra_se_lock(tegra_se_dev_t *se_dev)
757{
758 uint32_t val;
759
760 assert(se_dev);
761 val = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET);
762 val |= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE);
763 tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET, val);
764}
765
766/*
767 * Use SRK to encrypt SE state and save to TZRAM carveout
768 */
static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
{
	int err = 0;

	assert(se_dev);

	/* Lock entire SE/SE2 as TZ protected */
	tegra_se_lock(se_dev);

	INFO("%s: generate SRK\n", __func__);
	/* Generate SRK */
	err = tegra_se_generate_srk(se_dev);
	if (err) {
		ERROR("%s: ERR: SRK generation failed\n", __func__);
		return err;
	}

	INFO("%s: generate random data\n", __func__);
	/* Generate random data */
	err = tegra_se_lp_generate_random_data(se_dev);
	if (err) {
		ERROR("%s: ERR: LP random pattern generation failed\n", __func__);
		return err;
	}

	INFO("%s: encrypt random data\n", __func__);
	/* Encrypt the random data block in place (src == dst) */
	err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&(((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rand_data))),
			((uint64_t)(&(((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rand_data))),
			SE_CTX_SAVE_RANDOM_DATA_SIZE);
	if (err) {
		ERROR("%s: ERR: random pattern encryption failed\n", __func__);
		return err;
	}

	INFO("%s: save SE sticky bits\n", __func__);
	/* Save AES sticky bits context */
	err = tegra_se_lp_sticky_bits_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: sticky bits context save failed\n", __func__);
		return err;
	}

	INFO("%s: save AES keytables\n", __func__);
	/* Save AES key table context */
	err = tegra_se_aeskeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: LP keytable save failed\n", __func__);
		return err;
	}

	/* RSA key slot table context save */
	INFO("%s: save RSA keytables\n", __func__);
	err = tegra_se_lp_rsakeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: rsa key table context save failed\n", __func__);
		return err;
	}

	/* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
	 * via SE2.
	 */
	if (se_dev->se_num == 2) {
		/* Encrypt PKA1 sticky bits on SE2 only */
		INFO("%s: save PKA sticky bits\n", __func__);
		err = tegra_se_pkakeytable_sticky_bits_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__);
			return err;
		}

		/* Encrypt PKA1 keyslots on SE2 only */
		INFO("%s: save PKA keytables\n", __func__);
		err = tegra_se_pkakeytable_context_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA key table context save failed\n", __func__);
			return err;
		}
	}

	/* Encrypt known pattern; its offset in the blob differs between
	 * the SE1 and SE2/PKA1 context layouts, hence the two casts.
	 */
	if (se_dev->se_num == 1) {
		err = tegra_se_lp_data_context_save(se_dev,
				((uint64_t)(&se_ctx_known_pattern_data)),
				((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
				SE_CTX_KNOWN_PATTERN_SIZE);
	} else if (se_dev->se_num == 2) {
		err = tegra_se_lp_data_context_save(se_dev,
				((uint64_t)(&se_ctx_known_pattern_data)),
				((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
				SE_CTX_KNOWN_PATTERN_SIZE);
	}
	if (err) {
		ERROR("%s: ERR: save LP known pattern failure\n", __func__);
		return err;
	}

	/* Write lp context buffer address into PMC scratch register */
	if (se_dev->se_num == 1) {
		/* SE context address */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH117_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	} else if (se_dev->se_num == 2) {
		/* SE2 & PKA1 context address */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	}

	/* Saves SRK to PMC secure scratch registers for BootROM, which
	 * verifies and restores the security engine context on warm boot.
	 */
	err = tegra_se_save_SRK(se_dev);
	if (err < 0) {
		ERROR("%s: ERR: LP SRK save failure\n", __func__);
		return err;
	}

	INFO("%s: SE context save done \n", __func__);

	return err;
}
893
894/*
Marvin Hsu21eea972017-04-11 11:00:48 +0800895 * Initialize the SE engine handle
896 */
897void tegra_se_init(void)
898{
899 INFO("%s: start SE init\n", __func__);
900
Sam Payne809c7732017-05-15 11:10:37 -0700901 /* Generate random SRK to initialize DRBG */
902 tegra_se_generate_srk(&se_dev_1);
903 tegra_se_generate_srk(&se_dev_2);
Marvin Hsu21eea972017-04-11 11:00:48 +0800904
905 INFO("%s: SE init done\n", __func__);
906}
907
Samuel Payne1e6bed42017-06-12 10:15:43 -0700908static void tegra_se_enable_clocks(void)
909{
910 uint32_t val = 0;
911
912 /* Enable entropy clock */
913 val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
914 val |= ENTROPY_CLK_ENB_BIT;
915 mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
916
917 /* De-Assert Entropy Reset */
918 val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W);
919 val &= ~ENTROPY_RESET_BIT;
920 mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);
921
922 /* Enable SE clock */
923 val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
924 val |= SE_CLK_ENB_BIT;
925 mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
926
927 /* De-Assert SE Reset */
928 val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V);
929 val &= ~SE_RESET_BIT;
930 mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val);
931}
932
933static void tegra_se_disable_clocks(void)
934{
935 uint32_t val = 0;
936
937 /* Disable entropy clock */
938 val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
939 val &= ~ENTROPY_CLK_ENB_BIT;
940 mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
941
942 /* Disable SE clock */
943 val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
944 val &= ~SE_CLK_ENB_BIT;
945 mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
946}
947
Marvin Hsu21eea972017-04-11 11:00:48 +0800948/*
949 * Security engine power suspend entry point.
950 * This function is invoked from PSCI power domain suspend handler.
951 */
952int32_t tegra_se_suspend(void)
953{
954 int32_t ret = 0;
Samuel Payneae1e0792017-06-12 16:38:23 -0700955 uint32_t val = 0;
956
957 /* SE does not use SMMU in EL3, disable SMMU.
958 * This will be re-enabled by kernel on resume */
959 val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0);
960 val &= ~PPCS_SMMU_ENABLE;
961 mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val);
962
Samuel Payne1e6bed42017-06-12 10:15:43 -0700963 tegra_se_enable_clocks();
Marvin Hsu21eea972017-04-11 11:00:48 +0800964
Marvin Hsu40d3a672017-04-11 11:00:48 +0800965 if (tegra_se_atomic_save_enabled(&se_dev_2) &&
966 tegra_se_atomic_save_enabled(&se_dev_1)) {
967 /* Atomic context save se2 and pka1 */
968 INFO("%s: SE2/PKA1 atomic context save\n", __func__);
969 if (ret == 0) {
970 ret = tegra_se_context_save_atomic(&se_dev_2);
971 }
Marvin Hsu21eea972017-04-11 11:00:48 +0800972
Marvin Hsu40d3a672017-04-11 11:00:48 +0800973 /* Atomic context save se */
974 if (ret == 0) {
975 INFO("%s: SE1 atomic context save\n", __func__);
976 ret = tegra_se_context_save_atomic(&se_dev_1);
977 }
Marvin Hsu21eea972017-04-11 11:00:48 +0800978
Marvin Hsu40d3a672017-04-11 11:00:48 +0800979 if (ret == 0) {
980 INFO("%s: SE atomic context save done\n", __func__);
981 }
982 } else if (!tegra_se_atomic_save_enabled(&se_dev_2) &&
983 !tegra_se_atomic_save_enabled(&se_dev_1)) {
984 /* SW context save se2 and pka1 */
985 INFO("%s: SE2/PKA1 legacy(SW) context save\n", __func__);
986 if (ret == 0) {
987 ret = tegra_se_context_save_sw(&se_dev_2);
988 }
989
990 /* SW context save se */
991 if (ret == 0) {
992 INFO("%s: SE1 legacy(SW) context save\n", __func__);
993 ret = tegra_se_context_save_sw(&se_dev_1);
994 }
995
996 if (ret == 0) {
997 INFO("%s: SE SW context save done\n", __func__);
998 }
999 } else {
1000 ERROR("%s: One SE set for atomic CTX save, the other is not\n",
1001 __func__);
Marvin Hsu21eea972017-04-11 11:00:48 +08001002 }
1003
Samuel Payne1e6bed42017-06-12 10:15:43 -07001004 tegra_se_disable_clocks();
1005
Marvin Hsu21eea972017-04-11 11:00:48 +08001006 return ret;
1007}
1008
1009/*
1010 * Save TZRAM to shadow TZRAM in AON
1011 */
1012int32_t tegra_se_save_tzram(void)
1013{
1014 uint32_t val = 0;
1015 int32_t ret = 0;
1016 uint32_t timeout;
1017
1018 INFO("%s: SE TZRAM save start\n", __func__);
Samuel Payne1e6bed42017-06-12 10:15:43 -07001019 tegra_se_enable_clocks();
Marvin Hsu21eea972017-04-11 11:00:48 +08001020
1021 val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
1022 tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);
1023
1024 val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
1025 for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
1026 (timeout < TIMEOUT_100MS); timeout++) {
1027 mdelay(1);
1028 val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
1029 }
1030
1031 if (timeout == TIMEOUT_100MS) {
1032 ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
1033 ret = -ETIMEDOUT;
1034 }
1035
1036 if (ret == 0) {
1037 INFO("%s: SE TZRAM save done!\n", __func__);
1038 }
1039
Samuel Payne1e6bed42017-06-12 10:15:43 -07001040 tegra_se_disable_clocks();
1041
Marvin Hsu21eea972017-04-11 11:00:48 +08001042 return ret;
1043}
1044
1045/*
1046 * The function is invoked by SE resume
1047 */
1048static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
1049{
1050 uint32_t val;
1051
1052 assert(se_dev);
1053
1054 /* Lock RNG source to ENTROPY on resume */
1055 val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
1056 DRBG_RO_ENT_SRC_LOCK_ENABLE |
1057 DRBG_RO_ENT_SRC_ENABLE;
1058 tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
1059
Sam Payne809c7732017-05-15 11:10:37 -07001060 /* Set a random value to SRK to initialize DRBG */
1061 tegra_se_generate_srk(se_dev);
Marvin Hsu21eea972017-04-11 11:00:48 +08001062}
1063
1064/*
1065 * The function is invoked on SC7 resume
1066 */
1067void tegra_se_resume(void)
1068{
1069 tegra_se_warm_boot_resume(&se_dev_1);
1070 tegra_se_warm_boot_resume(&se_dev_2);
1071}