/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <delay_timer.h>
#include <errno.h>
#include <mmio.h>
#include <psci.h>
#include <se_private.h>
#include <security_engine.h>
#include <tegra_platform.h>

/*******************************************************************************
 * Constants and Macros
 ******************************************************************************/

#define TIMEOUT_100MS	100UL	/* Max number of 1 ms poll iterations (~100 ms timeout) */

/*******************************************************************************
 * Data structure and global variables
 ******************************************************************************/

/* The security engine contexts are formatted as follows:
 *
 * SE1 CONTEXT:
 * #--------------------------------#
 * |    Random Data   1 Block       |
 * #--------------------------------#
 * |    Sticky Bits   2 Blocks      |
 * #--------------------------------#
 * |    Key Table    64 Blocks      |
 * |       For each Key (x16):      |
 * |         Key:          2 Blocks |
 * |         Original-IV:  1 Block  |
 * |         Updated-IV:   1 Block  |
 * #--------------------------------#
 * |    RSA Keys     64 Blocks      |
 * #--------------------------------#
 * |    Known Pattern 1 Block       |
 * #--------------------------------#
 *
 * SE2/PKA1 CONTEXT:
 * #--------------------------------#
 * |    Random Data   1 Block       |
 * #--------------------------------#
 * |    Sticky Bits   2 Blocks      |
 * #--------------------------------#
 * |    Key Table    64 Blocks      |
 * |       For each Key (x16):      |
 * |         Key:          2 Blocks |
 * |         Original-IV:  1 Block  |
 * |         Updated-IV:   1 Block  |
 * #--------------------------------#
 * |    RSA Keys     64 Blocks      |
 * #--------------------------------#
 * |    PKA sticky bits   1 Block   |
 * #--------------------------------#
 * |    PKA keys    512 Blocks      |
 * #--------------------------------#
 * |    Known Pattern 1 Block       |
 * #--------------------------------#
 */
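
/*
 * Note: summing the regions listed above gives 132 blocks for the SE1
 * context and 645 blocks for the SE2/PKA1 context; the sizes the driver
 * actually uses are SE_CTX_SAVE_SIZE_BLOCKS_SE1/SE2 from se_private.h.
 */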

/* SE input and output linked list buffers */
static tegra_se_io_lst_t se1_src_ll_buf;
static tegra_se_io_lst_t se1_dst_ll_buf;

/* SE2 input and output linked list buffers */
static tegra_se_io_lst_t se2_src_ll_buf;
static tegra_se_io_lst_t se2_dst_ll_buf;

/* SE1 security engine device handle */
static tegra_se_dev_t se_dev_1 = {
	.se_num = 1,
	/* setup base address for se */
	.se_base = TEGRA_SE1_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se1_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se1_dst_ll_buf,
};

/* SE2 security engine device handle */
static tegra_se_dev_t se_dev_2 = {
	.se_num = 2,
	/* setup base address for se */
	.se_base = TEGRA_SE2_BASE,
	/* Setup context size in AES blocks */
	.ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
	/* Setup SRC buffers for SE operations */
	.src_ll_buf = &se2_src_ll_buf,
	/* Setup DST buffers for SE operations */
	.dst_ll_buf = &se2_dst_ll_buf,
};

/*******************************************************************************
 * Functions Definition
 ******************************************************************************/

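/*
 * Flush the SE source/destination linked-list buffers from the CPU data
 * cache, so that the security engine's DMA and the CPU see the same data.
 */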
static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
{
	flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
			sizeof(tegra_se_io_lst_t));
	flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
			sizeof(tegra_se_io_lst_t));
}

/*
 * Check that SE operation has completed after kickoff.
 * This function is invoked after an SE operation has been started,
 * and it checks the following conditions:
 * 1. SE_INT_STATUS = SE_OP_DONE
 * 2. SE_STATUS = IDLE
 * 3. AHB bus data transfer complete.
 * 4. SE_ERR_STATUS is clean.
 */
static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	/* Poll the SE interrupt register to ensure H/W operation complete */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: Atomic context save operation timeout!\n",
				__func__);
		ret = -ETIMEDOUT;
	}

	/* Poll the SE status idle to ensure H/W operation complete */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
				timeout++) {
			mdelay(1);
			val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: ERR: MEM_INTERFACE and SE state "
					"idle state timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Check AHB bus transfer complete */
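	/*
	 * The AHB arbiter write-queue master ID register reports which bus
	 * masters still have writes pending; wait for the SE1/SE2 master ID
	 * bits to clear so that the SE output has actually reached memory.
	 */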
	if (ret == 0) {
		val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
				(timeout < TIMEOUT_100MS); timeout++) {
			mdelay(1);
			val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: SE write over AHB timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Ensure that no errors are thrown during operation */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
		if (val != 0U) {
			ERROR("%s: error during SE operation! 0x%x\n", __func__, val);
			ret = -ENOTSUP;
		}
	}

	return ret;
}

/*
 * Verify that SE context save auto has been enabled:
 *   SE_CTX_SAVE_AUTO.ENABLE == ENABLE
 * If context save auto is not enabled, enable it and lock the setting.
 * If it is not enabled and the enable setting is already locked,
 * return an error.
 */
static inline int32_t tegra_se_ctx_save_auto_enable(const tegra_se_dev_t *se_dev)
{
	uint32_t val;
	int32_t ret = 0;

	val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
	if (SE_CTX_SAVE_AUTO_ENABLE(val) == SE_CTX_SAVE_AUTO_DIS) {
		if (SE_CTX_SAVE_AUTO_LOCK(val) == SE_CTX_SAVE_AUTO_LOCK_EN) {
			ERROR("%s: ERR: Cannot enable atomic. Write locked!\n",
					__func__);
			ret = -EACCES;
		}

		/* Program SE_CTX_SAVE_AUTO */
		if (ret == 0) {
			tegra_se_write_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET,
					SE_CTX_SAVE_AUTO_LOCK_EN |
					SE_CTX_SAVE_AUTO_EN);
		}
	}

	return ret;
}

/*
 * Wait for SE engine to be idle and clear pending interrupts before
 * starting the next SE operation.
 */
static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t timeout;

	/* Wait for previous operation to finish */
	val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: SE status is not idle!\n", __func__);
		ret = -ETIMEDOUT;
	}

	/* Clear any pending interrupts from previous operation */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
	return ret;
}

/*
 * SE atomic context save. At SC7 entry, the SE driver triggers the
 * hardware to automatically perform the context save operation.
 */
static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
{
	int32_t ret = 0;
	uint32_t val = 0;
	uint32_t blk_count_limit = 0;
	uint32_t block_count;

	/* Check that previous operation is finalized */
	ret = tegra_se_operation_prepare(se_dev);

	/* Ensure HW atomic context save has been enabled
	 * This should have been done at boot time.
	 * SE_CTX_SAVE_AUTO.ENABLE == ENABLE
	 */
	if (ret == 0) {
		ret = tegra_se_ctx_save_auto_enable(se_dev);
	}

	/* Read the context save progress counter: block_count
	 * Ensure no previous context save has been triggered
	 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
	 */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
		if (block_count != 0U) {
			ERROR("%s: ctx_save triggered multiple times\n",
					__func__);
			ret = -EALREADY;
		}
	}

	/* Set the destination block count for when the context save completes */
	if (ret == 0) {
		blk_count_limit = block_count + se_dev->ctx_size_blks;
	}

	/* Program SE_CONFIG register as for RNG operation
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_MODE is ignored
	 * SE_CONFIG.DEC_MODE is ignored
	 * SE_CONFIG.DST = MEMORY
	 */
	if (ret == 0) {
		val = (SE_CONFIG_ENC_ALG_RNG |
			SE_CONFIG_DEC_ALG_NOP |
			SE_CONFIG_DST_MEMORY);
		tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

		tegra_se_make_data_coherent(se_dev);

		/* SE_CTX_SAVE operation */
		tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
				SE_OP_CTX_SAVE);

		ret = tegra_se_operation_complete(se_dev);
	}

	/* Check that context has written the correct number of blocks */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
		if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
			ERROR("%s: expected %d blocks but %d were written\n",
					__func__, blk_count_limit,
					SE_CTX_SAVE_GET_BLK_COUNT(val));
			ret = -ECANCELED;
		}
	}

	return ret;
}

/*
 * Security engine primitive operations, including normal operation
 * and the context save operation.
 */
static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
	int ret = 0;

	assert(se_dev);

	/* Use device buffers for in and out */
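	/*
	 * Note: the LL address registers are 32 bits wide, so the 64-bit
	 * buffer pointers are truncated on write; the linked-list buffers
	 * are expected to reside in 32-bit addressable memory.
	 */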
	tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
	tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));

	/* Check that previous operation is finalized */
	ret = tegra_se_operation_prepare(se_dev);
	if (ret != 0) {
		goto op_error;
	}

	/* Program SE operation size */
	if (nblocks) {
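		/*
		 * The block count register is programmed with one less than
		 * the number of AES blocks, i.e. a value of 0 means one block.
		 */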
		tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
	}

	/* Make SE LL data coherent before the SE operation */
	tegra_se_make_data_coherent(se_dev);

	/* Start hardware operation */
	tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);

	/* Wait for operation to finish */
	ret = tegra_se_operation_complete(se_dev);

op_error:
	return ret;
}

/*
 * Security Engine sequence to generate SRK.
 * SE and SE2 will generate different SRKs from different
 * entropy seeds.
 */
static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	uint32_t val;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.DST = SRK
	 * SE_OPERATION.OP = START
	 * SE_CRYPTO_LAST_BLOCK = 0
	 */
	se_dev->src_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->last_buff_num = 0;

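	/*
	 * With SE_CONFIG.DST = SRK the RNG output is written to the internal
	 * SRK register rather than to memory, so the linked lists carry no
	 * payload and the operation size passed below is zero bytes.
	 */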
	/* Configure random number generator */
	val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* Configure output destination = SRK */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_DST_SRK);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Perform hardware operation */
	ret = tegra_se_perform_operation(se_dev, 0);

	return ret;
}

/*
 * Initialize the SE engine handle
 */
void tegra_se_init(void)
{
	INFO("%s: start SE init\n", __func__);

	/* Generate random SRK to initialize DRBG */
	tegra_se_generate_srk(&se_dev_1);
	tegra_se_generate_srk(&se_dev_2);

	INFO("%s: SE init done\n", __func__);
}

/*
 * Security engine power suspend entry point.
 * This function is invoked from the PSCI power domain suspend handler.
 */
int32_t tegra_se_suspend(void)
{
	int32_t ret = 0;

	/* Atomic context save se2 and pka1 */
	INFO("%s: SE2/PKA1 atomic context save\n", __func__);
	ret = tegra_se_context_save_atomic(&se_dev_2);

	/* Atomic context save se */
	if (ret == 0) {
		INFO("%s: SE1 atomic context save\n", __func__);
		ret = tegra_se_context_save_atomic(&se_dev_1);
	}

	if (ret == 0) {
		INFO("%s: SE atomic context save done\n", __func__);
	}

	return ret;
}

/*
 * Save TZRAM to shadow TZRAM in AON
 */
int32_t tegra_se_save_tzram(void)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	INFO("%s: SE TZRAM save start\n", __func__);

	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);

	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
		ret = -ETIMEDOUT;
	}

	if (ret == 0) {
		INFO("%s: SE TZRAM save done!\n", __func__);
	}

	return ret;
}

/*
 * This function is invoked during SE resume
 */
static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
{
	uint32_t val;

	assert(se_dev);

	/* Lock RNG source to ENTROPY on resume */
	val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
		DRBG_RO_ENT_SRC_LOCK_ENABLE |
		DRBG_RO_ENT_SRC_ENABLE;
	tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);

	/* Enable and lock the SE atomic context save setting */
	if (tegra_se_ctx_save_auto_enable(se_dev) != 0) {
		ERROR("%s: ERR: enable SE%d context save auto failed!\n",
			__func__, se_dev->se_num);
	}

	/* Set a random value to SRK to initialize DRBG */
	tegra_se_generate_srk(se_dev);
}


/*
 * This function is invoked on SC7 resume
 */
void tegra_se_resume(void)
{
	tegra_se_warm_boot_resume(&se_dev_1);
	tegra_se_warm_boot_resume(&se_dev_2);
}