// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>
#include <memalign.h>
#include <fsl_sec.h>
#include <linux/errno.h>
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"

/**
 * blob_decap() - Decapsulate the data from a blob
 * @key_mod: - Key modifier address
 * @src: - Source address (blob)
 * @dst: - Destination address (data)
 * @len: - Size of decapsulated data
 *
 * Note: Start and end of the key_mod, src and dst buffers have to be aligned
 * to the cache line size (ARCH_DMA_MINALIGN) for the CAAM operation to
 * succeed.
 *
 * Returns zero on success, negative on error.
 */
int blob_decap(u8 *key_mod, u8 *src, u8 *dst, u32 len)
{
	int ret, size, i = 0;
	u32 *desc;

	if (!IS_ALIGNED((uintptr_t)key_mod, ARCH_DMA_MINALIGN) ||
	    !IS_ALIGNED((uintptr_t)src, ARCH_DMA_MINALIGN) ||
	    !IS_ALIGNED((uintptr_t)dst, ARCH_DMA_MINALIGN)) {
		puts("Error: blob_decap: Address arguments are not aligned!\n");
		return -EINVAL;
	}

	printf("\nDecapsulating blob to get data\n");
	desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

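	/*
	 * The CAAM reads its inputs via DMA, so flush the 16-byte key
	 * modifier from the data cache before submitting the job.
	 */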
Clemens Gruber58b13962018-01-07 20:26:29 +010048 size = ALIGN(16, ARCH_DMA_MINALIGN);
49 flush_dcache_range((unsigned long)key_mod,
50 (unsigned long)key_mod + size);
51
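	/* Flush the input blob, which occupies BLOB_SIZE(len) bytes */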
	size = ALIGN(BLOB_SIZE(len), ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)src,
			   (unsigned long)src + size);

	inline_cnstr_jobdesc_blob_decap(desc, key_mod, src, dst, len);

	debug("Descriptor dump:\n");
	for (i = 0; i < 14; i++)
		debug("Word[%d]: %08x\n", i, *(desc + i));

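	/*
	 * Flush the constructed job descriptor so the job ring engine reads
	 * it from memory, then submit it and wait for completion.
	 */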
	size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)desc,
			   (unsigned long)desc + size);

	ret = run_descriptor_jr(desc);

	if (ret) {
		printf("Error in blob decapsulation: %d\n", ret);
	} else {
		size = ALIGN(len, ARCH_DMA_MINALIGN);
		invalidate_dcache_range((unsigned long)dst,
					(unsigned long)dst + size);

		puts("Blob decapsulation successful.\n");
	}

	free(desc);
	return ret;
}

/**
 * blob_encap() - Encapsulate the data as a blob
 * @key_mod: - Key modifier address
 * @src: - Source address (data)
 * @dst: - Destination address (blob)
 * @len: - Size of data to be encapsulated
 *
 * Note: Start and end of the key_mod, src and dst buffers have to be aligned
 * to the cache line size (ARCH_DMA_MINALIGN) for the CAAM operation to
 * succeed.
 *
 * Returns zero on success, negative on error.
 */
int blob_encap(u8 *key_mod, u8 *src, u8 *dst, u32 len)
{
	int ret, size, i = 0;
	u32 *desc;

	if (!IS_ALIGNED((uintptr_t)key_mod, ARCH_DMA_MINALIGN) ||
	    !IS_ALIGNED((uintptr_t)src, ARCH_DMA_MINALIGN) ||
	    !IS_ALIGNED((uintptr_t)dst, ARCH_DMA_MINALIGN)) {
		puts("Error: blob_encap: Address arguments are not aligned!\n");
		return -EINVAL;
	}

	printf("\nEncapsulating data to form blob\n");
	desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

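	/*
	 * As in blob_decap(): flush the key modifier, the source data and
	 * the job descriptor so the CAAM DMA engine sees current memory
	 * contents, and invalidate the destination once the job completes.
	 */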
	size = ALIGN(16, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)key_mod,
			   (unsigned long)key_mod + size);

	size = ALIGN(len, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)src,
			   (unsigned long)src + size);

	inline_cnstr_jobdesc_blob_encap(desc, key_mod, src, dst, len);

	debug("Descriptor dump:\n");
	for (i = 0; i < 14; i++)
		debug("Word[%d]: %08x\n", i, *(desc + i));

	size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)desc,
			   (unsigned long)desc + size);

	ret = run_descriptor_jr(desc);

	if (ret) {
		printf("Error in blob encapsulation: %d\n", ret);
	} else {
		size = ALIGN(BLOB_SIZE(len), ARCH_DMA_MINALIGN);
		invalidate_dcache_range((unsigned long)dst,
					(unsigned long)dst + size);

		puts("Blob encapsulation successful.\n");
	}

	free(desc);
	return ret;
}

#ifdef CONFIG_CMD_DEKBLOB
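/**
 * blob_dek() - Encapsulate a DEK (Data Encryption Key) as a blob
 * @src: - Source address (DEK)
 * @dst: - Destination address (DEK blob)
 * @len: - Size of the DEK in bytes
 *
 * Note: The destination buffer must be large enough to hold the wrapper
 * header, the DEK itself, the key blob and the MAC (out_sz below).
 *
 * Returns zero on success, negative on error.
 */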
int blob_dek(const u8 *src, u8 *dst, u8 len)
{
	int ret, size, i = 0;
	u32 *desc;

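	/* Total size of the resulting DEK blob: header + DEK + blob overhead */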
	int out_sz = WRP_HDR_SIZE + len + KEY_BLOB_SIZE + MAC_SIZE;

	puts("\nEncapsulating provided DEK to form blob\n");
	desc = memalign(ARCH_DMA_MINALIGN,
			sizeof(uint32_t) * DEK_BLOB_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

	ret = inline_cnstr_jobdesc_blob_dek(desc, src, dst, len);
	if (ret) {
		debug("Error in Job Descriptor Construction: %d\n", ret);
	} else {
		size = roundup(sizeof(uint32_t) * DEK_BLOB_DESCSIZE,
			       ARCH_DMA_MINALIGN);
		flush_dcache_range((unsigned long)desc,
				   (unsigned long)desc + size);
		size = roundup(sizeof(uint8_t) * out_sz, ARCH_DMA_MINALIGN);
		flush_dcache_range((unsigned long)dst,
				   (unsigned long)dst + size);

		ret = run_descriptor_jr(desc);
	}

	if (ret) {
		debug("Error in Encapsulation %d\n", ret);
		goto err;
	}

	size = roundup(out_sz, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)dst, (unsigned long)dst + size);

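	/* Dump the resulting DEK blob as hex so it can be captured by the user */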
186 puts("DEK Blob\n");
187 for (i = 0; i < out_sz; i++)
188 printf("%02X", ((uint8_t *)dst)[i]);
189 printf("\n");
190
191err:
192 free(desc);
193 return ret;
194}
195#endif