// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 * Copyright 2021 NXP
 */

#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"
#include "fsl_hash.h"
#include <hw_sha.h>
#include <asm/cache.h>
#include <linux/errno.h>

#define CRYPTO_MAX_ALG_NAME	80
#define SHA1_DIGEST_SIZE	20
#define SHA256_DIGEST_SIZE	32

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	unsigned int digestsize;
	u32 alg_type;
};

enum caam_hash_algos {
	SHA1 = 0,
	SHA256
};

static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.digestsize = SHA1_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA1,
	},
	{
		.name = "sha256",
		.digestsize = SHA256_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA256,
	},
};

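/*
 * Map a generic hash_algo name onto the CAAM algorithm enum.
 *
 * Note that any name other than "sha1" falls through to SHA256; callers
 * are expected to pass only algorithms present in driver_hash[].
 *
 * @algo: Pointer to the generic hash algorithm descriptor
 * Return: SHA1 or SHA256
 */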
static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
{
	if (!strcmp(algo->name, driver_hash[SHA1].name))
		return SHA1;

	return SHA256;
}

/*
 * Create the context for progressive hashing using h/w acceleration.
 *
 * @ctxp: Pointer to the pointer of the context for hashing
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -ENOMEM on error
 */
static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
{
	*ctxp = calloc(1, sizeof(struct sha_ctx));
	if (*ctxp == NULL) {
		debug("Cannot allocate memory for context\n");
		return -ENOMEM;
	}
	return 0;
}

/*
 * Update the scatter/gather table for progressive hashing using h/w
 * acceleration.
 *
 * The context is freed by this function if an error occurs.
 * We support at most 32 scatter/gather entries (MAX_SG_32).
 *
 * @hash_ctx: Pointer to the context for hashing
 * @buf: Pointer to the buffer being hashed
 * @size: Size of the buffer being hashed
 * @is_last: 1 if this is the last update; 0 otherwise
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -EINVAL when the scatter/gather table is full
 */
static int caam_hash_update(void *hash_ctx, const void *buf,
			    unsigned int size, int is_last,
			    enum caam_hash_algos caam_algo)
{
	uint32_t final;
	caam_dma_addr_t addr = virt_to_phys((void *)buf);
	struct sha_ctx *ctx = hash_ctx;

	if (ctx->sg_num >= MAX_SG_32) {
		free(ctx);
		return -EINVAL;
	}

#ifdef CONFIG_CAAM_64BIT
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, (uint32_t)(addr >> 32));
#else
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
#endif
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (caam_dma_addr_t)addr);

	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
		  (size & SG_ENTRY_LENGTH_MASK));

	ctx->sg_num++;

	if (is_last) {
		final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
			SG_ENTRY_FINAL_BIT;
		sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
	}

	return 0;
}

/*
 * Run the hash job over the accumulated scatter/gather table and copy
 * the digest to the destination buffer.
 *
 * The context is freed after successful completion of the hash operation.
 * In case of failure, the context is not freed.
 * @hash_ctx: Pointer to the context for hashing
 * @dest_buf: Pointer to the destination buffer where hash is to be copied
 * @size: Size of the destination buffer
 * @caam_algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -EINVAL if the destination buffer is too small, or the
 * error code returned by the job ring
 */
static int caam_hash_finish(void *hash_ctx, void *dest_buf,
			    int size, enum caam_hash_algos caam_algo)
{
	uint32_t len = 0, sg_entry_len;
	struct sha_ctx *ctx = hash_ctx;
	int i = 0, ret = 0;
	caam_dma_addr_t addr;

	if (size < driver_hash[caam_algo].digestsize)
		return -EINVAL;

	flush_dcache_range((ulong)ctx->sg_tbl,
			   (ulong)(ctx->sg_tbl) +
			   (ctx->sg_num * sizeof(struct sg_entry)));
	for (i = 0; i < ctx->sg_num; i++) {
		sg_entry_len = (sec_in32(&ctx->sg_tbl[i].len_flag) &
				SG_ENTRY_LENGTH_MASK);
		len += sg_entry_len;
#ifdef CONFIG_CAAM_64BIT
		addr = sec_in32(&ctx->sg_tbl[i].addr_hi);
		addr = (addr << 32) | sec_in32(&ctx->sg_tbl[i].addr_lo);
#else
		addr = sec_in32(&ctx->sg_tbl[i].addr_lo);
#endif
		flush_dcache_range(addr, addr + sg_entry_len);
	}
	inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
				  ctx->hash,
				  driver_hash[caam_algo].alg_type,
				  driver_hash[caam_algo].digestsize,
				  1);

	flush_dcache_range((ulong)ctx->sha_desc,
			   (ulong)(ctx->sha_desc) +
			   (sizeof(uint32_t) * MAX_CAAM_DESCSIZE));
	flush_dcache_range((ulong)ctx->hash,
			   (ulong)(ctx->hash) +
			   driver_hash[caam_algo].digestsize);

	ret = run_descriptor_jr(ctx->sha_desc);

	if (ret) {
		debug("Error %x\n", ret);
		return ret;
	}

	invalidate_dcache_range((ulong)ctx->hash,
				(ulong)(ctx->hash) +
				driver_hash[caam_algo].digestsize);
	/* Copy only the digest: ctx->hash may be larger than digestsize */
	memcpy(dest_buf, ctx->hash, driver_hash[caam_algo].digestsize);
	free(ctx);
	return ret;
}

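/*
 * Perform a one-shot hash of the given buffer using h/w acceleration and
 * copy the digest to the destination buffer.
 *
 * @pbuf: Pointer to the buffer being hashed
 * @buf_len: Size of the buffer being hashed
 * @pout: Pointer to the destination buffer for the digest
 * @algo: Enum for SHA1 or SHA256
 * Return: 0 if ok, -ENOMEM if descriptor allocation fails, or the error
 * code returned by the job ring
 */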
int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
	      unsigned char *pout, enum caam_hash_algos algo)
{
	int ret = 0;
	uint32_t *desc;
	unsigned int size;

	desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

	/* Make sure the CAAM reads up-to-date input data from memory */
	size = ALIGN(buf_len, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)pbuf, (unsigned long)pbuf + size);

	inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
				  driver_hash[algo].alg_type,
				  driver_hash[algo].digestsize,
				  0);

	/*
	 * Flush the descriptor and drop any cached lines over the digest
	 * area so they cannot be evicted on top of the CAAM's output.
	 */
	size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)desc, (unsigned long)desc + size);
	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout,
				(unsigned long)pout + size);

	ret = run_descriptor_jr(desc);

	/* Discard lines speculatively fetched while the job was running */
	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout,
				(unsigned long)pout + size);

	free(desc);
	return ret;
}

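/*
 * One-shot SHA-256 of the whole buffer; @chunk_size is part of the
 * hw_sha interface but is not used by this implementation.
 */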
void hw_sha256(const unsigned char *pbuf, unsigned int buf_len,
	       unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA256))
		printf("CAAM was not set up properly or it is faulty\n");
}

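/*
 * One-shot SHA-1 of the whole buffer; @chunk_size is part of the
 * hw_sha interface but is not used by this implementation.
 */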
224void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
225 unsigned char *pout, unsigned int chunk_size)
226{
227 if (caam_hash(pbuf, buf_len, pout, SHA1))
228 printf("CAAM was not setup properly or it is faulty\n");
229}
gaurav ranaef201592015-02-20 12:51:46 +0530230
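/*
 * The hw_sha_* hooks below plug the CAAM into the generic hash_algo
 * interface. A minimal sketch of the progressive flow, assuming a
 * caller-provided struct hash_algo *algo and hypothetical "data"/"len"
 * (error handling omitted; the final update passes is_last = 1):
 *
 *	void *ctx;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	hw_sha_init(algo, &ctx);
 *	hw_sha_update(algo, ctx, data, len, 1);
 *	hw_sha_finish(algo, ctx, digest, sizeof(digest));
 */

/* Progressive hash: allocate a CAAM hashing context */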
int hw_sha_init(struct hash_algo *algo, void **ctxp)
{
	return caam_hash_init(ctxp, get_hash_type(algo));
}

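/* Progressive hash: append one buffer to the context's scatter/gather table */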
int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
		  unsigned int size, int is_last)
{
	return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
}

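/* Progressive hash: run the hash job and copy the digest to dest_buf */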
int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
		  int size)
{
	return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
}