// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 */

#include <common.h>
#include <malloc.h>
#include <memalign.h>
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"
#include "fsl_hash.h"
#include <hw_sha.h>
#include <linux/errno.h>

#define CRYPTO_MAX_ALG_NAME	80
#define SHA1_DIGEST_SIZE	20
#define SHA256_DIGEST_SIZE	32

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	unsigned int digestsize;
	u32 alg_type;
};

enum caam_hash_algos {
	SHA1 = 0,
	SHA256
};

static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.digestsize = SHA1_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA1,
	},
	{
		.name = "sha256",
		.digestsize = SHA256_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA256,
	},
};

static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
{
	if (!strcmp(algo->name, driver_hash[SHA1].name))
		return SHA1;
	else
		return SHA256;
}

/* Create the context for progressive hashing using h/w acceleration.
 *
 * A usage sketch for the progressive interface is given at the end of
 * this file.
 *
 * @ctxp: Pointer to the pointer of the context for hashing
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -ENOMEM on error
 */
static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
{
	*ctxp = calloc(1, sizeof(struct sha_ctx));
	if (*ctxp == NULL) {
		debug("Cannot allocate memory for context\n");
		return -ENOMEM;
	}
	return 0;
}

/*
 * Update sg table for progressive hashing using h/w acceleration
 *
 * The context is freed by this function if an error occurs.
 * We support at most 32 Scatter/Gather Entries.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @buf: Pointer to the buffer being hashed
 * @size: Size of the buffer being hashed
 * @is_last: 1 if this is the last update; 0 otherwise
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -EINVAL on error
 */
static int caam_hash_update(void *hash_ctx, const void *buf,
			    unsigned int size, int is_last,
			    enum caam_hash_algos caam_algo)
{
	uint32_t final = 0;
	phys_addr_t addr = virt_to_phys((void *)buf);
	struct sha_ctx *ctx = hash_ctx;

	if (ctx->sg_num >= MAX_SG_32) {
		free(ctx);
		return -EINVAL;
	}

	/* Fill the next scatter/gather entry with the buffer's bus address */
#ifdef CONFIG_PHYS_64BIT
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, (uint32_t)(addr >> 32));
#else
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
#endif
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (uint32_t)addr);

	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
		  (size & SG_ENTRY_LENGTH_MASK));

	ctx->sg_num++;

	/* Mark the last entry so the CAAM knows where the input ends */
	if (is_last) {
		final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
			SG_ENTRY_FINAL_BIT;
		sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
	}

	return 0;
}

/*
 * Perform progressive hashing on the given buffer and copy the hash to
 * the destination buffer
 *
 * The context is freed after completion of the hash operation.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @dest_buf: Pointer to the destination buffer where the hash is to be copied
 * @size: Size of the destination buffer
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -EINVAL on error
 */
static int caam_hash_finish(void *hash_ctx, void *dest_buf,
			    int size, enum caam_hash_algos caam_algo)
{
	uint32_t len = 0;
	struct sha_ctx *ctx = hash_ctx;
	int i = 0, ret = 0;

	if (size < driver_hash[caam_algo].digestsize) {
		free(ctx);
		return -EINVAL;
	}

	/* Total input length is the sum of all scatter/gather entries */
	for (i = 0; i < ctx->sg_num; i++)
		len += (sec_in32(&ctx->sg_tbl[i].len_flag) &
			SG_ENTRY_LENGTH_MASK);

	inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
				  ctx->hash,
				  driver_hash[caam_algo].alg_type,
				  driver_hash[caam_algo].digestsize,
				  1);

	ret = run_descriptor_jr(ctx->sha_desc);

	if (ret)
		debug("Error %x\n", ret);
	else
		/* Copy only the digest; dest_buf may be smaller than ctx->hash */
		memcpy(dest_buf, ctx->hash, driver_hash[caam_algo].digestsize);

	free(ctx);
	return ret;
}

int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
	      unsigned char *pout, enum caam_hash_algos algo)
{
	int ret = 0;
	uint32_t *desc;
	unsigned int size;

	desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE);
	if (!desc) {
		debug("Not enough memory for descriptor allocation\n");
		return -ENOMEM;
	}

	if (!IS_ALIGNED((uintptr_t)pbuf, ARCH_DMA_MINALIGN) ||
	    !IS_ALIGNED((uintptr_t)pout, ARCH_DMA_MINALIGN)) {
		puts("Error: Address arguments are not aligned\n");
		free(desc);
		return -EINVAL;
	}

	/* Flush the input so the CAAM DMA engine reads up-to-date data */
	size = ALIGN(buf_len, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)pbuf, (unsigned long)pbuf + size);

	inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
				  driver_hash[algo].alg_type,
				  driver_hash[algo].digestsize,
				  0);

	/* Flush the descriptor before handing it to the job ring */
	size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN);
	flush_dcache_range((unsigned long)desc, (unsigned long)desc + size);

	ret = run_descriptor_jr(desc);

	/* Invalidate so the CPU sees the digest the CAAM wrote to memory */
	size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN);
	invalidate_dcache_range((unsigned long)pout,
				(unsigned long)pout + size);

	free(desc);
	return ret;
}
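
/*
 * One-shot usage sketch (illustrative only, not part of the driver):
 * caam_hash() insists on ARCH_DMA_MINALIGN-aligned buffers, so a caller
 * would typically obtain them via malloc_cache_aligned() or
 * ALLOC_CACHE_ALIGN_BUFFER. The buffer names below are hypothetical.
 *
 *	ALLOC_CACHE_ALIGN_BUFFER(u8, digest, SHA256_DIGEST_SIZE);
 *	u8 *data = malloc_cache_aligned(len);
 *
 *	memcpy(data, payload, len);
 *	if (caam_hash(data, len, digest, SHA256))
 *		puts("hashing failed\n");
 *	free(data);
 */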

void hw_sha256(const unsigned char *pbuf, unsigned int buf_len,
	       unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA256))
		printf("CAAM was not set up properly or it is faulty\n");
}

void hw_sha1(const unsigned char *pbuf, unsigned int buf_len,
	     unsigned char *pout, unsigned int chunk_size)
{
	if (caam_hash(pbuf, buf_len, pout, SHA1))
		printf("CAAM was not set up properly or it is faulty\n");
}

int hw_sha_init(struct hash_algo *algo, void **ctxp)
{
	return caam_hash_init(ctxp, get_hash_type(algo));
}

int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
		  unsigned int size, int is_last)
{
	return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
}

int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
		  int size)
{
	return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
}
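
/*
 * Progressive usage sketch (illustrative only, not part of the driver):
 * hash_lookup_algo() from U-Boot's generic hash framework is assumed here
 * to resolve the "sha256" algorithm; buffer names are hypothetical. Each
 * hw_sha_update() call consumes one scatter/gather entry, so a context
 * supports at most MAX_SG_32 updates, and the final update must pass
 * is_last = 1 before hw_sha_finish() runs the job descriptor.
 *
 *	struct hash_algo *algo;
 *	void *ctx;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	if (hash_lookup_algo("sha256", &algo))
 *		return;
 *	hw_sha_init(algo, &ctx);
 *	hw_sha_update(algo, ctx, hdr, hdr_len, 0);
 *	hw_sha_update(algo, ctx, payload, payload_len, 1);
 *	hw_sha_finish(algo, ctx, digest, sizeof(digest));
 */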