Merge "[rdkb][common][bsp][Fix wifi7 bring up fail on kirkstone]"
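
This merge adds basic AES-CTR support for the EIP197 crypto engine. The new kernel patch registers a second "ctr(aes)" skcipher, "safexcel-basic-ctr-aes", next to the existing nonce-token CTR path: it loads the caller's full 16-byte IV, tracks request progress in ctx->processed, submits only the counter blocks that fit before the engine's 32-bit counter word wraps, carries the wrap into the upper IV words in software (accum_iv), and requeues itself from the completion handler until the whole request is done. The patch is hooked into the kernel patch series via patches-5.4.inc.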
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/999-2740-crypto-add-eip197-aes-ctr-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/999-2740-crypto-add-eip197-aes-ctr-support.patch
new file mode 100644
index 0000000..52c095d
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/999-2740-crypto-add-eip197-aes-ctr-support.patch
@@ -0,0 +1,397 @@
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -1222,6 +1222,7 @@ static struct safexcel_alg_template *saf
+ 	&safexcel_alg_cfb_aes,
+ 	&safexcel_alg_ofb_aes,
+ 	&safexcel_alg_ctr_aes,
++	&safexcel_alg_basic_ctr_aes,
+ 	&safexcel_alg_md5,
+ 	&safexcel_alg_sha1,
+ 	&safexcel_alg_sha224,
+--- a/drivers/crypto/inside-secure/safexcel.h
++++ b/drivers/crypto/inside-secure/safexcel.h
+@@ -930,6 +930,7 @@ extern struct safexcel_alg_template safe
+ extern struct safexcel_alg_template safexcel_alg_cfb_aes;
+ extern struct safexcel_alg_template safexcel_alg_ofb_aes;
+ extern struct safexcel_alg_template safexcel_alg_ctr_aes;
++extern struct safexcel_alg_template safexcel_alg_basic_ctr_aes;
+ extern struct safexcel_alg_template safexcel_alg_md5;
+ extern struct safexcel_alg_template safexcel_alg_sha1;
+ extern struct safexcel_alg_template safexcel_alg_sha224;
+--- a/drivers/crypto/inside-secure/safexcel_cipher.c
++++ b/drivers/crypto/inside-secure/safexcel_cipher.c
+@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
+ 	u8 xcm;  /* 0=authenc, 1=GCM, 2 reserved for CCM */
+ 	u8 aadskip;
+ 	u8 blocksz;
++	bool basic_ctr;	/* plain ctr(aes): caller's IV is the full counter */
++	u32 processed;	/* bytes already finished for a split CTR request */
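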
+ 	u32 ivmask;
+ 	u32 ctrinit;
+ 
+@@ -79,7 +81,7 @@ struct safexcel_cipher_req {
+ static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ 				struct safexcel_command_desc *cdesc)
+ {
+-	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
++	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD && !(ctx->basic_ctr)) {
+ 		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+ 		/* 32 bit nonce */
+ 		cdesc->control_data.token[0] = ctx->nonce;
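
The hunk above keeps the 4-word nonce token for the existing CTR flavor but skips it when basic_ctr is set, because the two flavors build the counter block differently. A rough sketch of the two 16-byte layouts as this driver assembles them (struct names are illustrative only, not from the driver):

    #include <stdint.h>

    /* Nonce-based CTR (the existing path): the engine assembles the
     * counter block from a key-derived 32-bit nonce plus the caller's
     * 8-byte IV, with the low word starting at ctx->ctrinit. */
    struct ctr_nonce_block {
            uint8_t nonce[4];   /* ctx->nonce, taken from the key */
            uint8_t iv[8];      /* caller-supplied IV */
            uint8_t counter[4]; /* big-endian initial counter */
    };

    /* Basic ctr(aes) (the new path): the caller's IV is the entire
     * 128-bit big-endian counter block, so the driver loads all 16
     * bytes and must handle low-word wrap itself (see accum_iv below). */
    struct ctr_basic_block {
            uint8_t counter_block[16];
    };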
+@@ -513,8 +515,8 @@ static int safexcel_aead_setkey(struct c
+ 	memcpy(ctx->opad, &ostate.state, ctx->state_sz);
+ 
+ 	memzero_explicit(&keys, sizeof(keys));
+-	return 0;
+ 
++	return 0;
+ badkey:
+ 	memzero_explicit(&keys, sizeof(keys));
+ 	return err;
+@@ -622,6 +624,43 @@ static int safexcel_context_control(stru
+ 	return 0;
+ }
+ 
++static int safexcel_queue_req(struct crypto_async_request *base,
++			struct safexcel_cipher_req *sreq,
++			enum safexcel_cipher_direction dir)
++{
++	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
++	struct safexcel_crypto_priv *priv = ctx->priv;
++	int ret, ring;
++
++	sreq->needs_inv = false;
++	sreq->direction = dir;
++
++	if (ctx->base.ctxr) {
++		if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
++			sreq->needs_inv = true;
++			ctx->base.needs_inv = false;
++		}
++	} else {
++		ctx->base.ring = safexcel_select_ring(priv);
++		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
++						 EIP197_GFP_FLAGS(*base),
++						 &ctx->base.ctxr_dma);
++		if (!ctx->base.ctxr)
++			return -ENOMEM;
++	}
++
++	ring = ctx->base.ring;
++
++	spin_lock_bh(&priv->ring[ring].queue_lock);
++	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
++	spin_unlock_bh(&priv->ring[ring].queue_lock);
++
++	queue_work(priv->ring[ring].workqueue,
++		   &priv->ring[ring].work_data.work);
++
++	return ret;
++}
++
+ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ 				      struct crypto_async_request *async,
+ 				      struct scatterlist *src,
+@@ -635,6 +674,7 @@ static int safexcel_handle_req_result(st
+ 	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ 	struct safexcel_result_desc *rdesc;
+ 	int ndesc = 0;
++	int flag;	/* requeue status; stored but not checked */
+ 
+ 	*ret = 0;
+ 
+@@ -677,7 +717,13 @@ static int safexcel_handle_req_result(st
+ 				    crypto_skcipher_ivsize(skcipher)));
+ 	}
+ 
+-	*should_complete = true;
++	if (ctx->basic_ctr && ctx->processed != cryptlen) {
++		*should_complete = false;
++		flag = safexcel_queue_req(async, sreq, sreq->direction);
++	} else {
++		*should_complete = true;
++		ctx->processed = 0;
++	}
+ 
+ 	return ndesc;
+ }
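
This hunk turns the completion handler into a resubmission point: while ctx->processed still trails cryptlen on a basic-CTR request, completion is suppressed and the same request is queued again. A self-contained condensation of that control flow follows; the struct and helper names are hypothetical, and unlike the patch (which parks safexcel_queue_req's return in the unchecked flag variable) the sketch propagates the requeue status:

    #include <stdbool.h>
    #include <stdint.h>

    struct req_state {
            bool basic_ctr;     /* request uses the plain ctr(aes) path */
            uint32_t processed; /* bytes the engine has finished so far */
            uint32_t cryptlen;  /* total bytes the caller asked for */
    };

    static int requeue(struct req_state *st) { (void)st; return 0; /* stub */ }

    /* Hypothetical condensation of the basic-CTR completion decision. */
    static int handle_result_basic_ctr(struct req_state *st,
                                       bool *should_complete)
    {
            if (st->basic_ctr && st->processed != st->cryptlen) {
                    /* Counter blocks remain: resubmit, don't complete. */
                    *should_complete = false;
                    return requeue(st);
            }
            *should_complete = true;
            st->processed = 0; /* reset bookkeeping for the next request */
            return 0;
    }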
+@@ -700,12 +746,16 @@ static int safexcel_send_req(struct cryp
+ 	unsigned int totlen;
+ 	unsigned int totlen_src = cryptlen + assoclen;
+ 	unsigned int totlen_dst = totlen_src;
++	unsigned int pass_byte = 0;	/* bytes done in earlier passes */
++	unsigned int pass;		/* running skip count over the sg list */
+ 	struct safexcel_token *atoken;
+ 	int n_cdesc = 0, n_rdesc = 0;
+ 	int queued, i, ret = 0;
+ 	bool first = true;
+ 
+-	sreq->nr_src = sg_nents_for_len(src, totlen_src);
++	pass_byte = ctx->processed;
++	pass = pass_byte;
++	sreq->nr_src = sg_nents_for_len(src, totlen_src + pass_byte);
+ 
+ 	if (ctx->aead) {
+ 		/*
+@@ -736,7 +786,7 @@ static int safexcel_send_req(struct cryp
+ 				    crypto_skcipher_ivsize(skcipher)));
+ 	}
+ 
+-	sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);
++	sreq->nr_dst = sg_nents_for_len(dst, totlen_dst + pass_byte);
+ 
+ 	/*
+ 	 * Remember actual input length, source buffer length may be
+@@ -798,14 +848,23 @@ static int safexcel_send_req(struct cryp
+ 	for_each_sg(src, sg, sreq->nr_src, i) {
+ 		int len = sg_dma_len(sg);
+ 
++		if (pass) {
++			if (pass >= len) {
++				pass -= len;
++				continue;
++			}
++			len -= pass;
++		}
+ 		/* Do not overflow the request */
+ 		if (queued < len)
+ 			len = queued;
+ 
+ 		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+-					   !(queued - len),
+-					   sg_dma_address(sg), len, totlen,
+-					   ctx->base.ctxr_dma, &atoken);
++					!(queued - len),
++					sg_dma_address(sg) + pass, len,
++					totlen, ctx->base.ctxr_dma, &atoken);
++		pass = 0;
++
+ 		if (IS_ERR(cdesc)) {
+ 			/* No space left in the command descriptor ring */
+ 			ret = PTR_ERR(cdesc);
+@@ -820,6 +879,7 @@ static int safexcel_send_req(struct cryp
+ 		if (!queued)
+ 			break;
+ 	}
++
+ skip_cdesc:
+ 	/* Add context control words and token to first command descriptor */
+ 	safexcel_context_control(ctx, base, sreq, first_cdesc);
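
Both descriptor loops now share a skip-ahead idiom: pass starts at ctx->processed, whole scatterlist entries that fall inside that already-finished prefix are skipped, and the first straddling entry is entered at sg_dma_address(sg) + pass with a shortened length. The same logic over a plain segment array, stripped of the kernel scatterlist API (types and the emit callback are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct seg { uint64_t dma_addr; uint32_t len; };

    /* Emit (addr, len) pieces covering 'queued' bytes, skipping a
     * 'pass'-byte prefix that a previous submission already consumed.
     * Mirrors the cdesc/rdesc loops in the patch. */
    static void emit_after_skip(const struct seg *sgs, size_t n,
                                uint32_t pass, uint32_t queued,
                                void (*emit)(uint64_t addr, uint32_t len))
    {
            for (size_t i = 0; i < n && queued; i++) {
                    uint64_t addr = sgs[i].dma_addr;
                    uint32_t len = sgs[i].len;

                    if (pass) {
                            if (pass >= len) {      /* entirely in prefix */
                                    pass -= len;
                                    continue;
                            }
                            addr += pass;           /* start mid-entry */
                            len -= pass;
                            pass = 0;
                    }
                    if (len > queued)               /* don't overrun request */
                            len = queued;
                    emit(addr, len);
                    queued -= len;
            }
    }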
+@@ -831,11 +891,20 @@ skip_cdesc:
+ 		safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
+ 					cryptlen);
+ 
++	pass = pass_byte;
+ 	/* result descriptors */
+ 	for_each_sg(dst, sg, sreq->nr_dst, i) {
+ 		bool last = (i == sreq->nr_dst - 1);
+ 		u32 len = sg_dma_len(sg);
+ 
++		if (pass) {
++			if (pass >= len) {
++				pass -= len;
++				continue;
++			}
++			len -= pass;
++		}
++
+ 		/* only allow the part of the buffer we know we need */
+ 		if (len > totlen_dst)
+ 			len = totlen_dst;
+@@ -855,9 +924,11 @@ skip_cdesc:
+ 						   len - assoclen);
+ 			assoclen = 0;
+ 		} else {
++
+ 			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+-						   sg_dma_address(sg),
+-						   len);
++					   sg_dma_address(sg) + pass,
++					   len);
++			pass = 0;
+ 		}
+ 		if (IS_ERR(rdesc)) {
+ 			/* No space left in the result descriptor ring */
+@@ -892,6 +963,7 @@ skip_cdesc:
+ 
+ 	*commands = n_cdesc;
+ 	*results = n_rdesc;
++
+ 	return 0;
+ 
+ rdesc_rollback:
+@@ -1033,6 +1105,26 @@ static int safexcel_cipher_send_inv(stru
+ 	return 0;
+ }
+ 
++static void accum_iv(u8 *iv, u32 blocks)
++{
++	u32 *counter;
++	int i;
++
++	for (i = 12; i >= 0; i -= 4) {
++		counter = (u32 *) &iv[i];
++		if (be32_to_cpu(*counter) + blocks >= be32_to_cpu(*counter)) {
++			*counter = cpu_to_be32(be32_to_cpu(*counter) + blocks);
++			blocks = 0;
++		} else {
++			*counter = cpu_to_be32(be32_to_cpu(*counter) + blocks);
++			blocks = 1;
++		}
++
++		if (blocks == 0)
++			break;
++	}
++}
++
+ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
+ 				  int *commands, int *results)
+ {
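
accum_iv treats the 16-byte IV as one 128-bit big-endian integer and adds blocks to it, rippling the carry from the low word at offset 12 toward offset 0; the unsigned comparison detects per-word overflow. A portable userspace equivalent with a worked wrap case (the load/store helpers stand in for the kernel's be32_to_cpu/cpu_to_be32):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t load_be32(const uint8_t *p)
    {
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] << 8) | (uint32_t)p[3];
    }

    static void store_be32(uint8_t *p, uint32_t v)
    {
            p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    /* 128-bit big-endian add: same carry walk as the patch's accum_iv(). */
    static void accum_iv_be128(uint8_t iv[16], uint32_t blocks)
    {
            for (int i = 12; i >= 0 && blocks; i -= 4) {
                    uint32_t old = load_be32(&iv[i]);

                    store_be32(&iv[i], old + blocks);
                    blocks = (old + blocks < old) ? 1 : 0; /* carry out */
            }
    }

    int main(void)
    {
            /* Low word 0xffffffff + 2 blocks wraps and carries upward. */
            uint8_t iv[16] = { 0, 0, 0, 0,  0, 0, 0, 0,
                               0, 0, 0, 5,  0xff, 0xff, 0xff, 0xff };

            accum_iv_be128(iv, 2);
            assert(load_be32(&iv[12]) == 1);
            assert(load_be32(&iv[8]) == 6);
            return 0;
    }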
+@@ -1049,6 +1141,8 @@ static int safexcel_skcipher_send(struct
+ 	} else {
+ 		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ 		u8 input_iv[AES_BLOCK_SIZE];
++		u32 blocks;
++		u32 *counter;
+ 
+ 		/*
+ 		 * Save input IV in case of CBC decrypt mode
+@@ -1056,9 +1150,29 @@ static int safexcel_skcipher_send(struct
+ 		 */
+ 		memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));
+ 
+-		ret = safexcel_send_req(async, ring, sreq, req->src,
++		if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD && ctx->basic_ctr) {
++			counter = (u32 *) &req->iv[12];
++			blocks = (req->cryptlen / ctx->blocksz) - (ctx->processed / ctx->blocksz);
++			if (req->cryptlen % ctx->blocksz)
++				blocks++;
++			if (be32_to_cpu(*counter) + blocks < be32_to_cpu(*counter)) {
++				blocks = 0 - be32_to_cpu(*counter);
++				ret = safexcel_send_req(async, ring, sreq, req->src,
++						req->dst, min(blocks * AES_BLOCK_SIZE, req->cryptlen), 0, 0, input_iv,
++						commands, results);
++				ctx->processed += min(blocks * AES_BLOCK_SIZE, req->cryptlen);
++			} else {
++				ret = safexcel_send_req(async, ring, sreq, req->src,
++						req->dst, req->cryptlen - ctx->processed,
++						0, 0, input_iv, commands, results);
++				ctx->processed = req->cryptlen;
++			}
++			accum_iv(req->iv, blocks);
++		} else {
++			ret = safexcel_send_req(async, ring, sreq, req->src,
+ 					req->dst, req->cryptlen, 0, 0, input_iv,
+ 					commands, results);
++		}
+ 	}
+ 
+ 	sreq->rdescs = *results;
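
The send path computes how many counter blocks the remainder of the request needs; if adding them to the IV's low word would wrap, it submits only the blocks up to the wrap (0 - counter of them) and leaves ctx->processed short so the completion handler requeues the rest. A standalone rendering of that arithmetic with a worked split (the helper name is mine; the formulas mirror the hunk above):

    #include <stdint.h>
    #include <stdio.h>

    #define AES_BLOCK_SIZE 16u

    static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

    /* Bytes submitted in one pass: everything up to the 32-bit counter
     * wrap, or the whole remainder if no wrap would occur. */
    static uint32_t bytes_this_pass(uint32_t counter, uint32_t cryptlen,
                                    uint32_t processed)
    {
            uint32_t blocks = cryptlen / AES_BLOCK_SIZE -
                              processed / AES_BLOCK_SIZE;

            if (cryptlen % AES_BLOCK_SIZE)
                    blocks++;
            if (counter + blocks < counter)     /* would wrap mid-request */
                    blocks = 0u - counter;      /* distance to the wrap */
            else
                    return cryptlen - processed;
            return min_u32(blocks * AES_BLOCK_SIZE, cryptlen);
    }

    int main(void)
    {
            /* Counter 3 blocks from wrap, 8-block request: first pass
             * covers 48 bytes, the requeued pass the remaining 80. */
            printf("%u\n", bytes_this_pass(0xfffffffdu, 128, 0));  /* 48 */
            printf("%u\n", bytes_this_pass(0x00000000u, 128, 48)); /* 80 */
            return 0;
    }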
+@@ -1152,43 +1266,6 @@ static int safexcel_aead_exit_inv(struct
+ 	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+ }
+ 
+-static int safexcel_queue_req(struct crypto_async_request *base,
+-			struct safexcel_cipher_req *sreq,
+-			enum safexcel_cipher_direction dir)
+-{
+-	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+-	struct safexcel_crypto_priv *priv = ctx->priv;
+-	int ret, ring;
+-
+-	sreq->needs_inv = false;
+-	sreq->direction = dir;
+-
+-	if (ctx->base.ctxr) {
+-		if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
+-			sreq->needs_inv = true;
+-			ctx->base.needs_inv = false;
+-		}
+-	} else {
+-		ctx->base.ring = safexcel_select_ring(priv);
+-		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+-						 EIP197_GFP_FLAGS(*base),
+-						 &ctx->base.ctxr_dma);
+-		if (!ctx->base.ctxr)
+-			return -ENOMEM;
+-	}
+-
+-	ring = ctx->base.ring;
+-
+-	spin_lock_bh(&priv->ring[ring].queue_lock);
+-	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
+-	spin_unlock_bh(&priv->ring[ring].queue_lock);
+-
+-	queue_work(priv->ring[ring].workqueue,
+-		   &priv->ring[ring].work_data.work);
+-
+-	return ret;
+-}
+-
+ static int safexcel_encrypt(struct skcipher_request *req)
+ {
+ 	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+@@ -1216,6 +1293,8 @@ static int safexcel_skcipher_cra_init(st
+ 	ctx->base.send = safexcel_skcipher_send;
+ 	ctx->base.handle_result = safexcel_skcipher_handle_result;
+ 	ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
++	ctx->basic_ctr = false;
++	ctx->processed = 0;
+ 	ctx->ctrinit = 1;
+ 	return 0;
+ }
+@@ -1496,6 +1575,44 @@ struct safexcel_alg_template safexcel_al
+ 	},
+ };
+ 
++static int safexcel_skcipher_basic_aes_ctr_cra_init(struct crypto_tfm *tfm)
++{
++	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	safexcel_skcipher_cra_init(tfm);
++	ctx->alg = SAFEXCEL_AES;
++	ctx->blocksz = AES_BLOCK_SIZE;
++	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
++	ctx->basic_ctr = true;
++	return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_basic_ctr_aes = {
++	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
++	.algo_mask = SAFEXCEL_ALG_AES,
++	.alg.skcipher = {
++		.setkey = safexcel_skcipher_aes_setkey,
++		.encrypt = safexcel_encrypt,
++		.decrypt = safexcel_decrypt,
++		.min_keysize = AES_MIN_KEY_SIZE,
++		.max_keysize = AES_MAX_KEY_SIZE,
++		.ivsize = AES_BLOCK_SIZE,
++		.base = {
++			.cra_name = "ctr(aes)",
++			.cra_driver_name = "safexcel-basic-ctr-aes",
++			.cra_priority = SAFEXCEL_CRA_PRIORITY,
++			.cra_flags = CRYPTO_ALG_ASYNC |
++					 CRYPTO_ALG_KERN_DRIVER_ONLY,
++			.cra_blocksize = 1,
++			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++			.cra_alignmask = 0,
++			.cra_init = safexcel_skcipher_basic_aes_ctr_cra_init,
++			.cra_exit = safexcel_skcipher_cra_exit,
++			.cra_module = THIS_MODULE,
++		},
++	},
++};
++
+ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+ 			       unsigned int len)
+ {
+@@ -1724,6 +1841,9 @@ static int safexcel_aead_cra_init(struct
+ 	ctx->aead = true;
+ 	ctx->base.send = safexcel_aead_send;
+ 	ctx->base.handle_result = safexcel_aead_handle_result;
++	ctx->basic_ctr = false;
++	ctx->processed = 0;
++
+ 	return 0;
+ }
+ 
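
With the template registered, the new implementation is selectable through the standard kernel skcipher interface under "ctr(aes)" (priority permitting). A minimal in-kernel caller sketch using only stock crypto API calls — generic usage, not code from this patch, with error handling abbreviated:

    #include <crypto/skcipher.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* One-shot in-place AES-CTR encrypt of 'len' bytes in 'buf'
     * (assumes process context). */
    static int ctr_aes_encrypt_once(u8 *buf, unsigned int len,
                                    const u8 *key, unsigned int keylen,
                                    u8 iv[16])
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (err)
                    goto out_tfm;
            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_tfm;
            }
            sg_init_one(&sg, buf, len);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                          CRYPTO_TFM_REQ_MAY_SLEEP,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
            skcipher_request_free(req);
    out_tfm:
            crypto_free_skcipher(tfm);
            return err;
    }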
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/patches-5.4.inc b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/patches-5.4.inc
index 38511b9..4571e0e 100644
--- a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/patches-5.4.inc
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/patches-5.4.inc
@@ -217,6 +217,7 @@
     file://999-2739-drivers_net_phy_add_an8855_gsw.patch \
     file://999-2739-net-phy-aquantia-add-CUX3410.patch \
     file://999-2739-net_dsa_add_tag_arht.patch \
+    file://999-2740-crypto-add-eip197-aes-ctr-support.patch \
     file://999-2800-misc-add-mtk-platform.patch \
     file://999-2850-fips-140-3-compliance.patch \
     file://999-2900-dts-mt7622-enable-new-mtk-snand-for-ubi.patch \