blob: 9e101e8b33c1122ffdebdabfc28375106aeffd91 [file] [log] [blame]
developer94c513e2023-08-21 17:33:25 +08001// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2023 MediaTek Inc.
4 *
5 * Author: Chris.Chou <chris.chou@mediatek.com>
6 * Ren-Ting Wang <ren-ting.wang@mediatek.com>
7 */
8
9#include <linux/bitops.h>
10
11#include <mtk_eth_soc.h>
12#include <mtk_hnat/hnat.h>
13#include <mtk_hnat/nf_hnat_mtk.h>
14
15#include <pce/cdrt.h>
16#include <pce/cls.h>
17#include <pce/netsys.h>
18
19#include <crypto-eip/ddk/configs/cs_hwpal_ext.h>
20#include <crypto-eip/ddk/kit/iotoken/iotoken.h>
21#include <crypto-eip/ddk/kit/iotoken/iotoken_ext.h>
22
23#include "crypto-eip/crypto-eip.h"
24#include "crypto-eip/ddk-wrapper.h"
25#include "crypto-eip/internal.h"
26
27static LIST_HEAD(xfrm_params_head);
28
29static void mtk_xfrm_offload_cdrt_tear_down(struct mtk_xfrm_params *xfrm_params)
30{
31 memset(&xfrm_params->cdrt->desc, 0, sizeof(struct cdrt_desc));
32
33 mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
34}
35
36static int mtk_xfrm_offload_cdrt_setup(struct mtk_xfrm_params *xfrm_params)
37{
38 struct cdrt_desc *cdesc = &xfrm_params->cdrt->desc;
39
40 cdesc->desc1.common.type = 3;
41 cdesc->desc1.token_len = 48;
42 cdesc->desc1.p_tr[0] = __pa(xfrm_params->p_tr) | 2;
43
44 cdesc->desc2.hw_srv = 2;
45 cdesc->desc2.allow_pad = 1;
46 cdesc->desc2.strip_pad = 1;
47
48 return mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
49}
50
developer8ac909d2023-08-25 11:03:33 +080051static void mtk_xfrm_offload_cls_entry_tear_down(struct mtk_xfrm_params *xfrm_params)
52{
53 memset(&xfrm_params->cdrt->cls->cdesc, 0, sizeof(struct cls_desc));
54
55 mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);
56
57 mtk_pce_cls_entry_free(xfrm_params->cdrt->cls);
58}
59
developer94c513e2023-08-21 17:33:25 +080060static int mtk_xfrm_offload_cls_entry_setup(struct mtk_xfrm_params *xfrm_params)
61{
developer8ac909d2023-08-25 11:03:33 +080062 struct cls_desc *cdesc;
63
64 xfrm_params->cdrt->cls = mtk_pce_cls_entry_alloc();
65 if (IS_ERR(xfrm_params->cdrt->cls))
66 return PTR_ERR(xfrm_params->cdrt->cls);
developer94c513e2023-08-21 17:33:25 +080067
developer8ac909d2023-08-25 11:03:33 +080068 cdesc = &xfrm_params->cdrt->cls->cdesc;
developer94c513e2023-08-21 17:33:25 +080069
developer8ac909d2023-08-25 11:03:33 +080070 CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
71 CLS_DESC_DATA(cdesc, tport_idx, 0x2);
72 CLS_DESC_DATA(cdesc, cdrt_idx, xfrm_params->cdrt->idx);
73
74 CLS_DESC_MASK_DATA(cdesc, tag,
75 CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_HDR);
76 CLS_DESC_MASK_DATA(cdesc, l4_udp_hdr_nez,
77 CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK,
78 CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK);
79 CLS_DESC_MASK_DATA(cdesc, l4_type,
80 CLS_DESC_L4_TYPE_MASK, IPPROTO_ESP);
81 CLS_DESC_MASK_DATA(cdesc, l4_valid,
82 0x3,
83 CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
84 CLS_DESC_VALID_LOWER_HALF_WORD_BIT);
85 CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data,
86 0xFFFFFFFF, be32_to_cpu(xfrm_params->xs->id.spi));
87
88 return mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);
developer94c513e2023-08-21 17:33:25 +080089}
90
91static void mtk_xfrm_offload_context_tear_down(struct mtk_xfrm_params *xfrm_params)
92{
93 mtk_xfrm_offload_cdrt_tear_down(xfrm_params);
94
95 /* TODO: free context */
96 devm_kfree(crypto_dev, xfrm_params->p_tr);
97
98 /* TODO: transform record tear down */
99}
100
101static int mtk_xfrm_offload_context_setup(struct mtk_xfrm_params *xfrm_params)
102{
103 u32 *tr;
104 int ret;
105
106 xfrm_params->p_tr = devm_kcalloc(crypto_dev, sizeof(u32),
107 TRANSFORM_RECORD_LEN, GFP_KERNEL);
108 if (unlikely(!xfrm_params->p_tr))
109 return -ENOMEM;
110
111 switch (xfrm_params->xs->outer_mode.encap) {
112 case XFRM_MODE_TUNNEL:
113 tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TUNNEL);
114 break;
115 case XFRM_MODE_TRANSPORT:
116 tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TRANSPORT);
117 break;
118 default:
119 ret = -ENOMEM;
120 goto err_out;
121 }
122
123 if (!tr) {
124 ret = -EINVAL;
125 goto err_out;
126 }
127
128 memcpy(xfrm_params->p_tr, tr, sizeof(u32) * TRANSFORM_RECORD_LEN);
129
130 /* TODO: free tr */
131
132 return mtk_xfrm_offload_cdrt_setup(xfrm_params);
133
134err_out:
135 devm_kfree(crypto_dev, xfrm_params->p_tr);
136
137 return ret;
138}
139
140static int mtk_xfrm_offload_state_add_outbound(struct xfrm_state *xs,
141 struct mtk_xfrm_params *xfrm_params)
142{
143 int ret;
144
145 xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_ENCRYPT);
146 if (IS_ERR(xfrm_params->cdrt))
147 return PTR_ERR(xfrm_params->cdrt);
148
149 xfrm_params->dir = SAB_DIRECTION_OUTBOUND;
150
151 ret = mtk_xfrm_offload_context_setup(xfrm_params);
152 if (ret)
153 goto free_cdrt;
154
155 return ret;
156
157free_cdrt:
158 mtk_pce_cdrt_entry_free(xfrm_params->cdrt);
159
160 return ret;
161}
162
163static int mtk_xfrm_offload_state_add_inbound(struct xfrm_state *xs,
164 struct mtk_xfrm_params *xfrm_params)
165{
166 int ret;
167
168 xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_DECRYPT);
169 if (IS_ERR(xfrm_params->cdrt))
170 return PTR_ERR(xfrm_params->cdrt);
171
172 xfrm_params->dir = SAB_DIRECTION_INBOUND;
173
174 ret = mtk_xfrm_offload_context_setup(xfrm_params);
175 if (ret)
176 goto free_cdrt;
177
178 ret = mtk_xfrm_offload_cls_entry_setup(xfrm_params);
179 if (ret)
180 goto tear_down_context;
181
182 return ret;
183
184tear_down_context:
185 mtk_xfrm_offload_context_tear_down(xfrm_params);
186
187free_cdrt:
188 mtk_pce_cdrt_entry_free(xfrm_params->cdrt);
189
190 return ret;
191}
192
193int mtk_xfrm_offload_state_add(struct xfrm_state *xs)
194{
195 struct mtk_xfrm_params *xfrm_params;
196 int ret = 0;
197
198 /* TODO: maybe support IPv6 in the future? */
199 if (xs->props.family != AF_INET) {
200 CRYPTO_NOTICE("Only IPv4 xfrm states may be offloaded\n");
201 return -EINVAL;
202 }
203
204 /* only support ESP right now */
205 if (xs->id.proto != IPPROTO_ESP) {
206 CRYPTO_NOTICE("Unsupported protocol 0x%04x\n", xs->id.proto);
207 return -EINVAL;
208 }
209
210 /* only support tunnel mode or transport mode */
211 if (!(xs->outer_mode.encap == XFRM_MODE_TUNNEL
212 || xs->outer_mode.encap == XFRM_MODE_TRANSPORT))
213 return -EINVAL;
214
215 xfrm_params = devm_kzalloc(crypto_dev,
216 sizeof(struct mtk_xfrm_params),
217 GFP_KERNEL);
218 if (!xfrm_params)
219 return -ENOMEM;
220
221 xfrm_params->xs = xs;
222 INIT_LIST_HEAD(&xfrm_params->node);
223
224 if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
225 /* rx path */
226 ret = mtk_xfrm_offload_state_add_inbound(xs, xfrm_params);
227 else
228 /* tx path */
229 ret = mtk_xfrm_offload_state_add_outbound(xs, xfrm_params);
230
231 if (ret) {
232 devm_kfree(crypto_dev, xfrm_params);
233 goto out;
234 }
235
236 xs->xso.offload_handle = (unsigned long)xfrm_params;
237 list_add_tail(&xfrm_params->node, &xfrm_params_head);
238out:
239 return ret;
240}
241
/* xfrm device callback for state deletion.  Intentionally empty: all
 * resource release happens in mtk_xfrm_offload_state_free().
 * NOTE(review): presumably the xfrm core always follows delete with
 * free for offloaded states — confirm against the xfrm device API.
 */
void mtk_xfrm_offload_state_delete(struct xfrm_state *xs)
{
}
245
246void mtk_xfrm_offload_state_free(struct xfrm_state *xs)
247{
248 struct mtk_xfrm_params *xfrm_params;
249
250 if (!xs->xso.offload_handle)
251 return;
252
253 xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;
254
255 list_del(&xfrm_params->node);
256
257 if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
developer8ac909d2023-08-25 11:03:33 +0800258 mtk_xfrm_offload_cls_entry_tear_down(xfrm_params);
developer94c513e2023-08-21 17:33:25 +0800259
260 mtk_xfrm_offload_context_tear_down(xfrm_params);
261
262 mtk_pce_cdrt_entry_free(xfrm_params->cdrt);
263
264 devm_kfree(crypto_dev, xfrm_params);
265}
266
/* Free every offloaded xfrm state still tracked in xfrm_params_head
 * (e.g. on module unload).  The _safe iterator is required because
 * mtk_xfrm_offload_state_free() removes each node from the list.
 */
void mtk_xfrm_offload_state_tear_down(void)
{
	struct mtk_xfrm_params *xfrm_params, *tmp;

	list_for_each_entry_safe(xfrm_params, tmp, &xfrm_params_head, node)
		mtk_xfrm_offload_state_free(xfrm_params->xs);
}
274
/* xfrm device callback for policy offload.  Stub: no per-policy
 * hardware state is needed, so every policy is accepted as-is.
 */
int mtk_xfrm_offload_policy_add(struct xfrm_policy *xp)
{
	return 0;
}
279
280bool mtk_xfrm_offload_ok(struct sk_buff *skb,
281 struct xfrm_state *xs)
282{
283 struct mtk_xfrm_params *xfrm_params;
284
285 xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;
286 skb_hnat_cdrt(skb) = xfrm_params->cdrt->idx;
287
288 return false;
289}