// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 MediaTek Inc.
 *
 * Author: Chris.Chou <chris.chou@mediatek.com>
 *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <mtk_eth_soc.h>

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
#include <mtk_hnat/hnat.h>
#include <mtk_hnat/nf_hnat_mtk.h>
#endif // HNAT

#include <pce/cdrt.h>
#include <pce/cls.h>
#include <pce/netsys.h>

#include <crypto-eip/ddk/configs/cs_hwpal_ext.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken_ext.h>

#include "crypto-eip/crypto-eip.h"
#include "crypto-eip/ddk-wrapper.h"
#include "crypto-eip/internal.h"

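/* All currently offloaded xfrm states, protected by the embedded spinlock. */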
static struct xfrm_params_list xfrm_params_list = {
	.list = LIST_HEAD_INIT(xfrm_params_list.list),
	.lock = __SPIN_LOCK_UNLOCKED(xfrm_params_list.lock),
};

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);

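/* True for IPv4 packets that HNAT marked as TOPS tunnel traffic (UDP or GRE). */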
static inline bool is_tops_tunnel(struct sk_buff *skb)
{
	return skb_hnat_tops(skb) && (ntohs(skb->protocol) == ETH_P_IP) &&
	       (ip_hdr(skb)->protocol == IPPROTO_UDP ||
		ip_hdr(skb)->protocol == IPPROTO_GRE);
}

static inline bool is_tcp(struct sk_buff *skb)
{
	return (ntohs(skb->protocol) == ETH_P_IP) && (ip_hdr(skb)->protocol == IPPROTO_TCP);
}

static inline bool is_hnat_rate_reach(struct sk_buff *skb)
{
	return is_magic_tag_valid(skb) && (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH);
}
#endif // HNAT

struct xfrm_params_list *mtk_xfrm_params_list_get(void)
{
	return &xfrm_params_list;
}

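/* Clear the CDRT descriptor and write the cleared entry back to the PCE. */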
static void mtk_xfrm_offload_cdrt_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	if (!xfrm_params->cdrt)
		return;

	memset(&xfrm_params->cdrt->desc, 0, sizeof(struct cdrt_desc));

	mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

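/*
 * Fill the CDRT descriptor with the physical address of the transform
 * record plus the token and service settings, then write it to the PCE.
 */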
static int mtk_xfrm_offload_cdrt_setup(struct mtk_xfrm_params *xfrm_params)
{
	struct cdrt_desc *cdesc = &xfrm_params->cdrt->desc;

	cdesc->desc1.common.type = 3;
	cdesc->desc1.token_len = 48;
	cdesc->desc1.p_tr[0] = __pa(xfrm_params->p_tr) | 2;

	cdesc->desc2.hw_srv = 3;
	cdesc->desc2.allow_pad = 1;
	cdesc->desc2.strip_pad = 1;

	return mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

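/* Clear and release the classifier entry tied to this state's CDRT. */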
static void mtk_xfrm_offload_cls_entry_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	if (!xfrm_params->cdrt || !xfrm_params->cdrt->cls)
		return;

	memset(&xfrm_params->cdrt->cls->cdesc, 0, sizeof(struct cls_desc));

	mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);

	mtk_pce_cls_entry_free(xfrm_params->cdrt->cls);
}

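/*
 * Allocate a classifier entry that matches inbound ESP packets by SPI and
 * forwards them to the PPE with this state's CDRT index.
 */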
static int mtk_xfrm_offload_cls_entry_setup(struct mtk_xfrm_params *xfrm_params)
{
	struct cls_desc *cdesc;

	xfrm_params->cdrt->cls = mtk_pce_cls_entry_alloc();
	if (IS_ERR(xfrm_params->cdrt->cls))
		return PTR_ERR(xfrm_params->cdrt->cls);

	cdesc = &xfrm_params->cdrt->cls->cdesc;

	if (mtk_crypto_ppe_get_num() == 1)
		CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
	else
		CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE1);
	CLS_DESC_DATA(cdesc, tport_idx, 0x2);
	CLS_DESC_DATA(cdesc, cdrt_idx, xfrm_params->cdrt->idx);

	CLS_DESC_MASK_DATA(cdesc, tag,
			   CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_HDR);
	CLS_DESC_MASK_DATA(cdesc, l4_udp_hdr_nez,
			   CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK,
			   CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK);
	CLS_DESC_MASK_DATA(cdesc, l4_type,
			   CLS_DESC_L4_TYPE_MASK, IPPROTO_ESP);
	CLS_DESC_MASK_DATA(cdesc, l4_valid,
			   0x3,
			   CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
			   CLS_DESC_VALID_LOWER_HALF_WORD_BIT);
	CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data,
			   0xFFFFFFFF, be32_to_cpu(xfrm_params->xs->id.spi));

	return mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);
}

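/* Tear down the CDRT entry and release the transform record buffer. */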
static void mtk_xfrm_offload_context_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	mtk_xfrm_offload_cdrt_tear_down(xfrm_params);

	/* TODO: free context */
	devm_kfree(crypto_dev, xfrm_params->p_tr);

	/* TODO: transform record tear down */
}

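/*
 * Build the transform record for this xfrm state (tunnel or transport
 * mode), copy it into the per-state buffer and program the CDRT entry.
 */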
static int mtk_xfrm_offload_context_setup(struct mtk_xfrm_params *xfrm_params)
{
	u32 *tr;
	int ret;

	xfrm_params->p_tr = devm_kcalloc(crypto_dev, sizeof(u32),
					 TRANSFORM_RECORD_LEN, GFP_KERNEL);
	if (unlikely(!xfrm_params->p_tr))
		return -ENOMEM;

	switch (xfrm_params->xs->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TUNNEL);
		break;
	case XFRM_MODE_TRANSPORT:
		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TRANSPORT);
		break;
	default:
		ret = -ENOMEM;
		goto err_out;
	}

	if (!tr) {
		ret = -EINVAL;
		goto err_out;
	}

	memcpy(xfrm_params->p_tr, tr, sizeof(u32) * TRANSFORM_RECORD_LEN);

	/* TODO: free tr */

	return mtk_xfrm_offload_cdrt_setup(xfrm_params);

err_out:
	devm_kfree(crypto_dev, xfrm_params->p_tr);

	return ret;
}

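/* Allocate an encrypt CDRT entry and build the outbound (tx) context. */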
static int mtk_xfrm_offload_state_add_outbound(struct xfrm_state *xs,
					       struct mtk_xfrm_params *xfrm_params)
{
	int ret;

	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_ENCRYPT);
	if (IS_ERR(xfrm_params->cdrt))
		return PTR_ERR(xfrm_params->cdrt);

	xfrm_params->dir = SAB_DIRECTION_OUTBOUND;

	ret = mtk_xfrm_offload_context_setup(xfrm_params);
	if (ret)
		goto free_cdrt;

	return ret;

free_cdrt:
	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	return ret;
}

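/*
 * Allocate a decrypt CDRT entry, build the inbound (rx) context and
 * install the classifier entry matching the state's SPI.
 */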
static int mtk_xfrm_offload_state_add_inbound(struct xfrm_state *xs,
					      struct mtk_xfrm_params *xfrm_params)
{
	int ret;

	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_DECRYPT);
	if (IS_ERR(xfrm_params->cdrt))
		return PTR_ERR(xfrm_params->cdrt);

	xfrm_params->dir = SAB_DIRECTION_INBOUND;

	ret = mtk_xfrm_offload_context_setup(xfrm_params);
	if (ret)
		goto free_cdrt;

	ret = mtk_xfrm_offload_cls_entry_setup(xfrm_params);
	if (ret)
		goto tear_down_context;

	return ret;

tear_down_context:
	mtk_xfrm_offload_context_tear_down(xfrm_params);

free_cdrt:
	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	return ret;
}

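/*
 * Offload a new xfrm state. Only IPv4 ESP in tunnel or transport mode is
 * accepted; on success the state is tracked in xfrm_params_list and its
 * offload_handle points at the per-state parameters.
 */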
int mtk_xfrm_offload_state_add(struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;
	unsigned long flags;
	int ret = 0;

	/* TODO: maybe support IPv6 in the future? */
	if (xs->props.family != AF_INET) {
		CRYPTO_NOTICE("Only IPv4 xfrm states may be offloaded\n");
		return -EINVAL;
	}

	/* only support ESP right now */
	if (xs->id.proto != IPPROTO_ESP) {
		CRYPTO_NOTICE("Unsupported protocol 0x%04x\n", xs->id.proto);
		return -EINVAL;
	}

	/* only support tunnel mode or transport mode */
	if (!(xs->outer_mode.encap == XFRM_MODE_TUNNEL
	      || xs->outer_mode.encap == XFRM_MODE_TRANSPORT))
		return -EINVAL;

	xfrm_params = devm_kzalloc(crypto_dev,
				   sizeof(struct mtk_xfrm_params),
				   GFP_KERNEL);
	if (!xfrm_params)
		return -ENOMEM;

	xfrm_params->xs = xs;
	INIT_LIST_HEAD(&xfrm_params->node);

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
		/* rx path */
		ret = mtk_xfrm_offload_state_add_inbound(xs, xfrm_params);
	else
		/* tx path */
		ret = mtk_xfrm_offload_state_add_outbound(xs, xfrm_params);

	if (ret) {
		devm_kfree(crypto_dev, xfrm_params);
		goto out;
	}

	xs->xso.offload_handle = (unsigned long)xfrm_params;

	spin_lock_irqsave(&xfrm_params_list.lock, flags);

	list_add_tail(&xfrm_params->node, &xfrm_params_list.list);

	spin_unlock_irqrestore(&xfrm_params_list.lock, flags);
out:
	return ret;
}

void mtk_xfrm_offload_state_delete(struct xfrm_state *xs)
{
}

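/*
 * Release everything set up for an offloaded state: unlink it from the
 * list, tear down the classifier entry (inbound only), the context and
 * the CDRT entry, then free the per-state parameters.
 */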
void mtk_xfrm_offload_state_free(struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;

	if (!xs->xso.offload_handle)
		return;

	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;

	list_del(&xfrm_params->node);

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
		mtk_xfrm_offload_cls_entry_tear_down(xfrm_params);

	mtk_xfrm_offload_context_tear_down(xfrm_params);

	if (xfrm_params->cdrt)
		mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	devm_kfree(crypto_dev, xfrm_params);
}

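/* Free every offloaded state still tracked in xfrm_params_list. */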
void mtk_xfrm_offload_state_tear_down(void)
{
	struct mtk_xfrm_params *xfrm_params, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&xfrm_params_list.lock, flags);

	list_for_each_entry_safe(xfrm_params, tmp, &xfrm_params_list.list, node)
		mtk_xfrm_offload_state_free(xfrm_params->xs);

	spin_unlock_irqrestore(&xfrm_params_list.lock, flags);
}

int mtk_xfrm_offload_policy_add(struct xfrm_policy *xp)
{
	return 0;
}

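/*
 * Look up the neighbour entry for the outer destination so the Ethernet
 * header can be filled in. If none exists yet, create one, hand the skb
 * to neigh_output() for resolution and return NULL.
 */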
static inline struct neighbour *mtk_crypto_find_dst_mac(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);

	neigh = __ipv4_neigh_lookup_noref(dst->dev, xs->id.daddr.a4);
	if (unlikely(!neigh)) {
		CRYPTO_INFO("%s: %s No neigh (daddr=%pI4)\n", __func__, dst->dev->name,
			    &xs->id.daddr.a4);
		neigh = __neigh_create(&arp_tbl, &xs->id.daddr.a4, dst->dev, false);
		neigh_output(neigh, skb, false);
		return NULL;
	}

	return neigh;
}

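/*
 * Push an Ethernet header onto the ESP packet, tag it with the state's
 * CDRT index and transmit it directly on dst->dev so the EIP197 performs
 * the encryption. Returns true in all cases.
 */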
bool mtk_xfrm_offload_ok(struct sk_buff *skb,
			 struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	int fill_inner_info = 0;

	rcu_read_lock_bh();

	neigh = mtk_crypto_find_dst_mac(skb, xs);
	if (!neigh) {
		rcu_read_unlock_bh();
		return true;
	}

	/*
	 * For packets that have passed through a VTI (route-based VTI),
	 * dev_queue_xmit() called at the network layer causes both
	 * skb->mac_header and skb->network_header to point to the IP header.
	 */
	if (skb->mac_header == skb->network_header)
		fill_inner_info = 1;

	skb_push(skb, sizeof(struct ethhdr));
	skb_reset_mac_header(skb);

	eth_hdr(skb)->h_proto = htons(ETH_P_IP);
	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, dst->dev->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
	skb_hnat_cdrt(skb) = xfrm_params->cdrt->idx;
	/*
	 * The EIP197 does not support fragmentation. As a result, we cannot
	 * bind UDP flows, since fragmentation could break the traffic.
	 */
	if (ra_sw_nat_hook_tx &&
	    ((is_tops_tunnel(skb) || is_tcp(skb)) && is_hnat_rate_reach(skb)))
		hnat_bind_crypto_entry(skb, dst->dev, fill_inner_info);

	/* Set magic tag for tport setting, reset to 0 after tport is set */
	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
#else
	skb_tnl_cdrt(skb) = xfrm_params->cdrt->idx;
	skb_tnl_magic_tag(skb) = TNL_MAGIC_TAG;
#endif // HNAT

	/* Since we're going to tx directly, set skb->dev to dst->dev */
	skb->dev = dst->dev;

	/*
	 * Since the skb headroom may not be copied during segmentation, we
	 * cannot rely on headroom data (e.g. cdrt) to decide whether a packet
	 * should be sent to the EIP197. As a workaround, only skbs with
	 * inner_protocol set to ESP are sent to the EIP197.
	 */
	skb->inner_protocol = IPPROTO_ESP;
	/*
	 * Transmit the packet to the EIP197. To avoid conflicts between the
	 * SW and HW sequence numbers, all offloadable packets are sent to
	 * the EIP197.
	 */
	dev_queue_xmit(skb);

	return true;
}