// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 MediaTek Inc.
 *
 * Author: Chris.Chou <chris.chou@mediatek.com>
 *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <mtk_eth_soc.h>

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
#include <mtk_hnat/hnat.h>
#include <mtk_hnat/nf_hnat_mtk.h>
#endif // HNAT

#include <pce/cdrt.h>
#include <pce/cls.h>
#include <pce/netsys.h>

#include <crypto-eip/ddk/kit/iotoken/iotoken.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken_ext.h>

#include "crypto-eip/crypto-eip.h"
#include "crypto-eip/ddk-wrapper.h"
#include "crypto-eip/internal.h"

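/*
 * All offloaded xfrm states are linked on this list so that they can be
 * flushed in one pass by mtk_xfrm_offload_state_tear_down(); the spinlock
 * serializes list updates from the state add/free callbacks.
 */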
static struct xfrm_params_list xfrm_params_list = {
	.list = LIST_HEAD_INIT(xfrm_params_list.list),
	.lock = __SPIN_LOCK_UNLOCKED(xfrm_params_list.lock),
};

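/*
 * Helpers used on the outbound path to decide whether a flow is eligible
 * for HNAT binding (see mtk_xfrm_offload_ok()).
 */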
#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);

static inline bool is_tops_tunnel(struct sk_buff *skb)
{
	return skb_hnat_tops(skb) && (ntohs(skb->protocol) == ETH_P_IP) &&
	       (ip_hdr(skb)->protocol == IPPROTO_UDP ||
		ip_hdr(skb)->protocol == IPPROTO_GRE);
}

static inline bool is_tcp(struct sk_buff *skb)
{
	return (ntohs(skb->protocol) == ETH_P_IP) && (ip_hdr(skb)->protocol == IPPROTO_TCP);
}

static inline bool is_hnat_rate_reach(struct sk_buff *skb)
{
	return is_magic_tag_valid(skb) && (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH);
}
#endif // HNAT

struct xfrm_params_list *mtk_xfrm_params_list_get(void)
{
	return &xfrm_params_list;
}

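/* Zero the CDRT descriptor and write the cleared entry back to the PCE. */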
static void mtk_xfrm_offload_cdrt_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	if (!xfrm_params->cdrt)
		return;

	memset(&xfrm_params->cdrt->desc, 0, sizeof(struct cdrt_desc));

	mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

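/*
 * Program the CDRT descriptor so the packet engine can hand packets of this
 * SA to the EIP197 with the right transform record. The constant values
 * below (type, token_len, hw_srv and the low bits OR'd into the transform
 * record address) follow the EIP197/PCE descriptor layout and are taken
 * as-is from the vendor configuration rather than derived here.
 */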
static int mtk_xfrm_offload_cdrt_setup(struct mtk_xfrm_params *xfrm_params)
{
	struct cdrt_desc *cdesc = &xfrm_params->cdrt->desc;

	cdesc->desc1.common.type = 3;
	cdesc->desc1.token_len = 48;
	cdesc->desc1.p_tr[0] = __pa(xfrm_params->p_tr) | 2;

	cdesc->desc2.hw_srv = 3;
	cdesc->desc2.allow_pad = 1;
	cdesc->desc2.strip_pad = 1;

	return mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

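/* Clear the classifier entry, write it back, then return it to the pool. */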
static void mtk_xfrm_offload_cls_entry_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	if (!xfrm_params->cdrt || !xfrm_params->cdrt->cls)
		return;

	memset(&xfrm_params->cdrt->cls->cdesc, 0, sizeof(struct cls_desc));

	mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);

	mtk_pce_cls_entry_free(xfrm_params->cdrt->cls);
}

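/*
 * Install a classifier rule for an inbound SA: match IPv4 ESP packets whose
 * SPI equals this state's SPI, forward them to the PPE (PPE0 when a single
 * PPE is in use, PPE1 otherwise) with tport 0x2, and carry this SA's CDRT
 * index so the decryption context can be located.
 */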
static int mtk_xfrm_offload_cls_entry_setup(struct mtk_xfrm_params *xfrm_params)
{
	struct cls_desc *cdesc;

	xfrm_params->cdrt->cls = mtk_pce_cls_entry_alloc();
	if (IS_ERR(xfrm_params->cdrt->cls))
		return PTR_ERR(xfrm_params->cdrt->cls);

	cdesc = &xfrm_params->cdrt->cls->cdesc;

	if (mtk_crypto_ppe_get_num() == 1)
		CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
	else
		CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE1);
	CLS_DESC_DATA(cdesc, tport_idx, 0x2);
	CLS_DESC_DATA(cdesc, cdrt_idx, xfrm_params->cdrt->idx);

	CLS_DESC_MASK_DATA(cdesc, tag,
			   CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_HDR);
	CLS_DESC_MASK_DATA(cdesc, l4_udp_hdr_nez,
			   CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK,
			   CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK);
	CLS_DESC_MASK_DATA(cdesc, l4_type,
			   CLS_DESC_L4_TYPE_MASK, IPPROTO_ESP);
	CLS_DESC_MASK_DATA(cdesc, l4_valid,
			   0x3,
			   CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
			   CLS_DESC_VALID_LOWER_HALF_WORD_BIT);
	CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data,
			   0xFFFFFFFF, be32_to_cpu(xfrm_params->xs->id.spi));

	return mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);
}

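/*
 * Release the EIP197 side of an SA: clear the CDRT entry, then invalidate
 * and free the transform record referenced by p_handle.
 */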
static void mtk_xfrm_offload_context_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	mtk_xfrm_offload_cdrt_tear_down(xfrm_params);
	mtk_ddk_invalidate_rec((void *) xfrm_params->p_handle, true);
	crypto_free_sa((void *) xfrm_params->p_handle, 0);
}

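/*
 * Build the EIP197 transform record for this SA (tunnel or transport mode)
 * and point the CDRT entry at it.
 */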
static int mtk_xfrm_offload_context_setup(struct mtk_xfrm_params *xfrm_params)
{
	u32 *tr;
	int ret;

	switch (xfrm_params->xs->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TUNNEL);
		break;
	case XFRM_MODE_TRANSPORT:
		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TRANSPORT);
		break;
	default:
		ret = -ENOMEM;
		goto err_out;
	}

	if (!tr) {
		ret = -EINVAL;
		goto err_out;
	}

	xfrm_params->p_tr = tr;

	return mtk_xfrm_offload_cdrt_setup(xfrm_params);

err_out:
	return ret;
}

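/*
 * Outbound (tx) SA: allocate an encryption CDRT entry and set up the
 * transform record. No classifier rule is needed here, since the tx path
 * tags packets with the CDRT index directly in mtk_xfrm_offload_ok().
 */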
static int mtk_xfrm_offload_state_add_outbound(struct xfrm_state *xs,
					       struct mtk_xfrm_params *xfrm_params)
{
	int ret;

	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_ENCRYPT);
	if (IS_ERR(xfrm_params->cdrt))
		return PTR_ERR(xfrm_params->cdrt);

	xfrm_params->dir = SAB_DIRECTION_OUTBOUND;

	ret = mtk_xfrm_offload_context_setup(xfrm_params);
	if (ret)
		goto free_cdrt;

	return ret;

free_cdrt:
	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	return ret;
}

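/*
 * Inbound (rx) SA: allocate a decryption CDRT entry, set up the transform
 * record, then install the classifier rule that steers matching ESP packets
 * to the engine.
 */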
static int mtk_xfrm_offload_state_add_inbound(struct xfrm_state *xs,
					      struct mtk_xfrm_params *xfrm_params)
{
	int ret;

	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_DECRYPT);
	if (IS_ERR(xfrm_params->cdrt))
		return PTR_ERR(xfrm_params->cdrt);

	xfrm_params->dir = SAB_DIRECTION_INBOUND;

	ret = mtk_xfrm_offload_context_setup(xfrm_params);
	if (ret)
		goto free_cdrt;

	ret = mtk_xfrm_offload_cls_entry_setup(xfrm_params);
	if (ret)
		goto tear_down_context;

	return ret;

tear_down_context:
	mtk_xfrm_offload_context_tear_down(xfrm_params);

free_cdrt:
	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	return ret;
}

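/*
 * Entry point for adding an offloaded xfrm state (typically wired to the
 * driver's xdo_dev_state_add callback): validate that the state is an IPv4
 * ESP SA in tunnel or transport mode, allocate the per-SA context and
 * program the hardware for the rx or tx direction.
 */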
int mtk_xfrm_offload_state_add(struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;
	int ret = 0;

	/* TODO: maybe support IPv6 in the future? */
	if (xs->props.family != AF_INET) {
		CRYPTO_NOTICE("Only IPv4 xfrm states may be offloaded\n");
		return -EINVAL;
	}

	/* only support ESP right now */
	if (xs->id.proto != IPPROTO_ESP) {
		CRYPTO_NOTICE("Unsupported protocol 0x%04x\n", xs->id.proto);
		return -EINVAL;
	}

	/* only support tunnel mode or transport mode */
	if (!(xs->outer_mode.encap == XFRM_MODE_TUNNEL
	      || xs->outer_mode.encap == XFRM_MODE_TRANSPORT))
		return -EINVAL;

	xfrm_params = devm_kzalloc(crypto_dev,
				   sizeof(struct mtk_xfrm_params),
				   GFP_KERNEL);
	if (!xfrm_params)
		return -ENOMEM;

	xfrm_params->xs = xs;
	INIT_LIST_HEAD(&xfrm_params->node);

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
		/* rx path */
		ret = mtk_xfrm_offload_state_add_inbound(xs, xfrm_params);
	else
		/* tx path */
		ret = mtk_xfrm_offload_state_add_outbound(xs, xfrm_params);

	if (ret) {
		devm_kfree(crypto_dev, xfrm_params);
		goto out;
	}

	xs->xso.offload_handle = (unsigned long)xfrm_params;

	spin_lock_bh(&xfrm_params_list.lock);

	list_add_tail(&xfrm_params->node, &xfrm_params_list.list);

	spin_unlock_bh(&xfrm_params_list.lock);

out:
	return ret;
}

void mtk_xfrm_offload_state_delete(struct xfrm_state *xs)
{
}

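/*
 * Free an offloaded state: detach it from the global list, tear down the
 * classifier rule (inbound only), the CDRT entry and the EIP197 context,
 * then release the per-SA bookkeeping.
 */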
void mtk_xfrm_offload_state_free(struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;

	if (!xs->xso.offload_handle)
		return;

	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;
	xs->xso.offload_handle = 0;

	spin_lock_bh(&xfrm_params_list.lock);
	list_del(&xfrm_params->node);
	spin_unlock_bh(&xfrm_params_list.lock);

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
		mtk_xfrm_offload_cls_entry_tear_down(xfrm_params);

	if (xfrm_params->cdrt)
		mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	mtk_xfrm_offload_context_tear_down(xfrm_params);

	devm_kfree(crypto_dev, xfrm_params);
}

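/* Flush every offloaded state still on the list (e.g. at module teardown). */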
void mtk_xfrm_offload_state_tear_down(void)
{
	struct mtk_xfrm_params *xfrm_params, *tmp;

	spin_lock_bh(&xfrm_params_list.lock);

	list_for_each_entry_safe(xfrm_params, tmp, &xfrm_params_list.list, node)
		mtk_xfrm_offload_state_free(xfrm_params->xs);

	spin_unlock_bh(&xfrm_params_list.lock);
}

int mtk_xfrm_offload_policy_add(struct xfrm_policy *xp)
{
	return 0;
}

void mtk_xfrm_offload_policy_delete(struct xfrm_policy *xp)
{
}

void mtk_xfrm_offload_policy_free(struct xfrm_policy *xp)
{
#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
	foe_clear_crypto_entry(xp->selector);
	return;
#endif
}

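/*
 * Resolve the L2 neighbour for the SA's outer destination address. If no
 * neighbour entry exists yet, trigger resolution via __neigh_create() and
 * hand the skb to neigh_output(), then return NULL to tell the caller that
 * the packet has already been consumed.
 */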
static inline struct neighbour *mtk_crypto_find_dst_mac(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);

	neigh = __ipv4_neigh_lookup_noref(dst->dev, xs->id.daddr.a4);
	if (unlikely(!neigh)) {
		CRYPTO_INFO("%s: %s No neigh (daddr=%pI4)\n", __func__, dst->dev->name,
			    &xs->id.daddr.a4);
		neigh = __neigh_create(&arp_tbl, &xs->id.daddr.a4, dst->dev, false);
		neigh_output(neigh, skb, false);
		return NULL;
	}

	return neigh;
}

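/*
 * Tx-side hook: build an Ethernet header toward the resolved neighbour, tag
 * the skb with the SA's CDRT index (and, with HNAT enabled, try to bind the
 * flow), then transmit it directly so encryption is performed by the EIP197.
 */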
bool mtk_xfrm_offload_ok(struct sk_buff *skb,
			 struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	int fill_inner_info = 0;

	rcu_read_lock_bh();

	neigh = mtk_crypto_find_dst_mac(skb, xs);
	if (!neigh) {
		rcu_read_unlock_bh();
		return true;
	}

	/*
	 * For packets that have passed through a route-based VTI, the
	 * dev_queue_xmit() call at the network layer leaves both
	 * skb->mac_header and skb->network_header pointing at the IP header.
	 */
	if (skb->mac_header == skb->network_header)
		fill_inner_info = 1;

	skb_push(skb, sizeof(struct ethhdr));
	skb_reset_mac_header(skb);

	eth_hdr(skb)->h_proto = htons(ETH_P_IP);
	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, dst->dev->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
	skb_hnat_cdrt(skb) = xfrm_params->cdrt->idx;
	/*
	 * The EIP197 does not support fragmentation, so UDP flows are not
	 * bound: fragmented traffic would break once offloaded.
	 */
	if (ra_sw_nat_hook_tx &&
	    ((is_tops_tunnel(skb) || is_tcp(skb)) && is_hnat_rate_reach(skb)))
		hnat_bind_crypto_entry(skb, dst->dev, fill_inner_info);

	/* Set the magic tag for tport setting; it is reset to 0 after the tport is set */
	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
#else
	skb_tnl_cdrt(skb) = xfrm_params->cdrt->idx;
	skb_tnl_magic_tag(skb) = TNL_MAGIC_TAG;
#endif // HNAT

	/* Since we're going to tx directly, set skb->dev to dst->dev */
	skb->dev = dst->dev;

	/*
	 * The skb headroom may not be copied during segmentation, so we cannot
	 * rely on headroom data (e.g. the cdrt index) to decide which packets
	 * should be sent to the EIP197. As a workaround, only skbs with
	 * inner_protocol set to ESP are sent to the EIP197.
	 */
	skb->inner_protocol = IPPROTO_ESP;
	/*
	 * Tx the packet to the EIP197 directly. To avoid conflicts between the
	 * SW and HW sequence numbers, all offloadable packets are sent to the
	 * EIP197.
	 */
	dev_queue_xmit(skb);

	return true;
}