// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 MediaTek Inc.
 *
 * Author: Chris.Chou <chris.chou@mediatek.com>
 *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/bitops.h>

#include <mtk_eth_soc.h>

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
#include <mtk_hnat/hnat.h>
#include <mtk_hnat/nf_hnat_mtk.h>
#endif // HNAT

#include <pce/cdrt.h>
#include <pce/cls.h>
#include <pce/netsys.h>

#include <crypto-eip/ddk/configs/cs_hwpal_ext.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken_ext.h>

#include "crypto-eip/crypto-eip.h"
#include "crypto-eip/ddk-wrapper.h"
#include "crypto-eip/internal.h"

static LIST_HEAD(xfrm_params_head);

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);

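/*
 * TX fast-path helpers: HNAT may only bind flows that EIP197 can handle.
 * is_tops_tunnel() matches IPv4 UDP/GRE packets already marked as TOPS
 * tunnel traffic, is_tcp() matches plain IPv4 TCP, and is_hnat_rate_reach()
 * checks that the skb belongs to an unbound flow that has hit the binding
 * rate threshold (HIT_UNBIND_RATE_REACH).
 */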
static inline bool is_tops_tunnel(struct sk_buff *skb)
{
	return skb_hnat_tops(skb) && (ntohs(skb->protocol) == ETH_P_IP) &&
	       (ip_hdr(skb)->protocol == IPPROTO_UDP ||
		ip_hdr(skb)->protocol == IPPROTO_GRE);
}

static inline bool is_tcp(struct sk_buff *skb)
{
	return (ntohs(skb->protocol) == ETH_P_IP) && (ip_hdr(skb)->protocol == IPPROTO_TCP);
}

static inline bool is_hnat_rate_reach(struct sk_buff *skb)
{
	return is_magic_tag_valid(skb) && (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH);
}
#endif // HNAT

static void mtk_xfrm_offload_cdrt_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	memset(&xfrm_params->cdrt->desc, 0, sizeof(struct cdrt_desc));

	mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

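/*
 * Program the CDRT entry for this SA. desc1 carries the physical address of
 * the transform record built by the DDK; desc2 enables ESP pad insertion and
 * stripping. The numeric values (type, token_len, hw_srv and the low bits
 * OR'ed into p_tr) are EIP197/PCE hardware encodings taken as-is from the
 * vendor configuration.
 */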
static int mtk_xfrm_offload_cdrt_setup(struct mtk_xfrm_params *xfrm_params)
{
	struct cdrt_desc *cdesc = &xfrm_params->cdrt->desc;

	cdesc->desc1.common.type = 3;
	cdesc->desc1.token_len = 48;
	cdesc->desc1.p_tr[0] = __pa(xfrm_params->p_tr) | 2;

	cdesc->desc2.hw_srv = 3;
	cdesc->desc2.allow_pad = 1;
	cdesc->desc2.strip_pad = 1;

	return mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

static void mtk_xfrm_offload_cls_entry_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	memset(&xfrm_params->cdrt->cls->cdesc, 0, sizeof(struct cls_desc));

	mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);

	mtk_pce_cls_entry_free(xfrm_params->cdrt->cls);
}

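/*
 * Install a classifier (CLS) rule for an inbound SA: match IPv4 ESP packets
 * whose SPI equals this xfrm state's SPI, forward them to the PPE port, and
 * tag them with the CDRT index so the crypto engine can locate the transform
 * record for inline decryption.
 */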
static int mtk_xfrm_offload_cls_entry_setup(struct mtk_xfrm_params *xfrm_params)
{
	struct cls_desc *cdesc;

	xfrm_params->cdrt->cls = mtk_pce_cls_entry_alloc();
	if (IS_ERR(xfrm_params->cdrt->cls))
		return PTR_ERR(xfrm_params->cdrt->cls);

	cdesc = &xfrm_params->cdrt->cls->cdesc;

	if (mtk_crypto_ppe_get_num() == 1)
		CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
	else
		CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE1);
	CLS_DESC_DATA(cdesc, tport_idx, 0x2);
	CLS_DESC_DATA(cdesc, cdrt_idx, xfrm_params->cdrt->idx);

	CLS_DESC_MASK_DATA(cdesc, tag,
			   CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_HDR);
	CLS_DESC_MASK_DATA(cdesc, l4_udp_hdr_nez,
			   CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK,
			   CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK);
	CLS_DESC_MASK_DATA(cdesc, l4_type,
			   CLS_DESC_L4_TYPE_MASK, IPPROTO_ESP);
	CLS_DESC_MASK_DATA(cdesc, l4_valid,
			   0x3,
			   CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
			   CLS_DESC_VALID_LOWER_HALF_WORD_BIT);
	CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data,
			   0xFFFFFFFF, be32_to_cpu(xfrm_params->xs->id.spi));

	return mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);
}

static void mtk_xfrm_offload_context_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	mtk_xfrm_offload_cdrt_tear_down(xfrm_params);

	/* TODO: free context */
	devm_kfree(crypto_dev, xfrm_params->p_tr);

	/* TODO: transform record tear down */
}

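/*
 * Allocate the transform record for this SA, fill it via the DDK according
 * to the SA's encapsulation mode (tunnel or transport), then point the CDRT
 * entry at it.
 */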
static int mtk_xfrm_offload_context_setup(struct mtk_xfrm_params *xfrm_params)
{
	u32 *tr;
	int ret;

	xfrm_params->p_tr = devm_kcalloc(crypto_dev, sizeof(u32),
					 TRANSFORM_RECORD_LEN, GFP_KERNEL);
	if (unlikely(!xfrm_params->p_tr))
		return -ENOMEM;

	switch (xfrm_params->xs->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TUNNEL);
		break;
	case XFRM_MODE_TRANSPORT:
		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TRANSPORT);
		break;
	default:
		ret = -EINVAL;
		goto err_out;
	}

	if (!tr) {
		ret = -EINVAL;
		goto err_out;
	}

	memcpy(xfrm_params->p_tr, tr, sizeof(u32) * TRANSFORM_RECORD_LEN);

	/* TODO: free tr */

	return mtk_xfrm_offload_cdrt_setup(xfrm_params);

err_out:
	devm_kfree(crypto_dev, xfrm_params->p_tr);

	return ret;
}

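/* Outbound (encrypt) SA: reserve an encrypt CDRT entry and build its context. */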
static int mtk_xfrm_offload_state_add_outbound(struct xfrm_state *xs,
					       struct mtk_xfrm_params *xfrm_params)
{
	int ret;

	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_ENCRYPT);
	if (IS_ERR(xfrm_params->cdrt))
		return PTR_ERR(xfrm_params->cdrt);

	xfrm_params->dir = SAB_DIRECTION_OUTBOUND;

	ret = mtk_xfrm_offload_context_setup(xfrm_params);
	if (ret)
		goto free_cdrt;

	return ret;

free_cdrt:
	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	return ret;
}

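/*
 * Inbound (decrypt) SA: reserve a decrypt CDRT entry, build its context and
 * install the CLS rule that steers matching ESP packets to it.
 */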
static int mtk_xfrm_offload_state_add_inbound(struct xfrm_state *xs,
					      struct mtk_xfrm_params *xfrm_params)
{
	int ret;

	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_DECRYPT);
	if (IS_ERR(xfrm_params->cdrt))
		return PTR_ERR(xfrm_params->cdrt);

	xfrm_params->dir = SAB_DIRECTION_INBOUND;

	ret = mtk_xfrm_offload_context_setup(xfrm_params);
	if (ret)
		goto free_cdrt;

	ret = mtk_xfrm_offload_cls_entry_setup(xfrm_params);
	if (ret)
		goto tear_down_context;

	return ret;

tear_down_context:
	mtk_xfrm_offload_context_tear_down(xfrm_params);

free_cdrt:
	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	return ret;
}

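/*
 * Expected to back the xfrmdev_ops .xdo_dev_state_add hook (registered
 * elsewhere in the driver). Only IPv4 ESP in tunnel or transport mode is
 * offloaded; on success the mtk_xfrm_params context is stored in
 * xs->xso.offload_handle and tracked on xfrm_params_head.
 */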
int mtk_xfrm_offload_state_add(struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;
	int ret = 0;

	/* TODO: maybe support IPv6 in the future? */
	if (xs->props.family != AF_INET) {
		CRYPTO_NOTICE("Only IPv4 xfrm states may be offloaded\n");
		return -EINVAL;
	}

	/* only support ESP right now */
	if (xs->id.proto != IPPROTO_ESP) {
		CRYPTO_NOTICE("Unsupported protocol 0x%04x\n", xs->id.proto);
		return -EINVAL;
	}

	/* only support tunnel mode or transport mode */
	if (!(xs->outer_mode.encap == XFRM_MODE_TUNNEL ||
	      xs->outer_mode.encap == XFRM_MODE_TRANSPORT))
		return -EINVAL;

	xfrm_params = devm_kzalloc(crypto_dev,
				   sizeof(struct mtk_xfrm_params),
				   GFP_KERNEL);
	if (!xfrm_params)
		return -ENOMEM;

	xfrm_params->xs = xs;
	INIT_LIST_HEAD(&xfrm_params->node);

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
		/* rx path */
		ret = mtk_xfrm_offload_state_add_inbound(xs, xfrm_params);
	else
		/* tx path */
		ret = mtk_xfrm_offload_state_add_outbound(xs, xfrm_params);

	if (ret) {
		devm_kfree(crypto_dev, xfrm_params);
		goto out;
	}

	xs->xso.offload_handle = (unsigned long)xfrm_params;
	list_add_tail(&xfrm_params->node, &xfrm_params_head);
out:
	return ret;
}

void mtk_xfrm_offload_state_delete(struct xfrm_state *xs)
{
}

void mtk_xfrm_offload_state_free(struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;

	if (!xs->xso.offload_handle)
		return;

	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;

	list_del(&xfrm_params->node);

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
		mtk_xfrm_offload_cls_entry_tear_down(xfrm_params);

	mtk_xfrm_offload_context_tear_down(xfrm_params);

	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	devm_kfree(crypto_dev, xfrm_params);
}

void mtk_xfrm_offload_state_tear_down(void)
{
	struct mtk_xfrm_params *xfrm_params, *tmp;

	list_for_each_entry_safe(xfrm_params, tmp, &xfrm_params_head, node)
		mtk_xfrm_offload_state_free(xfrm_params->xs);
}

int mtk_xfrm_offload_policy_add(struct xfrm_policy *xp)
{
	return 0;
}

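/*
 * Resolve the neighbour entry for the SA's outer destination. If none exists
 * yet, create one and hand the skb to neigh_output() to trigger resolution,
 * then return NULL so the caller leaves the packet to that path.
 * Must be called under rcu_read_lock_bh().
 */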
static inline struct neighbour *mtk_crypto_find_dst_mac(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);

	neigh = __ipv4_neigh_lookup_noref(dst->dev, xs->id.daddr.a4);
	if (unlikely(!neigh)) {
		CRYPTO_INFO("%s: %s No neigh (daddr=%pI4)\n", __func__, dst->dev->name,
			    &xs->id.daddr.a4);
		neigh = __neigh_create(&arp_tbl, &xs->id.daddr.a4, dst->dev, false);
		if (!IS_ERR(neigh))
			neigh_output(neigh, skb, false);
		return NULL;
	}

	return neigh;
}

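/*
 * Expected to back the xfrmdev_ops .xdo_dev_offload_ok hook. Instead of only
 * validating the packet, this path builds the Ethernet header itself, tags
 * the skb with the CDRT index (and optionally binds an HNAT entry), and
 * transmits it directly to EIP197 via dev_queue_xmit() so that hardware owns
 * the ESP sequence numbers.
 */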
bool mtk_xfrm_offload_ok(struct sk_buff *skb,
			 struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	int fill_inner_info = 0;

	rcu_read_lock_bh();

	neigh = mtk_crypto_find_dst_mac(skb, xs);
	if (!neigh) {
		rcu_read_unlock_bh();
		return true;
	}

	/*
	 * For packets that have passed through a VTI (route-based VTI),
	 * dev_queue_xmit() called at the network layer leaves both
	 * skb->mac_header and skb->network_header pointing to the IP header.
	 */
	if (skb->mac_header == skb->network_header)
		fill_inner_info = 1;

	skb_push(skb, sizeof(struct ethhdr));
	skb_reset_mac_header(skb);

	eth_hdr(skb)->h_proto = htons(ETH_P_IP);
	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, dst->dev->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
	skb_hnat_cdrt(skb) = xfrm_params->cdrt->idx;
	/*
	 * EIP197 does not support fragmentation. As a result, we cannot bind a
	 * UDP flow, since it may break the traffic once packets get fragmented.
	 */
	if (ra_sw_nat_hook_tx &&
	    ((is_tops_tunnel(skb) || is_tcp(skb)) && is_hnat_rate_reach(skb)))
		hnat_bind_crypto_entry(skb, dst->dev, fill_inner_info);

	/* Set the magic tag for tport setting; it is reset to 0 after the tport is set */
	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
#else
	skb_tnl_cdrt(skb) = xfrm_params->cdrt->idx;
	skb_tnl_magic_tag(skb) = TNL_MAGIC_TAG;
#endif // HNAT

	/* Since we're going to tx directly, set skb->dev to dst->dev */
	skb->dev = dst->dev;

	/*
	 * Since the skb headroom may not be copied during segmentation, we cannot
	 * rely on headroom data (e.g. cdrt) to decide whether a packet should be
	 * sent to EIP197. As a workaround, only skbs with inner_protocol = ESP
	 * are sent to EIP197.
	 */
	skb->inner_protocol = IPPROTO_ESP;
	/*
	 * Tx the packet to EIP197.
	 * To avoid conflicts between SW and HW sequence numbers,
	 * all offloadable packets are sent to EIP197.
	 */
	dev_queue_xmit(skb);

	return true;
}