// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 MediaTek Inc.
 *
 * Author: Chris.Chou <chris.chou@mediatek.com>
 *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/bitops.h>

#include <mtk_eth_soc.h>

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
#include <mtk_hnat/hnat.h>
#include <mtk_hnat/nf_hnat_mtk.h>
#endif // HNAT

#include <pce/cdrt.h>
#include <pce/cls.h>
#include <pce/netsys.h>

#include <crypto-eip/ddk/configs/cs_hwpal_ext.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken_ext.h>

#include "crypto-eip/crypto-eip.h"
#include "crypto-eip/ddk-wrapper.h"
#include "crypto-eip/internal.h"

static LIST_HEAD(xfrm_params_head);

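/*
 * Helper predicates used by the tx path below to decide whether a flow
 * can additionally be bound into the HNAT hardware forwarding path.
 */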
#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no);

static inline bool is_tops_udp_tunnel(struct sk_buff *skb)
{
        return skb_hnat_tops(skb) && (ntohs(skb->protocol) == ETH_P_IP) &&
               (ip_hdr(skb)->protocol == IPPROTO_UDP);
}

static inline bool is_tcp(struct sk_buff *skb)
{
        return (ntohs(skb->protocol) == ETH_P_IP) && (ip_hdr(skb)->protocol == IPPROTO_TCP);
}

static inline bool is_hnat_rate_reach(struct sk_buff *skb)
{
        return is_magic_tag_valid(skb) && (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH);
}
#endif // HNAT

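/* Clear the CDRT descriptor and write the zeroed entry back to the PCE. */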
static void mtk_xfrm_offload_cdrt_tear_down(struct mtk_xfrm_params *xfrm_params)
{
        memset(&xfrm_params->cdrt->desc, 0, sizeof(struct cdrt_desc));

        mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

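/*
 * Point the CDRT descriptor at the physical address of this state's
 * transform record and program the entry into the PCE.
 */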
static int mtk_xfrm_offload_cdrt_setup(struct mtk_xfrm_params *xfrm_params)
{
        struct cdrt_desc *cdesc = &xfrm_params->cdrt->desc;

        cdesc->desc1.common.type = 3;
        cdesc->desc1.token_len = 48;
        cdesc->desc1.p_tr[0] = __pa(xfrm_params->p_tr) | 2;

        cdesc->desc2.hw_srv = 2;
        cdesc->desc2.allow_pad = 1;
        cdesc->desc2.strip_pad = 1;

        return mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

static void mtk_xfrm_offload_cls_entry_tear_down(struct mtk_xfrm_params *xfrm_params)
{
        memset(&xfrm_params->cdrt->cls->cdesc, 0, sizeof(struct cls_desc));

        mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);

        mtk_pce_cls_entry_free(xfrm_params->cdrt->cls);
}

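/*
 * Install a classifier (CLS) entry that matches inbound ESP packets on
 * this state's SPI and steers them to the PPE together with the CDRT index.
 */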
static int mtk_xfrm_offload_cls_entry_setup(struct mtk_xfrm_params *xfrm_params)
{
        struct cls_desc *cdesc;

        xfrm_params->cdrt->cls = mtk_pce_cls_entry_alloc();
        if (IS_ERR(xfrm_params->cdrt->cls))
                return PTR_ERR(xfrm_params->cdrt->cls);

        cdesc = &xfrm_params->cdrt->cls->cdesc;

        if (mtk_crypto_ppe_get_num() == 1)
                CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
        else
                CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE1);
        CLS_DESC_DATA(cdesc, tport_idx, 0x2);
        CLS_DESC_DATA(cdesc, cdrt_idx, xfrm_params->cdrt->idx);

        CLS_DESC_MASK_DATA(cdesc, tag,
                           CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_HDR);
        CLS_DESC_MASK_DATA(cdesc, l4_udp_hdr_nez,
                           CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK,
                           CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK);
        CLS_DESC_MASK_DATA(cdesc, l4_type,
                           CLS_DESC_L4_TYPE_MASK, IPPROTO_ESP);
        CLS_DESC_MASK_DATA(cdesc, l4_valid,
                           0x3,
                           CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
                           CLS_DESC_VALID_LOWER_HALF_WORD_BIT);
        CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data,
                           0xFFFFFFFF, be32_to_cpu(xfrm_params->xs->id.spi));

        return mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);
}

static void mtk_xfrm_offload_context_tear_down(struct mtk_xfrm_params *xfrm_params)
{
        mtk_xfrm_offload_cdrt_tear_down(xfrm_params);

        /* TODO: free context */
        devm_kfree(crypto_dev, xfrm_params->p_tr);

        /* TODO: transform record tear down */
}

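/*
 * Allocate the transform record, build it through the DDK for tunnel or
 * transport mode, then program the CDRT entry that references it.
 */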
static int mtk_xfrm_offload_context_setup(struct mtk_xfrm_params *xfrm_params)
{
        u32 *tr;
        int ret;

        xfrm_params->p_tr = devm_kcalloc(crypto_dev, TRANSFORM_RECORD_LEN,
                                         sizeof(u32), GFP_KERNEL);
        if (unlikely(!xfrm_params->p_tr))
                return -ENOMEM;

        switch (xfrm_params->xs->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TUNNEL);
                break;
        case XFRM_MODE_TRANSPORT:
                tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TRANSPORT);
                break;
        default:
                ret = -EINVAL;
                goto err_out;
        }

        if (!tr) {
                ret = -EINVAL;
                goto err_out;
        }

        memcpy(xfrm_params->p_tr, tr, sizeof(u32) * TRANSFORM_RECORD_LEN);

        /* TODO: free tr */

        return mtk_xfrm_offload_cdrt_setup(xfrm_params);

err_out:
        devm_kfree(crypto_dev, xfrm_params->p_tr);

        return ret;
}

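/* Outbound (tx) states need an encrypt CDRT entry and a transform record. */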
static int mtk_xfrm_offload_state_add_outbound(struct xfrm_state *xs,
                                               struct mtk_xfrm_params *xfrm_params)
{
        int ret;

        xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_ENCRYPT);
        if (IS_ERR(xfrm_params->cdrt))
                return PTR_ERR(xfrm_params->cdrt);

        xfrm_params->dir = SAB_DIRECTION_OUTBOUND;

        ret = mtk_xfrm_offload_context_setup(xfrm_params);
        if (ret)
                goto free_cdrt;

        return ret;

free_cdrt:
        mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

        return ret;
}

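/*
 * Inbound (rx) states additionally need a CLS entry so that ingress ESP
 * packets can be matched by SPI and handed to the decrypt CDRT.
 */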
static int mtk_xfrm_offload_state_add_inbound(struct xfrm_state *xs,
                                              struct mtk_xfrm_params *xfrm_params)
{
        int ret;

        xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_DECRYPT);
        if (IS_ERR(xfrm_params->cdrt))
                return PTR_ERR(xfrm_params->cdrt);

        xfrm_params->dir = SAB_DIRECTION_INBOUND;

        ret = mtk_xfrm_offload_context_setup(xfrm_params);
        if (ret)
                goto free_cdrt;

        ret = mtk_xfrm_offload_cls_entry_setup(xfrm_params);
        if (ret)
                goto tear_down_context;

        return ret;

tear_down_context:
        mtk_xfrm_offload_context_tear_down(xfrm_params);

free_cdrt:
        mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

        return ret;
}

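/*
 * xfrm state add callback: only IPv4 ESP in tunnel or transport mode is
 * offloadable. The state is wrapped in an mtk_xfrm_params and tracked on
 * xfrm_params_head so it can be torn down later.
 */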
int mtk_xfrm_offload_state_add(struct xfrm_state *xs)
{
        struct mtk_xfrm_params *xfrm_params;
        int ret = 0;

        /* TODO: maybe support IPv6 in the future? */
        if (xs->props.family != AF_INET) {
                CRYPTO_NOTICE("Only IPv4 xfrm states may be offloaded\n");
                return -EINVAL;
        }

        /* only support ESP right now */
        if (xs->id.proto != IPPROTO_ESP) {
                CRYPTO_NOTICE("Unsupported protocol 0x%04x\n", xs->id.proto);
                return -EINVAL;
        }

        /* only support tunnel mode or transport mode */
        if (!(xs->outer_mode.encap == XFRM_MODE_TUNNEL ||
              xs->outer_mode.encap == XFRM_MODE_TRANSPORT))
                return -EINVAL;

        xfrm_params = devm_kzalloc(crypto_dev,
                                   sizeof(struct mtk_xfrm_params),
                                   GFP_KERNEL);
        if (!xfrm_params)
                return -ENOMEM;

        xfrm_params->xs = xs;
        INIT_LIST_HEAD(&xfrm_params->node);

        if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
                /* rx path */
                ret = mtk_xfrm_offload_state_add_inbound(xs, xfrm_params);
        else
                /* tx path */
                ret = mtk_xfrm_offload_state_add_outbound(xs, xfrm_params);

        if (ret) {
                devm_kfree(crypto_dev, xfrm_params);
                goto out;
        }

        xs->xso.offload_handle = (unsigned long)xfrm_params;
        list_add_tail(&xfrm_params->node, &xfrm_params_head);
out:
        return ret;
}

void mtk_xfrm_offload_state_delete(struct xfrm_state *xs)
{
}

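/*
 * Undo mtk_xfrm_offload_state_add(): unlink the state, tear down the CLS
 * entry (inbound only), the context and the CDRT entry, then free the
 * bookkeeping structure.
 */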
void mtk_xfrm_offload_state_free(struct xfrm_state *xs)
{
        struct mtk_xfrm_params *xfrm_params;

        if (!xs->xso.offload_handle)
                return;

        xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;

        list_del(&xfrm_params->node);

        if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
                mtk_xfrm_offload_cls_entry_tear_down(xfrm_params);

        mtk_xfrm_offload_context_tear_down(xfrm_params);

        mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

        devm_kfree(crypto_dev, xfrm_params);
}

void mtk_xfrm_offload_state_tear_down(void)
{
        struct mtk_xfrm_params *xfrm_params, *tmp;

        list_for_each_entry_safe(xfrm_params, tmp, &xfrm_params_head, node)
                mtk_xfrm_offload_state_free(xfrm_params->xs);
}

int mtk_xfrm_offload_policy_add(struct xfrm_policy *xp)
{
        return 0;
}

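/*
 * Look up the neighbour entry for the SA's outer destination address.
 * On a miss, create one and hand the skb to neigh_output() to trigger
 * resolution, returning NULL to signal that no usable neighbour exists yet.
 */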
static inline struct neighbour *mtk_crypto_find_dst_mac(struct sk_buff *skb, struct xfrm_state *xs)
{
        struct neighbour *neigh;
        struct dst_entry *dst = skb_dst(skb);

        neigh = __ipv4_neigh_lookup_noref(dst->dev, xs->id.daddr.a4);
        if (unlikely(!neigh)) {
                CRYPTO_INFO("%s: %s No neigh (daddr=%pI4)\n", __func__, dst->dev->name,
                            &xs->id.daddr.a4);
                neigh = __neigh_create(&arp_tbl, &xs->id.daddr.a4, dst->dev, false);
                if (IS_ERR(neigh))
                        return NULL;
                neigh_output(neigh, skb, false);
                return NULL;
        }

        return neigh;
}

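/*
 * Called for each packet that may be offloaded: build the Ethernet header
 * from the neighbour entry, tag the skb with this state's CDRT index,
 * optionally bind an HNAT entry for TCP/TOPS-UDP flows, and transmit the
 * packet directly so the EIP197 applies the ESP transform.
 */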
bool mtk_xfrm_offload_ok(struct sk_buff *skb,
                         struct xfrm_state *xs)
{
        struct mtk_xfrm_params *xfrm_params;
        struct neighbour *neigh;
        struct dst_entry *dst = skb_dst(skb);
        int fill_inner_info = 0;

        rcu_read_lock_bh();

        neigh = mtk_crypto_find_dst_mac(skb, xs);
        if (!neigh) {
                rcu_read_unlock_bh();
                return true;
        }

        /*
         * For packets that have passed through a VTI (route-based VTI),
         * the dev_queue_xmit() call at the network layer leaves both
         * skb->mac_header and skb->network_header pointing at the IP header.
         */
        if (skb->mac_header == skb->network_header)
                fill_inner_info = 1;

        skb_push(skb, sizeof(struct ethhdr));
        skb_reset_mac_header(skb);

        eth_hdr(skb)->h_proto = htons(ETH_P_IP);
        memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
        memcpy(eth_hdr(skb)->h_source, dst->dev->dev_addr, ETH_ALEN);

        rcu_read_unlock_bh();

        xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;

#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT)
        skb_hnat_cdrt(skb) = xfrm_params->cdrt->idx;
        /*
         * The EIP197 does not support fragmentation. As a result, we cannot
         * bind UDP flows, since fragmented traffic would break the connection.
         */
        if (ra_sw_nat_hook_tx &&
            ((is_tops_udp_tunnel(skb) || is_tcp(skb)) && is_hnat_rate_reach(skb)))
                hnat_bind_crypto_entry(skb, dst->dev, fill_inner_info);

        /* Set magic tag for tport setting; reset to 0 after the tport is set */
        skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
#else
        skb_tnl_cdrt(skb) = xfrm_params->cdrt->idx;
        skb_tnl_magic_tag(skb) = TNL_MAGIC_TAG;
#endif // HNAT

        /* Since we are going to tx directly, set skb->dev to dst->dev */
        skb->dev = dst->dev;

        /*
         * Since the skb headroom may not be copied during segmentation, we
         * cannot rely on headroom data (e.g. cdrt) to decide whether a packet
         * should go to the EIP197. As a workaround, only skbs with
         * inner_protocol == ESP are sent to the EIP197.
         */
        skb->inner_protocol = IPPROTO_ESP;
        /*
         * Tx the packet to the EIP197. To avoid conflicts between SW and HW
         * sequence numbers, all offloadable packets are sent to the EIP197.
         */
        dev_queue_xmit(skb);

        return true;
}