// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 MediaTek Inc.
 *
 * Author: Chris.Chou <chris.chou@mediatek.com>
 *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/bitops.h>

#include <mtk_eth_soc.h>
#include <mtk_hnat/hnat.h>
#include <mtk_hnat/nf_hnat_mtk.h>

#include <pce/cdrt.h>
#include <pce/cls.h>
#include <pce/netsys.h>

#include <crypto-eip/ddk/configs/cs_hwpal_ext.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken.h>
#include <crypto-eip/ddk/kit/iotoken/iotoken_ext.h>

#include "crypto-eip/crypto-eip.h"
#include "crypto-eip/ddk-wrapper.h"
#include "crypto-eip/internal.h"

static LIST_HEAD(xfrm_params_head);

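/*
 * Helpers used on the transmit path to decide whether a packet that is
 * about to be pushed through the EIP197 can also be bound as an HNAT
 * flow entry (see mtk_xfrm_offload_ok()).
 */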
static inline bool is_tops_udp_tunnel(struct sk_buff *skb)
{
	return skb_hnat_tops(skb) && (ntohs(skb->protocol) == ETH_P_IP) &&
	       (ip_hdr(skb)->protocol == IPPROTO_UDP);
}

static inline bool is_tcp(struct sk_buff *skb)
{
	return (ntohs(skb->protocol) == ETH_P_IP) && (ip_hdr(skb)->protocol == IPPROTO_TCP);
}

static inline bool is_hnat_rate_reach(struct sk_buff *skb)
{
	return is_magic_tag_valid(skb) && (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH);
}

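/*
 * Clear this state's CDRT descriptor and write the zeroed entry back
 * through the PCE driver.
 */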
static void mtk_xfrm_offload_cdrt_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	memset(&xfrm_params->cdrt->desc, 0, sizeof(struct cdrt_desc));

	mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

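/*
 * Populate the CDRT descriptor for this state: desc1.p_tr[0] carries the
 * physical address of the transform record built by the DDK, the other
 * fields are fixed descriptor settings, and the result is written out
 * through the PCE driver.
 */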
static int mtk_xfrm_offload_cdrt_setup(struct mtk_xfrm_params *xfrm_params)
{
	struct cdrt_desc *cdesc = &xfrm_params->cdrt->desc;

	cdesc->desc1.common.type = 3;
	cdesc->desc1.token_len = 48;
	cdesc->desc1.p_tr[0] = __pa(xfrm_params->p_tr) | 2;

	cdesc->desc2.hw_srv = 2;
	cdesc->desc2.allow_pad = 1;
	cdesc->desc2.strip_pad = 1;

	return mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
}

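/* Clear and release the classifier entry used by an inbound state. */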
static void mtk_xfrm_offload_cls_entry_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	memset(&xfrm_params->cdrt->cls->cdesc, 0, sizeof(struct cls_desc));

	mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);

	mtk_pce_cls_entry_free(xfrm_params->cdrt->cls);
}

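/*
 * Install a classifier rule for an inbound state: match ESP packets whose
 * SPI equals this state's SPI and forward them to the PPE with the
 * state's CDRT index attached, so the hardware knows which transform
 * record to apply.
 */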
static int mtk_xfrm_offload_cls_entry_setup(struct mtk_xfrm_params *xfrm_params)
{
	struct cls_desc *cdesc;

	xfrm_params->cdrt->cls = mtk_pce_cls_entry_alloc();
	if (IS_ERR(xfrm_params->cdrt->cls))
		return PTR_ERR(xfrm_params->cdrt->cls);

	cdesc = &xfrm_params->cdrt->cls->cdesc;

	if (mtk_crypto_ppe_get_num() == 1)
		CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
	else
		CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE1);
	CLS_DESC_DATA(cdesc, tport_idx, 0x2);
	CLS_DESC_DATA(cdesc, cdrt_idx, xfrm_params->cdrt->idx);

	CLS_DESC_MASK_DATA(cdesc, tag,
			   CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_HDR);
	CLS_DESC_MASK_DATA(cdesc, l4_udp_hdr_nez,
			   CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK,
			   CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK);
	CLS_DESC_MASK_DATA(cdesc, l4_type,
			   CLS_DESC_L4_TYPE_MASK, IPPROTO_ESP);
	CLS_DESC_MASK_DATA(cdesc, l4_valid,
			   0x3,
			   CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
			   CLS_DESC_VALID_LOWER_HALF_WORD_BIT);
	CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data,
			   0xFFFFFFFF, be32_to_cpu(xfrm_params->xs->id.spi));

	return mtk_pce_cls_entry_write(xfrm_params->cdrt->cls);
}

static void mtk_xfrm_offload_context_tear_down(struct mtk_xfrm_params *xfrm_params)
{
	mtk_xfrm_offload_cdrt_tear_down(xfrm_params);

	/* TODO: free context */
	devm_kfree(crypto_dev, xfrm_params->p_tr);

	/* TODO: transform record tear down */
}

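/*
 * Allocate this state's transform record, let the DDK build it for the
 * requested IPsec mode (tunnel or transport), then install the matching
 * CDRT entry.
 */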
static int mtk_xfrm_offload_context_setup(struct mtk_xfrm_params *xfrm_params)
{
	u32 *tr;
	int ret;

	xfrm_params->p_tr = devm_kcalloc(crypto_dev, TRANSFORM_RECORD_LEN,
					 sizeof(u32), GFP_KERNEL);
	if (unlikely(!xfrm_params->p_tr))
		return -ENOMEM;

	switch (xfrm_params->xs->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TUNNEL);
		break;
	case XFRM_MODE_TRANSPORT:
		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TRANSPORT);
		break;
	default:
		ret = -EINVAL;
		goto err_out;
	}

	if (!tr) {
		ret = -EINVAL;
		goto err_out;
	}

	memcpy(xfrm_params->p_tr, tr, sizeof(u32) * TRANSFORM_RECORD_LEN);

	/* TODO: free tr */

	return mtk_xfrm_offload_cdrt_setup(xfrm_params);

err_out:
	devm_kfree(crypto_dev, xfrm_params->p_tr);

	return ret;
}

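/*
 * An outbound (encrypt) state needs a CDRT entry plus a transform record.
 * An inbound (decrypt) state additionally needs a classifier rule that
 * matches ingress ESP packets by SPI.
 */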
static int mtk_xfrm_offload_state_add_outbound(struct xfrm_state *xs,
					       struct mtk_xfrm_params *xfrm_params)
{
	int ret;

	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_ENCRYPT);
	if (IS_ERR(xfrm_params->cdrt))
		return PTR_ERR(xfrm_params->cdrt);

	xfrm_params->dir = SAB_DIRECTION_OUTBOUND;

	ret = mtk_xfrm_offload_context_setup(xfrm_params);
	if (ret)
		goto free_cdrt;

	return ret;

free_cdrt:
	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	return ret;
}

static int mtk_xfrm_offload_state_add_inbound(struct xfrm_state *xs,
					      struct mtk_xfrm_params *xfrm_params)
{
	int ret;

	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_DECRYPT);
	if (IS_ERR(xfrm_params->cdrt))
		return PTR_ERR(xfrm_params->cdrt);

	xfrm_params->dir = SAB_DIRECTION_INBOUND;

	ret = mtk_xfrm_offload_context_setup(xfrm_params);
	if (ret)
		goto free_cdrt;

	ret = mtk_xfrm_offload_cls_entry_setup(xfrm_params);
	if (ret)
		goto tear_down_context;

	return ret;

tear_down_context:
	mtk_xfrm_offload_context_tear_down(xfrm_params);

free_cdrt:
	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	return ret;
}

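/*
 * Add an xfrm state for hardware offload. Only IPv4 ESP in tunnel or
 * transport mode is accepted; on success the state is linked to its
 * mtk_xfrm_params via xso.offload_handle and tracked on xfrm_params_head.
 */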
int mtk_xfrm_offload_state_add(struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;
	int ret = 0;

	/* TODO: maybe support IPv6 in the future? */
	if (xs->props.family != AF_INET) {
		CRYPTO_NOTICE("Only IPv4 xfrm states may be offloaded\n");
		return -EINVAL;
	}

	/* only support ESP right now */
	if (xs->id.proto != IPPROTO_ESP) {
		CRYPTO_NOTICE("Unsupported protocol 0x%04x\n", xs->id.proto);
		return -EINVAL;
	}

	/* only support tunnel mode or transport mode */
	if (xs->outer_mode.encap != XFRM_MODE_TUNNEL &&
	    xs->outer_mode.encap != XFRM_MODE_TRANSPORT)
		return -EINVAL;

	xfrm_params = devm_kzalloc(crypto_dev,
				   sizeof(struct mtk_xfrm_params),
				   GFP_KERNEL);
	if (!xfrm_params)
		return -ENOMEM;

	xfrm_params->xs = xs;
	INIT_LIST_HEAD(&xfrm_params->node);

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
		/* rx path */
		ret = mtk_xfrm_offload_state_add_inbound(xs, xfrm_params);
	else
		/* tx path */
		ret = mtk_xfrm_offload_state_add_outbound(xs, xfrm_params);

	if (ret) {
		devm_kfree(crypto_dev, xfrm_params);
		goto out;
	}

	xs->xso.offload_handle = (unsigned long)xfrm_params;
	list_add_tail(&xfrm_params->node, &xfrm_params_head);
out:
	return ret;
}

void mtk_xfrm_offload_state_delete(struct xfrm_state *xs)
{
}

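/* Release everything mtk_xfrm_offload_state_add() set up for this state. */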
void mtk_xfrm_offload_state_free(struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;

	if (!xs->xso.offload_handle)
		return;

	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;

	list_del(&xfrm_params->node);

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
		mtk_xfrm_offload_cls_entry_tear_down(xfrm_params);

	mtk_xfrm_offload_context_tear_down(xfrm_params);

	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);

	devm_kfree(crypto_dev, xfrm_params);
}

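/* Free every offloaded state that is still on the list. */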
void mtk_xfrm_offload_state_tear_down(void)
{
	struct mtk_xfrm_params *xfrm_params, *tmp;

	list_for_each_entry_safe(xfrm_params, tmp, &xfrm_params_head, node)
		mtk_xfrm_offload_state_free(xfrm_params->xs);
}

int mtk_xfrm_offload_policy_add(struct xfrm_policy *xp)
{
	return 0;
}

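/*
 * Resolve the neighbour for the ESP destination so the Ethernet header
 * can be filled in before the packet is queued on the outgoing device.
 * If no neighbour exists yet, create one and hand the skb to
 * neigh_output() to trigger resolution, then return NULL so the caller
 * does not transmit it a second time.
 */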
static inline struct neighbour *mtk_crypto_find_dst_mac(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct neighbour *neigh;
	u32 nexthop;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;

	nexthop = (__force u32)rt_nexthop(rt, xs->id.daddr.a4);
	neigh = __ipv4_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh)) {
		CRYPTO_INFO("%s: %s No neigh (daddr=%pI4)\n", __func__, dst->dev->name,
			    &xs->id.daddr.a4);
		neigh = __neigh_create(&arp_tbl, &xs->id.daddr.a4, dst->dev, false);
		neigh_output(neigh, skb, false);
		return NULL;
	}

	return neigh;
}

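/*
 * Called for each packet on an offloaded state: build the Ethernet header
 * from the resolved neighbour, tag the skb with the state's CDRT index,
 * bind an HNAT flow entry when the packet qualifies, and transmit the
 * packet directly on the outgoing device so the EIP197 handles the ESP
 * transformation in-line.
 */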
bool mtk_xfrm_offload_ok(struct sk_buff *skb,
			 struct xfrm_state *xs)
{
	struct mtk_xfrm_params *xfrm_params;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);

	rcu_read_lock_bh();

	neigh = mtk_crypto_find_dst_mac(skb, xs);
	if (!neigh) {
		rcu_read_unlock_bh();
		return true;
	}

	skb_push(skb, sizeof(struct ethhdr));
	skb_reset_mac_header(skb);

	eth_hdr(skb)->h_proto = htons(ETH_P_IP);
	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, dst->dev->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;
	skb_hnat_cdrt(skb) = xfrm_params->cdrt->idx;

	/*
	 * The EIP197 does not support fragmentation, so we cannot bind UDP
	 * flows: a bound flow might later require fragmentation and break
	 * the connection.
	 */
	if ((is_tops_udp_tunnel(skb) || is_tcp(skb)) && is_hnat_rate_reach(skb))
		hnat_bind_crypto_entry(skb, dst->dev);

	/* Since we are going to transmit directly, set skb->dev to dst->dev */
	skb->dev = dst->dev;
	/* Set the magic tag for tport setup; it is reset to 0 once the tport is set */
	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

	/*
	 * The skb headroom may not be copied during segmentation, so we
	 * cannot rely on headroom data (e.g. cdrt) to decide whether a
	 * packet should be sent to the EIP197. As a workaround, only skbs
	 * with inner_protocol set to ESP are sent to the EIP197.
	 */
	skb->inner_protocol = IPPROTO_ESP;
	/*
	 * Transmit the packet to the EIP197. To avoid conflicts between the
	 * software and hardware sequence numbers, all offloadable packets
	 * are sent to the EIP197.
	 */
	dev_queue_xmit(skb);

	return true;
}